repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit
|
python
|
def visit(self, node, **kwargs):
    """
    Visit a node and extract useful information from it.

    Behaves like the standard ``NodeVisitor.visit`` except that the
    ``containingNodes`` context (the stack of enclosing scopes) is
    threaded through to whichever type-specific handler runs, falling
    back to ``generic_visit`` when no ``visit_<Type>`` handler exists.
    """
    context = kwargs.get('containingNodes', [])
    handler = getattr(self, 'visit_' + type(node).__name__, self.generic_visit)
    return handler(node, containingNodes=context)
|
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L570-L582
| null |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.

    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use. They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    __indentRE = regexpCompile(r'^(\s*)\S')
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    __blanklineRE = regexpCompile(r'^\s*$')
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # Map of Doxygen tags to the docstring headers they replace
    # (e.g. "Note:" becomes " @note ").
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)

    def __init__(self, lines, options, inFilename):
        """Initialize a few class variables in preparation for our walk."""
        self.lines = lines            # source lines, edited in place as we walk
        self.options = options        # parsed command-line options object
        self.inFilename = inFilename  # name of the file being processed
        self.docLines = []            # working buffer for the current docstring

    @staticmethod
    def _stripOutAnds(inStr):
        """Takes a string and returns the same without ands or ampersands."""
        assert isinstance(inStr, str)
        return inStr.replace(' and ', ' ').replace(' & ', ' ')

    @staticmethod
    def _endCodeIfNeeded(line, inCodeBlock):
        """Simple routine to append end code marker if needed."""
        assert isinstance(line, str)
        if inCodeBlock:
            line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
            inCodeBlock = False
        return line, inCodeBlock

    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.

        Runs as a coroutine: each send delivers (line, lines, lineNum).
        Ambiguous lines cause additional yields so that more context can
        be accumulated before deciding. The shared one-element list
        inCodeBlockObj carries the in-code-block state back to the caller,
        and @code/@endcode markers are appended to `lines` in place.
        """
        while True:
            line, lines, lineNum = (yield)
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            lineOfCode = None
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        # codeop returns a code object for a complete
                        # statement, None for an incomplete one.
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                # Accumulate the line and try compiling again.
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
            currentLineNum = lineNum - testLineNum
            # Open or close a Doxygen code block when the verdict flips.
            if not inCodeBlockObj[0] and lineOfCode:
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )

    @coroutine
    def __alterDocstring(self, tail='', writer=None):
        """
        Runs eternally, processing docstring lines.

        Parses docstring lines as they get fed in via send, applies appropriate
        Doxygen tags, and passes them along in batches for writing.
        """
        assert isinstance(tail, str) and isinstance(writer, GeneratorType)
        lines = []
        timeToSend = False
        inCodeBlock = False
        # Shared mutable flag so _checkIfCode can update our code-block state.
        inCodeBlockObj = [False]
        inSection = False
        prefix = ''
        firstLineNum = -1
        sectionHeadingIndent = 0
        codeChecker = self._checkIfCode(inCodeBlockObj)
        while True:
            lineNum, line = (yield)
            if firstLineNum < 0:
                firstLineNum = lineNum
            # Don't bother doing extra work if it's a sentinel.
            if line is not None:
                # Also limit work if we're not parsing the docstring.
                if self.options.autobrief:
                    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                        match = tagRE.search(line)
                        if match:
                            # We've got a simple one-line Doxygen command
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            writer.send((firstLineNum, lineNum - 1, lines))
                            lines = []
                            firstLineNum = lineNum
                            line = line.replace(match.group(1), doxyTag)
                            timeToSend = True
                    if inSection:
                        # The last line belonged to a section.
                        # Does this one too? (Ignoring empty lines.)
                        match = AstWalker.__blanklineRE.match(line)
                        if not match:
                            indent = len(line.expandtabs(self.options.tablength)) - \
                                len(line.expandtabs(self.options.tablength).lstrip())
                            if indent <= sectionHeadingIndent:
                                inSection = False
                            else:
                                if lines[-1] == '#':
                                    # If the last line was empty, but we're still in a section
                                    # then we need to start a new paragraph.
                                    lines[-1] = '# @par'
                    match = AstWalker.__returnsStartRE.match(line)
                    if match:
                        # We've got a "returns" section
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        line = line.replace(match.group(0), ' @return\t').rstrip()
                        prefix = '@return\t'
                    else:
                        match = AstWalker.__argsStartRE.match(line)
                        if match:
                            # We've got an "arguments" section
                            line = line.replace(match.group(0), '').rstrip()
                            if 'attr' in match.group(0).lower():
                                prefix = '@property\t'
                            else:
                                prefix = '@param\t'
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            lines.append('#' + line)
                            continue
                        else:
                            match = AstWalker.__argsRE.match(line)
                            if match and not inCodeBlock:
                                # We've got something that looks like an item /
                                # description pair.
                                if 'property' in prefix:
                                    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                        prefix, match.groupdict(), linesep)
                                else:
                                    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                        prefix, match.groupdict())
                            else:
                                match = AstWalker.__raisesStartRE.match(line)
                                if match:
                                    line = line.replace(match.group(0), '').rstrip()
                                    if 'see' in match.group(1).lower():
                                        # We've got a "see also" section
                                        prefix = '@sa\t'
                                    else:
                                        # We've got an "exceptions" section
                                        prefix = '@exception\t'
                                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                        lines[-1], inCodeBlock)
                                    inCodeBlockObj[0] = inCodeBlock
                                    lines.append('#' + line)
                                    continue
                                else:
                                    match = AstWalker.__listRE.match(line)
                                    if match and not inCodeBlock:
                                        # We've got a list of something or another
                                        itemList = []
                                        for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                                match.group(0))):
                                            itemList.append('# {0}\t{1}{2}'.format(
                                                prefix, itemMatch, linesep))
                                        line = ''.join(itemList)[1:]
                                    else:
                                        match = AstWalker.__examplesStartRE.match(line)
                                        if match and lines[-1].strip() == '#' \
                                                and self.options.autocode:
                                            # We've got an "example" section
                                            inCodeBlock = True
                                            inCodeBlockObj[0] = True
                                            line = line.replace(match.group(0),
                                                                ' @b Examples{0}# @code'.format(linesep))
                                        else:
                                            match = AstWalker.__sectionStartRE.match(line)
                                            if match:
                                                # We've got an arbitrary section
                                                prefix = ''
                                                inSection = True
                                                # What's the indentation of the section heading?
                                                sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                    - len(line.expandtabs(self.options.tablength).lstrip())
                                                line = line.replace(
                                                    match.group(0),
                                                    ' @par {0}'.format(match.group(1))
                                                )
                                                if lines[-1] == '# @par':
                                                    lines[-1] = '#'
                                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                    lines[-1], inCodeBlock)
                                                inCodeBlockObj[0] = inCodeBlock
                                                lines.append('#' + line)
                                                continue
                                            elif prefix:
                                                match = AstWalker.__singleListItemRE.match(line)
                                                if match and not inCodeBlock:
                                                    # Probably a single list item
                                                    line = ' {0}\t{1}'.format(
                                                        prefix, match.group(0))
                                                elif self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                                            else:
                                                if self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                # If we were passed a tail, append it to the docstring.
                # Note that this means that we need a docstring for this
                # item to get documented.
                if tail and lineNum == len(self.docLines) - 1:
                    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
                # Add comment marker for every line.
                line = '#{0}'.format(line.rstrip())
                # Ensure the first line has the Doxygen double comment.
                if lineNum == 0:
                    line = '#' + line
                lines.append(line.replace(' ' + linesep, linesep))
            else:
                # If we get our sentinel value, send out what we've got.
                timeToSend = True
            if timeToSend:
                lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                               inCodeBlock)
                inCodeBlockObj[0] = inCodeBlock
                writer.send((firstLineNum, lineNum, lines))
                lines = []
                firstLineNum = -1
                timeToSend = False

    @coroutine
    def __writeDocstring(self):
        """
        Runs eternally, dumping out docstring line batches as they get fed in.

        Replaces original batches of docstring lines with modified versions
        fed in via send.
        """
        while True:
            firstLineNum, lastLineNum, lines = (yield)
            newDocstringLen = lastLineNum - firstLineNum + 1
            # Pad so the replacement occupies exactly the original span.
            while len(lines) < newDocstringLen:
                lines.append('')
            # Substitute the new block of lines for the original block of lines.
            self.docLines[firstLineNum: lastLineNum + 1] = lines

    def _processDocstring(self, node, tail='', **kwargs):
        """
        Handles a docstring for functions, classes, and modules.

        Basically just figures out the bounds of the docstring and sends it
        off to the parser to do the actual work.
        """
        typeName = type(node).__name__
        # Modules don't have lineno defined, but it's always 0 for them.
        curLineNum = startLineNum = 0
        if typeName != 'Module':
            startLineNum = curLineNum = node.lineno - 1
        # Figure out where both our enclosing object and our docstring start.
        line = ''
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            match = AstWalker.__docstrMarkerRE.match(line)
            if match:
                break
            curLineNum += 1
        docstringStart = curLineNum
        # Figure out where our docstring ends.
        if not AstWalker.__docstrOneLineRE.match(line):
            # Skip for the special case of a single-line docstring.
            curLineNum += 1
            while curLineNum < len(self.lines):
                line = self.lines[curLineNum]
                if line.find(match.group(2)) >= 0:
                    break
                curLineNum += 1
        endLineNum = curLineNum + 1
        # Isolate our enclosing object's declaration.
        defLines = self.lines[startLineNum: docstringStart]
        # Isolate our docstring.
        self.docLines = self.lines[docstringStart: endLineNum]
        # If we have a docstring, extract information from it.
        if self.docLines:
            # Get rid of the docstring delineators.
            self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                              self.docLines[0])
            self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                               self.docLines[-1])
            # Handle special strings within the docstring.
            docstringConverter = self.__alterDocstring(
                tail, self.__writeDocstring())
            for lineInfo in enumerate(self.docLines):
                docstringConverter.send(lineInfo)
            # Sentinel (line=None) flushes the converter's final batch.
            docstringConverter.send((len(self.docLines) - 1, None))
        # Add a Doxygen @brief tag to any single-line description.
        if self.options.autobrief:
            safetyCounter = 0
            while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                del self.docLines[0]
                self.docLines.append('')
                safetyCounter += 1
                if safetyCounter >= len(self.docLines):
                    # Escape the effectively empty docstring.
                    break
            if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                    self.docLines[1].strip(whitespace + '#') == '' or
                    self.docLines[1].strip(whitespace + '#').startswith('@'))):
                self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                    self.docLines[1] = '#'
        if defLines:
            match = AstWalker.__indentRE.match(defLines[0])
            indentStr = match and match.group(1) or ''
            self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                             for docLine in self.docLines]
        # Taking away a docstring from an interface method definition sometimes
        # leaves broken code as the docstring may be the only code in it.
        # Here we manually insert a pass statement to rectify this problem.
        if typeName != 'Module':
            if docstringStart < len(self.lines):
                match = AstWalker.__indentRE.match(self.lines[docstringStart])
                indentStr = match and match.group(1) or ''
            else:
                indentStr = ''
            containingNodes = kwargs.get('containingNodes', []) or []
            fullPathNamespace = self._getFullPathName(containingNodes)
            parentType = fullPathNamespace[-2][1]
            if parentType == 'interface' and typeName == 'FunctionDef' \
                    or fullPathNamespace[-1][1] == 'interface':
                defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                      linesep, indentStr)
            elif self.options.autobrief and typeName == 'ClassDef':
                # If we're parsing docstrings separate out class attribute
                # definitions to get better Doxygen output.
                for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                    if '@property\t' in firstVarLine:
                        break
                lastVarLineNum = len(self.docLines)
                if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                    # Walk back from the end to the last @property line.
                    while lastVarLineNum > firstVarLineNum:
                        lastVarLineNum -= 1
                        if '@property\t' in self.docLines[lastVarLineNum]:
                            break
                    lastVarLineNum += 1
                    if firstVarLineNum < len(self.docLines):
                        indentLineNum = endLineNum
                        indentStr = ''
                        while not indentStr and indentLineNum < len(self.lines):
                            match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                            indentStr = match and match.group(1) or ''
                            indentLineNum += 1
                        varLines = ['{0}{1}'.format(linesep, docLine).replace(
                            linesep, linesep + indentStr)
                            for docLine in self.docLines[
                                firstVarLineNum: lastVarLineNum]]
                        defLines.extend(varLines)
                        self.docLines[firstVarLineNum: lastVarLineNum] = []
                        # After the property shuffling we will need to relocate
                        # any existing namespace information.
                        namespaceLoc = defLines[-1].find('\n# @namespace')
                        if namespaceLoc >= 0:
                            self.docLines[-1] += defLines[-1][namespaceLoc:]
                            defLines[-1] = defLines[-1][:namespaceLoc]
        # For classes and functions, apply our changes and reverse the
        # order of the declaration and docstring, and for modules just
        # apply our changes.
        if typeName != 'Module':
            self.lines[startLineNum: endLineNum] = self.docLines + defLines
        else:
            self.lines[startLineNum: endLineNum] = defLines + self.docLines

    @staticmethod
    def _checkMemberName(name):
        """
        See if a member name indicates that it should be private.

        Private variables in Python (starting with a double underscore but
        not ending in a double underscore) and bed lumps (variables that
        are not really private but are by common convention treated as
        protected because they begin with a single underscore) get Doxygen
        tags labeling them appropriately.
        """
        assert isinstance(name, str)
        restrictionLevel = None
        if not name.endswith('__'):
            if name.startswith('__'):
                restrictionLevel = 'private'
            elif name.startswith('_'):
                restrictionLevel = 'protected'
        return restrictionLevel

    def _processMembers(self, node, contextTag):
        """
        Mark up members if they should be private.

        If the name indicates it should be private or protected, apply
        the appropriate Doxygen tags.
        """
        restrictionLevel = self._checkMemberName(node.name)
        if restrictionLevel:
            workTag = '{0}{1}# @{2}'.format(contextTag,
                                            linesep,
                                            restrictionLevel)
        else:
            workTag = contextTag
        return workTag

    def generic_visit(self, node, **kwargs):
        """
        Extract useful information from relevant nodes including docstrings.

        This is virtually identical to the standard version contained in
        NodeVisitor. It is only overridden because we're tracking extra
        information (the hierarchy of containing nodes) not preserved in
        the original.
        """
        for field, value in iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item, containingNodes=kwargs['containingNodes'])
            elif isinstance(value, AST):
                self.visit(value, containingNodes=kwargs['containingNodes'])

    def _getFullPathName(self, containingNodes):
        """
        Returns the full node hierarchy rooted at module name.

        The list representing the full path through containing nodes
        (starting with the module itself) is returned.
        """
        assert isinstance(containingNodes, list)
        return [(self.options.fullPathNamespace, 'module')] + containingNodes

    def visit_Module(self, node, **kwargs):
        """
        Handles the module-level docstring.

        Process the module-level docstring and create appropriate Doxygen tags
        if autobrief option is set.
        """
        containingNodes = kwargs.get('containingNodes', [])
        if self.options.debug:
            stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
                                                  linesep))
        if get_docstring(node):
            if self.options.topLevelNamespace:
                fullPathNamespace = self._getFullPathName(containingNodes)
                contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
                tail = '@namespace {0}'.format(contextTag)
            else:
                tail = ''
            self._processDocstring(node, tail)
        # Visit any contained nodes (in this case pretty much everything).
        self.generic_visit(node, containingNodes=containingNodes)

    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.

        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables. If an assignment appears
        to be an attribute, it gets labeled as such for Doxygen. If a variable
        name uses Python mangling or is just a bed lump, it is labeled as
        private for Doxygen.
        """
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                '{0}# @hideinitializer{2}{4}{2}'.format(
                    match.group(1),
                    match.group(2),
                    linesep,
                    match.group(3),
                    self.lines[lineNum].rstrip()
                )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                    '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                        indentStr,
                        node.targets[0].id,
                        linesep,
                        restrictionLevel,
                        self.lines[lineNum].rstrip()
                    )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])

    def visit_Call(self, node, **kwargs):
        """
        Handles function calls within code.

        Function calls in Python are used to represent interface implementations
        in addition to their normal use. If a call appears to mark an
        implementation, it gets labeled as such for Doxygen.
        """
        lineNum = node.lineno - 1
        # Function calls have one Doxygen-significant special case: interface
        # implementations.
        match = AstWalker.__implementsRE.match(self.lines[lineNum])
        if match:
            self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
                match.group(1), match.group(2), linesep,
                self.lines[lineNum].rstrip())
            if self.options.debug:
                stderr.write("# Implements {0}{1}".format(match.group(1),
                                                          linesep))
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])

    def visit_FunctionDef(self, node, **kwargs):
        """
        Handles function definitions within code.

        Process a function's docstring, keeping well aware of the function's
        context and whether or not it's part of an interface definition.
        """
        if self.options.debug:
            stderr.write("# Function {0.name}{1}".format(node, linesep))
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is nested within another function or even if a class
        # is nested within a function.
        containingNodes = kwargs.get('containingNodes') or []
        containingNodes.append((node.name, 'function'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            modifiedContextTag = self._processMembers(node, contextTag)
            tail = '@namespace {0}'.format(modifiedContextTag)
        else:
            tail = self._processMembers(node, '')
        if get_docstring(node):
            self._processDocstring(node, tail,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()

    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.

        Process the docstring. Note though that in Python Class definitions
        are used to define interfaces in addition to classes.
        If a class definition appears to be an interface definition tag it as an
        interface definition for Doxygen. Otherwise tag it as a class
        definition for Doxygen.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        contextTag = self._processMembers(node, contextTag)
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()

    def parseLines(self):
        """Form an AST for the code and produce a new version of the source."""
        inAst = parse(''.join(self.lines), self.inFilename)
        # Visit all the nodes in our tree and apply Doxygen tags to the source.
        self.visit(inAst)

    def getLines(self):
        """Return the modified file once processing has been completed."""
        return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker._getFullPathName
|
python
|
def _getFullPathName(self, containingNodes):
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
|
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L584-L592
| null |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
    @coroutine
    def __alterDocstring(self, tail='', writer=None):
        """
        Runs eternally, processing docstring lines.

        Parses docstring lines as they get fed in via send, applies appropriate
        Doxygen tags, and passes them along in batches for writing.

        Args:
            tail   -- extra text (e.g. a @namespace tag) appended to the
                      final docstring line.
            writer -- coroutine (the docstring writer) that receives each
                      converted batch as (firstLineNum, lastLineNum, lines).
        """
        assert isinstance(tail, str) and isinstance(writer, GeneratorType)
        lines = []
        timeToSend = False
        inCodeBlock = False
        inCodeBlockObj = [False]
        inSection = False
        prefix = ''
        firstLineNum = -1
        sectionHeadingIndent = 0
        codeChecker = self._checkIfCode(inCodeBlockObj)
        while True:
            lineNum, line = (yield)
            if firstLineNum < 0:
                firstLineNum = lineNum
            # Don't bother doing extra work if it's a sentinel.
            if line is not None:
                # Also limit work if we're not parsing the docstring.
                if self.options.autobrief:
                    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                        match = tagRE.search(line)
                        if match:
                            # We've got a simple one-line Doxygen command
                            # NOTE(review): lines[-1] would IndexError if a
                            # one-line tag matched the very first docstring
                            # line (lines still empty) -- confirm guarded upstream.
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            writer.send((firstLineNum, lineNum - 1, lines))
                            lines = []
                            firstLineNum = lineNum
                            line = line.replace(match.group(1), doxyTag)
                            timeToSend = True
                    if inSection:
                        # The last line belonged to a section.
                        # Does this one too? (Ignoring empty lines.)
                        match = AstWalker.__blanklineRE.match(line)
                        if not match:
                            indent = len(line.expandtabs(self.options.tablength)) - \
                                len(line.expandtabs(self.options.tablength).lstrip())
                            if indent <= sectionHeadingIndent:
                                inSection = False
                            else:
                                if lines[-1] == '#':
                                    # If the last line was empty, but we're still in a section
                                    # then we need to start a new paragraph.
                                    lines[-1] = '# @par'
                    match = AstWalker.__returnsStartRE.match(line)
                    if match:
                        # We've got a "returns" section
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        line = line.replace(match.group(0), ' @return\t').rstrip()
                        prefix = '@return\t'
                    else:
                        match = AstWalker.__argsStartRE.match(line)
                        if match:
                            # We've got an "arguments" section
                            line = line.replace(match.group(0), '').rstrip()
                            if 'attr' in match.group(0).lower():
                                prefix = '@property\t'
                            else:
                                prefix = '@param\t'
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            lines.append('#' + line)
                            continue
                        else:
                            match = AstWalker.__argsRE.match(line)
                            if match and not inCodeBlock:
                                # We've got something that looks like an item /
                                # description pair.
                                if 'property' in prefix:
                                    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                        prefix, match.groupdict(), linesep)
                                else:
                                    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                        prefix, match.groupdict())
                            else:
                                match = AstWalker.__raisesStartRE.match(line)
                                if match:
                                    line = line.replace(match.group(0), '').rstrip()
                                    if 'see' in match.group(1).lower():
                                        # We've got a "see also" section
                                        prefix = '@sa\t'
                                    else:
                                        # We've got an "exceptions" section
                                        prefix = '@exception\t'
                                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                        lines[-1], inCodeBlock)
                                    inCodeBlockObj[0] = inCodeBlock
                                    lines.append('#' + line)
                                    continue
                                else:
                                    match = AstWalker.__listRE.match(line)
                                    if match and not inCodeBlock:
                                        # We've got a list of something or another
                                        itemList = []
                                        for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                                match.group(0))):
                                            itemList.append('# {0}\t{1}{2}'.format(
                                                prefix, itemMatch, linesep))
                                        line = ''.join(itemList)[1:]
                                    else:
                                        match = AstWalker.__examplesStartRE.match(line)
                                        if match and lines[-1].strip() == '#' \
                                           and self.options.autocode:
                                            # We've got an "example" section
                                            inCodeBlock = True
                                            inCodeBlockObj[0] = True
                                            line = line.replace(match.group(0),
                                                                ' @b Examples{0}# @code'.format(linesep))
                                        else:
                                            match = AstWalker.__sectionStartRE.match(line)
                                            if match:
                                                # We've got an arbitrary section
                                                prefix = ''
                                                inSection = True
                                                # What's the indentation of the section heading?
                                                sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                    - len(line.expandtabs(self.options.tablength).lstrip())
                                                line = line.replace(
                                                    match.group(0),
                                                    ' @par {0}'.format(match.group(1))
                                                )
                                                if lines[-1] == '# @par':
                                                    lines[-1] = '#'
                                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                    lines[-1], inCodeBlock)
                                                inCodeBlockObj[0] = inCodeBlock
                                                lines.append('#' + line)
                                                continue
                                            elif prefix:
                                                match = AstWalker.__singleListItemRE.match(line)
                                                if match and not inCodeBlock:
                                                    # Probably a single list item
                                                    line = ' {0}\t{1}'.format(
                                                        prefix, match.group(0))
                                                elif self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                                            else:
                                                if self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                # If we were passed a tail, append it to the docstring.
                # Note that this means that we need a docstring for this
                # item to get documented.
                if tail and lineNum == len(self.docLines) - 1:
                    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
                # Add comment marker for every line.
                line = '#{0}'.format(line.rstrip())
                # Ensure the first line has the Doxygen double comment.
                if lineNum == 0:
                    line = '#' + line
                lines.append(line.replace(' ' + linesep, linesep))
            else:
                # If we get our sentinel value, send out what we've got.
                timeToSend = True
            if timeToSend:
                lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                               inCodeBlock)
                inCodeBlockObj[0] = inCodeBlock
                writer.send((firstLineNum, lineNum, lines))
                lines = []
                firstLineNum = -1
                timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
    def _processDocstring(self, node, tail='', **kwargs):
        """
        Handles a docstring for functions, classes, and modules.

        Basically just figures out the bounds of the docstring and sends it
        off to the parser to do the actual work.

        Args:
            node -- the AST node whose docstring is being processed.
            tail -- extra text (e.g. @namespace / @interface tags) appended
                    to the end of the converted docstring.
            containingNodes (kwarg) -- hierarchy of enclosing definitions,
                    used to detect interface methods.
        """
        typeName = type(node).__name__
        # Modules don't have lineno defined, but it's always 0 for them.
        curLineNum = startLineNum = 0
        if typeName != 'Module':
            startLineNum = curLineNum = node.lineno - 1
        # Figure out where both our enclosing object and our docstring start.
        line = ''
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            match = AstWalker.__docstrMarkerRE.match(line)
            if match:
                break
            curLineNum += 1
        docstringStart = curLineNum
        # Figure out where our docstring ends.
        if not AstWalker.__docstrOneLineRE.match(line):
            # Skip for the special case of a single-line docstring.
            curLineNum += 1
            while curLineNum < len(self.lines):
                line = self.lines[curLineNum]
                if line.find(match.group(2)) >= 0:
                    break
                curLineNum += 1
        endLineNum = curLineNum + 1
        # Isolate our enclosing object's declaration.
        defLines = self.lines[startLineNum: docstringStart]
        # Isolate our docstring.
        self.docLines = self.lines[docstringStart: endLineNum]
        # If we have a docstring, extract information from it.
        if self.docLines:
            # Get rid of the docstring delineators.
            self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                              self.docLines[0])
            self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                               self.docLines[-1])
            # Handle special strings within the docstring.
            docstringConverter = self.__alterDocstring(
                tail, self.__writeDocstring())
            for lineInfo in enumerate(self.docLines):
                docstringConverter.send(lineInfo)
            # The (lastLine, None) sentinel flushes the converter.
            docstringConverter.send((len(self.docLines) - 1, None))
            # Add a Doxygen @brief tag to any single-line description.
            if self.options.autobrief:
                safetyCounter = 0
                while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                    del self.docLines[0]
                    self.docLines.append('')
                    safetyCounter += 1
                    if safetyCounter >= len(self.docLines):
                        # Escape the effectively empty docstring.
                        break
                if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                        self.docLines[1].strip(whitespace + '#') == '' or
                        self.docLines[1].strip(whitespace + '#').startswith('@'))):
                    self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                    if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                        self.docLines[1] = '#'
        if defLines:
            # Re-indent the converted docstring to match the declaration.
            match = AstWalker.__indentRE.match(defLines[0])
            indentStr = match and match.group(1) or ''
            self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                             for docLine in self.docLines]
        # Taking away a docstring from an interface method definition sometimes
        # leaves broken code as the docstring may be the only code in it.
        # Here we manually insert a pass statement to rectify this problem.
        if typeName != 'Module':
            if docstringStart < len(self.lines):
                match = AstWalker.__indentRE.match(self.lines[docstringStart])
                indentStr = match and match.group(1) or ''
            else:
                indentStr = ''
            containingNodes = kwargs.get('containingNodes', []) or []
            fullPathNamespace = self._getFullPathName(containingNodes)
            parentType = fullPathNamespace[-2][1]
            if parentType == 'interface' and typeName == 'FunctionDef' \
               or fullPathNamespace[-1][1] == 'interface':
                defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                      linesep, indentStr)
            elif self.options.autobrief and typeName == 'ClassDef':
                # If we're parsing docstrings separate out class attribute
                # definitions to get better Doxygen output.
                for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                    if '@property\t' in firstVarLine:
                        break
                lastVarLineNum = len(self.docLines)
                if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                    while lastVarLineNum > firstVarLineNum:
                        lastVarLineNum -= 1
                        if '@property\t' in self.docLines[lastVarLineNum]:
                            break
                    lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]
        # For classes and functions, apply our changes and reverse the
        # order of the declaration and docstring, and for modules just
        # apply our changes.
        if typeName != 'Module':
            self.lines[startLineNum: endLineNum] = self.docLines + defLines
        else:
            self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.

        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables. If an assignment appears
        to be an attribute, it gets labeled as such for Doxygen. If a variable
        name uses Python mangling or is just a bed lump, it is labeled as
        private for Doxygen.
        """
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            # Rewrite the line as a Doxygen @property with its description.
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                                  '{0}# @hideinitializer{2}{4}{2}'.format(
                                      match.group(1),
                                      match.group(2),
                                      linesep,
                                      match.group(3),
                                      self.lines[lineNum].rstrip()
                                  )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        # Only simple name targets are checked for privacy mangling;
        # tuple/attribute targets are left alone.
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                      '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                                          indentStr,
                                          node.targets[0].id,
                                          linesep,
                                          restrictionLevel,
                                          self.lines[lineNum].rstrip()
                                      )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.

        Process the docstring. Note though that in Python Class definitions
        are used to define interfaces in addition to classes.
        If a class definition appears to be an interface definition tag it as an
        interface definition for Doxygen. Otherwise tag it as a class
        definition for Doxygen.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        contextTag = self._processMembers(node, contextTag)
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit_Module
|
python
|
def visit_Module(self, node, **kwargs):
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
|
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L594-L614
|
[
"def _processDocstring(self, node, tail='', **kwargs):\n \"\"\"\n Handles a docstring for functions, classes, and modules.\n\n Basically just figures out the bounds of the docstring and sends it\n off to the parser to do the actual work.\n \"\"\"\n typeName = type(node).__name__\n # Modules don't have lineno defined, but it's always 0 for them.\n curLineNum = startLineNum = 0\n if typeName != 'Module':\n startLineNum = curLineNum = node.lineno - 1\n # Figure out where both our enclosing object and our docstring start.\n line = ''\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n match = AstWalker.__docstrMarkerRE.match(line)\n if match:\n break\n curLineNum += 1\n docstringStart = curLineNum\n # Figure out where our docstring ends.\n if not AstWalker.__docstrOneLineRE.match(line):\n # Skip for the special case of a single-line docstring.\n curLineNum += 1\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n if line.find(match.group(2)) >= 0:\n break\n curLineNum += 1\n endLineNum = curLineNum + 1\n\n # Isolate our enclosing object's declaration.\n defLines = self.lines[startLineNum: docstringStart]\n # Isolate our docstring.\n self.docLines = self.lines[docstringStart: endLineNum]\n\n # If we have a docstring, extract information from it.\n if self.docLines:\n # Get rid of the docstring delineators.\n self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[0])\n self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[-1])\n # Handle special strings within the docstring.\n docstringConverter = self.__alterDocstring(\n tail, self.__writeDocstring())\n for lineInfo in enumerate(self.docLines):\n docstringConverter.send(lineInfo)\n docstringConverter.send((len(self.docLines) - 1, None))\n\n # Add a Doxygen @brief tag to any single-line description.\n if self.options.autobrief:\n safetyCounter = 0\n while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':\n del self.docLines[0]\n 
self.docLines.append('')\n safetyCounter += 1\n if safetyCounter >= len(self.docLines):\n # Escape the effectively empty docstring.\n break\n if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (\n self.docLines[1].strip(whitespace + '#') == '' or\n self.docLines[1].strip(whitespace + '#').startswith('@'))):\n self.docLines[0] = \"## @brief {0}\".format(self.docLines[0].lstrip('#'))\n if len(self.docLines) > 1 and self.docLines[1] == '# @par':\n self.docLines[1] = '#'\n\n if defLines:\n match = AstWalker.__indentRE.match(defLines[0])\n indentStr = match and match.group(1) or ''\n self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)\n for docLine in self.docLines]\n\n # Taking away a docstring from an interface method definition sometimes\n # leaves broken code as the docstring may be the only code in it.\n # Here we manually insert a pass statement to rectify this problem.\n if typeName != 'Module':\n if docstringStart < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[docstringStart])\n indentStr = match and match.group(1) or ''\n else:\n indentStr = ''\n containingNodes = kwargs.get('containingNodes', []) or []\n fullPathNamespace = self._getFullPathName(containingNodes)\n parentType = fullPathNamespace[-2][1]\n if parentType == 'interface' and typeName == 'FunctionDef' \\\n or fullPathNamespace[-1][1] == 'interface':\n defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],\n linesep, indentStr)\n elif self.options.autobrief and typeName == 'ClassDef':\n # If we're parsing docstrings separate out class attribute\n # definitions to get better Doxygen output.\n for firstVarLineNum, firstVarLine in enumerate(self.docLines):\n if '@property\\t' in firstVarLine:\n break\n lastVarLineNum = len(self.docLines)\n if lastVarLineNum > 0 and '@property\\t' in firstVarLine:\n while lastVarLineNum > firstVarLineNum:\n lastVarLineNum -= 1\n if '@property\\t' in self.docLines[lastVarLineNum]:\n break\n lastVarLineNum += 1\n if firstVarLineNum 
< len(self.docLines):\n indentLineNum = endLineNum\n indentStr = ''\n while not indentStr and indentLineNum < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[indentLineNum])\n indentStr = match and match.group(1) or ''\n indentLineNum += 1\n varLines = ['{0}{1}'.format(linesep, docLine).replace(\n linesep, linesep + indentStr)\n for docLine in self.docLines[\n firstVarLineNum: lastVarLineNum]]\n defLines.extend(varLines)\n self.docLines[firstVarLineNum: lastVarLineNum] = []\n # After the property shuffling we will need to relocate\n # any existing namespace information.\n namespaceLoc = defLines[-1].find('\\n# @namespace')\n if namespaceLoc >= 0:\n self.docLines[-1] += defLines[-1][namespaceLoc:]\n defLines[-1] = defLines[-1][:namespaceLoc]\n\n # For classes and functions, apply our changes and reverse the\n # order of the declaration and docstring, and for modules just\n # apply our changes.\n if typeName != 'Module':\n self.lines[startLineNum: endLineNum] = self.docLines + defLines\n else:\n self.lines[startLineNum: endLineNum] = defLines + self.docLines\n",
"def generic_visit(self, node, **kwargs):\n \"\"\"\n Extract useful information from relevant nodes including docstrings.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n for field, value in iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, AST):\n self.visit(item, containingNodes=kwargs['containingNodes'])\n elif isinstance(value, AST):\n self.visit(value, containingNodes=kwargs['containingNodes'])\n",
"def _getFullPathName(self, containingNodes):\n \"\"\"\n Returns the full node hierarchy rooted at module name.\n\n The list representing the full path through containing nodes\n (starting with the module itself) is returned.\n \"\"\"\n assert isinstance(containingNodes, list)\n return [(self.options.fullPathNamespace, 'module')] + containingNodes\n"
] |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
    def __init__(self, lines, options, inFilename):
        """Initialize a few class variables in preparation for our walk."""
        # The source file, split into lines, rewritten in place as we walk.
        self.lines = lines
        # Parsed command-line options (autobrief, autocode, debug,
        # tablength, topLevelNamespace, object_respect, ...).
        self.options = options
        # Name of the input file, passed to the AST parser.
        self.inFilename = inFilename
        # Scratch buffer holding the docstring currently being processed.
        self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.

        Runs eternally as a coroutine: each send() delivers a candidate
        docstring line, the accumulated output lines, and the current line
        number.  Ambiguous lines are buffered and fed to codeop's
        compile_command until they can be classified; '@code'/'@endcode'
        markers are then spliced into `lines` at the point where a code
        block opened or closed.

        Args:
            inCodeBlockObj -- single-element list used as a mutable boolean
                              so the caller can observe code-block state.
        """
        while True:
            line, lines, lineNum = (yield)
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            lineOfCode = None
            # Keep consuming lines until the buffered text is classifiable.
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
            # Back up to where the classified run of lines began.
            currentLineNum = lineNum - testLineNum
            if not inCodeBlockObj[0] and lineOfCode:
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
@coroutine
def __alterDocstring(self, tail='', writer=None):
    """
    Runs eternally, processing docstring lines.

    Parses docstring lines as they get fed in via send, applies appropriate
    Doxygen tags, and passes them along in batches for writing.

    Args:
        tail:    Extra text (e.g. a @namespace tag) appended after the
                 final docstring line.
        writer:  The coroutine that receives finished batches of lines.
    """
    assert isinstance(tail, str) and isinstance(writer, GeneratorType)
    lines = []                 # batch of rewritten lines pending a flush
    timeToSend = False         # set when the current batch must be flushed
    inCodeBlock = False        # inside a Doxygen @code ... @endcode span
    inCodeBlockObj = [False]   # shared mutable flag for _checkIfCode
    inSection = False          # inside an arbitrary "Heading:" section
    prefix = ''                # Doxygen tag applied to subsequent list items
    firstLineNum = -1
    sectionHeadingIndent = 0
    codeChecker = self._checkIfCode(inCodeBlockObj)
    while True:
        lineNum, line = (yield)
        if firstLineNum < 0:
            firstLineNum = lineNum
        # Don't bother doing extra work if it's a sentinel.
        if line is not None:
            # Also limit work if we're not parsing the docstring.
            if self.options.autobrief:
                for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                    match = tagRE.search(line)
                    if match:
                        # We've got a simple one-line Doxygen command
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        writer.send((firstLineNum, lineNum - 1, lines))
                        lines = []
                        firstLineNum = lineNum
                        line = line.replace(match.group(1), doxyTag)
                        timeToSend = True

                if inSection:
                    # The last line belonged to a section.
                    # Does this one too? (Ignoring empty lines.)
                    match = AstWalker.__blanklineRE.match(line)
                    if not match:
                        indent = len(line.expandtabs(self.options.tablength)) - \
                            len(line.expandtabs(self.options.tablength).lstrip())
                        if indent <= sectionHeadingIndent:
                            inSection = False
                        else:
                            if lines[-1] == '#':
                                # If the last line was empty, but we're still in a section
                                # then we need to start a new paragraph.
                                lines[-1] = '# @par'

                match = AstWalker.__returnsStartRE.match(line)
                if match:
                    # We've got a "returns" section
                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                        lines[-1], inCodeBlock)
                    inCodeBlockObj[0] = inCodeBlock
                    line = line.replace(match.group(0), ' @return\t').rstrip()
                    prefix = '@return\t'
                else:
                    match = AstWalker.__argsStartRE.match(line)
                    if match:
                        # We've got an "arguments" section
                        line = line.replace(match.group(0), '').rstrip()
                        if 'attr' in match.group(0).lower():
                            prefix = '@property\t'
                        else:
                            prefix = '@param\t'
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        lines.append('#' + line)
                        continue
                    else:
                        match = AstWalker.__argsRE.match(line)
                        if match and not inCodeBlock:
                            # We've got something that looks like an item /
                            # description pair.
                            if 'property' in prefix:
                                line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                    prefix, match.groupdict(), linesep)
                            else:
                                line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                    prefix, match.groupdict())
                        else:
                            match = AstWalker.__raisesStartRE.match(line)
                            if match:
                                line = line.replace(match.group(0), '').rstrip()
                                if 'see' in match.group(1).lower():
                                    # We've got a "see also" section
                                    prefix = '@sa\t'
                                else:
                                    # We've got an "exceptions" section
                                    prefix = '@exception\t'
                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                    lines[-1], inCodeBlock)
                                inCodeBlockObj[0] = inCodeBlock
                                lines.append('#' + line)
                                continue
                            else:
                                match = AstWalker.__listRE.match(line)
                                if match and not inCodeBlock:
                                    # We've got a list of something or another
                                    itemList = []
                                    for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                            match.group(0))):
                                        itemList.append('# {0}\t{1}{2}'.format(
                                            prefix, itemMatch, linesep))
                                    line = ''.join(itemList)[1:]
                                else:
                                    match = AstWalker.__examplesStartRE.match(line)
                                    if match and lines[-1].strip() == '#' \
                                       and self.options.autocode:
                                        # We've got an "example" section
                                        inCodeBlock = True
                                        inCodeBlockObj[0] = True
                                        line = line.replace(match.group(0),
                                                            ' @b Examples{0}# @code'.format(linesep))
                                    else:
                                        match = AstWalker.__sectionStartRE.match(line)
                                        if match:
                                            # We've got an arbitrary section
                                            prefix = ''
                                            inSection = True
                                            # What's the indentation of the section heading?
                                            sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                - len(line.expandtabs(self.options.tablength).lstrip())
                                            line = line.replace(
                                                match.group(0),
                                                ' @par {0}'.format(match.group(1))
                                            )
                                            if lines[-1] == '# @par':
                                                lines[-1] = '#'
                                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                lines[-1], inCodeBlock)
                                            inCodeBlockObj[0] = inCodeBlock
                                            lines.append('#' + line)
                                            continue
                                        elif prefix:
                                            match = AstWalker.__singleListItemRE.match(line)
                                            if match and not inCodeBlock:
                                                # Probably a single list item
                                                line = ' {0}\t{1}'.format(
                                                    prefix, match.group(0))
                                            elif self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]
                                        else:
                                            if self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]

            # If we were passed a tail, append it to the docstring.
            # Note that this means that we need a docstring for this
            # item to get documented.
            if tail and lineNum == len(self.docLines) - 1:
                line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)

            # Add comment marker for every line.
            line = '#{0}'.format(line.rstrip())
            # Ensure the first line has the Doxygen double comment.
            if lineNum == 0:
                line = '#' + line

            lines.append(line.replace(' ' + linesep, linesep))
        else:
            # If we get our sentinel value, send out what we've got.
            timeToSend = True

        if timeToSend:
            lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                           inCodeBlock)
            inCodeBlockObj[0] = inCodeBlock
            writer.send((firstLineNum, lineNum, lines))
            lines = []
            firstLineNum = -1
            timeToSend = False
@coroutine
def __writeDocstring(self):
    """
    Runs eternally, dumping out docstring line batches as they get fed in.

    Replaces original batches of docstring lines with modified versions
    fed in via send.
    """
    while True:
        firstLineNum, lastLineNum, lines = (yield)
        newDocstringLen = lastLineNum - firstLineNum + 1
        # Pad the replacement so it spans exactly as many entries as the
        # original batch it displaces, keeping later indices stable.
        while len(lines) < newDocstringLen:
            lines.append('')
        # Substitute the new block of lines for the original block of lines.
        self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.

    Basically just figures out the bounds of the docstring and sends it
    off to the parser to do the actual work.

    Args:
        node:    The AST node (Module, ClassDef, or FunctionDef) whose
                 docstring is being processed.
        tail:    Extra text (e.g. a @namespace tag) appended to the
                 docstring.
        kwargs:  May carry 'containingNodes', the enclosing-scope list.
    """
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            # match.group(2) is the triple-quote delimiter found above.
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1

    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]

    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        # Sentinel (line is None) flushes the converter's final batch.
        docstringConverter.send((len(self.docLines) - 1, None))

    # Add a Doxygen @brief tag to any single-line description.
    if self.options.autobrief:
        safetyCounter = 0
        # Rotate leading blank comment lines to the end of the docstring.
        while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
            del self.docLines[0]
            self.docLines.append('')
            safetyCounter += 1
            if safetyCounter >= len(self.docLines):
                # Escape the effectively empty docstring.
                break
        if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                self.docLines[1].strip(whitespace + '#') == '' or
                self.docLines[1].strip(whitespace + '#').startswith('@'))):
            # A single-sentence summary becomes the @brief line.
            self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
            if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                self.docLines[1] = '#'

    if defLines:
        # Re-indent the comment block to match the declaration's indent.
        match = AstWalker.__indentRE.match(defLines[0])
        indentStr = match and match.group(1) or ''
        self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                         for docLine in self.docLines]

    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
           or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
            if firstVarLineNum < len(self.docLines):
                # Find the indentation of the first code line after the
                # docstring so the moved lines line up with the body.
                indentLineNum = endLineNum
                indentStr = ''
                while not indentStr and indentLineNum < len(self.lines):
                    match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                    indentStr = match and match.group(1) or ''
                    indentLineNum += 1
                varLines = ['{0}{1}'.format(linesep, docLine).replace(
                    linesep, linesep + indentStr)
                    for docLine in self.docLines[
                        firstVarLineNum: lastVarLineNum]]
                defLines.extend(varLines)
                self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]

    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
    """
    Mark up members if they should be private.

    If the name indicates it should be private or protected, append
    the appropriate Doxygen tag to the context tag; otherwise return
    the context tag unchanged.
    """
    restrictionLevel = self._checkMemberName(node.name)
    if not restrictionLevel:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, restrictionLevel)
def generic_visit(self, node, **kwargs):
    """
    Extract useful information from relevant nodes including docstrings.

    This is virtually identical to the standard version contained in
    NodeVisitor.  It is only overridden because we're tracking extra
    information (the hierarchy of containing nodes) not preserved in
    the original.

    Args:
        node:    The AST node whose children should be visited.
        kwargs:  May carry 'containingNodes', the list of (name, type)
                 tuples for the scopes enclosing this node.
    """
    # Default the hierarchy the same way visit() does, so a direct call
    # (the public NodeVisitor API) cannot raise KeyError.
    containingNodes = kwargs.get('containingNodes', [])
    for field, value in iter_fields(node):
        if isinstance(value, list):
            for item in value:
                if isinstance(item, AST):
                    self.visit(item, containingNodes=containingNodes)
        elif isinstance(value, AST):
            self.visit(value, containingNodes=containingNodes)
def visit(self, node, **kwargs):
    """
    Visit a node and extract useful information from it.

    This is virtually identical to the standard version contained in
    NodeVisitor. It is only overridden because we're tracking extra
    information (the hierarchy of containing nodes) not preserved in
    the original.
    """
    # Dispatch on the node's class name, falling back to generic_visit,
    # threading the containing-node hierarchy through the call.
    handler = getattr(self, 'visit_' + type(node).__name__, self.generic_visit)
    return handler(node, containingNodes=kwargs.get('containingNodes', []))
def _getFullPathName(self, containingNodes):
    """
    Returns the full node hierarchy rooted at module name.

    The list representing the full path through containing nodes
    (starting with the module itself) is returned.
    """
    assert isinstance(containingNodes, list)
    moduleRoot = (self.options.fullPathNamespace, 'module')
    return [moduleRoot] + containingNodes
def visit_Assign(self, node, **kwargs):
    """
    Handles assignments within code.

    Variable assignments in Python are used to represent interface
    attributes in addition to basic variables. If an assignment appears
    to be an attribute, it gets labeled as such for Doxygen. If a variable
    name uses Python mangling or is just a bed lump, it is labeled as
    private for Doxygen.
    """
    lineNum = node.lineno - 1
    # Assignments have one Doxygen-significant special case:
    # interface attributes (zope-style Attribute(...) assignments).
    match = AstWalker.__attributeRE.match(self.lines[lineNum])
    if match:
        # Rewrite the source line as a Doxygen @property block followed
        # by the original assignment.
        self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                              '{0}# @hideinitializer{2}{4}{2}'.format(
                                  match.group(1),
                                  match.group(2),
                                  linesep,
                                  match.group(3),
                                  self.lines[lineNum].rstrip()
                              )
        if self.options.debug:
            stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                        linesep))
    if isinstance(node.targets[0], Name):
        match = AstWalker.__indentRE.match(self.lines[lineNum])
        indentStr = match and match.group(1) or ''
        restrictionLevel = self._checkMemberName(node.targets[0].id)
        if restrictionLevel:
            # Label mangled / underscore-prefixed variables as private or
            # protected for Doxygen.
            self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                  '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                                      indentStr,
                                      node.targets[0].id,
                                      linesep,
                                      restrictionLevel,
                                      self.lines[lineNum].rstrip()
                                  )
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
    """
    Handles function calls within code.

    Function calls in Python are used to represent interface implementations
    in addition to their normal use. If a call appears to mark an
    implementation, it gets labeled as such for Doxygen.
    """
    lineNum = node.lineno - 1
    # Function calls have one Doxygen-significant special case: interface
    # implementations (zope-style implements(...) / provides(...) calls).
    match = AstWalker.__implementsRE.match(self.lines[lineNum])
    if match:
        # group(1) is the leading indentation; group(2) is the interface
        # being implemented.
        self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
            match.group(1), match.group(2), linesep,
            self.lines[lineNum].rstrip())
        if self.options.debug:
            # Bug fix: report the implemented interface (group 2), not the
            # captured leading whitespace (group 1).
            stderr.write("# Implements {0}{1}".format(match.group(2),
                                                      linesep))
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.

    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Track context by pushing this function onto the containing-node
    # hierarchy; this lets us tell later whether a def is nested inside
    # another function or class.
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        fullPath = self._getFullPathName(containingNodes)
        dottedName = '.'.join(part[0] for part in fullPath)
        tail = '@namespace {0}'.format(self._processMembers(node, dottedName))
    else:
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes, then pop this function off the hierarchy.
    self.generic_visit(node, containingNodes=containingNodes)
    containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
    """
    Handles class definitions within code.

    Process the docstring. Note though that in Python Class definitions
    are used to define interfaces in addition to classes.
    If a class definition appears to be an interface definition tag it as an
    interface definition for Doxygen. Otherwise tag it as a class
    definition for Doxygen.
    """
    lineNum = node.lineno - 1
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is a method or an interface method definition or if
    # a class is fully contained within another class.
    containingNodes = kwargs.get('containingNodes') or []
    if not self.options.object_respect:
        # Remove object class of the inherited class list to avoid that all
        # new-style class inherits from object in the hierarchy class
        line = self.lines[lineNum]
        match = AstWalker.__classRE.match(line)
        if match:
            if match.group(2) == 'object':
                # Splice out the 'object' base from the class statement.
                self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
    match = AstWalker.__interfaceRE.match(self.lines[lineNum])
    if match:
        if self.options.debug:
            stderr.write("# Interface {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'interface'))
    else:
        if self.options.debug:
            stderr.write("# Class {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'class'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        tail = '@namespace {0}'.format(contextTag)
    else:
        tail = ''
    # Class definitions have one Doxygen-significant special case:
    # interface definitions.
    if match:
        contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                     linesep,
                                                     match.group(1))
    else:
        contextTag = tail
    contextTag = self._processMembers(node, contextTag)
    if get_docstring(node):
        self._processDocstring(node, contextTag,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def parseLines(self):
    """Form an AST for the code and produce a new version of the source."""
    tree = parse(''.join(self.lines), self.inFilename)
    # Visit all the nodes in our tree and apply Doxygen tags to the source.
    self.visit(tree)
def getLines(self):
    """Return the modified file once processing has been completed."""
    stripped = (line.rstrip() for line in self.lines)
    return linesep.join(stripped)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit_Assign
|
python
|
def visit_Assign(self, node, **kwargs):
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
|
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L616-L656
|
[
"def _checkMemberName(name):\n \"\"\"\n See if a member name indicates that it should be private.\n\n Private variables in Python (starting with a double underscore but\n not ending in a double underscore) and bed lumps (variables that\n are not really private but are by common convention treated as\n protected because they begin with a single underscore) get Doxygen\n tags labeling them appropriately.\n \"\"\"\n assert isinstance(name, str)\n restrictionLevel = None\n if not name.endswith('__'):\n if name.startswith('__'):\n restrictionLevel = 'private'\n elif name.startswith('_'):\n restrictionLevel = 'protected'\n return restrictionLevel\n",
"def generic_visit(self, node, **kwargs):\n \"\"\"\n Extract useful information from relevant nodes including docstrings.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n for field, value in iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, AST):\n self.visit(item, containingNodes=kwargs['containingNodes'])\n elif isinstance(value, AST):\n self.visit(value, containingNodes=kwargs['containingNodes'])\n"
] |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
@coroutine
def _checkIfCode(self, inCodeBlockObj):
"""Checks whether or not a given line appears to be Python code."""
while True:
line, lines, lineNum = (yield)
testLineNum = 1
currentLineNum = 0
testLine = line.strip()
lineOfCode = None
while lineOfCode is None:
match = AstWalker.__errorLineRE.match(testLine)
if not testLine or testLine == '...' or match:
# These are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
elif testLine.startswith('>>>'):
# This is definitely code.
lineOfCode = True
else:
try:
compLine = compile_command(testLine)
if compLine and lines[currentLineNum].strip().startswith('#'):
lineOfCode = True
else:
line, lines, lineNum = (yield)
line = line.strip()
if line.startswith('>>>'):
# Definitely code, don't compile further.
lineOfCode = True
else:
testLine += linesep + line
testLine = testLine.strip()
testLineNum += 1
except (SyntaxError, RuntimeError):
# This is definitely not code.
lineOfCode = False
except Exception:
# Other errors are ambiguous.
line, lines, lineNum = (yield)
testLine = line.strip()
#testLineNum = 1
currentLineNum = lineNum - testLineNum
if not inCodeBlockObj[0] and lineOfCode:
inCodeBlockObj[0] = True
lines[currentLineNum] = '{0}{1}# @code{1}'.format(
lines[currentLineNum],
linesep
)
elif inCodeBlockObj[0] and lineOfCode is False:
# None is ambiguous, so strict checking
# against False is necessary.
inCodeBlockObj[0] = False
lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
lines[currentLineNum],
linesep
)
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
@coroutine
def __writeDocstring(self):
    """
    Consume batches of processed docstring lines forever.

    Each item sent in is a (firstLineNum, lastLineNum, lines) triple;
    the replacement lines are padded with blanks to the original span
    length and then spliced over that span of self.docLines.
    """
    while True:
        start, stop, replacement = (yield)
        # Pad so the replacement covers exactly as many lines as the span.
        span = stop - start + 1
        if len(replacement) < span:
            replacement.extend([''] * (span - len(replacement)))
        # Splice the modified lines over the original docstring lines.
        self.docLines[start: stop + 1] = replacement
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.

    Basically just figures out the bounds of the docstring and sends it
    off to the parser to do the actual work.

    Args:
        node: The AST node whose docstring is being processed.
        tail: Extra text (e.g. a @namespace tag) appended to the docstring.
        kwargs: May carry 'containingNodes', the enclosing-scope hierarchy.
    """
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            # match.group(2) is the triple-quote marker that opened the
            # docstring; it closes on the line that repeats it.
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1

    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]

    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        # The (index, None) sentinel flushes any pending batch.
        docstringConverter.send((len(self.docLines) - 1, None))

    # Add a Doxygen @brief tag to any single-line description.
    if self.options.autobrief:
        safetyCounter = 0
        while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
            del self.docLines[0]
            self.docLines.append('')
            safetyCounter += 1
            if safetyCounter >= len(self.docLines):
                # Escape the effectively empty docstring.
                break
        if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                self.docLines[1].strip(whitespace + '#') == '' or
                self.docLines[1].strip(whitespace + '#').startswith('@'))):
            self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
            if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                self.docLines[1] = '#'

    if defLines:
        # Re-indent every docstring comment line to match the declaration.
        match = AstWalker.__indentRE.match(defLines[0])
        indentStr = match and match.group(1) or ''
        self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                         for docLine in self.docLines]

    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
                or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                # Walk back from the end to the last @property line.
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
            if firstVarLineNum < len(self.docLines):
                # Find the indentation of the first code line after the
                # docstring so the moved lines line up with the class body.
                indentLineNum = endLineNum
                indentStr = ''
                while not indentStr and indentLineNum < len(self.lines):
                    match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                    indentStr = match and match.group(1) or ''
                    indentLineNum += 1
                varLines = ['{0}{1}'.format(linesep, docLine).replace(
                    linesep, linesep + indentStr)
                    for docLine in self.docLines[
                        firstVarLineNum: lastVarLineNum]]
                defLines.extend(varLines)
                self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]

    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
    """
    Determine the Doxygen visibility implied by a member's name.

    Dunder-suffixed names (e.g. __init__) stay public.  Otherwise a
    double leading underscore marks the member private and a single
    leading underscore marks it protected, per Python convention.
    """
    assert isinstance(name, str)
    if name.endswith('__'):
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
def _processMembers(self, node, contextTag):
    """
    Append a Doxygen visibility tag to the context tag when needed.

    Looks at the node's name; if Python naming convention marks it
    private or protected, a matching Doxygen directive is appended.
    """
    level = self._checkMemberName(node.name)
    if not level:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, level)
def generic_visit(self, node, **kwargs):
    """
    Extract useful information from relevant nodes including docstrings.

    This is virtually identical to the standard version contained in
    NodeVisitor. It is only overridden because we're tracking extra
    information (the hierarchy of containing nodes) not preserved in
    the original.

    Args:
        node: The AST node whose children should be visited.
        kwargs: May carry 'containingNodes', the enclosing-scope list
            threaded through child visits (defaults to an empty list).
    """
    # Use .get with a default so a call without containingNodes — as the
    # base NodeVisitor API permits — doesn't raise KeyError; this matches
    # the convention already used by the overridden visit() method.
    containingNodes = kwargs.get('containingNodes', [])
    for field, value in iter_fields(node):
        if isinstance(value, list):
            for item in value:
                if isinstance(item, AST):
                    self.visit(item, containingNodes=containingNodes)
        elif isinstance(value, AST):
            self.visit(value, containingNodes=containingNodes)
def visit(self, node, **kwargs):
    """
    Dispatch a node to its type-specific visitor method.

    Mirrors ast.NodeVisitor.visit, except that the hierarchy of
    containing nodes is threaded through to the chosen visitor.
    """
    containingNodes = kwargs.get('containingNodes', [])
    handler = getattr(self, 'visit_' + type(node).__name__,
                      self.generic_visit)
    return handler(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
    """
    Return the node hierarchy rooted at the module name.

    Prepends the (module name, 'module') pair to the supplied list of
    containing nodes and returns the combined path as a new list.
    """
    assert isinstance(containingNodes, list)
    fullPath = [(self.options.fullPathNamespace, 'module')]
    fullPath.extend(containingNodes)
    return fullPath
def visit_Module(self, node, **kwargs):
    """
    Handles the module-level docstring.

    Process the module-level docstring and create appropriate Doxygen
    tags if the autobrief option is set.
    """
    containingNodes = kwargs.get('containingNodes', [])
    if self.options.debug:
        stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
                                              linesep))
    if get_docstring(node):
        tail = ''
        if self.options.topLevelNamespace:
            namespacePath = self._getFullPathName(containingNodes)
            tail = '@namespace {0}'.format(
                '.'.join(part[0] for part in namespacePath))
        self._processDocstring(node, tail)
    # Visit any contained nodes (in this case pretty much everything).
    self.generic_visit(node, containingNodes=containingNodes)
def visit_Call(self, node, **kwargs):
    """
    Handles function calls within code.

    Function calls in Python are used to represent interface
    implementations in addition to their normal use.  If a call appears
    to mark an implementation, it gets labeled as such for Doxygen.
    """
    lineNum = node.lineno - 1
    # The only Doxygen-significant call is an interface implementation
    # marker (zope-style implements()/provides()).
    match = AstWalker.__implementsRE.match(self.lines[lineNum])
    if match is not None:
        indent, interface = match.group(1), match.group(2)
        self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
            indent, interface, linesep, self.lines[lineNum].rstrip())
        if self.options.debug:
            # NOTE(review): this prints match.group(1) (the indentation),
            # matching the original behavior — possibly group(2) was
            # intended; confirm before changing.
            stderr.write("# Implements {0}{1}".format(indent, linesep))
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
    """
    Handles function definitions within code.

    Process a function's docstring, keeping well aware of the function's
    context and whether or not it's part of an interface definition.
    """
    if self.options.debug:
        stderr.write("# Function {0.name}{1}".format(node, linesep))
    # Record this function on the containing-node stack so nested
    # functions/classes know their enclosing context.
    containingNodes = kwargs.get('containingNodes') or []
    containingNodes.append((node.name, 'function'))
    if self.options.topLevelNamespace:
        namespacePath = self._getFullPathName(containingNodes)
        contextTag = '.'.join(part[0] for part in namespacePath)
        tail = '@namespace {0}'.format(
            self._processMembers(node, contextTag))
    else:
        tail = self._processMembers(node, '')
    if get_docstring(node):
        self._processDocstring(node, tail,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Pop this function back off the containing-node stack.
    containingNodes.pop()
def visit_ClassDef(self, node, **kwargs):
    """
    Handles class definitions within code.

    Process the docstring.  Note though that in Python class definitions
    are used to define interfaces in addition to classes.
    If a class definition appears to be an interface definition tag it as
    an interface definition for Doxygen.  Otherwise tag it as a class
    definition for Doxygen.
    """
    lineNum = node.lineno - 1
    # Push either 'interface' or 'class' onto our containing nodes
    # hierarchy so we can keep track of context. This will let us tell
    # if a function is a method or an interface method definition or if
    # a class is fully contained within another class.
    containingNodes = kwargs.get('containingNodes') or []
    if not self.options.object_respect:
        # Remove object class of the inherited class list to avoid that all
        # new-style class inherits from object in the hierarchy class
        line = self.lines[lineNum]
        match = AstWalker.__classRE.match(line)
        if match:
            if match.group(2) == 'object':
                # Splice the 'object' base name out of the class line.
                self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
    match = AstWalker.__interfaceRE.match(self.lines[lineNum])
    if match:
        if self.options.debug:
            stderr.write("# Interface {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'interface'))
    else:
        if self.options.debug:
            stderr.write("# Class {0.name}{1}".format(node, linesep))
        containingNodes.append((node.name, 'class'))
    if self.options.topLevelNamespace:
        fullPathNamespace = self._getFullPathName(containingNodes)
        contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
        tail = '@namespace {0}'.format(contextTag)
    else:
        tail = ''
    # Class definitions have one Doxygen-significant special case:
    # interface definitions.  (match still holds the interface match
    # from above, if any.)
    if match:
        contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                     linesep,
                                                     match.group(1))
    else:
        contextTag = tail
    contextTag = self._processMembers(node, contextTag)
    if get_docstring(node):
        self._processDocstring(node, contextTag,
                               containingNodes=containingNodes)
    # Visit any contained nodes.
    self.generic_visit(node, containingNodes=containingNodes)
    # Remove the item we pushed onto the containing nodes hierarchy.
    containingNodes.pop()
def parseLines(self):
    """Build an AST for the source and rewrite it with Doxygen tags."""
    tree = parse(''.join(self.lines), self.inFilename)
    # Walk every node, annotating self.lines along the way.
    self.visit(tree)
def getLines(self):
    """Return the fully processed source as a single string."""
    stripped = [line.rstrip() for line in self.lines]
    return linesep.join(stripped)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit_Call
|
python
|
def visit_Call(self, node, **kwargs):
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
|
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L658-L678
|
[
"def generic_visit(self, node, **kwargs):\n \"\"\"\n Extract useful information from relevant nodes including docstrings.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n for field, value in iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, AST):\n self.visit(item, containingNodes=kwargs['containingNodes'])\n elif isinstance(value, AST):\n self.visit(value, containingNodes=kwargs['containingNodes'])\n"
] |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.

    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use.  They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    # Leading whitespace (indentation) of a non-blank line.
    __indentRE = regexpCompile(r'^(\s*)\S')
    # Start-of-line comment marker (multiline mode).
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    # A line that is entirely whitespace.
    __blanklineRE = regexpCompile(r'^\s*$')
    # Opening triple-quote of a docstring, with optional prefix letters.
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    # A complete single-line docstring.
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    # zope-style implements()/provides() interface markers.
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    # A class definition with a single base class.
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    # A class definition that inherits from (zope.interface.)Interface.
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    # A zope-style Attribute("...") assignment.
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # Map of Doxygen tags to the one-line docstring headers they replace.
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    # Start of an Args/Kwargs/Attributes docstring section.
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    # A "name (type): description" argument line within such a section.
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    # Start of a Returns/Yields docstring section.
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    # Start of a Raises/Exceptions/See Also docstring section.
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    # A comma-separated (possibly and/&-joined) list of identifiers.
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    # Start of an Examples/Doctests docstring section.
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    # An arbitrary "Some Heading:" docstring section.
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)
def __init__(self, lines, options, inFilename):
    """Set up walker state prior to traversing the AST."""
    self.lines = lines            # source lines, mutated in place
    self.options = options        # parsed command-line options
    self.inFilename = inFilename  # name of the file being processed
    self.docLines = []            # scratch buffer for the current docstring
@staticmethod
def _stripOutAnds(inStr):
    """Return the string with ' and ' and ' & ' connectors removed."""
    assert isinstance(inStr, str)
    for conjunction in (' and ', ' & '):
        inStr = inStr.replace(conjunction, ' ')
    return inStr
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
    """Prepend an @endcode marker to the line if a code block is open."""
    assert isinstance(line, str)
    if not inCodeBlock:
        return line, inCodeBlock
    closed = '# @endcode{0}{1}'.format(linesep, line.rstrip())
    return closed, False
@coroutine
def _checkIfCode(self, inCodeBlockObj):
    """
    Checks whether or not a given line appears to be Python code.

    Lines arrive via send as (line, lines, lineNum) triples.  When a
    code/non-code transition is detected, @code / @endcode markers are
    spliced into the lines list in place.  inCodeBlockObj is a one-item
    list used as a mutable flag shared with the caller.
    """
    while True:
        line, lines, lineNum = (yield)
        testLineNum = 1
        currentLineNum = 0
        testLine = line.strip()
        lineOfCode = None
        while lineOfCode is None:
            match = AstWalker.__errorLineRE.match(testLine)
            if not testLine or testLine == '...' or match:
                # These are ambiguous.
                line, lines, lineNum = (yield)
                testLine = line.strip()
                #testLineNum = 1
            elif testLine.startswith('>>>'):
                # This is definitely code.
                lineOfCode = True
            else:
                try:
                    # compile_command returns a code object once the
                    # accumulated text forms a complete statement.
                    compLine = compile_command(testLine)
                    if compLine and lines[currentLineNum].strip().startswith('#'):
                        lineOfCode = True
                    else:
                        line, lines, lineNum = (yield)
                        line = line.strip()
                        if line.startswith('>>>'):
                            # Definitely code, don't compile further.
                            lineOfCode = True
                        else:
                            testLine += linesep + line
                            testLine = testLine.strip()
                            testLineNum += 1
                except (SyntaxError, RuntimeError):
                    # This is definitely not code.
                    lineOfCode = False
                except Exception:
                    # Other errors are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
        # Index of the line (relative to this batch) where the verdict
        # applies; testLineNum counts how many lines we consumed.
        currentLineNum = lineNum - testLineNum
        if not inCodeBlockObj[0] and lineOfCode:
            inCodeBlockObj[0] = True
            lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                lines[currentLineNum],
                linesep
            )
        elif inCodeBlockObj[0] and lineOfCode is False:
            # None is ambiguous, so strict checking
            # against False is necessary.
            inCodeBlockObj[0] = False
            lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                lines[currentLineNum],
                linesep
            )
@coroutine
def __alterDocstring(self, tail='', writer=None):
    """
    Runs eternally, processing docstring lines.

    Parses docstring lines as they get fed in via send, applies appropriate
    Doxygen tags, and passes them along in batches for writing.

    Args:
        tail: Extra text (e.g. @namespace info) appended to the last line.
        writer: The __writeDocstring coroutine that receives the batches.
    """
    assert isinstance(tail, str) and isinstance(writer, GeneratorType)

    lines = []
    timeToSend = False
    inCodeBlock = False
    # One-item list: a mutable code-block flag shared with _checkIfCode.
    inCodeBlockObj = [False]
    inSection = False
    prefix = ''
    firstLineNum = -1
    sectionHeadingIndent = 0
    codeChecker = self._checkIfCode(inCodeBlockObj)
    while True:
        lineNum, line = (yield)
        if firstLineNum < 0:
            firstLineNum = lineNum
        # Don't bother doing extra work if it's a sentinel.
        if line is not None:
            # Also limit work if we're not parsing the docstring.
            if self.options.autobrief:
                for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                    match = tagRE.search(line)
                    if match:
                        # We've got a simple one-line Doxygen command
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        writer.send((firstLineNum, lineNum - 1, lines))
                        lines = []
                        firstLineNum = lineNum
                        line = line.replace(match.group(1), doxyTag)
                        timeToSend = True

                if inSection:
                    # The last line belonged to a section.
                    # Does this one too? (Ignoring empty lines.)
                    match = AstWalker.__blanklineRE.match(line)
                    if not match:
                        indent = len(line.expandtabs(self.options.tablength)) - \
                            len(line.expandtabs(self.options.tablength).lstrip())
                        if indent <= sectionHeadingIndent:
                            inSection = False
                        else:
                            if lines[-1] == '#':
                                # If the last line was empty, but we're still in a section
                                # then we need to start a new paragraph.
                                lines[-1] = '# @par'

                match = AstWalker.__returnsStartRE.match(line)
                if match:
                    # We've got a "returns" section
                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                        lines[-1], inCodeBlock)
                    inCodeBlockObj[0] = inCodeBlock
                    line = line.replace(match.group(0), ' @return\t').rstrip()
                    prefix = '@return\t'
                else:
                    match = AstWalker.__argsStartRE.match(line)
                    if match:
                        # We've got an "arguments" section
                        line = line.replace(match.group(0), '').rstrip()
                        if 'attr' in match.group(0).lower():
                            prefix = '@property\t'
                        else:
                            prefix = '@param\t'
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        lines.append('#' + line)
                        continue
                    else:
                        match = AstWalker.__argsRE.match(line)
                        if match and not inCodeBlock:
                            # We've got something that looks like an item /
                            # description pair.
                            if 'property' in prefix:
                                line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                    prefix, match.groupdict(), linesep)
                            else:
                                line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                    prefix, match.groupdict())
                        else:
                            match = AstWalker.__raisesStartRE.match(line)
                            if match:
                                line = line.replace(match.group(0), '').rstrip()
                                if 'see' in match.group(1).lower():
                                    # We've got a "see also" section
                                    prefix = '@sa\t'
                                else:
                                    # We've got an "exceptions" section
                                    prefix = '@exception\t'
                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                    lines[-1], inCodeBlock)
                                inCodeBlockObj[0] = inCodeBlock
                                lines.append('#' + line)
                                continue
                            else:
                                match = AstWalker.__listRE.match(line)
                                if match and not inCodeBlock:
                                    # We've got a list of something or another
                                    itemList = []
                                    for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                            match.group(0))):
                                        itemList.append('# {0}\t{1}{2}'.format(
                                            prefix, itemMatch, linesep))
                                    line = ''.join(itemList)[1:]
                                else:
                                    match = AstWalker.__examplesStartRE.match(line)
                                    if match and lines[-1].strip() == '#' \
                                            and self.options.autocode:
                                        # We've got an "example" section
                                        inCodeBlock = True
                                        inCodeBlockObj[0] = True
                                        line = line.replace(match.group(0),
                                                            ' @b Examples{0}# @code'.format(linesep))
                                    else:
                                        match = AstWalker.__sectionStartRE.match(line)
                                        if match:
                                            # We've got an arbitrary section
                                            prefix = ''
                                            inSection = True
                                            # What's the indentation of the section heading?
                                            sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                - len(line.expandtabs(self.options.tablength).lstrip())
                                            line = line.replace(
                                                match.group(0),
                                                ' @par {0}'.format(match.group(1))
                                            )
                                            if lines[-1] == '# @par':
                                                lines[-1] = '#'
                                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                lines[-1], inCodeBlock)
                                            inCodeBlockObj[0] = inCodeBlock
                                            lines.append('#' + line)
                                            continue
                                        elif prefix:
                                            match = AstWalker.__singleListItemRE.match(line)
                                            if match and not inCodeBlock:
                                                # Probably a single list item
                                                line = ' {0}\t{1}'.format(
                                                    prefix, match.group(0))
                                            elif self.options.autocode:
                                                codeChecker.send(
                                                    (
                                                        line, lines,
                                                        lineNum - firstLineNum
                                                    )
                                                )
                                                inCodeBlock = inCodeBlockObj[0]
            else:
                if self.options.autocode:
                    codeChecker.send(
                        (
                            line, lines,
                            lineNum - firstLineNum
                        )
                    )
                    inCodeBlock = inCodeBlockObj[0]

            # If we were passed a tail, append it to the docstring.
            # Note that this means that we need a docstring for this
            # item to get documented.
            if tail and lineNum == len(self.docLines) - 1:
                line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)

            # Add comment marker for every line.
            line = '#{0}'.format(line.rstrip())
            # Ensure the first line has the Doxygen double comment.
            if lineNum == 0:
                line = '#' + line

            lines.append(line.replace(' ' + linesep, linesep))
        else:
            # If we get our sentinel value, send out what we've got.
            timeToSend = True

        if timeToSend:
            lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                           inCodeBlock)
            inCodeBlockObj[0] = inCodeBlock
            writer.send((firstLineNum, lineNum, lines))
            lines = []
            firstLineNum = -1
            timeToSend = False
@coroutine
def __writeDocstring(self):
    """
    Consume batches of processed docstring lines forever.

    Each item sent in is a (firstLineNum, lastLineNum, lines) triple;
    the replacement lines are padded with blanks to the original span
    length and then spliced over that span of self.docLines.
    """
    while True:
        start, stop, replacement = (yield)
        # Pad so the replacement covers exactly as many lines as the span.
        span = stop - start + 1
        if len(replacement) < span:
            replacement.extend([''] * (span - len(replacement)))
        # Splice the modified lines over the original docstring lines.
        self.docLines[start: stop + 1] = replacement
def _processDocstring(self, node, tail='', **kwargs):
    """
    Handles a docstring for functions, classes, and modules.

    Basically just figures out the bounds of the docstring and sends it
    off to the parser to do the actual work.

    Args:
        node: The AST node whose docstring is being processed.
        tail: Extra text (e.g. a @namespace tag) appended to the docstring.
        kwargs: May carry 'containingNodes', the enclosing-scope hierarchy.
    """
    typeName = type(node).__name__
    # Modules don't have lineno defined, but it's always 0 for them.
    curLineNum = startLineNum = 0
    if typeName != 'Module':
        startLineNum = curLineNum = node.lineno - 1
    # Figure out where both our enclosing object and our docstring start.
    line = ''
    while curLineNum < len(self.lines):
        line = self.lines[curLineNum]
        match = AstWalker.__docstrMarkerRE.match(line)
        if match:
            break
        curLineNum += 1
    docstringStart = curLineNum
    # Figure out where our docstring ends.
    if not AstWalker.__docstrOneLineRE.match(line):
        # Skip for the special case of a single-line docstring.
        curLineNum += 1
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            # match.group(2) is the triple-quote marker that opened the
            # docstring; it closes on the line that repeats it.
            if line.find(match.group(2)) >= 0:
                break
            curLineNum += 1
    endLineNum = curLineNum + 1

    # Isolate our enclosing object's declaration.
    defLines = self.lines[startLineNum: docstringStart]
    # Isolate our docstring.
    self.docLines = self.lines[docstringStart: endLineNum]

    # If we have a docstring, extract information from it.
    if self.docLines:
        # Get rid of the docstring delineators.
        self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                          self.docLines[0])
        self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                           self.docLines[-1])
        # Handle special strings within the docstring.
        docstringConverter = self.__alterDocstring(
            tail, self.__writeDocstring())
        for lineInfo in enumerate(self.docLines):
            docstringConverter.send(lineInfo)
        # The (index, None) sentinel flushes any pending batch.
        docstringConverter.send((len(self.docLines) - 1, None))

    # Add a Doxygen @brief tag to any single-line description.
    if self.options.autobrief:
        safetyCounter = 0
        while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
            del self.docLines[0]
            self.docLines.append('')
            safetyCounter += 1
            if safetyCounter >= len(self.docLines):
                # Escape the effectively empty docstring.
                break
        if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                self.docLines[1].strip(whitespace + '#') == '' or
                self.docLines[1].strip(whitespace + '#').startswith('@'))):
            self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
            if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                self.docLines[1] = '#'

    if defLines:
        # Re-indent every docstring comment line to match the declaration.
        match = AstWalker.__indentRE.match(defLines[0])
        indentStr = match and match.group(1) or ''
        self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                         for docLine in self.docLines]

    # Taking away a docstring from an interface method definition sometimes
    # leaves broken code as the docstring may be the only code in it.
    # Here we manually insert a pass statement to rectify this problem.
    if typeName != 'Module':
        if docstringStart < len(self.lines):
            match = AstWalker.__indentRE.match(self.lines[docstringStart])
            indentStr = match and match.group(1) or ''
        else:
            indentStr = ''
        containingNodes = kwargs.get('containingNodes', []) or []
        fullPathNamespace = self._getFullPathName(containingNodes)
        parentType = fullPathNamespace[-2][1]
        if parentType == 'interface' and typeName == 'FunctionDef' \
                or fullPathNamespace[-1][1] == 'interface':
            defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                  linesep, indentStr)
        elif self.options.autobrief and typeName == 'ClassDef':
            # If we're parsing docstrings separate out class attribute
            # definitions to get better Doxygen output.
            for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                if '@property\t' in firstVarLine:
                    break
            lastVarLineNum = len(self.docLines)
            if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                # Walk back from the end to the last @property line.
                while lastVarLineNum > firstVarLineNum:
                    lastVarLineNum -= 1
                    if '@property\t' in self.docLines[lastVarLineNum]:
                        break
                lastVarLineNum += 1
            if firstVarLineNum < len(self.docLines):
                # Find the indentation of the first code line after the
                # docstring so the moved lines line up with the class body.
                indentLineNum = endLineNum
                indentStr = ''
                while not indentStr and indentLineNum < len(self.lines):
                    match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                    indentStr = match and match.group(1) or ''
                    indentLineNum += 1
                varLines = ['{0}{1}'.format(linesep, docLine).replace(
                    linesep, linesep + indentStr)
                    for docLine in self.docLines[
                        firstVarLineNum: lastVarLineNum]]
                defLines.extend(varLines)
                self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]

    # For classes and functions, apply our changes and reverse the
    # order of the declaration and docstring, and for modules just
    # apply our changes.
    if typeName != 'Module':
        self.lines[startLineNum: endLineNum] = self.docLines + defLines
    else:
        self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
    """
    Determine the Doxygen visibility implied by a member's name.

    Dunder-suffixed names (e.g. __init__) stay public.  Otherwise a
    double leading underscore marks the member private and a single
    leading underscore marks it protected, per Python convention.
    """
    assert isinstance(name, str)
    if name.endswith('__'):
        return None
    if name.startswith('__'):
        return 'private'
    if name.startswith('_'):
        return 'protected'
    return None
def _processMembers(self, node, contextTag):
    """
    Append a Doxygen visibility tag to the context tag when needed.

    Looks at the node's name; if Python naming convention marks it
    private or protected, a matching Doxygen directive is appended.
    """
    level = self._checkMemberName(node.name)
    if not level:
        return contextTag
    return '{0}{1}# @{2}'.format(contextTag, linesep, level)
def generic_visit(self, node, **kwargs):
    """
    Extract useful information from relevant nodes including docstrings.

    This is virtually identical to the standard version contained in
    NodeVisitor. It is only overridden because we're tracking extra
    information (the hierarchy of containing nodes) not preserved in
    the original.

    Args:
        node: The AST node whose children should be visited.
        kwargs: May carry 'containingNodes', the enclosing-scope list
            threaded through child visits (defaults to an empty list).
    """
    # Use .get with a default so a call without containingNodes — as the
    # base NodeVisitor API permits — doesn't raise KeyError; this matches
    # the convention already used by the overridden visit() method.
    containingNodes = kwargs.get('containingNodes', [])
    for field, value in iter_fields(node):
        if isinstance(value, list):
            for item in value:
                if isinstance(item, AST):
                    self.visit(item, containingNodes=containingNodes)
        elif isinstance(value, AST):
            self.visit(value, containingNodes=containingNodes)
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.

        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables. If an assignment appears
        to be an attribute, it gets labeled as such for Doxygen. If a variable
        name uses Python mangling or is just a bed lump, it is labeled as
        private for Doxygen.
        """
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            # Rewrite the line as a Doxygen @property: group(1) is the
            # indentation, group(2) the attribute name, group(3) the
            # description string pulled out of the Attribute(...) call.
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                                  '{0}# @hideinitializer{2}{4}{2}'.format(
                                      match.group(1),
                                      match.group(2),
                                      linesep,
                                      match.group(3),
                                      self.lines[lineNum].rstrip()
                                  )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                # Underscore-prefixed variables get a @var block with the
                # appropriate Doxygen visibility tag prepended in place.
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                      '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                                          indentStr,
                                          node.targets[0].id,
                                          linesep,
                                          restrictionLevel,
                                          self.lines[lineNum].rstrip()
                                      )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.

        Process the docstring. Note though that in Python Class definitions
        are used to define interfaces in addition to classes.
        If a class definition appears to be an interface definition tag it as an
        interface definition for Doxygen. Otherwise tag it as a class
        definition for Doxygen.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    # Splice 'object' out of the source line entirely.
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        # NOTE: this `match` is reused further down to decide whether the
        # @interface tag must be emitted — keep the assignment order intact.
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        contextTag = self._processMembers(node, contextTag)
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit_FunctionDef
|
python
|
def visit_FunctionDef(self, node, **kwargs):
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
|
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L680-L708
|
[
"def _processDocstring(self, node, tail='', **kwargs):\n \"\"\"\n Handles a docstring for functions, classes, and modules.\n\n Basically just figures out the bounds of the docstring and sends it\n off to the parser to do the actual work.\n \"\"\"\n typeName = type(node).__name__\n # Modules don't have lineno defined, but it's always 0 for them.\n curLineNum = startLineNum = 0\n if typeName != 'Module':\n startLineNum = curLineNum = node.lineno - 1\n # Figure out where both our enclosing object and our docstring start.\n line = ''\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n match = AstWalker.__docstrMarkerRE.match(line)\n if match:\n break\n curLineNum += 1\n docstringStart = curLineNum\n # Figure out where our docstring ends.\n if not AstWalker.__docstrOneLineRE.match(line):\n # Skip for the special case of a single-line docstring.\n curLineNum += 1\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n if line.find(match.group(2)) >= 0:\n break\n curLineNum += 1\n endLineNum = curLineNum + 1\n\n # Isolate our enclosing object's declaration.\n defLines = self.lines[startLineNum: docstringStart]\n # Isolate our docstring.\n self.docLines = self.lines[docstringStart: endLineNum]\n\n # If we have a docstring, extract information from it.\n if self.docLines:\n # Get rid of the docstring delineators.\n self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[0])\n self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[-1])\n # Handle special strings within the docstring.\n docstringConverter = self.__alterDocstring(\n tail, self.__writeDocstring())\n for lineInfo in enumerate(self.docLines):\n docstringConverter.send(lineInfo)\n docstringConverter.send((len(self.docLines) - 1, None))\n\n # Add a Doxygen @brief tag to any single-line description.\n if self.options.autobrief:\n safetyCounter = 0\n while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':\n del self.docLines[0]\n 
self.docLines.append('')\n safetyCounter += 1\n if safetyCounter >= len(self.docLines):\n # Escape the effectively empty docstring.\n break\n if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (\n self.docLines[1].strip(whitespace + '#') == '' or\n self.docLines[1].strip(whitespace + '#').startswith('@'))):\n self.docLines[0] = \"## @brief {0}\".format(self.docLines[0].lstrip('#'))\n if len(self.docLines) > 1 and self.docLines[1] == '# @par':\n self.docLines[1] = '#'\n\n if defLines:\n match = AstWalker.__indentRE.match(defLines[0])\n indentStr = match and match.group(1) or ''\n self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)\n for docLine in self.docLines]\n\n # Taking away a docstring from an interface method definition sometimes\n # leaves broken code as the docstring may be the only code in it.\n # Here we manually insert a pass statement to rectify this problem.\n if typeName != 'Module':\n if docstringStart < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[docstringStart])\n indentStr = match and match.group(1) or ''\n else:\n indentStr = ''\n containingNodes = kwargs.get('containingNodes', []) or []\n fullPathNamespace = self._getFullPathName(containingNodes)\n parentType = fullPathNamespace[-2][1]\n if parentType == 'interface' and typeName == 'FunctionDef' \\\n or fullPathNamespace[-1][1] == 'interface':\n defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],\n linesep, indentStr)\n elif self.options.autobrief and typeName == 'ClassDef':\n # If we're parsing docstrings separate out class attribute\n # definitions to get better Doxygen output.\n for firstVarLineNum, firstVarLine in enumerate(self.docLines):\n if '@property\\t' in firstVarLine:\n break\n lastVarLineNum = len(self.docLines)\n if lastVarLineNum > 0 and '@property\\t' in firstVarLine:\n while lastVarLineNum > firstVarLineNum:\n lastVarLineNum -= 1\n if '@property\\t' in self.docLines[lastVarLineNum]:\n break\n lastVarLineNum += 1\n if firstVarLineNum 
< len(self.docLines):\n indentLineNum = endLineNum\n indentStr = ''\n while not indentStr and indentLineNum < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[indentLineNum])\n indentStr = match and match.group(1) or ''\n indentLineNum += 1\n varLines = ['{0}{1}'.format(linesep, docLine).replace(\n linesep, linesep + indentStr)\n for docLine in self.docLines[\n firstVarLineNum: lastVarLineNum]]\n defLines.extend(varLines)\n self.docLines[firstVarLineNum: lastVarLineNum] = []\n # After the property shuffling we will need to relocate\n # any existing namespace information.\n namespaceLoc = defLines[-1].find('\\n# @namespace')\n if namespaceLoc >= 0:\n self.docLines[-1] += defLines[-1][namespaceLoc:]\n defLines[-1] = defLines[-1][:namespaceLoc]\n\n # For classes and functions, apply our changes and reverse the\n # order of the declaration and docstring, and for modules just\n # apply our changes.\n if typeName != 'Module':\n self.lines[startLineNum: endLineNum] = self.docLines + defLines\n else:\n self.lines[startLineNum: endLineNum] = defLines + self.docLines\n",
"def _processMembers(self, node, contextTag):\n \"\"\"\n Mark up members if they should be private.\n\n If the name indicates it should be private or protected, apply\n the appropriate Doxygen tags.\n \"\"\"\n restrictionLevel = self._checkMemberName(node.name)\n if restrictionLevel:\n workTag = '{0}{1}# @{2}'.format(contextTag,\n linesep,\n restrictionLevel)\n else:\n workTag = contextTag\n return workTag\n",
"def generic_visit(self, node, **kwargs):\n \"\"\"\n Extract useful information from relevant nodes including docstrings.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n for field, value in iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, AST):\n self.visit(item, containingNodes=kwargs['containingNodes'])\n elif isinstance(value, AST):\n self.visit(value, containingNodes=kwargs['containingNodes'])\n",
"def _getFullPathName(self, containingNodes):\n \"\"\"\n Returns the full node hierarchy rooted at module name.\n\n The list representing the full path through containing nodes\n (starting with the module itself) is returned.\n \"\"\"\n assert isinstance(containingNodes, list)\n return [(self.options.fullPathNamespace, 'module')] + containingNodes\n"
] |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.

    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use. They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    # -- Source structure helpers --------------------------------------
    __indentRE = regexpCompile(r'^(\s*)\S')
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    __blanklineRE = regexpCompile(r'^\s*$')
    # Start of a (possibly raw/bytes/unicode-prefixed) triple-quoted string.
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    # -- zope.interface declarations -----------------------------------
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # -- Docstring section headings, mapped to Doxygen tags ------------
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)
    def __init__(self, lines, options, inFilename):
        """
        Initialize a few class variables in preparation for our walk.

        Args:
            lines:      List of source lines; rewritten in place as
                        Doxygen tags are inserted during the walk.
            options:    Parsed command-line options (autobrief, autocode,
                        debug, topLevelNamespace, object_respect, ...).
            inFilename: Name of the file being processed.
        """
        self.lines = lines
        self.options = options
        self.inFilename = inFilename
        # Working buffer holding the docstring currently being rewritten.
        self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.

        Coroutine protocol: receives (line, lines, lineNum) tuples via
        send. Lines are fed to codeop.compile_command — accumulating
        continuation lines across multiple sends when necessary — until a
        definite code / not-code verdict is reached. On a transition the
        shared inCodeBlockObj[0] flag is flipped and a Doxygen
        @code/@endcode marker is appended to the line where the block
        started or ended.
        """
        while True:
            line, lines, lineNum = (yield)
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            lineOfCode = None
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        # compile_command returns a code object when the
                        # accumulated snippet forms a complete statement,
                        # or None when more input is needed.
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
            # Back up to the line where the code/non-code run began.
            currentLineNum = lineNum - testLineNum
            if not inCodeBlockObj[0] and lineOfCode:
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
    @coroutine
    def __alterDocstring(self, tail='', writer=None):
        """
        Runs eternally, processing docstring lines.

        Coroutine protocol: receives (lineNum, line) pairs via send,
        applies appropriate Doxygen tags (sections, argument lists,
        returns, exceptions, code examples), and passes the rewritten
        lines along in batches to the writer coroutine. A line of None
        acts as a sentinel that flushes the remaining batch. The optional
        tail (e.g. a @namespace tag) is appended after the last docstring
        line.
        """
        assert isinstance(tail, str) and isinstance(writer, GeneratorType)
        lines = []
        timeToSend = False
        inCodeBlock = False
        # Shared one-element list so _checkIfCode can flip the flag too.
        inCodeBlockObj = [False]
        inSection = False
        prefix = ''
        firstLineNum = -1
        sectionHeadingIndent = 0
        codeChecker = self._checkIfCode(inCodeBlockObj)
        while True:
            lineNum, line = (yield)
            if firstLineNum < 0:
                firstLineNum = lineNum
            # Don't bother doing extra work if it's a sentinel.
            if line is not None:
                # Also limit work if we're not parsing the docstring.
                if self.options.autobrief:
                    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                        match = tagRE.search(line)
                        if match:
                            # We've got a simple one-line Doxygen command
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            writer.send((firstLineNum, lineNum - 1, lines))
                            lines = []
                            firstLineNum = lineNum
                            line = line.replace(match.group(1), doxyTag)
                            timeToSend = True
                    if inSection:
                        # The last line belonged to a section.
                        # Does this one too? (Ignoring empty lines.)
                        match = AstWalker.__blanklineRE.match(line)
                        if not match:
                            indent = len(line.expandtabs(self.options.tablength)) - \
                                len(line.expandtabs(self.options.tablength).lstrip())
                            if indent <= sectionHeadingIndent:
                                inSection = False
                            else:
                                if lines[-1] == '#':
                                    # If the last line was empty, but we're still in a section
                                    # then we need to start a new paragraph.
                                    lines[-1] = '# @par'
                    match = AstWalker.__returnsStartRE.match(line)
                    if match:
                        # We've got a "returns" section
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        line = line.replace(match.group(0), ' @return\t').rstrip()
                        prefix = '@return\t'
                    else:
                        match = AstWalker.__argsStartRE.match(line)
                        if match:
                            # We've got an "arguments" section
                            line = line.replace(match.group(0), '').rstrip()
                            if 'attr' in match.group(0).lower():
                                prefix = '@property\t'
                            else:
                                prefix = '@param\t'
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            lines.append('#' + line)
                            continue
                        else:
                            match = AstWalker.__argsRE.match(line)
                            if match and not inCodeBlock:
                                # We've got something that looks like an item /
                                # description pair.
                                if 'property' in prefix:
                                    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                        prefix, match.groupdict(), linesep)
                                else:
                                    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                        prefix, match.groupdict())
                            else:
                                match = AstWalker.__raisesStartRE.match(line)
                                if match:
                                    line = line.replace(match.group(0), '').rstrip()
                                    if 'see' in match.group(1).lower():
                                        # We've got a "see also" section
                                        prefix = '@sa\t'
                                    else:
                                        # We've got an "exceptions" section
                                        prefix = '@exception\t'
                                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                        lines[-1], inCodeBlock)
                                    inCodeBlockObj[0] = inCodeBlock
                                    lines.append('#' + line)
                                    continue
                                else:
                                    match = AstWalker.__listRE.match(line)
                                    if match and not inCodeBlock:
                                        # We've got a list of something or another
                                        itemList = []
                                        for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                                match.group(0))):
                                            itemList.append('# {0}\t{1}{2}'.format(
                                                prefix, itemMatch, linesep))
                                        line = ''.join(itemList)[1:]
                                    else:
                                        match = AstWalker.__examplesStartRE.match(line)
                                        if match and lines[-1].strip() == '#' \
                                           and self.options.autocode:
                                            # We've got an "example" section
                                            inCodeBlock = True
                                            inCodeBlockObj[0] = True
                                            line = line.replace(match.group(0),
                                                                ' @b Examples{0}# @code'.format(linesep))
                                        else:
                                            match = AstWalker.__sectionStartRE.match(line)
                                            if match:
                                                # We've got an arbitrary section
                                                prefix = ''
                                                inSection = True
                                                # What's the indentation of the section heading?
                                                sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                    - len(line.expandtabs(self.options.tablength).lstrip())
                                                line = line.replace(
                                                    match.group(0),
                                                    ' @par {0}'.format(match.group(1))
                                                )
                                                if lines[-1] == '# @par':
                                                    lines[-1] = '#'
                                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                    lines[-1], inCodeBlock)
                                                inCodeBlockObj[0] = inCodeBlock
                                                lines.append('#' + line)
                                                continue
                                            elif prefix:
                                                match = AstWalker.__singleListItemRE.match(line)
                                                if match and not inCodeBlock:
                                                    # Probably a single list item
                                                    line = ' {0}\t{1}'.format(
                                                        prefix, match.group(0))
                                                elif self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                                            else:
                                                if self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                # If we were passed a tail, append it to the docstring.
                # Note that this means that we need a docstring for this
                # item to get documented.
                if tail and lineNum == len(self.docLines) - 1:
                    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
                # Add comment marker for every line.
                line = '#{0}'.format(line.rstrip())
                # Ensure the first line has the Doxygen double comment.
                if lineNum == 0:
                    line = '#' + line
                lines.append(line.replace(' ' + linesep, linesep))
            else:
                # If we get our sentinel value, send out what we've got.
                timeToSend = True
            if timeToSend:
                lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                               inCodeBlock)
                inCodeBlockObj[0] = inCodeBlock
                writer.send((firstLineNum, lineNum, lines))
                lines = []
                firstLineNum = -1
                timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
    def _processDocstring(self, node, tail='', **kwargs):
        """
        Handles a docstring for functions, classes, and modules.

        Basically just figures out the bounds of the docstring and sends it
        off to the parser to do the actual work. The optional tail (e.g. a
        @namespace tag) is appended to the rewritten docstring; kwargs may
        carry the containingNodes hierarchy for context.
        """
        typeName = type(node).__name__
        # Modules don't have lineno defined, but it's always 0 for them.
        curLineNum = startLineNum = 0
        if typeName != 'Module':
            startLineNum = curLineNum = node.lineno - 1
        # Figure out where both our enclosing object and our docstring start.
        line = ''
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            match = AstWalker.__docstrMarkerRE.match(line)
            if match:
                break
            curLineNum += 1
        docstringStart = curLineNum
        # Figure out where our docstring ends.
        if not AstWalker.__docstrOneLineRE.match(line):
            # Skip for the special case of a single-line docstring.
            curLineNum += 1
            while curLineNum < len(self.lines):
                line = self.lines[curLineNum]
                # match.group(2) is the opening triple-quote delimiter.
                if line.find(match.group(2)) >= 0:
                    break
                curLineNum += 1
        endLineNum = curLineNum + 1
        # Isolate our enclosing object's declaration.
        defLines = self.lines[startLineNum: docstringStart]
        # Isolate our docstring.
        self.docLines = self.lines[docstringStart: endLineNum]
        # If we have a docstring, extract information from it.
        if self.docLines:
            # Get rid of the docstring delineators.
            self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                              self.docLines[0])
            self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                               self.docLines[-1])
            # Handle special strings within the docstring.
            docstringConverter = self.__alterDocstring(
                tail, self.__writeDocstring())
            for lineInfo in enumerate(self.docLines):
                docstringConverter.send(lineInfo)
            # A None line is the sentinel that flushes the converter.
            docstringConverter.send((len(self.docLines) - 1, None))
            # Add a Doxygen @brief tag to any single-line description.
            if self.options.autobrief:
                safetyCounter = 0
                while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                    del self.docLines[0]
                    self.docLines.append('')
                    safetyCounter += 1
                    if safetyCounter >= len(self.docLines):
                        # Escape the effectively empty docstring.
                        break
                if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                        self.docLines[1].strip(whitespace + '#') == '' or
                        self.docLines[1].strip(whitespace + '#').startswith('@'))):
                    self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                    if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                        self.docLines[1] = '#'
        if defLines:
            # Re-indent the rewritten docstring to the declaration's level.
            match = AstWalker.__indentRE.match(defLines[0])
            indentStr = match and match.group(1) or ''
            self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                             for docLine in self.docLines]
        # Taking away a docstring from an interface method definition sometimes
        # leaves broken code as the docstring may be the only code in it.
        # Here we manually insert a pass statement to rectify this problem.
        if typeName != 'Module':
            if docstringStart < len(self.lines):
                match = AstWalker.__indentRE.match(self.lines[docstringStart])
                indentStr = match and match.group(1) or ''
            else:
                indentStr = ''
            containingNodes = kwargs.get('containingNodes', []) or []
            fullPathNamespace = self._getFullPathName(containingNodes)
            parentType = fullPathNamespace[-2][1]
            if parentType == 'interface' and typeName == 'FunctionDef' \
               or fullPathNamespace[-1][1] == 'interface':
                defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                      linesep, indentStr)
            elif self.options.autobrief and typeName == 'ClassDef':
                # If we're parsing docstrings separate out class attribute
                # definitions to get better Doxygen output.
                for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                    if '@property\t' in firstVarLine:
                        break
                lastVarLineNum = len(self.docLines)
                if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                    while lastVarLineNum > firstVarLineNum:
                        lastVarLineNum -= 1
                        if '@property\t' in self.docLines[lastVarLineNum]:
                            break
                    lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
                # After the property shuffling we will need to relocate
                # any existing namespace information.
                namespaceLoc = defLines[-1].find('\n# @namespace')
                if namespaceLoc >= 0:
                    self.docLines[-1] += defLines[-1][namespaceLoc:]
                    defLines[-1] = defLines[-1][:namespaceLoc]
        # For classes and functions, apply our changes and reverse the
        # order of the declaration and docstring, and for modules just
        # apply our changes.
        if typeName != 'Module':
            self.lines[startLineNum: endLineNum] = self.docLines + defLines
        else:
            self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.

        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables.  If an assignment appears
        to be an attribute (a zope-style ``Attribute(...)`` call), it gets
        labeled as such for Doxygen.  If a variable name uses Python
        mangling or a single leading underscore, it is labeled private or
        protected for Doxygen.
        """
        # AST line numbers are 1-based; self.lines is 0-based.
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            # Rewrite the line as a Doxygen @property with its description,
            # keeping the original line (rstripped) after the tags.
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                                  '{0}# @hideinitializer{2}{4}{2}'.format(
                                      match.group(1),
                                      match.group(2),
                                      linesep,
                                      match.group(3),
                                      self.lines[lineNum].rstrip()
                                  )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                # Tag mangled / underscore-prefixed variables appropriately.
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                      '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                                          indentStr,
                                          node.targets[0].id,
                                          linesep,
                                          restrictionLevel,
                                          self.lines[lineNum].rstrip()
                                      )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.

        Process the docstring.  Note that in Python class definitions are
        also used to define zope-style interfaces.  A class that appears to
        be an interface definition is tagged as @interface for Doxygen;
        otherwise it is treated as a regular class.  The class name is
        pushed onto the containment stack so nested definitions know their
        context.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        # Private / protected class names also get an access-level tag.
        contextTag = self._processMembers(node, contextTag)
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
def parseLines(self):
"""Form an AST for the code and produce a new version of the source."""
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.visit_ClassDef
|
python
|
def visit_ClassDef(self, node, **kwargs):
lineNum = node.lineno - 1
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is a method or an interface method definition or if
# a class is fully contained within another class.
containingNodes = kwargs.get('containingNodes') or []
if not self.options.object_respect:
# Remove object class of the inherited class list to avoid that all
# new-style class inherits from object in the hierarchy class
line = self.lines[lineNum]
match = AstWalker.__classRE.match(line)
if match:
if match.group(2) == 'object':
self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
match = AstWalker.__interfaceRE.match(self.lines[lineNum])
if match:
if self.options.debug:
stderr.write("# Interface {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'interface'))
else:
if self.options.debug:
stderr.write("# Class {0.name}{1}".format(node, linesep))
containingNodes.append((node.name, 'class'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
# Class definitions have one Doxygen-significant special case:
# interface definitions.
if match:
contextTag = '{0}{1}# @interface {2}'.format(tail,
linesep,
match.group(1))
else:
contextTag = tail
contextTag = self._processMembers(node, contextTag)
if get_docstring(node):
self._processDocstring(node, contextTag,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
|
Handles class definitions within code.
Process the docstring. Note though that in Python Class definitions
are used to define interfaces in addition to classes.
If a class definition appears to be an interface definition tag it as an
interface definition for Doxygen. Otherwise tag it as a class
definition for Doxygen.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L710-L766
|
[
"def _processDocstring(self, node, tail='', **kwargs):\n \"\"\"\n Handles a docstring for functions, classes, and modules.\n\n Basically just figures out the bounds of the docstring and sends it\n off to the parser to do the actual work.\n \"\"\"\n typeName = type(node).__name__\n # Modules don't have lineno defined, but it's always 0 for them.\n curLineNum = startLineNum = 0\n if typeName != 'Module':\n startLineNum = curLineNum = node.lineno - 1\n # Figure out where both our enclosing object and our docstring start.\n line = ''\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n match = AstWalker.__docstrMarkerRE.match(line)\n if match:\n break\n curLineNum += 1\n docstringStart = curLineNum\n # Figure out where our docstring ends.\n if not AstWalker.__docstrOneLineRE.match(line):\n # Skip for the special case of a single-line docstring.\n curLineNum += 1\n while curLineNum < len(self.lines):\n line = self.lines[curLineNum]\n if line.find(match.group(2)) >= 0:\n break\n curLineNum += 1\n endLineNum = curLineNum + 1\n\n # Isolate our enclosing object's declaration.\n defLines = self.lines[startLineNum: docstringStart]\n # Isolate our docstring.\n self.docLines = self.lines[docstringStart: endLineNum]\n\n # If we have a docstring, extract information from it.\n if self.docLines:\n # Get rid of the docstring delineators.\n self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[0])\n self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',\n self.docLines[-1])\n # Handle special strings within the docstring.\n docstringConverter = self.__alterDocstring(\n tail, self.__writeDocstring())\n for lineInfo in enumerate(self.docLines):\n docstringConverter.send(lineInfo)\n docstringConverter.send((len(self.docLines) - 1, None))\n\n # Add a Doxygen @brief tag to any single-line description.\n if self.options.autobrief:\n safetyCounter = 0\n while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':\n del self.docLines[0]\n 
self.docLines.append('')\n safetyCounter += 1\n if safetyCounter >= len(self.docLines):\n # Escape the effectively empty docstring.\n break\n if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (\n self.docLines[1].strip(whitespace + '#') == '' or\n self.docLines[1].strip(whitespace + '#').startswith('@'))):\n self.docLines[0] = \"## @brief {0}\".format(self.docLines[0].lstrip('#'))\n if len(self.docLines) > 1 and self.docLines[1] == '# @par':\n self.docLines[1] = '#'\n\n if defLines:\n match = AstWalker.__indentRE.match(defLines[0])\n indentStr = match and match.group(1) or ''\n self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)\n for docLine in self.docLines]\n\n # Taking away a docstring from an interface method definition sometimes\n # leaves broken code as the docstring may be the only code in it.\n # Here we manually insert a pass statement to rectify this problem.\n if typeName != 'Module':\n if docstringStart < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[docstringStart])\n indentStr = match and match.group(1) or ''\n else:\n indentStr = ''\n containingNodes = kwargs.get('containingNodes', []) or []\n fullPathNamespace = self._getFullPathName(containingNodes)\n parentType = fullPathNamespace[-2][1]\n if parentType == 'interface' and typeName == 'FunctionDef' \\\n or fullPathNamespace[-1][1] == 'interface':\n defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],\n linesep, indentStr)\n elif self.options.autobrief and typeName == 'ClassDef':\n # If we're parsing docstrings separate out class attribute\n # definitions to get better Doxygen output.\n for firstVarLineNum, firstVarLine in enumerate(self.docLines):\n if '@property\\t' in firstVarLine:\n break\n lastVarLineNum = len(self.docLines)\n if lastVarLineNum > 0 and '@property\\t' in firstVarLine:\n while lastVarLineNum > firstVarLineNum:\n lastVarLineNum -= 1\n if '@property\\t' in self.docLines[lastVarLineNum]:\n break\n lastVarLineNum += 1\n if firstVarLineNum 
< len(self.docLines):\n indentLineNum = endLineNum\n indentStr = ''\n while not indentStr and indentLineNum < len(self.lines):\n match = AstWalker.__indentRE.match(self.lines[indentLineNum])\n indentStr = match and match.group(1) or ''\n indentLineNum += 1\n varLines = ['{0}{1}'.format(linesep, docLine).replace(\n linesep, linesep + indentStr)\n for docLine in self.docLines[\n firstVarLineNum: lastVarLineNum]]\n defLines.extend(varLines)\n self.docLines[firstVarLineNum: lastVarLineNum] = []\n # After the property shuffling we will need to relocate\n # any existing namespace information.\n namespaceLoc = defLines[-1].find('\\n# @namespace')\n if namespaceLoc >= 0:\n self.docLines[-1] += defLines[-1][namespaceLoc:]\n defLines[-1] = defLines[-1][:namespaceLoc]\n\n # For classes and functions, apply our changes and reverse the\n # order of the declaration and docstring, and for modules just\n # apply our changes.\n if typeName != 'Module':\n self.lines[startLineNum: endLineNum] = self.docLines + defLines\n else:\n self.lines[startLineNum: endLineNum] = defLines + self.docLines\n",
"def _processMembers(self, node, contextTag):\n \"\"\"\n Mark up members if they should be private.\n\n If the name indicates it should be private or protected, apply\n the appropriate Doxygen tags.\n \"\"\"\n restrictionLevel = self._checkMemberName(node.name)\n if restrictionLevel:\n workTag = '{0}{1}# @{2}'.format(contextTag,\n linesep,\n restrictionLevel)\n else:\n workTag = contextTag\n return workTag\n",
"def generic_visit(self, node, **kwargs):\n \"\"\"\n Extract useful information from relevant nodes including docstrings.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n for field, value in iter_fields(node):\n if isinstance(value, list):\n for item in value:\n if isinstance(item, AST):\n self.visit(item, containingNodes=kwargs['containingNodes'])\n elif isinstance(value, AST):\n self.visit(value, containingNodes=kwargs['containingNodes'])\n",
"def _getFullPathName(self, containingNodes):\n \"\"\"\n Returns the full node hierarchy rooted at module name.\n\n The list representing the full path through containing nodes\n (starting with the module itself) is returned.\n \"\"\"\n assert isinstance(containingNodes, list)\n return [(self.options.fullPathNamespace, 'module')] + containingNodes\n"
] |
class AstWalker(NodeVisitor):
"""
A walker that'll recursively progress through an AST.
Given an abstract syntax tree for Python code, walk through all the
nodes looking for significant types (for our purposes we only care
about module starts, class definitions, function definitions, variable
assignments, and function calls, as all the information we want to pass
to Doxygen is found within these constructs). If the autobrief option
is set, it further attempts to parse docstrings to create appropriate
Doxygen tags.
"""
# We have a number of regular expressions that we use. They don't
# vary across instances and so are compiled directly in the class
# definition.
__indentRE = regexpCompile(r'^(\s*)\S')
__newlineRE = regexpCompile(r'^#', MULTILINE)
__blanklineRE = regexpCompile(r'^\s*$')
__docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
__docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
__implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
r"(?:module|class|directly)?"
r"(?:Provides|Implements)\(\s*(.+)\s*\)",
IGNORECASE)
__classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
__interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Interface\s*\)\s*:", IGNORECASE)
__attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
r"(?:interface\.)?"
r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
IGNORECASE)
__singleLineREs = {
' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
}
__argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
r"\s*:\s*)$", IGNORECASE)
__argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
r"(?:-|:)+\s+(?P<desc>.+)$")
__returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
__raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
IGNORECASE)
__listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
__singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
__listItemRE = regexpCompile(r'([\w\.]+),?\s*')
__examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
IGNORECASE)
__sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
# The error line should match traceback lines, error exception lines, and
# (due to a weird behavior of codeop) single word lines.
__errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
IGNORECASE)
    def __init__(self, lines, options, inFilename):
        """Initialize a few class variables in preparation for our walk."""
        self.lines = lines  # Source lines; rewritten in place during the walk.
        self.options = options  # Parsed command-line options object.
        self.inFilename = inFilename  # Input file name, used for AST error reporting.
        self.docLines = []  # Scratch buffer for the docstring currently being processed.
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.

        Runs as a coroutine: each send() supplies (line, lines, lineNum) and
        the routine mutates *lines* in place, inserting Doxygen @code /
        @endcode markers when it detects a transition.  inCodeBlockObj is a
        one-element list used as a mutable in/out flag shared with the caller.
        """
        while True:
            line, lines, lineNum = (yield)
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            lineOfCode = None
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        # codeop.compile_command returns a code object for a
                        # complete statement, or None when more input is needed.
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
            currentLineNum = lineNum - testLineNum
            if not inCodeBlockObj[0] and lineOfCode:
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
    @coroutine
    def __alterDocstring(self, tail='', writer=None):
        """
        Runs eternally, processing docstring lines.

        Parses docstring lines as they get fed in via send, applies
        appropriate Doxygen tags, and passes them along in batches to
        *writer* (the __writeDocstring coroutine) for splicing back in.
        A sentinel send of (lineNum, None) flushes the final batch.
        """
        assert isinstance(tail, str) and isinstance(writer, GeneratorType)
        lines = []
        timeToSend = False
        inCodeBlock = False
        inCodeBlockObj = [False]
        inSection = False
        prefix = ''
        firstLineNum = -1
        sectionHeadingIndent = 0
        codeChecker = self._checkIfCode(inCodeBlockObj)
        while True:
            lineNum, line = (yield)
            if firstLineNum < 0:
                firstLineNum = lineNum
            # Don't bother doing extra work if it's a sentinel.
            if line is not None:
                # Also limit work if we're not parsing the docstring.
                if self.options.autobrief:
                    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
                        match = tagRE.search(line)
                        if match:
                            # We've got a simple one-line Doxygen command
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            writer.send((firstLineNum, lineNum - 1, lines))
                            lines = []
                            firstLineNum = lineNum
                            line = line.replace(match.group(1), doxyTag)
                            timeToSend = True
                    if inSection:
                        # The last line belonged to a section.
                        # Does this one too? (Ignoring empty lines.)
                        match = AstWalker.__blanklineRE.match(line)
                        if not match:
                            indent = len(line.expandtabs(self.options.tablength)) - \
                                len(line.expandtabs(self.options.tablength).lstrip())
                            if indent <= sectionHeadingIndent:
                                inSection = False
                            else:
                                if lines[-1] == '#':
                                    # If the last line was empty, but we're still in a section
                                    # then we need to start a new paragraph.
                                    lines[-1] = '# @par'
                    match = AstWalker.__returnsStartRE.match(line)
                    if match:
                        # We've got a "returns" section
                        lines[-1], inCodeBlock = self._endCodeIfNeeded(
                            lines[-1], inCodeBlock)
                        inCodeBlockObj[0] = inCodeBlock
                        line = line.replace(match.group(0), ' @return\t').rstrip()
                        prefix = '@return\t'
                    else:
                        match = AstWalker.__argsStartRE.match(line)
                        if match:
                            # We've got an "arguments" section
                            line = line.replace(match.group(0), '').rstrip()
                            if 'attr' in match.group(0).lower():
                                prefix = '@property\t'
                            else:
                                prefix = '@param\t'
                            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                lines[-1], inCodeBlock)
                            inCodeBlockObj[0] = inCodeBlock
                            lines.append('#' + line)
                            continue
                        else:
                            match = AstWalker.__argsRE.match(line)
                            if match and not inCodeBlock:
                                # We've got something that looks like an item /
                                # description pair.
                                if 'property' in prefix:
                                    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
                                        prefix, match.groupdict(), linesep)
                                else:
                                    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
                                        prefix, match.groupdict())
                            else:
                                match = AstWalker.__raisesStartRE.match(line)
                                if match:
                                    line = line.replace(match.group(0), '').rstrip()
                                    if 'see' in match.group(1).lower():
                                        # We've got a "see also" section
                                        prefix = '@sa\t'
                                    else:
                                        # We've got an "exceptions" section
                                        prefix = '@exception\t'
                                    lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                        lines[-1], inCodeBlock)
                                    inCodeBlockObj[0] = inCodeBlock
                                    lines.append('#' + line)
                                    continue
                                else:
                                    match = AstWalker.__listRE.match(line)
                                    if match and not inCodeBlock:
                                        # We've got a list of something or another
                                        itemList = []
                                        for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
                                                match.group(0))):
                                            itemList.append('# {0}\t{1}{2}'.format(
                                                prefix, itemMatch, linesep))
                                        line = ''.join(itemList)[1:]
                                    else:
                                        match = AstWalker.__examplesStartRE.match(line)
                                        if match and lines[-1].strip() == '#' \
                                           and self.options.autocode:
                                            # We've got an "example" section
                                            inCodeBlock = True
                                            inCodeBlockObj[0] = True
                                            line = line.replace(match.group(0),
                                                                ' @b Examples{0}# @code'.format(linesep))
                                        else:
                                            match = AstWalker.__sectionStartRE.match(line)
                                            if match:
                                                # We've got an arbitrary section
                                                prefix = ''
                                                inSection = True
                                                # What's the indentation of the section heading?
                                                sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
                                                    - len(line.expandtabs(self.options.tablength).lstrip())
                                                line = line.replace(
                                                    match.group(0),
                                                    ' @par {0}'.format(match.group(1))
                                                )
                                                if lines[-1] == '# @par':
                                                    lines[-1] = '#'
                                                lines[-1], inCodeBlock = self._endCodeIfNeeded(
                                                    lines[-1], inCodeBlock)
                                                inCodeBlockObj[0] = inCodeBlock
                                                lines.append('#' + line)
                                                continue
                                            elif prefix:
                                                match = AstWalker.__singleListItemRE.match(line)
                                                if match and not inCodeBlock:
                                                    # Probably a single list item
                                                    line = ' {0}\t{1}'.format(
                                                        prefix, match.group(0))
                                                elif self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                                            else:
                                                if self.options.autocode:
                                                    codeChecker.send(
                                                        (
                                                            line, lines,
                                                            lineNum - firstLineNum
                                                        )
                                                    )
                                                    inCodeBlock = inCodeBlockObj[0]
                # If we were passed a tail, append it to the docstring.
                # Note that this means that we need a docstring for this
                # item to get documented.
                if tail and lineNum == len(self.docLines) - 1:
                    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
                # Add comment marker for every line.
                line = '#{0}'.format(line.rstrip())
                # Ensure the first line has the Doxygen double comment.
                if lineNum == 0:
                    line = '#' + line
                lines.append(line.replace(' ' + linesep, linesep))
            else:
                # If we get our sentinel value, send out what we've got.
                timeToSend = True
            if timeToSend:
                lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
                                                               inCodeBlock)
                inCodeBlockObj[0] = inCodeBlock
                writer.send((firstLineNum, lineNum, lines))
                lines = []
                firstLineNum = -1
                timeToSend = False
@coroutine
def __writeDocstring(self):
"""
Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send.
"""
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
# Substitute the new block of lines for the original block of lines.
self.docLines[firstLineNum: lastLineNum + 1] = lines
    def _processDocstring(self, node, tail='', **kwargs):
        """
        Handles a docstring for functions, classes, and modules.

        Basically just figures out the bounds of the docstring and sends it
        off to the parser coroutines to do the actual work, then splices the
        transformed lines (and, for classes/functions, the reordered
        declaration) back into self.lines.
        """
        typeName = type(node).__name__
        # Modules don't have lineno defined, but it's always 0 for them.
        curLineNum = startLineNum = 0
        if typeName != 'Module':
            startLineNum = curLineNum = node.lineno - 1
        # Figure out where both our enclosing object and our docstring start.
        line = ''
        while curLineNum < len(self.lines):
            line = self.lines[curLineNum]
            match = AstWalker.__docstrMarkerRE.match(line)
            if match:
                break
            curLineNum += 1
        docstringStart = curLineNum
        # Figure out where our docstring ends.
        if not AstWalker.__docstrOneLineRE.match(line):
            # Skip for the special case of a single-line docstring.
            curLineNum += 1
            while curLineNum < len(self.lines):
                line = self.lines[curLineNum]
                if line.find(match.group(2)) >= 0:
                    break
                curLineNum += 1
        endLineNum = curLineNum + 1
        # Isolate our enclosing object's declaration.
        defLines = self.lines[startLineNum: docstringStart]
        # Isolate our docstring.
        self.docLines = self.lines[docstringStart: endLineNum]
        # If we have a docstring, extract information from it.
        if self.docLines:
            # Get rid of the docstring delineators.
            self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
                                                              self.docLines[0])
            self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
                                                               self.docLines[-1])
            # Handle special strings within the docstring.
            docstringConverter = self.__alterDocstring(
                tail, self.__writeDocstring())
            for lineInfo in enumerate(self.docLines):
                docstringConverter.send(lineInfo)
            # Sentinel send flushes the converter's final batch.
            docstringConverter.send((len(self.docLines) - 1, None))
            # Add a Doxygen @brief tag to any single-line description.
            if self.options.autobrief:
                safetyCounter = 0
                while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
                    del self.docLines[0]
                    self.docLines.append('')
                    safetyCounter += 1
                    if safetyCounter >= len(self.docLines):
                        # Escape the effectively empty docstring.
                        break
                if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
                        self.docLines[1].strip(whitespace + '#') == '' or
                        self.docLines[1].strip(whitespace + '#').startswith('@'))):
                    self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
                    if len(self.docLines) > 1 and self.docLines[1] == '# @par':
                        self.docLines[1] = '#'
        if defLines:
            # Re-indent the docstring comments to match the declaration.
            match = AstWalker.__indentRE.match(defLines[0])
            indentStr = match and match.group(1) or ''
            self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
                             for docLine in self.docLines]
        # Taking away a docstring from an interface method definition sometimes
        # leaves broken code as the docstring may be the only code in it.
        # Here we manually insert a pass statement to rectify this problem.
        if typeName != 'Module':
            if docstringStart < len(self.lines):
                match = AstWalker.__indentRE.match(self.lines[docstringStart])
                indentStr = match and match.group(1) or ''
            else:
                indentStr = ''
            containingNodes = kwargs.get('containingNodes', []) or []
            fullPathNamespace = self._getFullPathName(containingNodes)
            parentType = fullPathNamespace[-2][1]
            if parentType == 'interface' and typeName == 'FunctionDef' \
               or fullPathNamespace[-1][1] == 'interface':
                defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
                                                      linesep, indentStr)
            elif self.options.autobrief and typeName == 'ClassDef':
                # If we're parsing docstrings separate out class attribute
                # definitions to get better Doxygen output.
                for firstVarLineNum, firstVarLine in enumerate(self.docLines):
                    if '@property\t' in firstVarLine:
                        break
                lastVarLineNum = len(self.docLines)
                if lastVarLineNum > 0 and '@property\t' in firstVarLine:
                    while lastVarLineNum > firstVarLineNum:
                        lastVarLineNum -= 1
                        if '@property\t' in self.docLines[lastVarLineNum]:
                            break
                    lastVarLineNum += 1
                if firstVarLineNum < len(self.docLines):
                    indentLineNum = endLineNum
                    indentStr = ''
                    while not indentStr and indentLineNum < len(self.lines):
                        match = AstWalker.__indentRE.match(self.lines[indentLineNum])
                        indentStr = match and match.group(1) or ''
                        indentLineNum += 1
                    varLines = ['{0}{1}'.format(linesep, docLine).replace(
                        linesep, linesep + indentStr)
                        for docLine in self.docLines[
                            firstVarLineNum: lastVarLineNum]]
                    defLines.extend(varLines)
                    self.docLines[firstVarLineNum: lastVarLineNum] = []
            # After the property shuffling we will need to relocate
            # any existing namespace information.
            namespaceLoc = defLines[-1].find('\n# @namespace')
            if namespaceLoc >= 0:
                self.docLines[-1] += defLines[-1][namespaceLoc:]
                defLines[-1] = defLines[-1][:namespaceLoc]
        # For classes and functions, apply our changes and reverse the
        # order of the declaration and docstring, and for modules just
        # apply our changes.
        if typeName != 'Module':
            self.lines[startLineNum: endLineNum] = self.docLines + defLines
        else:
            self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
    def visit_Module(self, node, **kwargs):
        """
        Handles the module-level docstring.

        Process the module-level docstring and create appropriate Doxygen tags
        if autobrief option is set.
        """
        containingNodes=kwargs.get('containingNodes', [])
        if self.options.debug:
            stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
                                                  linesep))
        # A @namespace tail is only emitted when the module actually has a
        # docstring to attach it to; without one no Doxygen block is made.
        if get_docstring(node):
            if self.options.topLevelNamespace:
                fullPathNamespace = self._getFullPathName(containingNodes)
                contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
                tail = '@namespace {0}'.format(contextTag)
            else:
                tail = ''
            self._processDocstring(node, tail)
        # Visit any contained nodes (in this case pretty much everything).
        self.generic_visit(node, containingNodes=containingNodes)
    def visit_Assign(self, node, **kwargs):
        """
        Handles assignments within code.

        Variable assignments in Python are used to represent interface
        attributes in addition to basic variables. If an assignment appears
        to be an attribute, it gets labeled as such for Doxygen. If a variable
        name uses Python mangling or is just a bed lump, it is labeled as
        private for Doxygen.
        """
        lineNum = node.lineno - 1
        # Assignments have one Doxygen-significant special case:
        # interface attributes.
        match = AstWalker.__attributeRE.match(self.lines[lineNum])
        if match:
            # Rewrite the source line so a @property/@hideinitializer
            # comment block precedes the original assignment.
            self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
                                  '{0}# @hideinitializer{2}{4}{2}'.format(
                match.group(1),
                match.group(2),
                linesep,
                match.group(3),
                self.lines[lineNum].rstrip()
            )
            if self.options.debug:
                stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
                                                            linesep))
        # NOTE: only the first assignment target is inspected; additional
        # targets in a multiple assignment are ignored here.
        if isinstance(node.targets[0], Name):
            match = AstWalker.__indentRE.match(self.lines[lineNum])
            indentStr = match and match.group(1) or ''
            restrictionLevel = self._checkMemberName(node.targets[0].id)
            if restrictionLevel:
                # Tag mangled/underscore-prefixed names as private/protected.
                self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
                                      '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
                    indentStr,
                    node.targets[0].id,
                    linesep,
                    restrictionLevel,
                    self.lines[lineNum].rstrip()
                )
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
    def visit_Call(self, node, **kwargs):
        """
        Handles function calls within code.

        Function calls in Python are used to represent interface implementations
        in addition to their normal use. If a call appears to mark an
        implementation, it gets labeled as such for Doxygen.
        """
        lineNum = node.lineno - 1
        # Function calls have one Doxygen-significant special case: interface
        # implementations.
        match = AstWalker.__implementsRE.match(self.lines[lineNum])
        if match:
            # Prepend an @implements comment line ahead of the original call.
            self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
                match.group(1), match.group(2), linesep,
                self.lines[lineNum].rstrip())
            if self.options.debug:
                stderr.write("# Implements {0}{1}".format(match.group(1),
                                                          linesep))
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=kwargs['containingNodes'])
    def visit_FunctionDef(self, node, **kwargs):
        """
        Handles function definitions within code.

        Process a function's docstring, keeping well aware of the function's
        context and whether or not it's part of an interface definition.
        """
        if self.options.debug:
            stderr.write("# Function {0.name}{1}".format(node, linesep))
        # Push a (name, 'function') pair onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is nested within another function or even if a class
        # is nested within a function.
        containingNodes = kwargs.get('containingNodes') or []
        containingNodes.append((node.name, 'function'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            modifiedContextTag = self._processMembers(node, contextTag)
            tail = '@namespace {0}'.format(modifiedContextTag)
        else:
            tail = self._processMembers(node, '')
        # Only functions that carry a docstring get the Doxygen treatment;
        # the namespace/visibility tail is appended to that docstring.
        if get_docstring(node):
            self._processDocstring(node, tail,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
    def parseLines(self):
        """
        Form an AST for the code and produce a new version of the source.

        Parses the accumulated source lines into a tree and walks it; the
        visitor methods rewrite self.lines in place with Doxygen markup.
        """
        inAst = parse(''.join(self.lines), self.inFilename)
        # Visit all the nodes in our tree and apply Doxygen tags to the source.
        self.visit(inAst)
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
Feneric/doxypypy
|
doxypypy/doxypypy.py
|
AstWalker.parseLines
|
python
|
def parseLines(self):
inAst = parse(''.join(self.lines), self.inFilename)
# Visit all the nodes in our tree and apply Doxygen tags to the source.
self.visit(inAst)
|
Form an AST for the code and produce a new version of the source.
|
train
|
https://github.com/Feneric/doxypypy/blob/a8555b15fa2a758ea8392372de31c0f635cc0d93/doxypypy/doxypypy.py#L768-L772
|
[
"def visit(self, node, **kwargs):\n \"\"\"\n Visit a node and extract useful information from it.\n\n This is virtually identical to the standard version contained in\n NodeVisitor. It is only overridden because we're tracking extra\n information (the hierarchy of containing nodes) not preserved in\n the original.\n \"\"\"\n containingNodes = kwargs.get('containingNodes', [])\n method = 'visit_' + node.__class__.__name__\n visitor = getattr(self, method, self.generic_visit)\n return visitor(node, containingNodes=containingNodes)\n"
] |
class AstWalker(NodeVisitor):
    """
    A walker that'll recursively progress through an AST.

    Given an abstract syntax tree for Python code, walk through all the
    nodes looking for significant types (for our purposes we only care
    about module starts, class definitions, function definitions, variable
    assignments, and function calls, as all the information we want to pass
    to Doxygen is found within these constructs). If the autobrief option
    is set, it further attempts to parse docstrings to create appropriate
    Doxygen tags.
    """
    # We have a number of regular expressions that we use. They don't
    # vary across instances and so are compiled directly in the class
    # definition.
    # Leading whitespace up to the first non-space character.
    __indentRE = regexpCompile(r'^(\s*)\S')
    # Comment-start marker, matched per line within a multi-line string.
    __newlineRE = regexpCompile(r'^#', MULTILINE)
    __blanklineRE = regexpCompile(r'^\s*$')
    # Opening triple-quote of a docstring (with optional u/b/r prefixes).
    __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))")
    # A complete one-line docstring (open and close quotes on one line).
    __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1")
    # zope.interface implementation declarations.
    __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?"
                                   r"(?:module|class|directly)?"
                                   r"(?:Provides|Implements)\(\s*(.+)\s*\)",
                                   IGNORECASE)
    # Class definitions, and the zope.interface-style interface subset.
    __classRE = regexpCompile(r"^\s*class\s+(\S+)\s*\((\S+)\):")
    __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Interface\s*\)\s*:", IGNORECASE)
    # zope.interface Attribute(...) declarations.
    __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?"
                                  r"(?:interface\.)?"
                                  r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)",
                                  IGNORECASE)
    # One-line docstring headings mapped to their Doxygen equivalents.
    __singleLineREs = {
        ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE),
        ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE),
        ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE),
        ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE),
        ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE),
        ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE),
        ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE)
    }
    # Docstring section headings ("Args:", "Returns:", "Raises:", ...).
    __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?"
                                  r"(?:A|Kwa)rg(?:ument)?|Attribute)s?"
                                  r"\s*:\s*)$", IGNORECASE)
    # One "name (type): description" entry within an arguments section.
    __argsRE = regexpCompile(r"^\s*(?P<name>\w+)\s*(?P<type>\(?\S*\)?)?\s*"
                             r"(?:-|:)+\s+(?P<desc>.+)$")
    __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE)
    __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$",
                                    IGNORECASE)
    # Comma-separated item lists (optionally "a, b and c" style).
    __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$")
    __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$')
    __listItemRE = regexpCompile(r'([\w\.]+),?\s*')
    __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$",
                                      IGNORECASE)
    # Any other "Heading:" line introducing an arbitrary section.
    __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$")
    # The error line should match traceback lines, error exception lines, and
    # (due to a weird behavior of codeop) single word lines.
    __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$",
                                  IGNORECASE)
def __init__(self, lines, options, inFilename):
"""Initialize a few class variables in preparation for our walk."""
self.lines = lines
self.options = options
self.inFilename = inFilename
self.docLines = []
@staticmethod
def _stripOutAnds(inStr):
"""Takes a string and returns the same without ands or ampersands."""
assert isinstance(inStr, str)
return inStr.replace(' and ', ' ').replace(' & ', ' ')
@staticmethod
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock
    @coroutine
    def _checkIfCode(self, inCodeBlockObj):
        """
        Checks whether or not a given line appears to be Python code.

        Runs as a coroutine: each send delivers (line, lines, lineNum).
        Lines are accumulated until codeop.compile_command can decide, and
        when the code/not-code state flips, a Doxygen @code or @endcode
        marker is appended in place to the appropriate entry of `lines`.
        The single-element list `inCodeBlockObj` carries the current state
        back to the caller (a mutable cell, since coroutines can't return).
        """
        while True:
            line, lines, lineNum = (yield)
            testLineNum = 1
            currentLineNum = 0
            testLine = line.strip()
            lineOfCode = None
            # Keep pulling lines until we can classify the accumulated text.
            while lineOfCode is None:
                match = AstWalker.__errorLineRE.match(testLine)
                if not testLine or testLine == '...' or match:
                    # These are ambiguous.
                    line, lines, lineNum = (yield)
                    testLine = line.strip()
                    #testLineNum = 1
                elif testLine.startswith('>>>'):
                    # This is definitely code.
                    lineOfCode = True
                else:
                    try:
                        compLine = compile_command(testLine)
                        if compLine and lines[currentLineNum].strip().startswith('#'):
                            lineOfCode = True
                        else:
                            # Incomplete statement: append the next line and
                            # try compiling again.
                            line, lines, lineNum = (yield)
                            line = line.strip()
                            if line.startswith('>>>'):
                                # Definitely code, don't compile further.
                                lineOfCode = True
                            else:
                                testLine += linesep + line
                                testLine = testLine.strip()
                                testLineNum += 1
                    except (SyntaxError, RuntimeError):
                        # This is definitely not code.
                        lineOfCode = False
                    except Exception:
                        # Other errors are ambiguous.
                        line, lines, lineNum = (yield)
                        testLine = line.strip()
                        #testLineNum = 1
                currentLineNum = lineNum - testLineNum
            # State transitions: open a @code block when entering code,
            # close it with @endcode when a definite non-code line appears.
            if not inCodeBlockObj[0] and lineOfCode:
                inCodeBlockObj[0] = True
                lines[currentLineNum] = '{0}{1}# @code{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
            elif inCodeBlockObj[0] and lineOfCode is False:
                # None is ambiguous, so strict checking
                # against False is necessary.
                inCodeBlockObj[0] = False
                lines[currentLineNum] = '{0}{1}# @endcode{1}'.format(
                    lines[currentLineNum],
                    linesep
                )
@coroutine
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False
    @coroutine
    def __writeDocstring(self):
        """
        Runs eternally, dumping out docstring line batches as they get fed in.

        Replaces original batches of docstring lines with modified versions
        fed in via send.  Each send delivers (firstLineNum, lastLineNum,
        lines); the span is padded so overall line count is preserved.
        """
        while True:
            firstLineNum, lastLineNum, lines = (yield)
            newDocstringLen = lastLineNum - firstLineNum + 1
            # Pad with blank lines so the replacement keeps the span's size.
            while len(lines) < newDocstringLen:
                lines.append('')
            # Substitute the new block of lines for the original block of lines.
            self.docLines[firstLineNum: lastLineNum + 1] = lines
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines
@staticmethod
def _checkMemberName(name):
"""
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
"""
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
def _processMembers(self, node, contextTag):
"""
Mark up members if they should be private.
If the name indicates it should be private or protected, apply
the appropriate Doxygen tags.
"""
restrictionLevel = self._checkMemberName(node.name)
if restrictionLevel:
workTag = '{0}{1}# @{2}'.format(contextTag,
linesep,
restrictionLevel)
else:
workTag = contextTag
return workTag
def generic_visit(self, node, **kwargs):
"""
Extract useful information from relevant nodes including docstrings.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item, containingNodes=kwargs['containingNodes'])
elif isinstance(value, AST):
self.visit(value, containingNodes=kwargs['containingNodes'])
def visit(self, node, **kwargs):
"""
Visit a node and extract useful information from it.
This is virtually identical to the standard version contained in
NodeVisitor. It is only overridden because we're tracking extra
information (the hierarchy of containing nodes) not preserved in
the original.
"""
containingNodes = kwargs.get('containingNodes', [])
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node, containingNodes=containingNodes)
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes
def visit_Module(self, node, **kwargs):
"""
Handles the module-level docstring.
Process the module-level docstring and create appropriate Doxygen tags
if autobrief option is set.
"""
containingNodes=kwargs.get('containingNodes', [])
if self.options.debug:
stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace,
linesep))
if get_docstring(node):
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
tail = '@namespace {0}'.format(contextTag)
else:
tail = ''
self._processDocstring(node, tail)
# Visit any contained nodes (in this case pretty much everything).
self.generic_visit(node, containingNodes=containingNodes)
def visit_Assign(self, node, **kwargs):
"""
Handles assignments within code.
Variable assignments in Python are used to represent interface
attributes in addition to basic variables. If an assignment appears
to be an attribute, it gets labeled as such for Doxygen. If a variable
name uses Python mangling or is just a bed lump, it is labeled as
private for Doxygen.
"""
lineNum = node.lineno - 1
# Assignments have one Doxygen-significant special case:
# interface attributes.
match = AstWalker.__attributeRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \
'{0}# @hideinitializer{2}{4}{2}'.format(
match.group(1),
match.group(2),
linesep,
match.group(3),
self.lines[lineNum].rstrip()
)
if self.options.debug:
stderr.write("# Attribute {0.id}{1}".format(node.targets[0],
linesep))
if isinstance(node.targets[0], Name):
match = AstWalker.__indentRE.match(self.lines[lineNum])
indentStr = match and match.group(1) or ''
restrictionLevel = self._checkMemberName(node.targets[0].id)
if restrictionLevel:
self.lines[lineNum] = '{0}## @var {1}{2}{0}' \
'# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format(
indentStr,
node.targets[0].id,
linesep,
restrictionLevel,
self.lines[lineNum].rstrip()
)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_Call(self, node, **kwargs):
"""
Handles function calls within code.
Function calls in Python are used to represent interface implementations
in addition to their normal use. If a call appears to mark an
implementation, it gets labeled as such for Doxygen.
"""
lineNum = node.lineno - 1
# Function calls have one Doxygen-significant special case: interface
# implementations.
match = AstWalker.__implementsRE.match(self.lines[lineNum])
if match:
self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format(
match.group(1), match.group(2), linesep,
self.lines[lineNum].rstrip())
if self.options.debug:
stderr.write("# Implements {0}{1}".format(match.group(1),
linesep))
# Visit any contained nodes.
self.generic_visit(node, containingNodes=kwargs['containingNodes'])
def visit_FunctionDef(self, node, **kwargs):
"""
Handles function definitions within code.
Process a function's docstring, keeping well aware of the function's
context and whether or not it's part of an interface definition.
"""
if self.options.debug:
stderr.write("# Function {0.name}{1}".format(node, linesep))
# Push either 'interface' or 'class' onto our containing nodes
# hierarchy so we can keep track of context. This will let us tell
# if a function is nested within another function or even if a class
# is nested within a function.
containingNodes = kwargs.get('containingNodes') or []
containingNodes.append((node.name, 'function'))
if self.options.topLevelNamespace:
fullPathNamespace = self._getFullPathName(containingNodes)
contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
modifiedContextTag = self._processMembers(node, contextTag)
tail = '@namespace {0}'.format(modifiedContextTag)
else:
tail = self._processMembers(node, '')
if get_docstring(node):
self._processDocstring(node, tail,
containingNodes=containingNodes)
# Visit any contained nodes.
self.generic_visit(node, containingNodes=containingNodes)
# Remove the item we pushed onto the containing nodes hierarchy.
containingNodes.pop()
    def visit_ClassDef(self, node, **kwargs):
        """
        Handles class definitions within code.

        Process the docstring. Note though that in Python Class definitions
        are used to define interfaces in addition to classes.
        If a class definition appears to be an interface definition tag it as an
        interface definition for Doxygen. Otherwise tag it as a class
        definition for Doxygen.
        """
        lineNum = node.lineno - 1
        # Push either 'interface' or 'class' onto our containing nodes
        # hierarchy so we can keep track of context. This will let us tell
        # if a function is a method or an interface method definition or if
        # a class is fully contained within another class.
        containingNodes = kwargs.get('containingNodes') or []
        if not self.options.object_respect:
            # Remove object class of the inherited class list to avoid that all
            # new-style class inherits from object in the hierarchy class
            line = self.lines[lineNum]
            match = AstWalker.__classRE.match(line)
            if match:
                if match.group(2) == 'object':
                    self.lines[lineNum] = line[:match.start(2)] + line[match.end(2):]
        match = AstWalker.__interfaceRE.match(self.lines[lineNum])
        if match:
            if self.options.debug:
                stderr.write("# Interface {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'interface'))
        else:
            if self.options.debug:
                stderr.write("# Class {0.name}{1}".format(node, linesep))
            containingNodes.append((node.name, 'class'))
        if self.options.topLevelNamespace:
            fullPathNamespace = self._getFullPathName(containingNodes)
            contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace)
            tail = '@namespace {0}'.format(contextTag)
        else:
            tail = ''
        # Class definitions have one Doxygen-significant special case:
        # interface definitions.
        if match:
            contextTag = '{0}{1}# @interface {2}'.format(tail,
                                                         linesep,
                                                         match.group(1))
        else:
            contextTag = tail
        contextTag = self._processMembers(node, contextTag)
        # Only classes carrying a docstring get the Doxygen treatment.
        if get_docstring(node):
            self._processDocstring(node, contextTag,
                                   containingNodes=containingNodes)
        # Visit any contained nodes.
        self.generic_visit(node, containingNodes=containingNodes)
        # Remove the item we pushed onto the containing nodes hierarchy.
        containingNodes.pop()
def getLines(self):
"""Return the modified file once processing has been completed."""
return linesep.join(line.rstrip() for line in self.lines)
|
pepkit/peppy
|
peppy/sample.py
|
merge_sample
|
python
|
def merge_sample(sample, sample_subann,
                 data_sources=None, derived_attributes=None):
    """
    Use merge table (subannotation) data to augment/modify a Sample.

    Rows of the subannotation whose sample-name column matches the given
    Sample become Subsample objects, and their per-column values are joined
    into space-delimited strings that replace the Sample's attributes.

    :param Sample sample: sample to modify via merge table data
    :param sample_subann: tabular (DataFrame-like) data with which to alter
        the Sample; must contain a sample-name column
    :param Mapping data_sources: collection of named paths to data locations,
        optional
    :param Iterable[str] derived_attributes: names of attributes for which
        corresponding Sample attribute's value is data-derived, optional
    :return Sample | dict: the updated Sample, or an empty dict if there is
        no subannotation data (or no matching rows) for this sample
    :raise KeyError: if the subannotation lacks the sample-name column
    """
    merged_attrs = {}
    if sample_subann is None:
        _LOGGER.log(5, "No data for sample merge, skipping")
        return merged_attrs
    if SAMPLE_NAME_COLNAME not in sample_subann.columns:
        raise KeyError(
            "Merge table requires a column named '{}'.".
            format(SAMPLE_NAME_COLNAME))
    _LOGGER.debug("Merging Sample with data sources: {}".
                  format(data_sources))
    # Hash derived columns for faster lookup in case of many samples/columns.
    derived_attributes = set(derived_attributes or [])
    _LOGGER.debug("Merging Sample with derived attributes: {}".
                  format(derived_attributes))
    # Restrict the subannotation to this sample's rows.
    sample_name = getattr(sample, SAMPLE_NAME_COLNAME)
    sample_indexer = sample_subann[SAMPLE_NAME_COLNAME] == sample_name
    this_sample_rows = sample_subann[sample_indexer]
    if len(this_sample_rows) == 0:
        _LOGGER.debug("No merge rows for sample '%s', skipping", sample.name)
        return merged_attrs
    _LOGGER.log(5, "%d rows to merge", len(this_sample_rows))
    _LOGGER.log(5, "Merge rows dict: {}".format(this_sample_rows.to_dict()))
    # For each row in the merge table of this sample:
    # 1) populate any derived columns
    # 2) derived columns --> space-delimited strings
    # 3) update the sample values with the merge table
    # Keep track of merged cols,
    # so we don't re-derive them later.
    merged_attrs = {key: "" for key in this_sample_rows.columns}
    subsamples = []
    _LOGGER.debug(this_sample_rows)
    for subsample_row_id, row in this_sample_rows.iterrows():
        try:
            row['subsample_name']
        except KeyError:
            # default to a numeric count on subsamples if they aren't named
            row['subsample_name'] = str(subsample_row_id)
        subann_unit = Subsample(row)
        subsamples.append(subann_unit)
        _LOGGER.debug(subsamples)
        rowdata = row.to_dict()
        # Iterate over column names to avoid Python3 RuntimeError for
        # during-iteration change of dictionary size.
        for attr_name in this_sample_rows.columns:
            if attr_name == SAMPLE_NAME_COLNAME or \
                    attr_name not in derived_attributes:
                _LOGGER.log(5, "Skipping merger of attribute '%s'", attr_name)
                continue
            attr_value = rowdata[attr_name]
            # Initialize key in parent dict.
            col_key = attr_name + COL_KEY_SUFFIX
            merged_attrs[col_key] = ""
            # Preserve the raw source key under "<attr>_key" before the
            # attribute itself is replaced by the resolved path.
            rowdata[col_key] = attr_value
            data_src_path = sample.locate_data_source(
                data_sources, attr_name, source_key=rowdata[attr_name],
                extra_vars=rowdata)    # 1)
            rowdata[attr_name] = data_src_path
        _LOGGER.log(5, "Adding derived attributes")
        for attr in derived_attributes:
            # Skip over any attributes that the sample lacks or that are
            # covered by the data from the current (row's) data.
            if not hasattr(sample, attr) or attr in rowdata:
                _LOGGER.log(5, "Skipping column: '%s'", attr)
                continue
            # Map key to sample's value for the attribute given by column name.
            col_key = attr + COL_KEY_SUFFIX
            rowdata[col_key] = getattr(sample, attr)
            # Map the col/attr name itself to the populated data source
            # template string.
            rowdata[attr] = sample.locate_data_source(
                data_sources, attr, source_key=getattr(sample, attr),
                extra_vars=rowdata)
        # TODO: this (below) is where we could maintain grouped values
        # TODO (cont.): as a collection and defer the true merger.
        # Since we are now jamming multiple (merged) entries into a single
        # attribute on a Sample, we have to join the individual items into a
        # space-delimited string and then use that value as the Sample
        # attribute. The intended use case for this sort of merge is for
        # multiple data source paths associated with a single Sample, hence
        # the choice of space-delimited string as the joined-/merged-entry
        # format--it's what's most amenable to use in building up an argument
        # string for a pipeline command.
        for attname, attval in rowdata.items():
            if attname == SAMPLE_NAME_COLNAME or not attval:
                _LOGGER.log(5, "Skipping KV: {}={}".format(attname, attval))
                continue
            _LOGGER.log(5, "merge: sample '%s'; '%s'='%s'",
                        str(sample.name), str(attname), str(attval))
            # First occurrence starts the string; later ones are appended
            # with a space separator.
            if attname not in merged_attrs:
                new_attval = str(attval).rstrip()
            else:
                new_attval = "{} {}".format(merged_attrs[attname],
                                            str(attval)).strip()
            merged_attrs[attname] = new_attval    # 2)
            _LOGGER.log(5, "Stored '%s' as value for '%s' in merged_attrs",
                        new_attval, attname)
    # If present, remove sample name from the data with which to update sample.
    merged_attrs.pop(SAMPLE_NAME_COLNAME, None)
    _LOGGER.log(5, "Updating Sample {}: {}".format(sample.name, merged_attrs))
    sample.update(merged_attrs)    # 3)
    sample.merged_cols = merged_attrs
    sample.merged = True
    sample.subsamples = subsamples
    return sample
|
Use merge table (subannotation) data to augment/modify Sample.
:param Sample sample: sample to modify via merge table data
:param sample_subann: data with which to alter Sample
:param Mapping data_sources: collection of named paths to data locations,
optional
:param Iterable[str] derived_attributes: names of attributes for which
corresponding Sample attribute's value is data-derived, optional
:return Sample | dict: the updated Sample, or an empty dict if there is no
    subannotation data (or no matching rows) for this sample
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/sample.py#L913-L1048
| null |
""" Modeling individual samples to process or otherwise use. """
from collections import OrderedDict
import glob
import logging
from operator import itemgetter
import os
import sys
if sys.version_info < (3, 3):
from collections import Mapping
else:
from collections.abc import Mapping
import warnings
from pandas import isnull, Series
import yaml
from . import ASSAY_KEY, SAMPLE_NAME_COLNAME
from attmap import AttMap, PathExAttMap
from .const import \
ALL_INPUTS_ATTR_NAME, DATA_SOURCE_COLNAME, DATA_SOURCES_SECTION, \
NAME_TABLE_ATTR, REQUIRED_INPUTS_ATTR_NAME, SAMPLE_EXECUTION_TOGGLE, \
SAMPLE_SUBANNOTATIONS_KEY, VALID_READ_TYPES
from .utils import check_bam, check_fastq, copy, get_file_size, \
grab_project_data, parse_ftype, sample_folder
COL_KEY_SUFFIX = "_key"
PRJ_REF = "prj"
_LOGGER = logging.getLogger(__name__)
@copy
class Subsample(PathExAttMap):
    """
    Model a single component (unit) of a Sample.

    A Subsample corresponds to one row of a PEP subannotation table; samples
    with multiple input files of the same type get one Subsample per row.

    :param Mapping | pandas.core.series.Series series: Subsample data
    """

    def __init__(self, series, sample=None):
        mapping = OrderedDict(series)
        _LOGGER.debug("Subsample data:\n{}".format(mapping))
        super(Subsample, self).__init__(entries=mapping)
        self.sample = sample
@copy
class Sample(PathExAttMap):
"""
Class to model Samples based on a pandas Series.
:param Mapping | pandas.core.series.Series series: Sample's data.
:Example:
.. code-block:: python
from models import Project, SampleSheet, Sample
prj = Project("ngs")
sheet = SampleSheet("~/projects/example/sheet.csv", prj)
s1 = Sample(sheet.iloc[0])
"""
_FEATURE_ATTR_NAMES = ["read_length", "read_type", "paired"]
# Originally, this object was inheriting from Series,
# but complications with serializing and code maintenance
# made me go back and implement it as a top-level object
    def __init__(self, series, prj=None):
        """
        Build a Sample from a mapping/Series of attribute values.

        :param Mapping | pandas.core.series.Series series: Sample's data
        :param prj: the Project to which this Sample belongs, optional
        :raise ValueError: if a required attribute (the sample name) is
            missing or empty
        """
        # Create data, handling library/protocol.
        # "library" is a legacy alias; normalize it under the assay key.
        data = OrderedDict(series)
        try:
            protocol = data.pop("library")
        except KeyError:
            pass
        else:
            data[ASSAY_KEY] = protocol
        super(Sample, self).__init__(entries=data)
        if PRJ_REF in self and prj:
            _LOGGER.warn("Project provided both directly and indirectly; "
                         "using direct")
        if prj or PRJ_REF not in self:
            self[PRJ_REF] = prj or None
        self.merged_cols = {}
        self.derived_cols_done = []
        # Normalize the original input to a plain ordered mapping so the
        # sheet-provided attribute names can be recorded below.
        if isinstance(series, Series):
            series = series.to_dict(OrderedDict)
        elif isinstance(series, Sample):
            series = series.as_series().to_dict(OrderedDict)
        # Keep a list of attributes that came from the sample sheet,
        # so we can create a minimal, ordered representation of the original.
        # This allows summarization of the sample (i.e.,
        # appending new columns onto the original table)
        self.sheet_attributes = series.keys()
        # Check if required attributes exist and are not empty.
        missing_attributes_message = self.check_valid()
        if missing_attributes_message:
            raise ValueError(missing_attributes_message)
        # Short hand for getting sample_name
        self.name = self.sample_name
        # Default to no required paths and no YAML file.
        self.required_paths = None
        self.yaml_file = None
        # Not yet merged, potentially toggled when merge step is considered.
        self.merged = False
        # Collect sample-specific filepaths.
        # Only when sample is added to project, can paths be added.
        # Essentially, this provides an empty container for tool-specific
        # filepaths, into which a pipeline may deposit such filepaths as
        # desired. Such use provides a sort of communication interface
        # between times and perhaps individuals (processing time vs.
        # analysis time, and a pipeline author vs. a pipeline user).
        self.paths = Paths()
    @staticmethod
    def _omit_from_eq(k):
        """ Exclude the Project reference from object comparison. """
        # Comparing the back-reference would recurse into the whole Project.
        return k == PRJ_REF
    @staticmethod
    def _omit_from_repr(k, cls):
        """ Exclude the Project reference from representation. """
        # TODO: better solution for this cyclical dependency hack
        # The cls parameter is part of the hook signature; only the key
        # matters here.
        return k == PRJ_REF
    def __setitem__(self, key, value):
        """Store a value, special-casing a Project back-reference."""
        # TODO: better solution for this cyclical dependency hack
        # A Project goes straight into __dict__, bypassing the mapping
        # machinery; the class-name comparison presumably avoids importing
        # Project here (circular import) -- confirm against project module.
        if value.__class__.__name__ == "Project":
            self.__dict__[key] = value
        else:
            super(Sample, self).__setitem__(key, value)
    def __str__(self):
        """Render as a short human-readable label, e.g. "Sample 'frog_1'"."""
        return "Sample '{}'".format(self.name)
@property
def input_file_paths(self):
"""
List the sample's data source / input files
:return list[str]: paths to data sources / input file for this Sample.
"""
return self.data_source.split(" ") if self.data_source else []
def as_series(self):
"""
Returns a `pandas.Series` object with all the sample's attributes.
:return pandas.core.series.Series: pandas Series representation
of this Sample, with its attributes.
"""
# Note that this preserves metadata, but it could be excluded
# with self.items() rather than self.__dict__.
return Series(self.__dict__)
def check_valid(self, required=None):
"""
Check provided sample annotation is valid.
:param Iterable[str] required: collection of required sample attribute
names, optional; if unspecified, only a name is required.
:return (Exception | NoneType, str, str): exception and messages about
what's missing/empty; null with empty messages if there was nothing
exceptional or required inputs are absent or not set
"""
missing, empty = [], []
for attr in (required or [SAMPLE_NAME_COLNAME]):
if not hasattr(self, attr):
missing.append(attr)
if attr == "nan":
empty.append(attr)
missing_attributes_message = \
"Sample lacks attribute(s). missing={}; empty={}". \
format(missing, empty) if (missing or empty) else ""
return missing_attributes_message
    def determine_missing_requirements(self):
        """
        Determine which of this Sample's required attributes/files are missing.

        :return (type, str, str): hypothetical exception type along with
            reason key and detail message about what's missing; null and
            empty strings if nothing exceptional is detected
        """
        null_return = (None, "", "")
        # set_pipeline_attributes must be run first.
        if not hasattr(self, "required_inputs"):
            _LOGGER.warning("You must run set_pipeline_attributes "
                            "before determine_missing_requirements")
            return null_return
        if not self.required_inputs:
            _LOGGER.debug("No required inputs")
            return null_return
        # First, attributes
        missing, empty = [], []
        for file_attribute in self.required_inputs_attr:
            _LOGGER.log(5, "Checking '{}'".format(file_attribute))
            try:
                attval = getattr(self, file_attribute)
            except AttributeError:
                _LOGGER.log(5, "Missing required input attribute '%s'",
                            file_attribute)
                missing.append(file_attribute)
                continue
            if attval == "":
                _LOGGER.log(5, "Empty required input attribute '%s'",
                            file_attribute)
                empty.append(file_attribute)
            else:
                _LOGGER.log(5, "'{}' is valid: '{}'".
                            format(file_attribute, attval))
        # Attribute problems take precedence over file problems.
        if missing or empty:
            reason_key = "Missing and/or empty attribute(s)."
            reason_detail = "Missing: {}; Empty: {}".format(
                ", ".join(missing), ", ".join(empty))
            return AttributeError, reason_key, reason_detail
        # Second, files
        missing_files = []
        for paths in self.required_inputs:
            _LOGGER.log(5, "Text to split and check paths: '%s'", paths)
            # There can be multiple, space-separated values here.
            for path in paths.split(" "):
                _LOGGER.log(5, "Checking path: '{}'".format(path))
                if not os.path.exists(path):
                    _LOGGER.log(5, "Missing required input file: '{}'".
                                format(path))
                    missing_files.append(path)
        if not missing_files:
            return null_return
        else:
            reason_key = "Missing file(s)"
            reason_detail = ", ".join(missing_files)
            return IOError, reason_key, reason_detail
def generate_filename(self, delimiter="_"):
"""
Create a name for file in which to represent this Sample.
This uses knowledge of the instance's subtype, sandwiching a delimiter
between the name of this Sample and the name of the subtype before the
extension. If the instance is a base Sample type, then the filename
is simply the sample name with an extension.
:param str delimiter: what to place between sample name and name of
subtype; this is only relevant if the instance is of a subclass
:return str: name for file with which to represent this Sample on disk
"""
base = self.name if type(self) is Sample else \
"{}{}{}".format(self.name, delimiter, self.__class__.__name__)
return "{}.yaml".format(base)
    def generate_name(self):
        """
        Generate name for the sample by joining some of its attribute strings.

        :raise NotImplementedError: always; retained only for interface
            compatibility with the old code base
        """
        raise NotImplementedError("Not implemented in new code base.")
def get_attr_values(self, attrlist):
"""
Get value corresponding to each given attribute.
:param str attrlist: name of an attribute storing a list of attr names
:return list | NoneType: value (or empty string) corresponding to
each named attribute; null if this Sample's value for the
attribute given by the argument to the "attrlist" parameter is
empty/null, or if this Sample lacks the indicated attribute
"""
# If attribute is None, then value is also None.
attribute_list = getattr(self, attrlist, None)
if not attribute_list:
return None
if not isinstance(attribute_list, list):
attribute_list = [attribute_list]
# Strings contained here are appended later so shouldn't be null.
return [getattr(self, attr, "") for attr in attribute_list]
def get_sheet_dict(self):
"""
Create a K-V pairs for items originally passed in via the sample sheet.
This is useful for summarizing; it provides a representation of the
sample that excludes things like config files and derived entries.
:return OrderedDict: mapping from name to value for data elements
originally provided via the sample sheet (i.e., the a map-like
representation of the instance, excluding derived items)
"""
return OrderedDict(
[[k, getattr(self, k)] for k in self.sheet_attributes])
    def infer_attributes(self, implications):
        """
        Infer value for additional field(s) from other field(s).

        Add columns/fields to the sample based on values in those already-set
        that the sample's project defines as indicative of implications for
        additional data elements for the sample.

        :param Mapping implications: Project's implied columns data, keyed by
            implier attribute name, then by implier value, then by implied
            column name
        :return None: this function mutates state and is strictly for effect
        """
        _LOGGER.log(5, "Sample attribute implications: {}".
                    format(implications))
        if not implications:
            return
        for implier_name, implied in implications.items():
            _LOGGER.debug(
                "Setting Sample variable(s) implied by '%s'", implier_name)
            try:
                implier_value = self[implier_name]
            except KeyError:
                # Sample lacks the implier attribute; nothing to infer.
                _LOGGER.debug("No '%s' for this sample", implier_name)
                continue
            try:
                implied_value_by_column = implied[implier_value]
                _LOGGER.debug("Implications for '%s' = %s: %s",
                              implier_name, implier_value,
                              str(implied_value_by_column))
                # Known implier value: set each implied column on the sample.
                for colname, implied_value in \
                        implied_value_by_column.items():
                    _LOGGER.log(5, "Setting '%s'=%s",
                                colname, implied_value)
                    self.__setitem__(colname, implied_value)
            except KeyError:
                # This sample's value carries no implications.
                _LOGGER.log(
                    5, "Unknown implied value for implier '%s' = '%s'",
                    implier_name, implier_value)
def is_dormant(self):
"""
Determine whether this Sample is inactive.
By default, a Sample is regarded as active. That is, if it lacks an
indication about activation status, it's assumed to be active. If,
however, and there's an indication of such status, it must be '1'
in order to be considered switched 'on.'
:return bool: whether this Sample's been designated as dormant
"""
try:
flag = self[SAMPLE_EXECUTION_TOGGLE]
except KeyError:
# Regard default Sample state as active.
return False
# If specified, the activation flag must be set to '1'.
return flag != "1"
    @property
    def library(self):
        """
        Backwards-compatible alias for the protocol attribute.

        :return str: The protocol / NGS library name for this Sample.
        """
        # Emit a DeprecationWarning so callers migrate to .protocol.
        warnings.warn("Replace 'library' with 'protocol'", DeprecationWarning)
        return self.protocol
def get_subsample(self, subsample_name):
"""
Retrieve a single subsample by name.
:param str subsample_name: The name of the desired subsample. Should
match the subsample_name column in the subannotation sheet.
:return peppy.Subsample: Requested Subsample object
"""
subsamples = self.get_subsamples([subsample_name])
if len(subsamples) > 1:
_LOGGER.error("More than one subsample with that name.")
try:
return subsamples[0]
except IndexError:
raise ValueError("Sample {} has no subsample named {}.".
format(self.name, subsample_name))
def get_subsamples(self, subsample_names):
"""
Retrieve subsamples assigned to this sample
:param list[str] subsample_names: List of names of subsamples to retrieve
:return list[peppy.Subsample]: List of subsamples
"""
return [s for s in self.subsamples if s.subsample_name in subsample_names]
    def locate_data_source(self, data_sources, column_name=DATA_SOURCE_COLNAME,
                           source_key=None, extra_vars=None):
        """
        Uses the template path provided in the project config section
        "data_sources" to piece together an actual path by substituting
        variables (encoded by "{variable}"") with sample attributes.

        :param Mapping data_sources: mapping from key name (as a value in
            a cell of a tabular data structure) to, e.g., filepath
        :param str column_name: Name of sample attribute
            (equivalently, sample sheet column) specifying a derived column.
        :param str source_key: The key of the data_source,
            used to index into the project config data_sources section.
            By default, the source key will be taken as the value of
            the specified column (as a sample attribute).
            For cases where the sample doesn't have this attribute yet
            (e.g. in a merge table), you must specify the source key.
        :param dict extra_vars: By default, this will look to
            populate the template location using attributes found in the
            current sample; however, you may also provide a dict of extra
            variables that can also be used for variable replacement.
            These extra variables are given a higher priority.
        :return str | NoneType: expansion of the data source template with
            variable substitutions made; null if data_sources is null/empty,
            empty string if the source key is unknown, and the raw template
            if formatting fails
        :raises AttributeError: if no source key is given and this sample
            lacks the attribute named by column_name
        """
        if not data_sources:
            return None
        if not source_key:
            # Fall back to the sample's own value for the column.
            try:
                source_key = getattr(self, column_name)
            except AttributeError:
                reason = "'{attr}': to locate sample's data source, provide " \
                         "the name of a key from '{sources}' or ensure " \
                         "sample has attribute '{attr}'".format(
                             attr=column_name, sources=DATA_SOURCES_SECTION)
                raise AttributeError(reason)
        try:
            regex = data_sources[source_key]
        except KeyError:
            _LOGGER.debug(
                "{}: config lacks entry for data_source key: '{}' "
                "in column '{}'; known: {}".format(
                    self.name, source_key, column_name, data_sources.keys()))
            return ""
        # Populate any environment variables like $VAR with os.environ["VAR"]
        # Now handled upstream, in project.
        #regex = os.path.expandvars(regex)
        try:
            # Grab a temporary dictionary of sample attributes and update these
            # with any provided extra variables to use in the replacement.
            # This is necessary for derived_attributes in the merge table.
            # Here the copy() prevents the actual sample from being
            # updated by update().
            temp_dict = self.__dict__.copy()
            temp_dict.update(extra_vars or dict())
            val = regex.format(**temp_dict)
            # A glob pattern in the filled template expands to all matches,
            # joined into a single space-delimited string.
            if '*' in val or '[' in val:
                _LOGGER.debug("Pre-glob: %s", val)
                val_globbed = sorted(glob.glob(val))
                if not val_globbed:
                    _LOGGER.debug("No files match provided glob: '%s'", val)
                else:
                    val = " ".join(val_globbed)
                _LOGGER.debug("Post-glob: %s", val)
        except Exception as e:
            # Best-effort: on any formatting failure, hand back the raw
            # template rather than failing the whole sample.
            _LOGGER.error("Cannot correctly format data source ({}): {} -- {}".
                          format(str(type(e).__name__), str(e), regex))
            return regex
        return val
def make_sample_dirs(self):
"""
Creates sample directory structure if it doesn't exist.
"""
for path in self.paths:
if not os.path.exists(path):
os.makedirs(path)
def set_file_paths(self, project=None):
"""
Sets the paths of all files for this sample.
:param attmap.PathExAttMap project: object with pointers to data paths and
such, either full Project or PathExAttMap with sufficient data
"""
# Any columns specified as "derived" will be constructed
# based on regex in the "data_sources" section of project config.
project = project or self.prj
for col in project.get("derived_attributes", []):
# Only proceed if the specified column exists
# and was not already merged or derived.
if not hasattr(self, col):
_LOGGER.debug("%s lacks attribute '%s'", self.name, col)
continue
elif col in self.merged_cols:
_LOGGER.debug("'%s' is already merged for %s", col, self.name)
continue
elif col in self.derived_cols_done:
_LOGGER.debug("'%s' has been derived for %s", col, self.name)
continue
_LOGGER.debug("Deriving column for %s '%s': '%s'",
self.__class__.__name__, self.name, col)
# Set a variable called {col}_key, so the
# original source can also be retrieved.
col_key = col + COL_KEY_SUFFIX
col_key_val = getattr(self, col)
_LOGGER.debug("Setting '%s' to '%s'", col_key, col_key_val)
setattr(self, col_key, col_key_val)
# Determine the filepath for the current data source and set that
# attribute on this sample if it's non-empty/null.
filepath = self.locate_data_source(
data_sources=project.get(DATA_SOURCES_SECTION),
column_name=col)
if filepath:
_LOGGER.debug("Setting '%s' to '%s'", col, filepath)
setattr(self, col, filepath)
else:
_LOGGER.debug("Not setting null/empty value for data source "
"'{}': {}".format(col, type(filepath)))
self.derived_cols_done.append(col)
# Parent
self.results_subdir = project.metadata.results_subdir
self.paths.sample_root = sample_folder(project, self)
# Track url
bigwig_filename = self.name + ".bigWig"
try:
# Project's public_html folder
self.bigwig = os.path.join(
project.trackhubs.trackhub_dir, bigwig_filename)
self.track_url = \
"{}/{}".format(project.trackhubs.url, bigwig_filename)
except:
_LOGGER.debug("No trackhub/URL")
pass
    def set_genome(self, genomes):
        """
        Set the genome for this Sample.

        :param Mapping[str, str] genomes: genome assembly by organism name
        """
        # Delegate to the shared assembly-setting helper.
        self._set_assembly("genome", genomes)
    def set_transcriptome(self, transcriptomes):
        """
        Set the transcriptome for this Sample.

        :param Mapping[str, str] transcriptomes: transcriptome assembly by
            organism name
        """
        # Delegate to the shared assembly-setting helper.
        self._set_assembly("transcriptome", transcriptomes)
    def _set_assembly(self, ome, assemblies):
        """
        Set an assembly attribute ("genome"/"transcriptome") by organism.

        :param str ome: name of the attribute to set on this sample
        :param Mapping[str, str] assemblies: assembly name keyed by organism
        """
        if not assemblies:
            _LOGGER.debug("Empty/null assemblies mapping")
            return
        # Missing organism attribute or unknown organism both yield null.
        try:
            assembly = assemblies[self.organism]
        except AttributeError:
            _LOGGER.debug("Sample '%s' lacks organism attribute", self.name)
            assembly = None
        except KeyError:
            _LOGGER.log(5, "Unknown {} value: '{}'".
                        format(ome, self.organism))
            assembly = None
        _LOGGER.log(5, "Setting {} as {} on sample: '{}'".
                    format(assembly, ome, self.name))
        setattr(self, ome, assembly)
    def set_pipeline_attributes(
            self, pipeline_interface, pipeline_name, permissive=True):
        """
        Set pipeline-specific sample attributes.

        Some sample attributes are relative to a particular pipeline run,
        like which files should be considered inputs, what is the total
        input file size for the sample, etc. This function sets these
        pipeline-specific sample attributes, provided via a PipelineInterface
        object and the name of a pipeline to select from that interface.

        :param PipelineInterface pipeline_interface: A PipelineInterface
            object that has the settings for this given pipeline.
        :param str pipeline_name: Which pipeline to choose.
        :param bool permissive: whether to simply log a warning or error
            message rather than raising an exception if sample file is not
            found or otherwise cannot be read, default True
        """
        # Settings ending in _attr are lists of attribute keys.
        # These attributes are then queried to populate values
        # for the primary entries.
        req_attr_names = [("ngs_input_files", "ngs_inputs_attr"),
                          ("required_input_files", REQUIRED_INPUTS_ATTR_NAME),
                          ("all_input_files", ALL_INPUTS_ATTR_NAME)]
        for name_src_attr, name_dst_attr in req_attr_names:
            _LOGGER.log(5, "Value of '%s' will be assigned to '%s'",
                        name_src_attr, name_dst_attr)
            value = pipeline_interface.get_attribute(
                pipeline_name, name_src_attr)
            _LOGGER.log(5, "Assigning '{}': {}".format(name_dst_attr, value))
            setattr(self, name_dst_attr, value)
        # Post-processing of input attribute assignments.
        # Ensure that there's a valid all_inputs_attr.
        if not getattr(self, ALL_INPUTS_ATTR_NAME):
            required_inputs = getattr(self, REQUIRED_INPUTS_ATTR_NAME)
            setattr(self, ALL_INPUTS_ATTR_NAME, required_inputs)
        # Convert attribute keys into values.
        if self.ngs_inputs_attr:
            _LOGGER.log(5, "Handling NGS input attributes: '%s'", self.name)
            # NGS data inputs exit, so we can add attributes like
            # read_type, read_length, paired.
            self.ngs_inputs = self.get_attr_values("ngs_inputs_attr")
            # Only (re)infer read type when absent or invalid.
            set_rtype_reason = ""
            if not hasattr(self, "read_type"):
                set_rtype_reason = "read_type not yet set"
            elif not self.read_type or self.read_type.lower() \
                    not in VALID_READ_TYPES:
                set_rtype_reason = "current read_type is invalid: '{}'". \
                    format(self.read_type)
            if set_rtype_reason:
                _LOGGER.debug(
                    "Setting read_type for %s '%s': %s",
                    self.__class__.__name__, self.name, set_rtype_reason)
                self.set_read_type(permissive=permissive)
            else:
                _LOGGER.debug("read_type is already valid: '%s'",
                              self.read_type)
        else:
            _LOGGER.log(5, "No NGS inputs: '%s'", self.name)
        # Assign values for actual inputs attributes.
        self.required_inputs = self.get_attr_values(REQUIRED_INPUTS_ATTR_NAME)
        self.all_inputs = self.get_attr_values(ALL_INPUTS_ATTR_NAME)
        _LOGGER.debug("All '{}' inputs: {}".format(self.name, self.all_inputs))
        self.input_file_size = get_file_size(self.all_inputs)
def set_read_type(self, rlen_sample_size=10, permissive=True):
"""
For a sample with attr `ngs_inputs` set, this sets the
read type (single, paired) and read length of an input file.
:param int rlen_sample_size: Number of reads to sample to infer read type,
default 10.
:param bool permissive: whether to simply log a warning or error message
rather than raising an exception if sample file is not found or
otherwise cannot be read, default True.
"""
# TODO: determine how return is being used and standardized (null vs. bool)
# Initialize the parameters in case there is no input_file, so these
# attributes at least exist - as long as they are not already set!
for attr in ["read_length", "read_type", "paired"]:
if not hasattr(self, attr):
_LOGGER.log(5, "Setting null for missing attribute: '%s'",
attr)
setattr(self, attr, None)
# ngs_inputs must be set
if not self.ngs_inputs:
return False
ngs_paths = " ".join(self.ngs_inputs)
# Determine extant/missing filepaths.
existing_files = list()
missing_files = list()
for path in ngs_paths.split(" "):
if not os.path.exists(path):
missing_files.append(path)
else:
existing_files.append(path)
_LOGGER.debug("{} extant file(s): {}".
format(len(existing_files), existing_files))
_LOGGER.debug("{} missing file(s): {}".
format(len(missing_files), missing_files))
# For samples with multiple original BAM files, check all.
files = list()
check_by_ftype = {"bam": check_bam, "fastq": check_fastq}
for input_file in existing_files:
try:
file_type = parse_ftype(input_file)
read_lengths, paired = check_by_ftype[file_type](
input_file, rlen_sample_size)
except (KeyError, TypeError):
message = "Input file type should be one of: {}".format(
check_by_ftype.keys())
if not permissive:
raise TypeError(message)
_LOGGER.error(message)
return
except NotImplementedError as e:
if not permissive:
raise
_LOGGER.warning(e.message)
return
except IOError:
if not permissive:
raise
_LOGGER.error("Input file does not exist or "
"cannot be read: %s", str(input_file))
for feat_name in self._FEATURE_ATTR_NAMES:
if not hasattr(self, feat_name):
setattr(self, feat_name, None)
return
except OSError as e:
_LOGGER.error(str(e) + " [file: {}]".format(input_file))
for feat_name in self._FEATURE_ATTR_NAMES:
if not hasattr(self, feat_name):
setattr(self, feat_name, None)
return
# Determine most frequent read length among sample.
rlen, _ = sorted(read_lengths.items(), key=itemgetter(1))[-1]
_LOGGER.log(5,
"Selected {} as most frequent read length from "
"sample read length distribution: {}".format(
rlen, read_lengths))
# Decision about paired-end status is majority-rule.
if paired > (rlen_sample_size / 2):
read_type = "paired"
paired = True
else:
read_type = "single"
paired = False
files.append([rlen, read_type, paired])
# Check agreement between different files
# if all values are equal, set to that value;
# if not, set to None and warn the user about the inconsistency
for i, feature in enumerate(self._FEATURE_ATTR_NAMES):
feature_values = set(f[i] for f in files)
if 1 == len(feature_values):
feat_val = files[0][i]
else:
_LOGGER.log(5, "%d values among %d files for feature '%s'",
len(feature_values), len(files), feature)
feat_val = None
_LOGGER.log(5, "Setting '%s' on %s to %s",
feature, self.__class__.__name__, feat_val)
setattr(self, feature, feat_val)
if getattr(self, feature) is None and len(existing_files) > 0:
_LOGGER.warning("Not all input files agree on '%s': '%s'",
feature, self.name)
def to_yaml(self, path=None, subs_folder_path=None, delimiter="_"):
    """
    Serializes itself in YAML format.

    :param str path: A file path to write yaml to; provide this or
        the subs_folder_path
    :param str subs_folder_path: path to folder in which to place file
        that's being written; provide this or a full filepath
    :param str delimiter: text to place between the sample name and the
        suffix within the filename; irrelevant if there's no suffix
    :return str: filepath used (same as input if given, otherwise the
        path value that was inferred)
    :raises ValueError: if neither full filepath nor path to extant
        parent directory is provided.
    """
    # NOTE(review): the docstring advertises a return value, but no value
    # is returned below; the chosen path is stored on self.yaml_file
    # instead — confirm which is intended.
    # Determine filepath, prioritizing anything given, then falling
    # back to a default using this Sample's Project's submission_subdir.
    # Use the sample name and YAML extension as the file name,
    # interjecting a pipeline name as a subfolder within the Project's
    # submission_subdir if such a pipeline name is provided.
    if not path:
        if not subs_folder_path:
            raise ValueError(
                "To represent {} on disk, provide a full path or a path "
                "to a parent (submissions) folder".
                format(self.__class__.__name__))
        _LOGGER.debug("Creating filename for %s: '%s'",
                      self.__class__.__name__, self.name)
        filename = self.generate_filename(delimiter=delimiter)
        _LOGGER.debug("Filename: '%s'", filename)
        path = os.path.join(subs_folder_path, filename)
    _LOGGER.debug("Setting %s filepath: '%s'",
                  self.__class__.__name__, path)
    self.yaml_file = path

    def obj2dict(obj, name=None,
                 to_skip=(SAMPLE_SUBANNOTATIONS_KEY, "samples",
                          NAME_TABLE_ATTR, "sheet_attributes")):
        """
        Build representation of object as a dict, recursively
        for all objects that might be attributes of self.

        :param object obj: what to serialize to write to YAML.
        :param str name: name of the object to represent.
        :param Iterable[str] to_skip: names of attributes to ignore.
        """
        if name:
            _LOGGER.log(5, "Converting to dict: '{}'".format(name))
        if name == PRJ_REF:
            # Project reference gets special handling: only a curated
            # subset of project data is stored, via grab_project_data.
            _LOGGER.debug("Attempting to store %s's project data",
                          self.__class__.__name__)
            prj_data = grab_project_data(obj)
            _LOGGER.debug("Sample's project data: {}".format(prj_data))
            return {k: obj2dict(v, name=k) for k, v in prj_data.items()}
        if isinstance(obj, list):
            return [obj2dict(i) for i in obj]
        if isinstance(obj, AttMap):
            return {k: obj2dict(v, name=k) for k, v in obj.__dict__.items()
                    if k not in to_skip}
        elif isinstance(obj, Mapping):
            return {k: obj2dict(v, name=k)
                    for k, v in obj.items() if k not in to_skip}
        elif isinstance(obj, (Paths, Sample)):
            return {k: obj2dict(v, name=k)
                    for k, v in obj.__dict__.items() if
                    k not in to_skip}
        elif isinstance(obj, Series):
            _LOGGER.warning("Serializing series as mapping, not array-like")
            return obj.to_dict()
        elif hasattr(obj, 'dtype'):  # numpy data types
            # TODO: this fails with ValueError for multi-element array.
            return obj.item()
        elif isnull(obj):
            # Missing values as evaluated by pandas.isnull().
            # This gets correctly written into yaml.
            return "NaN"
        else:
            return obj

    _LOGGER.debug("Serializing %s: '%s'",
                  self.__class__.__name__, self.name)
    serial = obj2dict(self)
    # TODO: this is the way to add the project metadata reference if
    # the metadata items are to be accessed directly on the Sample rather
    # than through the Project; that is:
    #
    # sample.output_dir
    # instead of
    # sample.prj.output_dir
    #
    # In this case, "prj" should be added to the default argument to the
    # to_skip parameter in the function signature, and the instance check
    # of the object to serialize against Project can be dropped.
    """
    try:
        serial.update(self.prj.metadata)
    except AttributeError:
        _LOGGER.debug("%s lacks %s reference",
                      self.__class__.__name__, Project.__class__.__name__)
    else:
        _LOGGER.debug("Added %s metadata to serialized %s",
                      Project.__class__.__name__, self.__class__.__name__)
    """
    with open(self.yaml_file, 'w') as outfile:
        _LOGGER.debug("Generating YAML data for %s: '%s'",
                      self.__class__.__name__, self.name)
        try:
            yaml_data = yaml.safe_dump(serial, default_flow_style=False)
        except yaml.representer.RepresenterError:
            _LOGGER.error("SERIALIZED SAMPLE DATA: {}".format(serial))
            raise
        outfile.write(yaml_data)
def update(self, newdata, **kwargs):
    """
    Update Sample object with attributes from a dict.
    """
    # Any key present in both sources must carry the same value;
    # otherwise the intended final value would be ambiguous.
    conflicting = [key for key in set(newdata.keys()) & set(kwargs.keys())
                   if newdata[key] != kwargs[key]]
    if conflicting:
        raise ValueError("{} duplicate keys with different values: {}".
                         format(len(conflicting), ", ".join(conflicting)))
    # Apply positional-dict entries first, then keyword entries.
    for source in (newdata, kwargs):
        for key, value in source.items():
            setattr(self, key, value)
@copy
class Paths(object):
    """ A class to hold paths as attributes. """

    def __eq__(self, other):
        # Two Paths objects are equal iff they hold exactly the same
        # attribute names and values.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Explicit negation of __eq__ — presumably kept for Python 2
        # compatibility, where __ne__ is not derived automatically.
        return not self == other

    def __getitem__(self, key):
        # Support dict-style access as an alias for attribute access.
        return getattr(self, key)

    def __iter__(self):
        # Iterating yields the stored path values, not attribute names.
        return iter(self.__dict__.values())

    def __repr__(self):
        return "Paths object."
|
pepkit/peppy
|
peppy/project.py
|
suggest_implied_attributes
|
python
|
def suggest_implied_attributes(prj):
    """
    If the given project contains what could be implied attributes, suggest that.

    :param Iterable prj: Intent is a Project, but this could be any iterable
        of strings to check for suitability of declaration as implied attr
    :return list[str]: (likely empty) list of warning messages about project
        config keys that could be implied attributes
    """
    template = "To declare {}, consider using {}"
    return [template.format(key, IMPLICATIONS_DECLARATION)
            for key in prj if key in IDEALLY_IMPLIED]
|
If the given project contains keys that could be implied attributes, suggest declaring them as such.
:param Iterable prj: Intent is a Project, but this could be any iterable
of strings to check for suitability of declaration as implied attr
:return list[str]: (likely empty) list of warning messages about project
config keys that could be implied attributes
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/project.py#L1068-L1080
| null |
"""
Model a project with individual samples and associated data.
Project Models
=======================
Workflow explained:
- Create a Project object
- Samples are created and added to project (automatically)
In the process, the models will check:
- Project structure (created if not existing)
- Existence of a CSV sample sheet with minimal fields
- Construction of a path to a sample's input file, and checking for its existence
- Read type/length of samples (optionally)
Example:
.. code-block:: python
from models import Project
prj = Project("config.yaml")
# that's it!
Explore:
.. code-block:: python
# see all samples
prj.samples
# get fastq file of first sample
prj.samples[0].fastq
# get all bam files of WGBS samples
[s.mapped for s in prj.samples if s.protocol == "WGBS"]
prj.metadata.results # results directory of project
# export again the project's annotation
prj.sample_table.write(os.path.join(prj.metadata.output_dir, "sample_annotation.csv"))
# project options are read from the config file
# but can be changed on the fly:
prj = Project("test.yaml")
# change options on the fly
prj.config["merge_technical"] = False
# annotation sheet not specified initially in config file
prj.add_sample_sheet("sample_annotation.csv")
"""
from collections import Counter
import logging
import os
import sys
if sys.version_info < (3, 3):
from collections import Iterable, Mapping
else:
from collections.abc import Iterable, Mapping
import warnings
import pandas as pd
import yaml
from attmap import PathExAttMap
from divvy import ComputingConfiguration
from .const import *
from .exceptions import PeppyError
from .sample import merge_sample, Sample
from .utils import \
add_project_sample_constants, copy, fetch_samples, infer_delimiter, is_url, \
non_null_value, warn_derived_cols, warn_implied_cols
# Cap on how many samples Project.__repr__ will show inline.
MAX_PROJECT_SAMPLES_REPR = 12
# Current and legacy config keys locating pipeline interface(s)/folder.
NEW_PIPES_KEY = "pipeline_interfaces"
OLD_PIPES_KEY = "pipelines_dir"
# Legacy metadata keys for the sample tables (migrated in parse_config_file).
OLD_ANNS_META_KEY = "sample_annotation"
OLD_SUBS_META_KEY = "sample_subannotation"
# Keyword arguments applied to every annotations-sheet pandas.read_csv call.
# keep_default_na=False avoids 'nan'/'None' leaking into generated commands.
READ_CSV_KWARGS = {"engine": "python", "dtype": str, "index_col": False,
                   "keep_default_na": False}
GENOMES_KEY = "genomes"
TRANSCRIPTOMES_KEY = "transcriptomes"
# Config keys better declared via implied attributes (see
# suggest_implied_attributes) than as top-level project settings.
IDEALLY_IMPLIED = [GENOMES_KEY, TRANSCRIPTOMES_KEY]
_LOGGER = logging.getLogger(__name__)
class ProjectContext(object):
    """ Wrap a Project to provide protocol-specific Sample selection. """

    def __init__(self, prj, selector_attribute=ASSAY_KEY,
                 selector_include=None, selector_exclude=None):
        """ Project and what to include/exclude defines the context. """
        self.prj = prj
        self.include = selector_include
        self.exclude = selector_exclude
        self.attribute = selector_attribute

    def __getattr__(self, item):
        """ Samples are context-specific; other requests are handled
        locally or dispatched to Project. """
        # NOTE: __getattr__ only fires for names not found normally, so
        # direct __dict__ access below avoids re-entering this method.
        if item == "samples":
            # Filter the project's samples per this context's selectors.
            return fetch_samples(
                self.prj, selector_attribute=self.attribute,
                selector_include=self.include, selector_exclude=self.exclude)
        if item in ["prj", "include", "exclude"]:
            # Attributes requests that this context/wrapper handles
            return self.__dict__[item]
        else:
            # Dispatch attribute request to Project.
            return getattr(self.prj, item)

    def __getitem__(self, item):
        """ Provide the Mapping-like item access to the instance's Project. """
        return self.prj[item]

    def __enter__(self):
        """ References pass through this instance as needed, so the context
        provided is the instance itself. """
        return self

    def __exit__(self, *args):
        """ Context teardown. """
        pass
@copy
class Project(PathExAttMap):
"""
A class to model a Project (collection of samples and metadata).
:param str config_file: Project config file (YAML).
:param str subproject: Subproject to use within configuration file, optional
:param bool dry: If dry mode is activated, no directories
will be created upon project instantiation.
:param bool permissive: Whether a error should be thrown if
a sample input file(s) do not exist or cannot be open.
:param bool file_checks: Whether sample input files should be checked
for their attributes (read type, read length)
if this is not set in sample metadata.
:param str compute_env_file: Environment configuration YAML file specifying
compute settings.
:param type no_environment_exception: type of exception to raise if environment
settings can't be established, optional; if null (the default),
a warning message will be logged, and no exception will be raised.
:param type no_compute_exception: type of exception to raise if compute
settings can't be established, optional; if null (the default),
a warning message will be logged, and no exception will be raised.
:param bool defer_sample_construction: whether to wait to build this Project's
Sample objects until they're needed, optional; by default, the basic
Sample is created during Project construction
:Example:
.. code-block:: python
from models import Project
prj = Project("config.yaml")
"""
DERIVED_ATTRIBUTES_DEFAULT = [DATA_SOURCE_COLNAME]
def __init__(self, config_file, subproject=None, dry=False,
             permissive=True, file_checks=False, compute_env_file=None,
             no_environment_exception=None, no_compute_exception=None,
             defer_sample_construction=False):
    """ Parse config, set up directories, and (optionally) build Samples;
    see the class docstring for parameter documentation. """
    _LOGGER.debug("Creating %s from file: '%s'",
                  self.__class__.__name__, config_file)
    super(Project, self).__init__()

    # Compute settings are delegated to divvy's ComputingConfiguration.
    self.dcc = ComputingConfiguration(
        config_file=compute_env_file, no_env_error=no_environment_exception,
        no_compute_exception=no_compute_exception)
    self.permissive = permissive
    self.file_checks = file_checks
    self._subproject = None

    # Include the path to the config file.
    self.config_file = os.path.abspath(config_file)

    # Parse config file
    _LOGGER.debug("Parsing %s config file", self.__class__.__name__)
    self.parse_config_file(subproject)

    if self.non_null("data_sources"):
        # Expand paths now, so that it's not done for every sample.
        for src_key, src_val in self.data_sources.items():
            src_val = os.path.expandvars(src_val)
            # Relative (non-URL) source templates are anchored at the
            # config file's folder.
            if not (os.path.isabs(src_val) or is_url(src_val)):
                src_val = os.path.join(os.path.dirname(self.config_file), src_val)
            self.data_sources[src_key] = src_val
    else:
        # Ensure data_sources is at least set if it wasn't parsed.
        self["data_sources"] = None

    self.name = self.infer_name()

    # Set project's directory structure
    if not dry:
        _LOGGER.debug("Ensuring project directories exist")
        self.make_project_dirs()

    # Establish derived columns.
    try:
        # Do not duplicate derived column names.
        self.derived_attributes.extend(
            [colname for colname in self.DERIVED_ATTRIBUTES_DEFAULT
             if colname not in self.derived_attributes])
    except AttributeError:
        self.derived_attributes = self.DERIVED_ATTRIBUTES_DEFAULT

    self.finalize_pipelines_directory()

    # Initialize table caches, then eagerly parse the annotations sheet
    # if the config declared one.
    self["_" + SAMPLE_SUBANNOTATIONS_KEY] = None
    path_anns_file = self[METADATA_KEY].get(NAME_TABLE_ATTR)
    self_table_attr = "_" + NAME_TABLE_ATTR
    self[self_table_attr] = None
    if path_anns_file:
        _LOGGER.debug("Reading sample annotations sheet: '%s'", path_anns_file)
        self[self_table_attr] = self.parse_sample_sheet(path_anns_file)
    else:
        _LOGGER.warning("No sample annotations sheet in config")

    # Basic sample maker will handle name uniqueness check.
    if defer_sample_construction or self._sample_table is None:
        self._samples = None
    else:
        self._set_basic_samples()
def __repr__(self):
    """ Representation in interpreter. """
    if not len(self):
        return "{}"
    header = "{} (from '{}')".format(
        self.__class__.__name__, self.config_file)
    try:
        n = len(self._samples)
    except (AttributeError, TypeError):
        # Samples not yet built; show only the header.
        pass
    else:
        header += " with {} sample(s)".format(n)
        # Keep the output readable by inlining only small sample lists.
        if n <= MAX_PROJECT_SAMPLES_REPR:
            header += ": {}".format(repr(self._samples))
    return "{} -- {}".format(header, super(Project, self).__repr__())
def __setitem__(self, key, value):
    """
    Override here to handle deprecated special-meaning keys.

    :param str key: Key to map to given value
    :param object value: Arbitrary value to bind to given key
    """
    if key == "derived_columns":
        # Legacy name: warn, then store under the modern key.
        warn_derived_cols()
        key = DERIVATIONS_DECLARATION
    elif key == "implied_columns":
        warn_implied_cols()
        key = IMPLICATIONS_DECLARATION
    elif key == METADATA_KEY:
        # Wrap the metadata section in the project's _Metadata type.
        value = _Metadata(value)
    super(Project, self).__setitem__(key, value)
@property
def constants(self):
    """
    Return key-value pairs of pan-Sample constants for this Project.

    :return Mapping: collection of KV pairs, each representing a pairing
        of attribute name and attribute value
    """
    # Populated from the config's "constants" section in parse_config_file.
    return self._constants
@property
def derived_columns(self):
    """
    Collection of sample attributes for which value of each is derived from elsewhere

    :return list[str]: sample attribute names for which value is derived
    """
    warn_derived_cols()
    # Fall back to an empty list for a project without derivations.
    return getattr(self, "derived_attributes", [])
@property
def implied_columns(self):
    """
    Collection of sample attributes for which value of each is implied by other(s)

    :return list[str]: sample attribute names for which value is implied by other(s)
    """
    warn_implied_cols()
    # Fall back to an empty mapping for a project without implications.
    if hasattr(self, "implied_attributes"):
        return self.implied_attributes
    return PathExAttMap()
@property
def num_samples(self):
    """
    Count the number of samples available in this Project.

    :return int: number of samples available in this Project.
    """
    # sample_names is an iterator; count by materializing it.
    return len(list(self.sample_names))
@property
def output_dir(self):
    """
    Directory in which to place results and submissions folders.

    By default, assume that the project's configuration file specifies
    an output directory, and that this is therefore available within
    the project metadata. If that assumption does not hold, though,
    consider the folder in which the project configuration file lives
    to be the project's output directory.

    :return str: path to the project's output directory, either as
        specified in the configuration file or the folder that contains
        the project's configuration file.
    """
    # Sentinel distinguishes "attribute missing" from "attribute is None".
    absent = object()
    declared = getattr(getattr(self, "metadata", absent),
                       "output_dir", absent)
    if declared is absent:
        return os.path.dirname(self.config_file)
    return declared
@property
def project_folders(self):
    """
    Names of folders to nest within a project output directory.

    :return Iterable[str]: names of output-nested folders
    """
    # These are metadata keys; make_project_dirs creates the paths they map to.
    return ["results_subdir", "submission_subdir"]
@property
def protocols(self):
    """
    Determine this Project's unique protocol names.

    :return Set[str]: collection of this Project's unique protocol names
    """
    observed = set()
    for sample in self.samples:
        try:
            observed.add(sample.protocol)
        except AttributeError:
            # A sample may legitimately lack a protocol; just note it.
            _LOGGER.debug("Sample '%s' lacks protocol", sample.sample_name)
    return observed
@property
def required_metadata(self):
    """
    Names of metadata fields that must be present for a valid project.

    Make a base project as unconstrained as possible by requiring no
    specific metadata attributes. It's likely that some common-sense
    requirements may arise in domain-specific client applications, in
    which case this can be redefined in a subclass.

    :return Iterable[str]: names of metadata fields required by a project
    """
    # Base Project imposes no requirements; subclasses may override.
    return []
@property
def sample_names(self):
    """ Names of samples of which this Project is aware. """
    table = getattr(self, NAME_TABLE_ATTR)
    try:
        names = table[SAMPLE_NAME_COLNAME]
    except KeyError:
        # Help diagnose a likely delimiter mismatch before re-raising.
        column_names = list(table.columns)
        print("Table columns: {}".format(", ".join(column_names)))
        if len(column_names) == 1:
            print("Does delimiter used in the sample sheet match file extension?")
        raise
    return iter(names)
@property
def samples(self):
    """
    Generic/base Sample instance for each of this Project's samples.

    :return Iterable[Sample]: Sample instance for each
        of this Project's samples
    """
    if not self._samples:
        if self.sample_table is None:
            _LOGGER.warning("No samples are defined")
            return []
        # Build lazily on first access.
        self._samples = self._prep_samples()
    return self._samples
@property
def sample_annotation(self):
    """
    Get the path to the project's sample annotations sheet.

    :return str: path to the project's sample annotations sheet
    """
    deprecation_msg = "{} is deprecated; please instead use {}".format(
        OLD_ANNS_META_KEY, NAME_TABLE_ATTR)
    warnings.warn(deprecation_msg, DeprecationWarning)
    return getattr(self, NAME_TABLE_ATTR)
@property
def sample_subannotation(self):
    """
    Return the data table that stores metadata for subsamples/units.

    :return pandas.core.frame.DataFrame | NoneType: table of
        subsamples/units metadata
    """
    deprecation_msg = "{} is deprecated; use {}".format(
        OLD_SUBS_META_KEY, SAMPLE_SUBANNOTATIONS_KEY)
    warnings.warn(deprecation_msg, DeprecationWarning)
    return getattr(self, SAMPLE_SUBANNOTATIONS_KEY)
@property
def sample_table(self):
    """
    Return (possibly first parsing/building) the table of samples.

    :return pandas.core.frame.DataFrame | NoneType: table of samples'
        metadata, if one is defined
    """
    from copy import copy
    cache_key = "_" + NAME_TABLE_ATTR
    if self.get(cache_key) is None:
        # Parse lazily on first request.
        path = self[METADATA_KEY].get(NAME_TABLE_ATTR)
        if path is None:
            return None
        self[cache_key] = self.parse_sample_sheet(path)
    # Hand out a shallow copy so callers can't mutate the cache.
    return copy(self[cache_key])
@property
def sheet(self):
    """
    Annotations/metadata sheet describing this Project's samples.

    :return pandas.core.frame.DataFrame: table of samples in this Project
    """
    deprecation_msg = "sheet is deprecated; instead use {}".format(
        NAME_TABLE_ATTR)
    warnings.warn(deprecation_msg, DeprecationWarning)
    return getattr(self, NAME_TABLE_ATTR)
@property
def subproject(self):
    """
    Return currently active subproject or None if none was activated

    :return str: name of currently active subproject
    """
    # Set by parse_config_file/activate_subproject; None means base project.
    return self._subproject
@property
def subsample_table(self):
    """
    Return (possibly first parsing/building) the table of subsamples.

    :return pandas.core.frame.DataFrame | NoneType: table of subsamples'
        metadata, if the project defines such a table
    """
    from copy import copy
    cache_key = "_" + SAMPLE_SUBANNOTATIONS_KEY
    if self.get(cache_key) is None:
        # Parse lazily on first request, inferring the field delimiter.
        path = self[METADATA_KEY].get(SAMPLE_SUBANNOTATIONS_KEY)
        if path is None:
            return None
        self[cache_key] = pd.read_csv(
            path, sep=infer_delimiter(path), **READ_CSV_KWARGS)
    # Hand out a shallow copy so callers can't mutate the cache.
    return copy(self[cache_key])
@property
def templates_folder(self):
    """
    Path to folder with default submission templates.

    :return str: path to folder with default submission templates
    """
    # Delegated to the divvy ComputingConfiguration instance.
    return self.dcc.templates_folder
def infer_name(self):
    """
    Infer project name from config file path.

    First assume the name is the folder in which the config file resides,
    unless that folder is named "metadata", in which case the project name
    is the parent of that folder.

    :return str: inferred name for project.
    """
    if hasattr(self, "name"):
        return self.name
    folder = os.path.dirname(self.config_file)
    inferred = os.path.basename(folder)
    if inferred == METADATA_KEY:
        # Config lives in a "metadata" subfolder; use its parent's name.
        inferred = os.path.basename(os.path.dirname(folder))
    return inferred
def get_subsample(self, sample_name, subsample_name):
    """
    From indicated sample get particular subsample.

    :param str sample_name: Name of Sample from which to get subsample
    :param str subsample_name: Name of Subsample to get
    :return peppy.Subsample: The Subsample of requested name from indicated
        sample matching given name
    """
    # Delegate: resolve the sample, then ask it for the subsample.
    return self.get_sample(sample_name).get_subsample(subsample_name)
def get_sample(self, sample_name):
    """
    Get an individual sample object from the project.

    Will raise a ValueError if the sample is not found. In the case of
    multiple samples with the same name (which is not typically allowed),
    a warning is raised and the first sample is returned.

    :param str sample_name: The name of a sample to retrieve
    :return Sample: The requested Sample object
    """
    matches = self.get_samples([sample_name])
    if not matches:
        raise ValueError("Project has no sample named {}.".format(sample_name))
    if len(matches) > 1:
        _LOGGER.warning("More than one sample was detected; returning the first")
    return matches[0]
def deactivate_subproject(self):
    """
    Bring the original project settings back

    This method will bring the original project settings back after the
    subproject activation.

    :return peppy.Project: Updated Project instance
    """
    if self.subproject is None:
        _LOGGER.warning("No subproject has been activated.")
    # Rebuild from the original config file to restore base-project state.
    self.__init__(self.config_file)
    return self
def activate_subproject(self, subproject):
    """
    Update settings based on subproject-specific values.

    This method will update Project attributes, adding new values
    associated with the subproject indicated, and in case of collision with
    an existing key/attribute the subproject's value will be favored.

    :param str subproject: A string with a subproject name to be activated
    :return peppy.Project: Updated Project instance
    :raise TypeError: if subproject is None
    """
    if subproject is None:
        raise TypeError("The subproject argument can not be NoneType."
                        " To deactivate a subproject use the deactivate_subproject method.")
    # Snapshot the current public settings before rebuilding; the
    # underscore filter here makes a later per-key re-check unnecessary.
    previous = [(k, v) for k, v in self.items() if not k.startswith("_")]
    conf_file = self.config_file
    self.__init__(conf_file, subproject)
    for k, v in previous:
        # Restore any setting the subproject did not supply or nulled out.
        if k not in self or (self.is_null(k) and v is not None):
            _LOGGER.debug("Restoring {}: {}".format(k, v))
            self[k] = v
    self._subproject = subproject
    return self
def get_samples(self, sample_names):
    """
    Returns a list of sample objects given a list of sample names

    :param list sample_names: A list of sample names to retrieve
    :return list[Sample]: A list of Sample objects
    """
    # Build the set once for O(1) membership tests rather than
    # scanning the name list once per project sample.
    wanted = set(sample_names)
    return [s for s in self.samples if s.name in wanted]
def build_sheet(self, *protocols):
    """
    Create table of subset of samples matching one of given protocols.

    :return pandas.core.frame.DataFrame: DataFrame with from base version
        of each of this Project's samples, for indicated protocol(s) if
        given, else all of this Project's samples
    """
    # Use all protocols if none are explicitly specified.
    known = set(protocols or self.protocols)
    kept = []
    skipped = []
    for sample in self.samples:
        try:
            protocol = sample.protocol
        except AttributeError:
            # A sample without a protocol is always retained.
            kept.append(sample)
            continue
        target = kept if protocol in known else skipped
        target.append(sample)
    if skipped:
        msg_data = "\n".join(["{} ({})".format(s, s.protocol)
                              for s in skipped])
        _LOGGER.debug("Skipped %d sample(s) for protocol. Known: %s\n%s",
                      len(skipped), ", ".join(known), msg_data)
    return pd.DataFrame(kept)
def _check_unique_samples(self):
    """ Handle scenario in which sample names are not unique. """
    # Kept as a method (rather than inlined at the call site) so that
    # subclasses can override the duplicate-name policy while reusing
    # the external Counter-based tally.
    name_counts = Counter(s.name for s in self._samples)
    repeats = {name: n for name, n in name_counts.items() if n > 1}
    if repeats:
        hist_text = "\n".join(
            "{}: {}".format(name, n) for name, n in repeats.items())
        _LOGGER.warning("Non-unique sample names:\n{}".format(hist_text))
def finalize_pipelines_directory(self, pipe_path=""):
    """
    Finalize the establishment of a path to this project's pipelines.

    With the passed argument, override anything already set.
    Otherwise, prefer path provided in this project's config, then
    local pipelines folder, then a location set in project environment.

    :param str pipe_path: (absolute) path to pipelines
    :raises PipelinesException: if (prioritized) search in attempt to
        confirm or set pipelines directory failed
    :raises TypeError: if pipeline(s) path(s) argument is provided and
        can't be interpreted as a single path or as a flat collection
        of path(s)
    """
    # Pass pipeline(s) dirpath(s) or use one already set.
    if not pipe_path:
        try:
            pipe_path = self.metadata[NEW_PIPES_KEY]
        except KeyError:
            pipe_path = []
    # Ensure we're working with a flattened list.
    if isinstance(pipe_path, str):
        pipe_path = [pipe_path]
    elif isinstance(pipe_path, Iterable) and \
            not isinstance(pipe_path, Mapping):
        pipe_path = list(pipe_path)
    else:
        # Unusable value (e.g. a mapping); record it and fall back to none.
        _LOGGER.debug("Got {} as pipelines path(s) ({})".
                      format(pipe_path, type(pipe_path)))
        pipe_path = []
    self[METADATA_KEY][NEW_PIPES_KEY] = pipe_path
def get_arg_string(self, pipeline_name):
    """
    For this project, given a pipeline, return an argument string
    specified in the project config file.

    :param str pipeline_name: name of the pipeline (config section) for
        which to assemble option/argument text
    :return str: space-joined CLI text of default plus pipeline-specific
        options; empty string if neither section is present
    """

    def make_optarg_text(opt, arg):
        """ Transform flag/option into CLI-ready text version. """
        if arg:
            try:
                arg = os.path.expandvars(arg)
            except TypeError:
                # Rely on direct string formatting of arg.
                pass
            return "{} {}".format(opt, arg)
        else:
            # Falsy argument means a bare flag.
            return opt

    def create_argtext(name):
        """ Create command-line argstring text from config section. """
        try:
            optargs = getattr(self.pipeline_args, name)
        except AttributeError:
            return ""
        # NS using __dict__ will add in the metadata from AttrDict (doh!)
        _LOGGER.debug("optargs.items(): {}".format(optargs.items()))
        optargs_texts = [make_optarg_text(opt, arg)
                         for opt, arg in optargs.items()]
        _LOGGER.debug("optargs_texts: {}".format(optargs_texts))
        # TODO: may need to fix some spacing issues here.
        return " ".join(optargs_texts)

    default_argtext = create_argtext(DEFAULT_COMPUTE_RESOURCES_NAME)
    _LOGGER.debug("Creating additional argstring text for pipeline '%s'",
                  pipeline_name)
    pipeline_argtext = create_argtext(pipeline_name)

    if not pipeline_argtext:
        # The project config may not have an entry for this pipeline;
        # no problem! There are no pipeline-specific args. Return text
        # from default arguments, whether empty or not.
        return default_argtext
    elif default_argtext:
        # Non-empty pipeline-specific and default argtext
        return " ".join([default_argtext, pipeline_argtext])
    else:
        # No default argtext, but non-empty pipeline-specific argtext
        return pipeline_argtext
def make_project_dirs(self):
    """
    Creates project directory structure if it doesn't exist.
    """
    for folder_name in self.project_folders:
        folder_path = self.metadata[folder_name]
        _LOGGER.debug("Ensuring project dir exists: '%s'", folder_path)
        if os.path.exists(folder_path):
            continue
        _LOGGER.debug("Attempting to create project folder: '%s'",
                      folder_path)
        try:
            os.makedirs(folder_path)
        except OSError as e:
            # Best-effort: creation failure is reported, not fatal.
            _LOGGER.warning("Could not create project folder: '%s'",
                            str(e))
def _set_basic_samples(self):
    """ Build the base Sample objects from the annotations sheet data. """
    # This should be executed just once, establishing the Project's
    # base Sample objects if they don't already exist.
    sub_ann = None
    try:
        sub_ann = self.metadata[SAMPLE_SUBANNOTATIONS_KEY]
    except KeyError:
        try:
            # Backwards compatibility
            sub_ann = self.metadata["merge_table"]
        except KeyError:
            _LOGGER.debug("No sample subannotations")
        else:
            warnings.warn("merge_table is deprecated; please instead use {}".
                          format(SAMPLE_SUBANNOTATIONS_KEY), DeprecationWarning)
    if sub_ann and os.path.isfile(sub_ann):
        _LOGGER.info("Reading subannotations: %s", sub_ann)
        subann_table = pd.read_csv(sub_ann,
                                   sep=infer_delimiter(sub_ann), **READ_CSV_KWARGS)
        self["_" + SAMPLE_SUBANNOTATIONS_KEY] = subann_table
        _LOGGER.debug("Subannotations shape: {}".format(subann_table.shape))
    else:
        _LOGGER.debug("Alleged path to sample subannotations data is "
                      "not a file: '%s'", str(sub_ann))
    # Set samples and handle non-unique names situation.
    self._check_subann_name_overlap()
    self._samples = self._prep_samples()
    self._check_unique_samples()
def _prep_samples(self):
    """
    Merge this Project's Sample object and set file paths.

    :return list[Sample]: collection of this Project's Sample objects
    """
    samples = []
    # One Sample per annotations-sheet row; NaN cells are dropped so a
    # Sample only carries attributes actually present for it.
    for _, row in getattr(self, NAME_TABLE_ATTR).iterrows():
        sample = Sample(row.dropna(), prj=self)

        # Add values that are constant across this Project's samples.
        sample = add_project_sample_constants(sample, self)

        sample.set_genome(self.get("genomes"))
        sample.set_transcriptome(self.get("transcriptomes"))

        _LOGGER.debug("Merging sample '%s'", sample.name)
        sample.infer_attributes(self.get(IMPLICATIONS_DECLARATION))
        merge_sample(sample, getattr(self, SAMPLE_SUBANNOTATIONS_KEY),
                     self.data_sources, self.derived_attributes)
        _LOGGER.debug("Setting sample file paths")
        sample.set_file_paths(self)
        # Hack for backwards-compatibility
        # Pipelines should now use `data_source`)
        _LOGGER.debug("Setting sample data path")
        try:
            sample.data_path = sample.data_source
        except AttributeError:
            _LOGGER.log(5, "Sample '%s' lacks data source; skipping "
                           "data path assignment", sample.sample_name)
        else:
            _LOGGER.log(5, "Path to sample data: '%s'", sample.data_source)
        samples.append(sample)
    return samples
def _check_subann_name_overlap(self):
    """
    Check if all subannotations have a matching sample, and warn if not

    :raises warning: if any of the subannotations sample_names does not
        have a corresponding Project.sample_name
    """
    subs = getattr(self, SAMPLE_SUBANNOTATIONS_KEY)
    if subs is None:
        _LOGGER.debug("No sample subannotations found for this Project.")
        return
    # Set membership for O(1) lookups; and use an explicit if/else
    # rather than a conditional expression evaluated for side effects.
    known_names = set(self.sample_names)
    info = " matching sample name for subannotation '{}'"
    for name in subs.sample_name.tolist():
        if name in known_names:
            _LOGGER.debug(("Found" + info).format(name))
        else:
            _LOGGER.warning(("Couldn't find" + info).format(name))
def parse_config_file(self, subproject=None):
    """
    Parse provided yaml config file and check required fields exist.

    :param str subproject: Name of subproject to activate, optional
    :raises KeyError: if config file lacks required section(s)
    """
    _LOGGER.debug("Setting %s data from '%s'",
                  self.__class__.__name__, self.config_file)
    with open(self.config_file, 'r') as conf_file:
        config = yaml.safe_load(conf_file)

    assert isinstance(config, Mapping), \
        "Config file parse did not yield a Mapping; got {} ({})".\
        format(config, type(config))

    # Nudge users toward implied attributes for genomes/transcriptomes.
    for msg in suggest_implied_attributes(config):
        warnings.warn(msg, DeprecationWarning)

    _LOGGER.debug("{} config data: {}".format(
        self.__class__.__name__, config))

    # Parse yaml into the project's attributes.
    _LOGGER.debug("Adding attributes for {}: {}".format(
        self.__class__.__name__, config.keys()))
    _LOGGER.debug("Config metadata: {}".format(config[METADATA_KEY]))
    self.add_entries(config)
    _LOGGER.debug("{} now has {} keys: {}".format(
        self.__class__.__name__, len(self.keys()), self.keys()))

    # Overwrite any config entries with entries in the subproject.
    if subproject:
        if non_null_value(SUBPROJECTS_SECTION, config):
            _LOGGER.debug("Adding entries for subproject '{}'".
                          format(subproject))
            try:
                subproj_updates = config[SUBPROJECTS_SECTION][subproject]
            except KeyError:
                raise MissingSubprojectError(subproject, config[SUBPROJECTS_SECTION])
            _LOGGER.debug("Updating with: {}".format(subproj_updates))
            self.add_entries(subproj_updates)
            self._subproject = subproject
            _LOGGER.info("Using subproject: '{}'".format(subproject))
        else:
            raise MissingSubprojectError(subproject)
    else:
        _LOGGER.debug("No subproject requested")

    # In looper 0.4, for simplicity the paths section was eliminated.
    # For backwards compatibility, mirror the paths section into metadata.
    if "paths" in config:
        _LOGGER.warning(
            "Paths section in project config is deprecated. "
            "Please move all paths attributes to metadata section. "
            "This option will be removed in future versions.")
        self.metadata.add_entries(self.paths)
        _LOGGER.debug("Metadata: %s", str(self.metadata))
        delattr(self, "paths")
    self._constants = config.get("constants", dict())

    # Ensure required absolute paths are present and absolute.
    for var in self.required_metadata:
        if var not in self.metadata:
            raise ValueError("Missing required metadata item: '{}'".format(var))
        self[METADATA_KEY][var] = os.path.expandvars(self.metadata.get(var))

    _LOGGER.debug("{} metadata: {}".format(self.__class__.__name__,
                                           self.metadata))

    # Some metadata attributes are considered relative to the output_dir
    # Here we make these absolute, so they won't be incorrectly made
    # relative to the config file.
    # These are optional because there are defaults
    config_vars = {
        # Defaults = {"variable": "default"}, relative to output_dir.
        "results_subdir": "results_pipeline",
        "submission_subdir": "submission"
    }
    for key, value in config_vars.items():
        if key in self.metadata:
            if not os.path.isabs(self.metadata[key]):
                self.metadata[key] = \
                    os.path.join(self.output_dir, self.metadata[key])
        else:
            self.metadata[key] = os.path.join(self.output_dir, value)

    # Variables which are relative to the config file
    # All variables in these sections should be relative to project config.
    relative_sections = [METADATA_KEY, "pipeline_config"]
    _LOGGER.debug("Parsing relative sections")
    for sect in relative_sections:
        if not hasattr(self, sect):
            _LOGGER.log(5, "%s lacks relative section '%s', skipping",
                        self.__class__.__name__, sect)
            continue
        relative_vars = getattr(self, sect)
        if not relative_vars:
            _LOGGER.log(5, "No relative variables, continuing")
            continue
        for var in relative_vars.keys():
            if not hasattr(relative_vars, var) or \
                    getattr(relative_vars, var) is None:
                continue
            relpath = getattr(relative_vars, var)
            _LOGGER.debug("Ensuring absolute path(s) for '%s'", var)
            # Parsed from YAML, so small space of possible datatypes.
            if isinstance(relpath, list):
                absolute = [self._ensure_absolute(maybe_relpath)
                            for maybe_relpath in relpath]
            else:
                absolute = self._ensure_absolute(relpath)
            _LOGGER.debug("Setting '%s' to '%s'", var, absolute)
            setattr(relative_vars, var, absolute)

    if self.dcc.compute is None:
        _LOGGER.log(5, "No compute, no submission template")

    # Migrate legacy sample-table metadata keys to the modern names.
    old_table_keys = [OLD_ANNS_META_KEY, OLD_SUBS_META_KEY]
    new_table_keys = [SAMPLE_ANNOTATIONS_KEY, SAMPLE_SUBANNOTATIONS_KEY]
    metadata = self[METADATA_KEY]
    for k_old, k_new in zip(old_table_keys, new_table_keys):
        try:
            v = metadata[k_old]
        except KeyError:
            continue
        metadata[k_new] = v
        del metadata[k_old]
    self[METADATA_KEY] = metadata
    if NAME_TABLE_ATTR not in self[METADATA_KEY]:
        self[METADATA_KEY][NAME_TABLE_ATTR] = None
def set_project_permissions(self):
    """ Make the project's public_html folder executable. """
    target = self.trackhubs.trackhub_dir
    try:
        os.chmod(target, 0o0755)
    except OSError:
        # Deliberately best-effort: inability to change the folder's
        # mode is not treated as an error.
        pass
def _ensure_absolute(self, maybe_relpath):
    """ Ensure that a possibly relative path is absolute. """
    if not isinstance(maybe_relpath, str):
        raise TypeError(
            "Attempting to ensure non-text value is absolute path: {} ({})".
            format(maybe_relpath, type(maybe_relpath)))
    _LOGGER.log(5, "Ensuring absolute: '%s'", maybe_relpath)
    # Already-absolute paths and URLs are passed through untouched.
    if os.path.isabs(maybe_relpath) or is_url(maybe_relpath):
        _LOGGER.log(5, "Already absolute")
        return maybe_relpath
    # User-home / environment-variable expansion may make it absolute.
    expanded = os.path.expanduser(os.path.expandvars(maybe_relpath))
    _LOGGER.log(5, "Expanded: '%s'", expanded)
    if os.path.isabs(expanded):
        _LOGGER.log(5, "Expanded is absolute")
        return expanded
    _LOGGER.log(5, "Making non-absolute path '%s' be absolute",
                maybe_relpath)
    # Otherwise, anchor the path at the project config file's directory.
    config_dirpath = os.path.dirname(self.config_file)
    _LOGGER.log(5, "config_dirpath: %s", config_dirpath)
    return os.path.join(config_dirpath, maybe_relpath)
@staticmethod
def parse_sample_sheet(sample_file, dtype=str):
    """
    Check if csv file exists and has all required columns.

    :param str sample_file: path to sample annotations file.
    :param type dtype: data type for CSV read.
    :return pandas.core.frame.DataFrame: table populated by the project's
        sample annotations data
    :raises IOError: if given annotations file can't be read.
    :raises ValueError: if required column(s) is/are missing.
    """
    # Although no null value replacements or supplements are being passed,
    # toggling the keep_default_na value to False solved an issue with 'nan'
    # and/or 'None' as an argument for an option in the pipeline command
    # that's generated from a Sample's attributes.
    #
    # See https://github.com/pepkit/peppy/issues/159 for the original issue
    # and https://github.com/pepkit/peppy/pull/160 for the pull request
    # that resolved it.
    #
    # NOTE(review): the `dtype` parameter is accepted but never forwarded
    # to read_csv here — presumably READ_CSV_KWARGS carries it; confirm.
    sep = infer_delimiter(sample_file)
    try:
        df = pd.read_csv(sample_file, sep=sep, **READ_CSV_KWARGS)
    except IOError:
        # A missing/unreadable sheet becomes the project-specific error.
        raise Project.MissingSampleSheetError(sample_file)
    else:
        _LOGGER.info("Setting sample sheet from file '%s'", sample_file)
        # Only the sample-name column is strictly required; absence is
        # reported as a warning rather than the ValueError the docstring
        # mentions.
        missing = {SAMPLE_NAME_COLNAME} - set(df.columns)
        if len(missing) != 0:
            _LOGGER.warning(
                "Annotation sheet ('{}') is missing column(s):\n{}\n"
                "It has: {}".format(sample_file, "\n".join(missing),
                                    ", ".join(list(df.columns))))
    return df
class MissingMetadataException(PeppyError):
    """ Project needs certain metadata. """

    def __init__(self, missing_section, path_config_file=None):
        # Mention the config file only when one was provided.
        parts = ["Project configuration lacks required metadata section {}".
                 format(missing_section)]
        if path_config_file:
            parts.append("; used config file '{}'".format(path_config_file))
        super(Project.MissingMetadataException, self).__init__("".join(parts))


class MissingSampleSheetError(PeppyError):
    """ Represent case in which sample sheet is specified but nonexistent. """

    def __init__(self, sheetfile):
        folder = os.path.dirname(sheetfile)
        # List siblings of the missing sheet to aid debugging, when possible.
        entries = os.listdir(folder) if os.path.isdir(folder) else []
        message = "Missing sample annotation sheet ({}); a project need not use " \
                  "a sample sheet, but if it does the file must exist.".\
            format(sheetfile)
        if entries:
            message += " Contents of parent folder: {}".format(", ".join(entries))
        super(Project.MissingSampleSheetError, self).__init__(message)
@staticmethod
def _omit_from_repr(k, cls):
    """
    Hook for exclusion of particular value from a representation

    :param hashable k: key to consider for omission
    :param type cls: data type on which to base the exclusion
    :return bool: whether the given key k should be omitted from
        text representation
    """
    # Accept either a class object or a plain class-name string.
    type_name = cls.__name__ if isinstance(cls, type) else cls
    hidden_keys = {
        "Project": [
            "samples", "_samples", "interfaces_by_protocol",
            "_" + SAMPLE_SUBANNOTATIONS_KEY, SAMPLE_SUBANNOTATIONS_KEY,
            NAME_TABLE_ATTR, "_" + NAME_TABLE_ATTR],
        "Subsample": [NAME_TABLE_ATTR, "sample", "merged_cols"],
        "Sample": [NAME_TABLE_ATTR, "prj", "merged_cols"]
    }
    return k in hidden_keys.get(type_name, [])
class MissingSubprojectError(PeppyError):
    """ Error when project config lacks a requested subproject. """

    def __init__(self, sp, defined=None):
        """
        Create exception with missing subproj request.

        :param str sp: the requested (and missing) subproject
        :param Iterable[str] defined: collection of names of defined subprojects
        """
        msg = "Subproject '{}' not found".format(sp)
        # Append the known subprojects when a collection was supplied.
        if isinstance(defined, Iterable):
            ctx = "defined subproject(s): {}".format(
                ", ".join(map(str, defined)))
            msg = "{}; {}".format(msg, ctx)
        super(MissingSubprojectError, self).__init__(msg)
class _Metadata(PathExAttMap):
    """ Project section with important information """

    def __getattr__(self, item, default=None):
        """
        Reference the new attribute and warn about deprecation.

        :param str item: name of attribute to fetch
        :param object default: value to return when the attribute is absent
        """
        if item == OLD_PIPES_KEY:
            _warn_pipes_deprecation()
            item = NEW_PIPES_KEY
        # Bug fix: forward the caller-supplied default; the previous
        # version hard-coded `default=None`, silently discarding it.
        return super(_Metadata, self).__getattr__(item, default=default)

    def __setitem__(self, key, value):
        """ Store the new key and warn about deprecation. """
        if key == OLD_PIPES_KEY:
            _warn_pipes_deprecation()
            key = NEW_PIPES_KEY
        return super(_Metadata, self).__setitem__(key, value)


def _warn_pipes_deprecation():
    """ Handle messaging regarding pipelines pointer deprecation. """
    msg = "Use of {} is deprecated; favor {}".\
        format(OLD_PIPES_KEY, NEW_PIPES_KEY)
    warnings.warn(msg, DeprecationWarning)
|
pepkit/peppy
|
peppy/utils.py
|
alpha_cased
|
python
|
def alpha_cased(text, lower=False):
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
|
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L34-L44
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    # Keep alphabetic characters plus the generic-protocol marker.
    kept = [ch for ch in text
            if ch.isalpha() or ch == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()


def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: Updates Sample instance, according to any and all
        constants declared by the Project.
    """
    sample.update(project.constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): mapping from observed read length
        to count of reads with that length, and the number of reads whose
        flag marks them as paired
    :raises OSError: if samtools cannot be invoked (not on PATH)
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # Tab-split SAM record: index 1 is the integer FLAG field;
            # index 9 is presumably the read sequence (length tallied).
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Enough reads sampled; terminate samtools rather than draining it.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    # Placeholder: fastq introspection is not supported yet.
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")


def coll_like(c):
    """
    Determine whether an object is collection-like.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    # Strings are iterable but are not treated as collections here.
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)


def copy(obj):
    """ Class decorator that attaches a deep-copying `copy` method. """
    def copy(self):
        """
        Copy self to a new object.
        """
        from copy import deepcopy
        return deepcopy(self)
    obj.copy = copy
    return obj


def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    # Collapse accidental doubled separators.
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes.
    """
    if filename is None:
        return 0.0
    if isinstance(filename, list):
        # Recurse over each entry and total the results.
        return float(sum(get_file_size(x) for x in filename))
    try:
        # Truthiness check skips empty tokens from repeated spaces; the
        # previous `f is not ''` was an identity comparison with a literal
        # (deprecated SyntaxWarning since Python 3.8 and not guaranteed).
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f)
    except OSError:
        # File not found
        return 0.0
    return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned.
    If selector_include is specified, Samples without a protocol will be excluded,
    but if selector_exclude is specified, protocol-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param Project str: the sample selector_attribute to select for
    :param Iterable[str] | str selector_include: protocol(s) of interest;
        if specified, a Sample must
    :param Iterable[str] | str selector_exclude: protocol(s) to include
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or either
        lacks a protocol or does not match one of those in selector_exclude
    :raise TypeError: if both selector_include and selector_exclude protocols are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    """
    # No selection criteria at all: hand back every sample as a list.
    if selector_attribute is None or not (selector_include or selector_exclude):
        return list(proj.samples)
    # At least one sample must carry the selection attribute.
    if proj.samples and not any(hasattr(s, selector_attribute)
                                for s in proj.samples):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Simultaneous include and exclude is a nonsensical request.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")

    def _as_collection(values):
        # A bare string denotes a single selector value.
        return [values] if isinstance(values, str) else values

    # Attribute presence is tested explicitly (rather than via exception
    # handling) so a genuine AttributeError elsewhere still surfaces.
    if selector_include:
        # Strict: keep only samples whose attribute is in the include set.
        def _keep(sample):
            return hasattr(sample, selector_attribute) and \
                getattr(sample, selector_attribute) in \
                _as_collection(selector_include)
    else:
        # Loose: keep samples lacking the attribute or outside the exclude set.
        def _keep(sample):
            return not hasattr(sample, selector_attribute) or \
                getattr(sample, selector_attribute) not in \
                _as_collection(selector_exclude)
    return [s for s in proj.samples if _keep(s)]
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its, Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    collected = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        # Sections absent from this particular project are simply skipped.
        try:
            collected[section] = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
    return collected


def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Give the module a random, collision-unlikely name.
    alphabet = string.ascii_letters + string.digits
    name = "".join(random.choice(alphabet) for _ in range(20))
    # The import machinery differs across interpreter versions.
    if sys.version_info < (3, 3):
        import imp
        return imp.load_source(name, module_filepath)
    if sys.version_info < (3, 5):
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        return loader.load_module()
    from importlib import util as _il_util
    spec = _il_util.spec_from_file_location(name, module_filepath)
    module = _il_util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: extension if inference succeeded; else null
    """
    _, dot_ext = os.path.splitext(filepath)
    ext = dot_ext[1:].lower()
    delimiters = {"txt": "\t", "tsv": "\t", "csv": ","}
    return delimiters.get(ext)


def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    # None / empty string, or an empty sized (non-string) collection.
    return x in (None, "") or \
        (coll_like(x) and isinstance(x, Sized) and len(x) == 0)


def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    parsed = urlparse(maybe_url)
    return parsed.scheme != ""
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    return k in m and not is_null_like(m[k])


def parse_ftype(input_file):
    """
    Checks determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # str.endswith accepts a tuple of suffixes: one call replaces the
    # previous four-way `or` chain, with identical behavior.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: file path, raw text
        blob, or an iterable of already-split lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # Only a string can name a file; os.path.isfile raises an uncaught
    # TypeError for non-path arguments (e.g. a list of lines), which
    # previously crashed the documented Iterable branch. Gate the check.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    results_dir = prj.metadata.results_subdir
    return os.path.join(results_dir, sample["sample_name"])


@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even on exception.
        sys.stdout, sys.stderr = saved_out, saved_err


def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs("derived")


def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs("implied")


def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    warnings.warn("{pfx}_columns should be encoded and referenced "
                  "as {pfx}_attributes".format(pfx=prefix),
                  DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate; a bare string names a
        # single section in either parameter.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # Bug fix: _store_status returns *success*; the old
                    # code bound it to `failed` and logged the labels
                    # inverted ("FAILURE" for callable commands). Now
                    # consistent with the list branch below.
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # NOTE(review): `command` is interpolated into a shell line unescaped;
    # callers must not pass untrusted input here.
    # Use `command` to see if command is callable, store exit code
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        alias_value = " ('{}') ".format(name) if name else " "
        # Bug fix: the format arguments were swapped, quoting the alias
        # where the command belongs; now the command sits in the quotes
        # and the optional alias follows the word "Command".
        _LOGGER.debug("Command{0}'{1}' is not callable".
                      format(alias_value, command))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
check_bam
|
python
|
def check_bam(bam, o):
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired
|
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L62-L91
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
"""
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
"""
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
def add_project_sample_constants(sample, project):
"""
Update a Sample with constants declared by a Project.
:param Sample sample: sample instance for which to update constants
based on Project
:param Project project: Project with which to update Sample; it
may or may not declare constants. If not, no update occurs.
:return Sample: Updates Sample instance, according to any and all
constants declared by the Project.
"""
sample.update(project.constants)
return sample
def check_fastq(fastq, o):
raise NotImplementedError("Detection of read type/length for "
"fastq input is not yet implemented.")
def coll_like(c):
"""
Determine whether an object is collection-like.
:param object c: Object to test as collection
:return bool: Whether the argument is a (non-string) collection
"""
return isinstance(c, Iterable) and not isinstance(c, str)
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
from copy import deepcopy
return deepcopy(self)
obj.copy = copy
return obj
def expandpath(path):
"""
Expand a filesystem path that may or may not contain user/env vars.
:param str path: path to expand
:return str: expanded version of input path
"""
return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
def get_file_size(filename):
"""
Get size of all files in gigabytes (Gb).
:param str | collections.Iterable[str] filename: A space-separated
string or list of space-separated strings of absolute file paths.
:return float: size of file(s), in gigabytes.
"""
if filename is None:
return float(0)
if type(filename) is list:
return float(sum([get_file_size(x) for x in filename]))
try:
total_bytes = sum([float(os.stat(f).st_size)
for f in filename.split(" ") if f is not ''])
except OSError:
# File not found
return 0.0
else:
return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
"""
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
:param Project str: the sample selector_attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to include
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
"""
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples))
def grab_project_data(prj):
"""
From the given Project, grab Sample-independent data.
There are some aspects of a Project of which it's beneficial for a Sample
to be aware, particularly for post-hoc analysis. Since Sample objects
within a Project are mutually independent, though, each doesn't need to
know about any of the others. A Project manages its, Sample instances,
so for each Sample knowledge of Project data is limited. This method
facilitates adoption of that conceptual model.
:param Project prj: Project from which to grab data
:return Mapping: Sample-independent data sections from given Project
"""
if not prj:
return {}
data = {}
for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
try:
data[section] = getattr(prj, section)
except AttributeError:
_LOGGER.debug("Project lacks section '%s', skipping", section)
return data
def has_null_value(k, m):
"""
Determine whether a mapping has a null value for a given key.
:param Hashable k: Key to test for null value
:param Mapping m: Mapping to test for null value for given key
:return bool: Whether given mapping contains given key with null value
"""
return k in m and is_null_like(m[k])
def import_from_source(module_filepath):
"""
Import a module from a particular filesystem location.
:param str module_filepath: path to the file that constitutes the module
to import
:return module: module imported from the given location, named as indicated
:raises ValueError: if path provided does not point to an extant file
"""
import sys
if not os.path.exists(module_filepath):
raise ValueError("Path to alleged module file doesn't point to an "
"extant file: '{}'".format(module_filepath))
# Randomly generate module name.
fname_chars = string.ascii_letters + string.digits
name = "".join(random.choice(fname_chars) for _ in range(20))
# Import logic is version-dependent.
if sys.version_info >= (3, 5):
from importlib import util as _il_util
modspec = _il_util.spec_from_file_location(
name, module_filepath)
mod = _il_util.module_from_spec(modspec)
modspec.loader.exec_module(mod)
elif sys.version_info < (3, 3):
import imp
mod = imp.load_source(name, module_filepath)
else:
# 3.3 or 3.4
from importlib import machinery as _il_mach
loader = _il_mach.SourceFileLoader(name, module_filepath)
mod = loader.load_module()
return mod
def infer_delimiter(filepath):
"""
From extension infer delimiter used in a separated values file.
:param str filepath: path to file about which to make inference
:return str | NoneType: extension if inference succeeded; else null
"""
ext = os.path.splitext(filepath)[1][1:].lower()
return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
def is_null_like(x):
"""
Determine whether an object is effectively null.
:param object x: Object for which null likeness is to be determined.
:return bool: Whether given object is effectively "null."
"""
return x in [None, ""] or \
(coll_like(x) and isinstance(x, Sized) and 0 == len(x))
def is_url(maybe_url):
"""
Determine whether a path is a URL.
:param str maybe_url: path to investigate as URL
:return bool: whether path appears to be a URL
"""
return urlparse(maybe_url).scheme != ""
def non_null_value(k, m):
"""
Determine whether a mapping has a non-null value for a given key.
:param Hashable k: Key to test for non-null value
:param Mapping m: Mapping to test for non-null value for given key
:return bool: Whether given mapping contains given key with non-null value
"""
return k in m and not is_null_like(m[k])
def parse_ftype(input_file):
"""
Checks determine filetype from extension.
:param str input_file: String to check.
:return str: filetype (extension without dot prefix)
:raises TypeError: if file does not appear of a supported type
"""
if input_file.endswith(".bam"):
return "bam"
elif input_file.endswith(".fastq") or \
input_file.endswith(".fq") or \
input_file.endswith(".fq.gz") or \
input_file.endswith(".fastq.gz"):
return "fastq"
else:
raise TypeError("Type of input file ends in neither '.bam' "
"nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
"""
Interpret input argument as lines of data. This is intended to support
multiple input argument types to core model constructors.
:param str | collections.Iterable lines_or_path:
:param str delimiter: line separator used when parsing a raw string that's
not a file
:return collections.Iterable: lines of text data
:raises ValueError: if primary data argument is neither a string nor
another iterable
"""
if os.path.isfile(lines_or_path):
with open(lines_or_path, 'r') as f:
return f.readlines()
else:
_LOGGER.debug("Not a file: '{}'".format(lines_or_path))
if isinstance(lines_or_path, str):
return lines_or_path.split(delimiter)
elif isinstance(lines_or_path, Iterable):
return lines_or_path
else:
raise ValueError("Unable to parse as data lines {} ({})".
format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
"""
Get the path to this Project's root folder for the given Sample.
:param attmap.PathExAttMap | Project prj: project with which sample is associated
:param Mapping sample: Sample or sample data for which to get root output
folder path.
:return str: this Project's root folder for the given Sample
"""
return os.path.join(prj.metadata.results_subdir,
sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
"""
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
"""
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr
def warn_derived_cols():
""" Produce deprecation warning about derived columns. """
_warn_cols_to_attrs("derived")
def warn_implied_cols():
""" Produce deprecation warning about implied columns. """
_warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
"""
Validate PATH availability of executables referenced by a config file.
:param str path_conf_file: path to configuration file with
sections detailing executable tools to validate
:param Iterable[str] sections_to_check: names of
sections of the given configuration file that are relevant;
optional, will default to all sections if not given, but some
may be excluded via another optional parameter
:param Iterable[str] sections_to_skip: analogous to
the check names parameter, but for specific sections to skip.
"""
def __init__(self, path_conf_file,
sections_to_check=None, sections_to_skip=None):
super(CommandChecker, self).__init__()
self._logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
# TODO: could provide parse strategy as parameter to supplement YAML.
# TODO: could also derive parsing behavior from extension.
self.path = path_conf_file
with open(self.path, 'r') as conf_file:
conf_data = yaml.safe_load(conf_file)
# Determine which sections to validate.
sections = {sections_to_check} if isinstance(sections_to_check, str) \
else set(sections_to_check or conf_data.keys())
excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
else set(sections_to_skip or [])
sections -= excl
self._logger.info("Validating %d sections: %s",
len(sections),
", ".join(["'{}'".format(s) for s in sections]))
# Store per-command mapping of status, nested under section.
self.section_to_status_by_command = defaultdict(dict)
# Store only information about the failures.
self.failures_by_section = defaultdict(list) # Access by section.
self.failures = set() # Access by command.
for s in sections:
# Fetch section data or skip.
try:
section_data = conf_data[s]
except KeyError:
_LOGGER.info("No section '%s' in file '%s', skipping",
s, self.path)
continue
# Test each of the section's commands.
try:
# Is section's data a mapping?
commands_iter = section_data.items()
self._logger.debug("Processing section '%s' data "
"as mapping", s)
for name, command in commands_iter:
failed = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"FAILURE" if failed else "SUCCESS")
except AttributeError:
self._logger.debug("Processing section '%s' data as list", s)
commands_iter = conf_data[s]
for cmd_item in commands_iter:
# Item is K-V pair?
try:
name, command = cmd_item
except ValueError:
# Treat item as command itself.
name, command = "", cmd_item
success = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"SUCCESS" if success else "FAILURE")
def _store_status(self, section, command, name):
"""
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
"""
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded
@property
def failed(self):
"""
Determine whether *every* command succeeded for *every* config file
section that was validated during instance construction.
:return bool: conjunction of execution success test result values,
obtained by testing each executable in every validated section
"""
# This will raise exception even if validation was attempted,
# but no sections were used. Effectively, delegate responsibility
# to the caller to initiate validation only if doing so is relevant.
if not self.section_to_status_by_command:
raise ValueError("No commands validated")
return 0 == len(self.failures)
def is_command_callable(command, name=""):
"""
Check if command can be called.
:param str command: actual command to call
:param str name: nickname/alias by which to reference the command, optional
:return bool: whether given command's call succeeded
"""
# Use `command` to see if command is callable, store exit code
code = os.system(
"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
if code != 0:
alias_value = " ('{}') ".format(name) if name else " "
_LOGGER.debug("Command '{0}' is not callable: {1}".
format(alias_value, command))
return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
expandpath
|
python
|
def expandpath(path):
return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
|
Expand a filesystem path that may or may not contain user/env vars.
:param str path: path to expand
:return str: expanded version of input path
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L121-L128
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Keep only letters (plus the generic protocol key) and unify case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    kept = [ch for ch in text if ch.isalpha() or ch == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Apply a Project's declared constants to a Sample, in place.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: the same Sample instance, updated according to any and
        all constants declared by the Project.
    """
    constants = project.constants
    sample.update(constants)
    return sample
def check_bam(bam, o):
    """
    Inspect the first reads of a BAM file for read type and lengths.

    Requires ``samtools`` on PATH; the BAM is streamed through
    ``samtools view`` and only the first ``o`` alignment records are read.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): mapping from observed read length
        to its count, and the number of paired alignments seen.
    :raises OSError: if samtools cannot be invoked.
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # SAM fields: index 1 is the bitwise FLAG, index 9 is the read
            # sequence; its length is the read length.
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Stop samtools once enough records have been sampled.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """Placeholder: read type/length detection for fastq isn't supported."""
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")
def coll_like(c):
    """
    Test whether an object is a non-string collection.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """Class decorator: attach a deep-copying ``copy`` method to ``obj``."""
    from copy import deepcopy

    def copy(self):
        """
        Copy self to a new object.
        """
        return deepcopy(self)

    obj.copy = copy
    return obj
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 if a file is missing.
    """
    if filename is None:
        return float(0)
    # Accept list subclasses too; the original `type(filename) is list`
    # rejected them.
    if isinstance(filename, list):
        return float(sum(get_file_size(x) for x in filename))
    try:
        # Fixed: the original filtered with `f is not ''`, an identity
        # comparison against a literal (implementation-defined behavior and
        # a SyntaxWarning on modern CPython); truthiness is the correct
        # way to drop empty tokens produced by repeated spaces.
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f)
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the
    other hand, if neither is provided, all of the Project's Samples are
    returned. If selector_include is specified, Samples without the selected
    attribute will be excluded, but if selector_exclude is specified,
    Samples lacking the attribute will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: attribute value(s) of
        interest; if specified, a Sample must match one to be kept
    :param Iterable[str] | str selector_exclude: attribute value(s) to
        exclude
    :return list[Sample]: Collection of this Project's samples whose
        attribute either matches one of those in selector_include, or
        either lacks the attribute or does not match one of those in
        selector_exclude
    :raise TypeError: if both selector_include and selector_exclude are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    :raise AttributeError: if no sample has the requested selector_attribute
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    def make_set(items):
        # A bare string would otherwise be iterated character by character.
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        # Null/empty project yields an empty mapping rather than an error.
        return {}
    data = {}
    # Copy only the whitelisted, sample-independent sections that exist.
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            data[section] = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
    return data
def has_null_value(k, m):
    """
    Check that a mapping contains a key and that its value is null-like.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name to avoid clashing with a real module.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        # Modern spec-based importlib API.
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # Python 2 fallback: deprecated imp module.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def infer_delimiter(filepath):
    """
    Infer the field delimiter of a separated-values file from its extension.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    _, extension = os.path.splitext(filepath)
    known = {"txt": "\t", "tsv": "\t", "csv": ","}
    return known.get(extension[1:].lower())
def is_null_like(x):
    """
    Decide whether an object counts as effectively "null."

    Null-like means None, the empty string, or an empty sized collection.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x in (None, ""):
        return True
    return coll_like(x) and isinstance(x, Sized) and len(x) == 0
def is_url(maybe_url):
    """
    Decide whether a path looks like a URL (i.e. carries a scheme).

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    parsed = urlparse(maybe_url)
    return parsed.scheme != ""
def non_null_value(k, m):
    """
    Check that a mapping contains a key whose value is not null-like.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine sequencing-input filetype from a file's extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # str.endswith accepts a tuple of suffixes; one call covers all
    # recognized fastq spellings.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: path to a file to read
        lines from, a raw delimited string, or an iterable of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    if os.path.isfile(lines_or_path):
        # A path to an existing file: read its lines.
        with open(lines_or_path, 'r') as f:
            return f.readlines()
    else:
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
    if isinstance(lines_or_path, str):
        # Raw text: split into lines on the given delimiter.
        return lines_or_path.split(delimiter)
    elif isinstance(lines_or_path, Iterable):
        # Already line-like; pass through unchanged.
        return lines_or_path
    else:
        raise ValueError("Unable to parse as data lines {} ({})".
                         format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is
        associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    results = prj.metadata.results_subdir
    return os.path.join(results, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily route stdout and stderr through the given stream.

    Useful for capturing messages for easier inspection, or for discarding
    them by redirecting to something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even if the body raised.
        sys.stdout, sys.stderr = saved_out, saved_err
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs("derived")


def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs("implied")


def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    message = "{pfx}_columns should be encoded and referenced " \
              "as {pfx}_attributes".format(pfx=prefix)
    warnings.warn(message, DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate: explicit selection or all,
        # minus any explicit skips. Bare strings are treated as one name.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    succeeded = self._store_status(section=s, command=command,
                                                   name=name)
                    # Fixed: _store_status returns a *success* flag, so the
                    # previous label ("FAILURE" if result else "SUCCESS")
                    # was inverted; the list branch below had it right.
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test (True means callable).
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        NOTE(review): despite its name, this property returns True when
        there were NO failures; existing callers rely on that, so the
        semantics are kept as-is.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raise ValueError: if no commands were validated
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # NOTE(review): `command` is interpolated into a shell line; callers
    # must not pass untrusted input here (shell-injection risk).
    # Use shell builtin `command -v` to see if the command resolves;
    # store the exit code (0 means callable).
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        alias_value = " ('{}') ".format(name) if name else " "
        # Fixed: the original passed (alias_value, command) to a format
        # string whose placeholders expected (command, alias), yielding
        # messages like "Command ' ('x') ' is not callable: ls".
        _LOGGER.debug("Command '{0}'{1}is not callable".
                      format(command, alias_value))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
get_file_size
|
python
|
def get_file_size(filename):
if filename is None:
return float(0)
if type(filename) is list:
return float(sum([get_file_size(x) for x in filename]))
try:
total_bytes = sum([float(os.stat(f).st_size)
for f in filename.split(" ") if f is not ''])
except OSError:
# File not found
return 0.0
else:
return float(total_bytes) / (1024 ** 3)
|
Get size of all files in gigabytes (Gb).
:param str | collections.Iterable[str] filename: A space-separated
string or list of space-separated strings of absolute file paths.
:return float: size of file(s), in gigabytes.
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L131-L150
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
"""
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
"""
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
def add_project_sample_constants(sample, project):
"""
Update a Sample with constants declared by a Project.
:param Sample sample: sample instance for which to update constants
based on Project
:param Project project: Project with which to update Sample; it
may or may not declare constants. If not, no update occurs.
:return Sample: Updates Sample instance, according to any and all
constants declared by the Project.
"""
sample.update(project.constants)
return sample
def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired
def check_fastq(fastq, o):
raise NotImplementedError("Detection of read type/length for "
"fastq input is not yet implemented.")
def coll_like(c):
"""
Determine whether an object is collection-like.
:param object c: Object to test as collection
:return bool: Whether the argument is a (non-string) collection
"""
return isinstance(c, Iterable) and not isinstance(c, str)
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
from copy import deepcopy
return deepcopy(self)
obj.copy = copy
return obj
def expandpath(path):
"""
Expand a filesystem path that may or may not contain user/env vars.
:param str path: path to expand
:return str: expanded version of input path
"""
return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the
    other hand, if neither is provided, all of the Project's Samples are
    returned. If selector_include is specified, Samples without the
    attribute will be excluded, but if selector_exclude is specified,
    attribute-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: attribute value(s) of
        interest; if specified, a Sample must have the attribute and its
        value must be among these
    :param Iterable[str] | str selector_exclude: attribute value(s) to
        exclude; Samples lacking the attribute are kept
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or
        either lacks a protocol or does not match one of those in
        selector_exclude
    :raise TypeError: if both selector_include and selector_exclude are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    :raise AttributeError: if selection was requested but no sample has the
        named attribute
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute.
    if proj.samples and not any(hasattr(s, selector_attribute) for s in proj.samples):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")

    def make_set(items):
        # Normalize a lone string to a one-element collection so membership
        # tests below treat it as a whole value, not as its characters.
        if isinstance(items, str):
            items = [items]
        return items

    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                   getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                   getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a
    Sample to be aware, particularly for post-hoc analysis. Since Sample
    objects within a Project are mutually independent, each one doesn't
    need to know about any of the others. A Project manages its Sample
    instances, so for each Sample, knowledge of Project data is limited.
    This function facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    collected = {}
    for attr_name in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            collected[attr_name] = getattr(prj, attr_name)
        except AttributeError:
            # A Project need not declare every section; just note and move on.
            _LOGGER.debug("Project lacks section '%s', skipping", attr_name)
    return collected
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    The module is registered under a freshly generated random name, so
    repeated calls on the same path produce independent module objects.

    :param str module_filepath: path to the file that constitutes the module
    to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        # Modern importlib flow: build a spec, materialize, then execute.
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # Python 2: only the (long-deprecated) imp module is available.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        # Loader-based API that predates module_from_spec.
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    _, extension = os.path.splitext(filepath)
    known_delimiters = {"txt": "\t", "tsv": "\t", "csv": ","}
    return known_delimiters.get(extension[1:].lower())
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x in [None, ""]:
        return True
    # An empty (non-string) collection also counts as null-like.
    return coll_like(x) and isinstance(x, Sized) and 0 == len(x)
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    parsed = urlparse(maybe_url)
    return "" != parsed.scheme
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Checks determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    fastq_suffixes = (".fastq", ".fq", ".fq.gz", ".fastq.gz")
    if input_file.endswith(fastq_suffixes):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: path to a text file,
        raw text to split on the delimiter, or an already-parsed collection
        of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # BUGFIX: test the type before probing the filesystem; os.path.isfile
    # raises TypeError on a list argument, which broke the documented
    # "iterable of lines" input mode before the Iterable branch was reached.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        logging.getLogger(__name__).debug(
            "Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    elif isinstance(lines_or_path, Iterable):
        return lines_or_path
    else:
        raise ValueError("Unable to parse as data lines {} ({})".
                         format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is
        associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    results_root = prj.metadata.results_subdir
    return os.path.join(results_root, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved_streams = sys.stdout, sys.stderr
    sys.stdout = stream
    sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even if the body raised.
        sys.stdout, sys.stderr = saved_streams
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    # Delegate to the shared helper so both column warnings use one message.
    _warn_cols_to_attrs("derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    # Delegate to the shared helper so both column warnings use one message.
    _warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    # Shared implementation for warn_derived_cols / warn_implied_cols.
    warnings.warn("{pfx}_columns should be encoded and referenced "
                  "as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)    # Access by section.
        self.failures = set()                           # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status returns a *success* flag; the
                    # original bound it to a variable named `failed` and
                    # logged FAILURE on success (and vice versa).
                    succeeded = self._store_status(section=s, command=command,
                                                   name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.

        :return bool: whether the command was callable
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        NOTE(review): despite the name, this returns True when *no* command
        failed; callers may rely on that, so the semantics are preserved.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raise ValueError: if no commands were validated at all
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use shell `command -v` to see if command is callable; store exit code.
    # NOTE: `command` is interpolated into a shell string; do not pass
    # untrusted input here.
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        alias_value = " ('{}') ".format(name) if name else " "
        # BUGFIX: the original swapped the format arguments, printing the
        # alias in the command slot and the command after the colon.
        logging.getLogger(__name__).debug(
            "Command '{0}'{1}is not callable".format(command, alias_value))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
fetch_samples
|
python
|
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples))
|
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
  :param str selector_attribute: the name of the sample attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to include
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L153-L211
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    # Keep alphabetic characters plus the special generic-protocol marker.
    kept = [c for c in text if c.isalpha() or c == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    if lower:
        return filtered.lower()
    return filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: the same Sample instance, updated according to any
        and all constants declared by the Project.
    """
    project_constants = project.constants
    sample.update(project_constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): per-length read counts and the
        number of paired alignments among the inspected reads
    :raise OSError: if samtools is unavailable on the PATH
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # SAM record fields: index 1 is the bitwise FLAG, index 9 is SEQ.
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Stop samtools early; only the first `o` records are needed.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """
    Check reads in fastq file for read type and lengths (unimplemented).

    :param str fastq: fastq file path.
    :param int o: number of reads to look at for estimation.
    :raise NotImplementedError: always; fastq inspection is not supported yet.
    """
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")
def coll_like(c):
    """
    Determine whether an object is collection-like.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    if isinstance(c, str):
        # Strings iterate but are not treated as collections here.
        return False
    return isinstance(c, Iterable)
def copy(obj):
    # Class decorator: attach a deep-copying ``copy`` method to ``obj``
    # and return the (same) class.
    def copy(self):
        """
        Copy self to a new object.
        """
        from copy import deepcopy
        return deepcopy(self)
    obj.copy = copy
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    # Collapse accidental doubled separators.
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes.
    """
    if filename is None:
        return float(0)
    if isinstance(filename, list):
        # Sum the size of each collection member recursively.
        return float(sum(get_file_size(x) for x in filename))
    try:
        # BUGFIX: compare strings with equality, not identity; `f is not ''`
        # relies on CPython string interning and triggers a SyntaxWarning.
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f != '')
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def grab_project_data(prj):
"""
From the given Project, grab Sample-independent data.
There are some aspects of a Project of which it's beneficial for a Sample
to be aware, particularly for post-hoc analysis. Since Sample objects
within a Project are mutually independent, though, each doesn't need to
know about any of the others. A Project manages its, Sample instances,
so for each Sample knowledge of Project data is limited. This method
facilitates adoption of that conceptual model.
:param Project prj: Project from which to grab data
:return Mapping: Sample-independent data sections from given Project
"""
if not prj:
return {}
data = {}
for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
try:
data[section] = getattr(prj, section)
except AttributeError:
_LOGGER.debug("Project lacks section '%s', skipping", section)
return data
def has_null_value(k, m):
"""
Determine whether a mapping has a null value for a given key.
:param Hashable k: Key to test for null value
:param Mapping m: Mapping to test for null value for given key
:return bool: Whether given mapping contains given key with null value
"""
return k in m and is_null_like(m[k])
def import_from_source(module_filepath):
"""
Import a module from a particular filesystem location.
:param str module_filepath: path to the file that constitutes the module
to import
:return module: module imported from the given location, named as indicated
:raises ValueError: if path provided does not point to an extant file
"""
import sys
if not os.path.exists(module_filepath):
raise ValueError("Path to alleged module file doesn't point to an "
"extant file: '{}'".format(module_filepath))
# Randomly generate module name.
fname_chars = string.ascii_letters + string.digits
name = "".join(random.choice(fname_chars) for _ in range(20))
# Import logic is version-dependent.
if sys.version_info >= (3, 5):
from importlib import util as _il_util
modspec = _il_util.spec_from_file_location(
name, module_filepath)
mod = _il_util.module_from_spec(modspec)
modspec.loader.exec_module(mod)
elif sys.version_info < (3, 3):
import imp
mod = imp.load_source(name, module_filepath)
else:
# 3.3 or 3.4
from importlib import machinery as _il_mach
loader = _il_mach.SourceFileLoader(name, module_filepath)
mod = loader.load_module()
return mod
def infer_delimiter(filepath):
"""
From extension infer delimiter used in a separated values file.
:param str filepath: path to file about which to make inference
:return str | NoneType: extension if inference succeeded; else null
"""
ext = os.path.splitext(filepath)[1][1:].lower()
return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
def is_null_like(x):
"""
Determine whether an object is effectively null.
:param object x: Object for which null likeness is to be determined.
:return bool: Whether given object is effectively "null."
"""
return x in [None, ""] or \
(coll_like(x) and isinstance(x, Sized) and 0 == len(x))
def is_url(maybe_url):
"""
Determine whether a path is a URL.
:param str maybe_url: path to investigate as URL
:return bool: whether path appears to be a URL
"""
return urlparse(maybe_url).scheme != ""
def non_null_value(k, m):
"""
Determine whether a mapping has a non-null value for a given key.
:param Hashable k: Key to test for non-null value
:param Mapping m: Mapping to test for non-null value for given key
:return bool: Whether given mapping contains given key with non-null value
"""
return k in m and not is_null_like(m[k])
def parse_ftype(input_file):
"""
Checks determine filetype from extension.
:param str input_file: String to check.
:return str: filetype (extension without dot prefix)
:raises TypeError: if file does not appear of a supported type
"""
if input_file.endswith(".bam"):
return "bam"
elif input_file.endswith(".fastq") or \
input_file.endswith(".fq") or \
input_file.endswith(".fq.gz") or \
input_file.endswith(".fastq.gz"):
return "fastq"
else:
raise TypeError("Type of input file ends in neither '.bam' "
"nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
"""
Interpret input argument as lines of data. This is intended to support
multiple input argument types to core model constructors.
:param str | collections.Iterable lines_or_path:
:param str delimiter: line separator used when parsing a raw string that's
not a file
:return collections.Iterable: lines of text data
:raises ValueError: if primary data argument is neither a string nor
another iterable
"""
if os.path.isfile(lines_or_path):
with open(lines_or_path, 'r') as f:
return f.readlines()
else:
_LOGGER.debug("Not a file: '{}'".format(lines_or_path))
if isinstance(lines_or_path, str):
return lines_or_path.split(delimiter)
elif isinstance(lines_or_path, Iterable):
return lines_or_path
else:
raise ValueError("Unable to parse as data lines {} ({})".
format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
"""
Get the path to this Project's root folder for the given Sample.
:param attmap.PathExAttMap | Project prj: project with which sample is associated
:param Mapping sample: Sample or sample data for which to get root output
folder path.
:return str: this Project's root folder for the given Sample
"""
return os.path.join(prj.metadata.results_subdir,
sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
"""
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
"""
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr
def warn_derived_cols():
""" Produce deprecation warning about derived columns. """
_warn_cols_to_attrs("derived")
def warn_implied_cols():
""" Produce deprecation warning about implied columns. """
_warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
"""
Validate PATH availability of executables referenced by a config file.
:param str path_conf_file: path to configuration file with
sections detailing executable tools to validate
:param Iterable[str] sections_to_check: names of
sections of the given configuration file that are relevant;
optional, will default to all sections if not given, but some
may be excluded via another optional parameter
:param Iterable[str] sections_to_skip: analogous to
the check names parameter, but for specific sections to skip.
"""
def __init__(self, path_conf_file,
sections_to_check=None, sections_to_skip=None):
super(CommandChecker, self).__init__()
self._logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
# TODO: could provide parse strategy as parameter to supplement YAML.
# TODO: could also derive parsing behavior from extension.
self.path = path_conf_file
with open(self.path, 'r') as conf_file:
conf_data = yaml.safe_load(conf_file)
# Determine which sections to validate.
sections = {sections_to_check} if isinstance(sections_to_check, str) \
else set(sections_to_check or conf_data.keys())
excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
else set(sections_to_skip or [])
sections -= excl
self._logger.info("Validating %d sections: %s",
len(sections),
", ".join(["'{}'".format(s) for s in sections]))
# Store per-command mapping of status, nested under section.
self.section_to_status_by_command = defaultdict(dict)
# Store only information about the failures.
self.failures_by_section = defaultdict(list) # Access by section.
self.failures = set() # Access by command.
for s in sections:
# Fetch section data or skip.
try:
section_data = conf_data[s]
except KeyError:
_LOGGER.info("No section '%s' in file '%s', skipping",
s, self.path)
continue
# Test each of the section's commands.
try:
# Is section's data a mapping?
commands_iter = section_data.items()
self._logger.debug("Processing section '%s' data "
"as mapping", s)
for name, command in commands_iter:
failed = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"FAILURE" if failed else "SUCCESS")
except AttributeError:
self._logger.debug("Processing section '%s' data as list", s)
commands_iter = conf_data[s]
for cmd_item in commands_iter:
# Item is K-V pair?
try:
name, command = cmd_item
except ValueError:
# Treat item as command itself.
name, command = "", cmd_item
success = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"SUCCESS" if success else "FAILURE")
def _store_status(self, section, command, name):
"""
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
"""
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded
@property
def failed(self):
"""
Determine whether *every* command succeeded for *every* config file
section that was validated during instance construction.
:return bool: conjunction of execution success test result values,
obtained by testing each executable in every validated section
"""
# This will raise exception even if validation was attempted,
# but no sections were used. Effectively, delegate responsibility
# to the caller to initiate validation only if doing so is relevant.
if not self.section_to_status_by_command:
raise ValueError("No commands validated")
return 0 == len(self.failures)
def is_command_callable(command, name=""):
"""
Check if command can be called.
:param str command: actual command to call
:param str name: nickname/alias by which to reference the command, optional
:return bool: whether given command's call succeeded
"""
# Use `command` to see if command is callable, store exit code
code = os.system(
"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
if code != 0:
alias_value = " ('{}') ".format(name) if name else " "
_LOGGER.debug("Command '{0}' is not callable: {1}".
format(alias_value, command))
return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
grab_project_data
|
python
|
def grab_project_data(prj):
if not prj:
return {}
data = {}
for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
try:
data[section] = getattr(prj, section)
except AttributeError:
_LOGGER.debug("Project lacks section '%s', skipping", section)
return data
|
From the given Project, grab Sample-independent data.
There are some aspects of a Project of which it's beneficial for a Sample
to be aware, particularly for post-hoc analysis. Since Sample objects
within a Project are mutually independent, though, each doesn't need to
know about any of the others. A Project manages its Sample instances,
so for each Sample knowledge of Project data is limited. This method
facilitates adoption of that conceptual model.
:param Project prj: Project from which to grab data
:return Mapping: Sample-independent data sections from given Project
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L214-L236
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
"""
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
"""
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
def add_project_sample_constants(sample, project):
"""
Update a Sample with constants declared by a Project.
:param Sample sample: sample instance for which to update constants
based on Project
:param Project project: Project with which to update Sample; it
may or may not declare constants. If not, no update occurs.
:return Sample: Updates Sample instance, according to any and all
constants declared by the Project.
"""
sample.update(project.constants)
return sample
def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired
def check_fastq(fastq, o):
    """
    Placeholder for fastq read type/length detection.

    :param str fastq: fastq file path.
    :param int o: Number of reads to look at for estimation.
    :raise NotImplementedError: always; fastq support is pending.
    """
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")
def coll_like(c):
    """
    Determine whether an object is collection-like.

    A "collection" here is anything iterable other than a text string.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Class decorator attaching a deep-copying ``copy`` method.

    :param type obj: class to augment with a ``copy`` method
    :return type: the same class, now exposing ``copy``
    """
    from copy import deepcopy

    def copy(self):
        """
        Copy self to a new object.
        """
        return deepcopy(self)

    obj.copy = copy
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    # Expand ~user first, then environment variables, then collapse any
    # doubled separators left by the substitutions.
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 if null input or
        if any referenced file is missing.
    """
    if filename is None:
        return float(0)
    if type(filename) is list:
        # Recurse over each space-separated string in the list.
        return float(sum([get_file_size(x) for x in filename]))
    try:
        # Equality (not identity) test for the empty string; `is not ''`
        # is unreliable and a SyntaxWarning on Python 3.8+.
        total_bytes = sum([float(os.stat(f).st_size)
                           for f in filename.split(" ") if f != ''])
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned.
    If selector_include is specified, Samples without a protocol will be excluded,
    but if selector_exclude is specified, protocol-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute on which to select
    :param Iterable[str] | str selector_include: attribute value(s) of interest;
        if specified, a Sample must have one of these values to be kept
    :param Iterable[str] | str selector_exclude: attribute value(s) to exclude
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or either
        lacks a protocol or does not match one of those in selector_exclude
    :raise TypeError: if both selector_include and selector_exclude protocols are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    def make_set(items):
        # A bare string is one selection value, not an iterable of chars.
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    # Membership check first so we never trigger default-insertion on
    # mappings like defaultdict.
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Give the module a random throwaway name to avoid collisions.
    alphabet = string.ascii_letters + string.digits
    modname = "".join([random.choice(alphabet) for _ in range(20)])
    # Import machinery differs across Python versions.
    if sys.version_info >= (3, 5):
        from importlib import util as _il_util
        spec = _il_util.spec_from_file_location(modname, module_filepath)
        module = _il_util.module_from_spec(spec)
        spec.loader.exec_module(module)
    elif sys.version_info < (3, 3):
        import imp
        module = imp.load_source(modname, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(modname, module_filepath)
        module = loader.load_module()
    return module
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    _, dot_ext = os.path.splitext(filepath)
    # Drop the leading dot and normalize case before lookup.
    known = {"txt": "\t", "tsv": "\t", "csv": ","}
    return known.get(dot_ext[1:].lower())
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x is None or x == "":
        return True
    # An empty sized (non-string) collection also counts as null-like.
    return coll_like(x) and isinstance(x, Sized) and len(x) == 0
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    # A nonempty scheme (e.g. 'http', 'ftp') marks a URL.
    scheme = urlparse(maybe_url).scheme
    return bool(scheme)
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    # Membership check first avoids default-insertion on defaultdict-like
    # mappings.
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine filetype from a filename's extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # endswith accepts a tuple of candidate suffixes.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: path to a file of data
        lines, a raw delimited string, or an iterable of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # Only a string can name a file; probing a non-string with
    # os.path.isfile raises TypeError, so branch on type first.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    results_root = prj.metadata.results_subdir
    return os.path.join(results_root, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved = (sys.stdout, sys.stderr)
    sys.stdout = stream
    sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even on exception.
        sys.stdout, sys.stderr = saved
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    # Delegate to the shared columns->attributes deprecation helper.
    _warn_cols_to_attrs("derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    # Delegate to the shared columns->attributes deprecation helper.
    _warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)    # Access by section.
        self.failures = set()                           # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # _store_status returns *success*; log accordingly.
                    # (Previously this branch logged SUCCESS/FAILURE
                    # inverted relative to the list branch below.)
                    succeeded = self._store_status(
                        section=s, command=command, name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        NOTE(review): this property returns True when there are ZERO
        failures, which contradicts its name; callers may rely on the
        current value, so it is preserved — confirm intended semantics
        before renaming or inverting.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raise ValueError: if no commands were validated
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use `command` to see if command is callable, store exit code
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        alias_value = " ('{}') ".format(name) if name else " "
        # Log with the command quoted and the optional alias alongside;
        # the format arguments were previously swapped, so the alias was
        # printed where the command belongs.
        _LOGGER.debug("Command '{0}'{1}is not callable".format(
            command, alias_value))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
import_from_source
|
python
|
def import_from_source(module_filepath):
import sys
if not os.path.exists(module_filepath):
raise ValueError("Path to alleged module file doesn't point to an "
"extant file: '{}'".format(module_filepath))
# Randomly generate module name.
fname_chars = string.ascii_letters + string.digits
name = "".join(random.choice(fname_chars) for _ in range(20))
# Import logic is version-dependent.
if sys.version_info >= (3, 5):
from importlib import util as _il_util
modspec = _il_util.spec_from_file_location(
name, module_filepath)
mod = _il_util.module_from_spec(modspec)
modspec.loader.exec_module(mod)
elif sys.version_info < (3, 3):
import imp
mod = imp.load_source(name, module_filepath)
else:
# 3.3 or 3.4
from importlib import machinery as _il_mach
loader = _il_mach.SourceFileLoader(name, module_filepath)
mod = loader.load_module()
return mod
|
Import a module from a particular filesystem location.
:param str module_filepath: path to the file that constitutes the module
to import
:return module: module imported from the given location, named as indicated
:raises ValueError: if path provided does not point to an extant file
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L250-L285
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
"""
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
"""
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
def add_project_sample_constants(sample, project):
"""
Update a Sample with constants declared by a Project.
:param Sample sample: sample instance for which to update constants
based on Project
:param Project project: Project with which to update Sample; it
may or may not declare constants. If not, no update occurs.
:return Sample: Updates Sample instance, according to any and all
constants declared by the Project.
"""
sample.update(project.constants)
return sample
def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired
def check_fastq(fastq, o):
raise NotImplementedError("Detection of read type/length for "
"fastq input is not yet implemented.")
def coll_like(c):
"""
Determine whether an object is collection-like.
:param object c: Object to test as collection
:return bool: Whether the argument is a (non-string) collection
"""
return isinstance(c, Iterable) and not isinstance(c, str)
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
from copy import deepcopy
return deepcopy(self)
obj.copy = copy
return obj
def expandpath(path):
"""
Expand a filesystem path that may or may not contain user/env vars.
:param str path: path to expand
:return str: expanded version of input path
"""
return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
def get_file_size(filename):
"""
Get size of all files in gigabytes (Gb).
:param str | collections.Iterable[str] filename: A space-separated
string or list of space-separated strings of absolute file paths.
:return float: size of file(s), in gigabytes.
"""
if filename is None:
return float(0)
if type(filename) is list:
return float(sum([get_file_size(x) for x in filename]))
try:
total_bytes = sum([float(os.stat(f).st_size)
for f in filename.split(" ") if f is not ''])
except OSError:
# File not found
return 0.0
else:
return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
"""
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
:param Project str: the sample selector_attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to include
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
"""
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    collected = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        if hasattr(prj, section):
            collected[section] = getattr(prj, section)
        else:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
    return collected
def has_null_value(k, m):
"""
Determine whether a mapping has a null value for a given key.
:param Hashable k: Key to test for null value
:param Mapping m: Mapping to test for null value for given key
:return bool: Whether given mapping contains given key with null value
"""
return k in m and is_null_like(m[k])
def infer_delimiter(filepath):
"""
From extension infer delimiter used in a separated values file.
:param str filepath: path to file about which to make inference
:return str | NoneType: extension if inference succeeded; else null
"""
ext = os.path.splitext(filepath)[1][1:].lower()
return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
def is_null_like(x):
"""
Determine whether an object is effectively null.
:param object x: Object for which null likeness is to be determined.
:return bool: Whether given object is effectively "null."
"""
return x in [None, ""] or \
(coll_like(x) and isinstance(x, Sized) and 0 == len(x))
def is_url(maybe_url):
"""
Determine whether a path is a URL.
:param str maybe_url: path to investigate as URL
:return bool: whether path appears to be a URL
"""
return urlparse(maybe_url).scheme != ""
def non_null_value(k, m):
"""
Determine whether a mapping has a non-null value for a given key.
:param Hashable k: Key to test for non-null value
:param Mapping m: Mapping to test for non-null value for given key
:return bool: Whether given mapping contains given key with non-null value
"""
return k in m and not is_null_like(m[k])
def parse_ftype(input_file):
"""
Checks determine filetype from extension.
:param str input_file: String to check.
:return str: filetype (extension without dot prefix)
:raises TypeError: if file does not appear of a supported type
"""
if input_file.endswith(".bam"):
return "bam"
elif input_file.endswith(".fastq") or \
input_file.endswith(".fq") or \
input_file.endswith(".fq.gz") or \
input_file.endswith(".fastq.gz"):
return "fastq"
else:
raise TypeError("Type of input file ends in neither '.bam' "
"nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
"""
Interpret input argument as lines of data. This is intended to support
multiple input argument types to core model constructors.
:param str | collections.Iterable lines_or_path:
:param str delimiter: line separator used when parsing a raw string that's
not a file
:return collections.Iterable: lines of text data
:raises ValueError: if primary data argument is neither a string nor
another iterable
"""
if os.path.isfile(lines_or_path):
with open(lines_or_path, 'r') as f:
return f.readlines()
else:
_LOGGER.debug("Not a file: '{}'".format(lines_or_path))
if isinstance(lines_or_path, str):
return lines_or_path.split(delimiter)
elif isinstance(lines_or_path, Iterable):
return lines_or_path
else:
raise ValueError("Unable to parse as data lines {} ({})".
format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
"""
Get the path to this Project's root folder for the given Sample.
:param attmap.PathExAttMap | Project prj: project with which sample is associated
:param Mapping sample: Sample or sample data for which to get root output
folder path.
:return str: this Project's root folder for the given Sample
"""
return os.path.join(prj.metadata.results_subdir,
sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
"""
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
"""
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr
def warn_derived_cols():
""" Produce deprecation warning about derived columns. """
_warn_cols_to_attrs("derived")
def warn_implied_cols():
""" Produce deprecation warning about implied columns. """
_warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
"""
Validate PATH availability of executables referenced by a config file.
:param str path_conf_file: path to configuration file with
sections detailing executable tools to validate
:param Iterable[str] sections_to_check: names of
sections of the given configuration file that are relevant;
optional, will default to all sections if not given, but some
may be excluded via another optional parameter
:param Iterable[str] sections_to_skip: analogous to
the check names parameter, but for specific sections to skip.
"""
def __init__(self, path_conf_file,
sections_to_check=None, sections_to_skip=None):
super(CommandChecker, self).__init__()
self._logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
# TODO: could provide parse strategy as parameter to supplement YAML.
# TODO: could also derive parsing behavior from extension.
self.path = path_conf_file
with open(self.path, 'r') as conf_file:
conf_data = yaml.safe_load(conf_file)
# Determine which sections to validate.
sections = {sections_to_check} if isinstance(sections_to_check, str) \
else set(sections_to_check or conf_data.keys())
excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
else set(sections_to_skip or [])
sections -= excl
self._logger.info("Validating %d sections: %s",
len(sections),
", ".join(["'{}'".format(s) for s in sections]))
# Store per-command mapping of status, nested under section.
self.section_to_status_by_command = defaultdict(dict)
# Store only information about the failures.
self.failures_by_section = defaultdict(list) # Access by section.
self.failures = set() # Access by command.
for s in sections:
# Fetch section data or skip.
try:
section_data = conf_data[s]
except KeyError:
_LOGGER.info("No section '%s' in file '%s', skipping",
s, self.path)
continue
# Test each of the section's commands.
try:
# Is section's data a mapping?
commands_iter = section_data.items()
self._logger.debug("Processing section '%s' data "
"as mapping", s)
for name, command in commands_iter:
failed = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"FAILURE" if failed else "SUCCESS")
except AttributeError:
self._logger.debug("Processing section '%s' data as list", s)
commands_iter = conf_data[s]
for cmd_item in commands_iter:
# Item is K-V pair?
try:
name, command = cmd_item
except ValueError:
# Treat item as command itself.
name, command = "", cmd_item
success = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"SUCCESS" if success else "FAILURE")
def _store_status(self, section, command, name):
"""
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
"""
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded
@property
def failed(self):
"""
Determine whether *every* command succeeded for *every* config file
section that was validated during instance construction.
:return bool: conjunction of execution success test result values,
obtained by testing each executable in every validated section
"""
# This will raise exception even if validation was attempted,
# but no sections were used. Effectively, delegate responsibility
# to the caller to initiate validation only if doing so is relevant.
if not self.section_to_status_by_command:
raise ValueError("No commands validated")
return 0 == len(self.failures)
def is_command_callable(command, name=""):
"""
Check if command can be called.
:param str command: actual command to call
:param str name: nickname/alias by which to reference the command, optional
:return bool: whether given command's call succeeded
"""
# Use `command` to see if command is callable, store exit code
code = os.system(
"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
if code != 0:
alias_value = " ('{}') ".format(name) if name else " "
_LOGGER.debug("Command '{0}' is not callable: {1}".
format(alias_value, command))
return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
infer_delimiter
|
python
|
def infer_delimiter(filepath):
ext = os.path.splitext(filepath)[1][1:].lower()
return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
|
From extension infer delimiter used in a separated values file.
:param str filepath: path to file about which to make inference
:return str | NoneType: extension if inference succeeded; else null
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L288-L296
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    # Keep alphabetic characters plus the generic-protocol marker, dropping
    # everything else, then normalize case in the requested direction.
    kept = [ch for ch in text if ch.isalpha() or ch == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: the same Sample instance, updated according to any and
        all constants declared by the Project.
    """
    # Sample supports the Mapping-style update protocol; Project constants
    # overwrite/extend the sample's entries in place.
    constants = project.constants
    sample.update(constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): histogram mapping read length to
        count among the inspected reads, and the number of inspected
        alignments whose FLAG marks them as paired
    :raise OSError: if samtools cannot be launched (e.g. not on PATH)
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # SAM column 2 (index 1) is the bitwise FLAG; column 10
            # (index 9) is the read sequence.
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Stop samtools once enough reads have been sampled.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """
    Read type/length inspection for FASTQ input; not yet supported.

    :raise NotImplementedError: always; fastq introspection is unsupported.
    """
    msg = ("Detection of read type/length for "
           "fastq input is not yet implemented.")
    raise NotImplementedError(msg)
def coll_like(c):
    """
    Determine whether an object is collection-like.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    # Strings are iterable but are treated as scalars here.
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Attach a deep-copying ``copy`` method to the given class.

    :param type obj: class (or object) to augment with a ``copy`` method
    :return type: the same input, now exposing a ``copy`` method
    """
    from copy import deepcopy

    def copy(self):
        """
        Copy self to a new object.
        """
        return deepcopy(self)

    obj.copy = copy
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    # User home first, then environment variables; collapse any doubled
    # separators produced along the way.
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 for missing files.
    """
    if filename is None:
        return float(0)
    if type(filename) is list:
        # Recurse per element; each may itself be space-separated.
        return float(sum([get_file_size(x) for x in filename]))
    try:
        # BUGFIX: skip empty tokens via truthiness; the original used
        # "f is not ''", an identity comparison against a literal that is
        # interning-dependent and warns on Python 3.8+.
        total_bytes = sum([float(os.stat(f).st_size)
                           for f in filename.split(" ") if f])
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned.
    If selector_include is specified, Samples without a protocol will be excluded,
    but if selector_exclude is specified, protocol-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: value(s) of interest; if
        specified, a Sample must match one of them to be kept
    :param Iterable[str] | str selector_exclude: value(s) to exclude; a
        Sample matching one of these is dropped
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or either
        lacks a protocol or does not match one of those in selector_exclude
    :raise AttributeError: if no sample has the requested selector_attribute
    :raise TypeError: if both selector_include and selector_exclude protocols are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    def make_set(items):
        # NOTE(review): despite the comment above, a bare string is merely
        # wrapped in a list, not converted to a set; membership tests in the
        # keep() closures still work either way.
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            # A sample lacking the attribute entirely is retained.
            return not hasattr(s, selector_attribute) or \
                   getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                   getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        # A Project needn't define every section; missing ones are simply
        # skipped rather than treated as errors.
        try:
            value = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
        else:
            data[section] = value
    return data
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    # Key must be present AND its value must be effectively null.
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, under a
        randomly generated 20-character name
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name to avoid clashing with real modules.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        # Modern, supported mechanism: importlib.util spec machinery.
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # Python 2: the (since-deprecated) imp module.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    # None and the empty string are null; so is any empty sized collection.
    if x in [None, ""]:
        return True
    return coll_like(x) and isinstance(x, Sized) and 0 == len(x)
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    # Anything with a nonempty scheme (e.g. "http", "s3") counts as a URL.
    parsed = urlparse(maybe_url)
    return parsed.scheme != ""
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # Any of the common FASTQ extensions, gzipped or not.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: file path, raw text
        blob, or already-split collection of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # BUGFIX: only strings can name files; the original called
    # os.path.isfile() unconditionally, which raises TypeError for the
    # documented list-of-lines input instead of reaching the Iterable branch.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    # Each sample gets a subfolder, named for it, under the project's
    # results directory.
    results_dir = prj.metadata.results_subdir
    return os.path.join(results_dir, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even if the body raised.
        sys.stdout, sys.stderr = saved
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs(prefix="derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs(prefix="implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate; a lone string is treated as
        # a single section name, and the default is every section on file.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status returns success; the original
                    # bound it to a local named 'failed' and logged
                    # "FAILURE" for successful commands (labels inverted).
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raise ValueError: if no commands were validated at all
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # `command -v` resolves builtins/functions/PATH executables without
    # actually running the target; nonzero exit means not callable.
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        # BUGFIX: previously the alias was formatted into the command slot,
        # yielding "Command ' ('alias') ' is not callable: cmd". Log the
        # command itself, with the optional alias appended.
        alias_value = " ('{}')".format(name) if name else ""
        _LOGGER.debug("Command '{0}'{1} is not callable".format(
            command, alias_value))
    return 0 == code
|
pepkit/peppy
|
peppy/utils.py
|
is_null_like
|
python
|
def is_null_like(x):
return x in [None, ""] or \
(coll_like(x) and isinstance(x, Sized) and 0 == len(x))
|
Determine whether an object is effectively null.
:param object x: Object for which null likeness is to be determined.
:return bool: Whether given object is effectively "null."
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L299-L307
|
[
"def coll_like(c):\n \"\"\"\n Determine whether an object is collection-like.\n\n :param object c: Object to test as collection\n :return bool: Whether the argument is a (non-string) collection\n \"\"\"\n return isinstance(c, Iterable) and not isinstance(c, str)\n"
] |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    # Keep alphabetic characters plus the generic-protocol marker, dropping
    # everything else, then normalize case in the requested direction.
    kept = [ch for ch in text if ch.isalpha() or ch == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: the same Sample instance, updated according to any and
        all constants declared by the Project.
    """
    # Sample supports the Mapping-style update protocol; Project constants
    # overwrite/extend the sample's entries in place.
    constants = project.constants
    sample.update(constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): histogram mapping read length to
        count among the inspected reads, and the number of inspected
        alignments whose FLAG marks them as paired
    :raise OSError: if samtools cannot be launched (e.g. not on PATH)
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # SAM column 2 (index 1) is the bitwise FLAG; column 10
            # (index 9) is the read sequence.
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Stop samtools once enough reads have been sampled.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """
    Read type/length inspection for FASTQ input; not yet supported.

    :raise NotImplementedError: always; fastq introspection is unsupported.
    """
    msg = ("Detection of read type/length for "
           "fastq input is not yet implemented.")
    raise NotImplementedError(msg)
def coll_like(c):
    """
    Determine whether an object is collection-like.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    # Strings are iterable but are treated as scalars here.
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Attach a deep-copying ``copy`` method to the given class.

    :param type obj: class (or object) to augment with a ``copy`` method
    :return type: the same input, now exposing a ``copy`` method
    """
    from copy import deepcopy

    def copy(self):
        """
        Copy self to a new object.
        """
        return deepcopy(self)

    obj.copy = copy
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    # User home first, then environment variables; collapse any doubled
    # separators produced along the way.
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 for missing files.
    """
    if filename is None:
        return float(0)
    if type(filename) is list:
        # Recurse per element; each may itself be space-separated.
        return float(sum([get_file_size(x) for x in filename]))
    try:
        # BUGFIX: skip empty tokens via truthiness; the original used
        # "f is not ''", an identity comparison against a literal that is
        # interning-dependent and warns on Python 3.8+.
        total_bytes = sum([float(os.stat(f).st_size)
                           for f in filename.split(" ") if f])
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned.
    If selector_include is specified, Samples without a protocol will be excluded,
    but if selector_exclude is specified, protocol-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: value(s) of interest; if
        specified, a Sample must match one of them to be kept
    :param Iterable[str] | str selector_exclude: value(s) to exclude; a
        Sample matching one of these is dropped
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or either
        lacks a protocol or does not match one of those in selector_exclude
    :raise AttributeError: if no sample has the requested selector_attribute
    :raise TypeError: if both selector_include and selector_exclude protocols are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    def make_set(items):
        # NOTE(review): despite the comment above, a bare string is merely
        # wrapped in a list, not converted to a set; membership tests in the
        # keep() closures still work either way.
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            # A sample lacking the attribute entirely is retained.
            return not hasattr(s, selector_attribute) or \
                   getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                   getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        # A Project needn't define every section; missing ones are simply
        # skipped rather than treated as errors.
        try:
            value = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
        else:
            data[section] = value
    return data
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    # Key must be present AND its value must be effectively null.
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, under a
        randomly generated 20-character name
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name to avoid clashing with real modules.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        # Modern, supported mechanism: importlib.util spec machinery.
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # Python 2: the (since-deprecated) imp module.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    # Map known extensions (case-insensitive, dot stripped) to their
    # conventional delimiters; anything unrecognized yields None.
    known = {"txt": "\t", "tsv": "\t", "csv": ","}
    extension = os.path.splitext(filepath)[1][1:].lower()
    return known.get(extension)
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    # Anything with a nonempty scheme (e.g. "http", "s3") counts as a URL.
    parsed = urlparse(maybe_url)
    return parsed.scheme != ""
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # Any of the common FASTQ extensions, gzipped or not.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: file path, raw text
        blob, or already-split collection of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # BUGFIX: only strings can name files; the original called
    # os.path.isfile() unconditionally, which raises TypeError for the
    # documented list-of-lines input instead of reaching the Iterable branch.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    # Each sample gets a subfolder, named for it, under the project's
    # results directory.
    results_dir = prj.metadata.results_subdir
    return os.path.join(results_dir, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even if the body raised.
        sys.stdout, sys.stderr = saved
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs(prefix="derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs(prefix="implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate; a lone string is treated as
        # a single section name, and the default is every section on file.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status returns success; the original
                    # bound it to a local named 'failed' and logged
                    # "FAILURE" for successful commands (labels inverted).
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raise ValueError: if no commands were validated at all
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use `command -v` to see if the command resolves on PATH; store exit code.
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        # alias_value carries its own surrounding spaces for clean insertion.
        alias_value = " ('{}') ".format(name) if name else " "
        # BUGFIX: the original swapped the format arguments, putting the
        # alias into the "Command '{0}'" slot and the command at the end.
        _LOGGER.debug("Command{0}is not callable: {1}".
                      format(alias_value, command))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
parse_ftype
|
python
|
def parse_ftype(input_file):
if input_file.endswith(".bam"):
return "bam"
elif input_file.endswith(".fastq") or \
input_file.endswith(".fq") or \
input_file.endswith(".fq.gz") or \
input_file.endswith(".fastq.gz"):
return "fastq"
else:
raise TypeError("Type of input file ends in neither '.bam' "
"nor '.fastq' [file: '" + input_file + "']")
|
Determine filetype from extension.
:param str input_file: String to check.
:return str: filetype (extension without dot prefix)
:raises TypeError: if file does not appear of a supported type
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L331-L348
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
# Public API of this utilities module.
__all__ = [
    "CommandChecker",
    "add_project_sample_constants",
    "check_bam",
    "check_fastq",
    "get_file_size",
    "fetch_samples",
    "grab_project_data",
    "has_null_value",
    "is_command_callable",
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters (plus the generic protocol key) and
    homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    kept = "".join(c for c in text
                   if c.isalpha() or c == GENERIC_PROTOCOL_KEY)
    return kept.lower() if lower else kept.upper()
def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance to update, in place
    :param Project project: Project whose ``constants`` mapping (possibly
        empty) is applied to the sample
    :return Sample: the same Sample instance, after the update
    """
    constants = project.constants
    sample.update(constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    Streams the alignment via ``samtools view`` (must be on PATH) and
    inspects the first reads.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): per-length read counts and the
        number of paired reads among those examined
    :raise OSError: if samtools cannot be invoked
    """
    try:
        proc = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        paired = 0
        read_lengths = defaultdict(int)
        for _ in range(o):
            fields = proc.stdout.readline().decode().split("\t")
            flag_bits = int(fields[1])
            read_lengths[len(fields[9])] += 1
            if flag_bits & 1:  # SAM flag bit 0x1: read is paired
                paired += 1
        proc.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """Placeholder: read type/length detection for fastq is unsupported."""
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")
def coll_like(c):
    """
    Determine whether an object is collection-like (iterable, not a string).

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Class decorator: attach a deep-copying ``copy`` method.

    :param type obj: class to augment
    :return type: the same class, with a ``copy`` method added
    """
    def copy(self):
        """Copy self to a new object."""
        from copy import deepcopy
        return deepcopy(self)

    obj.copy = copy
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    NOTE(review): the trailing ``//`` -> ``/`` collapse assumes plain
    POSIX-style paths (it would mangle URLs) -- confirm intended inputs.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 for a missing file.
    """
    if filename is None:
        return float(0)
    if isinstance(filename, list):
        # Recurse over each space-separated string in the list.
        return float(sum(get_file_size(x) for x in filename))
    try:
        # Skip empty tokens produced by repeated/trailing spaces.
        # BUGFIX: the original used "f is not ''", an identity comparison
        # against a literal that only works via CPython string interning;
        # truthiness is the correct and portable check.
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f)
    except OSError:
        # File not found (or otherwise unstatable).
        return 0.0
    return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples matching (or not matching) attribute value(s).

    Inclusion and exclusion are mutually exclusive. With neither given
    (or no selection attribute), all of the Project's Samples are
    returned. With selector_include, samples lacking the attribute are
    excluded; with selector_exclude, samples lacking the attribute are
    retained.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: attribute value(s) of interest
    :param Iterable[str] | str selector_exclude: attribute value(s) to exclude
    :return list[Sample]: the selected samples
    :raise TypeError: if both selector_include and selector_exclude are given
    :raise AttributeError: if no sample carries the selection attribute
    """
    if selector_attribute is None or not (selector_include or selector_exclude):
        # No filtering requested; hand back a plain list of all samples.
        return list(proj.samples)
    # At least one sample must carry the requested attribute.
    if proj.samples and not any(hasattr(s, selector_attribute)
                                for s in proj.samples):
        raise AttributeError(
            "The Project samples do not have the attribute '{attr}'"
            .format(attr=selector_attribute))
    # Simultaneous include and exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")

    def _as_collection(values):
        # Normalize a lone string into a single-item collection.
        return [values] if isinstance(values, str) else values

    if selector_include:
        # Strict: keep only samples whose attribute value is included.
        def _keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in _as_collection(selector_include)
    else:
        # Loose: keep samples lacking the attribute or not excluded.
        def _keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in _as_collection(selector_exclude)
    return [s for s in proj.samples if _keep(s)]
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    Samples in a Project are mutually independent, so each one only needs
    the Project-level sections, not knowledge of its siblings; this pulls
    exactly those sections.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            value = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
        else:
            data[section] = value
    return data
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    # Membership test first so defaultdicts are not mutated by lookup.
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, registered
        under a randomly generated name
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # A random module name avoids clashing with anything already imported.
    alphabet = string.ascii_letters + string.digits
    name = "".join(random.choice(alphabet) for _ in range(20))
    # Import machinery differs across interpreter versions.
    if sys.version_info >= (3, 5):
        from importlib import util as _il_util
        spec = _il_util.spec_from_file_location(name, module_filepath)
        mod = _il_util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod
    if sys.version_info < (3, 3):
        # Python 2 fallback.
        import imp
        return imp.load_source(name, module_filepath)
    # Python 3.3 / 3.4.
    from importlib import machinery as _il_mach
    return _il_mach.SourceFileLoader(name, module_filepath).load_module()
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else None
    """
    _, dotted_ext = os.path.splitext(filepath)
    ext = dotted_ext[1:].lower()
    known_delimiters = {"txt": "\t", "tsv": "\t", "csv": ","}
    return known_delimiters.get(ext)
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x in [None, ""]:
        return True
    # Empty, sized, non-string collections also count as null-like.
    # (Inlines coll_like: iterable and not a string.)
    return isinstance(x, Iterable) and not isinstance(x, str) \
        and isinstance(x, Sized) and len(x) == 0
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL (has a scheme)
    """
    parsed = urlparse(maybe_url)
    return bool(parsed.scheme)
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    # Membership test first so defaultdicts are not mutated by lookup.
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: path to a file, raw
        data as a single delimited string, or an iterable of data lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # BUGFIX: only probe the filesystem for string arguments;
    # os.path.isfile raises TypeError for the documented iterable-of-lines
    # input (e.g. a list of strings).
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is
        associated
    :param Mapping sample: Sample or sample data; must provide "sample_name"
    :return str: this Project's root folder for the given Sample
    """
    results_dir = prj.metadata.results_subdir
    return os.path.join(results_dir, sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    Useful for capturing messages for inspection, or for routing them to
    something like an opened os.devnull to discard them.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    original = (sys.stdout, sys.stderr)
    sys.stdout = stream
    sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even on error.
        sys.stdout, sys.stderr = original
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs("derived")


def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs("implied")


def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    message = "{pfx}_columns should be encoded and referenced " \
              "as {pfx}_attributes".format(pfx=prefix)
    warnings.warn(message, DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate: a lone string is treated
        # as a single section name; otherwise default to all sections.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status returns SUCCESS; the original
                    # bound it to a variable named "failed" and logged the
                    # status inverted ("FAILURE" when the command worked).
                    succeeded = self._store_status(
                        section=s, command=command, name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    succeeded = self._store_status(
                        section=s, command=command, name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        NOTE(review): despite the name, this returns True when NO command
        failed -- confirm callers' expectations before changing it.

        :return bool: conjunction of execution success test result values
        :raise ValueError: if no commands were validated at all
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use `command -v` to see if the command resolves on PATH; store exit code.
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        # alias_value carries its own surrounding spaces for clean insertion.
        alias_value = " ('{}') ".format(name) if name else " "
        # BUGFIX: the original swapped the format arguments, putting the
        # alias into the "Command '{0}'" slot and the command at the end.
        _LOGGER.debug("Command{0}is not callable: {1}".
                      format(alias_value, command))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
parse_text_data
|
python
|
def parse_text_data(lines_or_path, delimiter=os.linesep):
if os.path.isfile(lines_or_path):
with open(lines_or_path, 'r') as f:
return f.readlines()
else:
_LOGGER.debug("Not a file: '{}'".format(lines_or_path))
if isinstance(lines_or_path, str):
return lines_or_path.split(delimiter)
elif isinstance(lines_or_path, Iterable):
return lines_or_path
else:
raise ValueError("Unable to parse as data lines {} ({})".
format(lines_or_path, type(lines_or_path)))
|
Interpret input argument as lines of data. This is intended to support
multiple input argument types to core model constructors.
:param str | collections.Iterable lines_or_path:
:param str delimiter: line separator used when parsing a raw string that's
not a file
:return collections.Iterable: lines of text data
:raises ValueError: if primary data argument is neither a string nor
another iterable
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L351-L376
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters (plus the generic protocol key) and
    homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    kept = "".join(c for c in text
                   if c.isalpha() or c == GENERIC_PROTOCOL_KEY)
    return kept.lower() if lower else kept.upper()


def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance to update, in place
    :param Project project: Project whose ``constants`` mapping is applied
    :return Sample: the same Sample instance, after the update
    """
    sample.update(project.constants)
    return sample


def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    Streams the alignment via ``samtools view`` (must be on PATH).

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): per-length read counts and the
        number of paired reads among those examined
    :raise OSError: if samtools cannot be invoked
    """
    try:
        proc = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        paired = 0
        read_lengths = defaultdict(int)
        for _ in range(o):
            fields = proc.stdout.readline().decode().split("\t")
            flag_bits = int(fields[1])
            read_lengths[len(fields[9])] += 1
            if flag_bits & 1:  # SAM flag bit 0x1: read is paired
                paired += 1
        proc.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired


def check_fastq(fastq, o):
    """Placeholder: read type/length detection for fastq is unsupported."""
    raise NotImplementedError("Detection of read type/length for "
                              "fastq input is not yet implemented.")


def coll_like(c):
    """
    Determine whether an object is collection-like (iterable, not a string).

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    return isinstance(c, Iterable) and not isinstance(c, str)


def copy(obj):
    """
    Class decorator: attach a deep-copying ``copy`` method.

    :param type obj: class to augment
    :return type: the same class, augmented
    """
    def copy(self):
        """Copy self to a new object."""
        from copy import deepcopy
        return deepcopy(self)

    obj.copy = copy
    return obj


def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    expanded = os.path.expandvars(os.path.expanduser(path))
    return expanded.replace("//", "/")


def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 for a missing file.
    """
    if filename is None:
        return float(0)
    if isinstance(filename, list):
        return float(sum(get_file_size(x) for x in filename))
    try:
        # BUGFIX: the original filtered empty tokens with "f is not ''",
        # an identity comparison against a literal that only works via
        # CPython string interning; truthiness is the portable check.
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f)
    except OSError:
        # File not found.
        return 0.0
    return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples matching (or not matching) attribute value(s).

    Inclusion and exclusion are mutually exclusive. With neither given
    (or no selection attribute), all of the Project's Samples are
    returned. With selector_include, samples lacking the attribute are
    excluded; with selector_exclude, samples lacking the attribute are
    retained.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: attribute value(s) of interest
    :param Iterable[str] | str selector_exclude: attribute value(s) to exclude
    :return list[Sample]: the selected samples
    :raise TypeError: if both selector_include and selector_exclude are given
    :raise AttributeError: if no sample carries the selection attribute
    """
    if selector_attribute is None or not (selector_include or selector_exclude):
        # No filtering requested; hand back a plain list of all samples.
        return list(proj.samples)
    # At least one sample must carry the requested attribute.
    if proj.samples and not any(hasattr(s, selector_attribute)
                                for s in proj.samples):
        raise AttributeError(
            "The Project samples do not have the attribute '{attr}'"
            .format(attr=selector_attribute))
    # Simultaneous include and exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")

    def _as_collection(values):
        # Normalize a lone string into a single-item collection.
        return [values] if isinstance(values, str) else values

    if selector_include:
        # Strict: keep only samples whose attribute value is included.
        def _keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in _as_collection(selector_include)
    else:
        # Loose: keep samples lacking the attribute or not excluded.
        def _keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in _as_collection(selector_exclude)
    return [s for s in proj.samples if _keep(s)]


def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            value = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
        else:
            data[section] = value
    return data


def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])


def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
    :return module: module imported from the given location
    :raises ValueError: if the path does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # A random module name avoids clashing with anything already imported.
    alphabet = string.ascii_letters + string.digits
    name = "".join(random.choice(alphabet) for _ in range(20))
    if sys.version_info >= (3, 5):
        from importlib import util as _il_util
        spec = _il_util.spec_from_file_location(name, module_filepath)
        mod = _il_util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod
    if sys.version_info < (3, 3):
        # Python 2 fallback.
        import imp
        return imp.load_source(name, module_filepath)
    # Python 3.3 / 3.4.
    from importlib import machinery as _il_mach
    return _il_mach.SourceFileLoader(name, module_filepath).load_module()


def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else None
    """
    ext = os.path.splitext(filepath)[1][1:].lower()
    return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)


def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x in [None, ""]:
        return True
    # Empty, sized, non-string collections also count (inlined coll_like).
    return isinstance(x, Iterable) and not isinstance(x, str) \
        and isinstance(x, Sized) and len(x) == 0


def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL (has a scheme)
    """
    return bool(urlparse(maybe_url).scheme)


def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine filetype from extension.

    :param str input_file: file name/path to check.
    :return str: filetype ("bam" or "fastq")
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # str.endswith accepts a tuple of candidate suffixes.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")


def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is
        associated
    :param Mapping sample: Sample or sample data; must provide "sample_name"
    :return str: this Project's root folder for the given Sample
    """
    return os.path.join(prj.metadata.results_subdir, sample["sample_name"])


@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    original = (sys.stdout, sys.stderr)
    sys.stdout = stream
    sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even on error.
        sys.stdout, sys.stderr = original


def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    _warn_cols_to_attrs("derived")


def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    _warn_cols_to_attrs("implied")


def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    warnings.warn("{pfx}_columns should be encoded and referenced "
                  "as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
"""
Validate PATH availability of executables referenced by a config file.
:param str path_conf_file: path to configuration file with
sections detailing executable tools to validate
:param Iterable[str] sections_to_check: names of
sections of the given configuration file that are relevant;
optional, will default to all sections if not given, but some
may be excluded via another optional parameter
:param Iterable[str] sections_to_skip: analogous to
the check names parameter, but for specific sections to skip.
"""
def __init__(self, path_conf_file,
sections_to_check=None, sections_to_skip=None):
super(CommandChecker, self).__init__()
self._logger = logging.getLogger(
"{}.{}".format(__name__, self.__class__.__name__))
# TODO: could provide parse strategy as parameter to supplement YAML.
# TODO: could also derive parsing behavior from extension.
self.path = path_conf_file
with open(self.path, 'r') as conf_file:
conf_data = yaml.safe_load(conf_file)
# Determine which sections to validate.
sections = {sections_to_check} if isinstance(sections_to_check, str) \
else set(sections_to_check or conf_data.keys())
excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
else set(sections_to_skip or [])
sections -= excl
self._logger.info("Validating %d sections: %s",
len(sections),
", ".join(["'{}'".format(s) for s in sections]))
# Store per-command mapping of status, nested under section.
self.section_to_status_by_command = defaultdict(dict)
# Store only information about the failures.
self.failures_by_section = defaultdict(list) # Access by section.
self.failures = set() # Access by command.
for s in sections:
# Fetch section data or skip.
try:
section_data = conf_data[s]
except KeyError:
_LOGGER.info("No section '%s' in file '%s', skipping",
s, self.path)
continue
# Test each of the section's commands.
try:
# Is section's data a mapping?
commands_iter = section_data.items()
self._logger.debug("Processing section '%s' data "
"as mapping", s)
for name, command in commands_iter:
failed = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"FAILURE" if failed else "SUCCESS")
except AttributeError:
self._logger.debug("Processing section '%s' data as list", s)
commands_iter = conf_data[s]
for cmd_item in commands_iter:
# Item is K-V pair?
try:
name, command = cmd_item
except ValueError:
# Treat item as command itself.
name, command = "", cmd_item
success = self._store_status(section=s, command=command,
name=name)
self._logger.debug("Command '%s': %s", command,
"SUCCESS" if success else "FAILURE")
def _store_status(self, section, command, name):
"""
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
"""
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded
@property
def failed(self):
"""
Determine whether *every* command succeeded for *every* config file
section that was validated during instance construction.
:return bool: conjunction of execution success test result values,
obtained by testing each executable in every validated section
"""
# This will raise exception even if validation was attempted,
# but no sections were used. Effectively, delegate responsibility
# to the caller to initiate validation only if doing so is relevant.
if not self.section_to_status_by_command:
raise ValueError("No commands validated")
return 0 == len(self.failures)
def is_command_callable(command, name=""):
"""
Check if command can be called.
:param str command: actual command to call
:param str name: nickname/alias by which to reference the command, optional
:return bool: whether given command's call succeeded
"""
# Use `command` to see if command is callable, store exit code
code = os.system(
"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
if code != 0:
alias_value = " ('{}') ".format(name) if name else " "
_LOGGER.debug("Command '{0}' is not callable: {1}".
format(alias_value, command))
return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
sample_folder
|
python
|
def sample_folder(prj, sample):
return os.path.join(prj.metadata.results_subdir,
sample["sample_name"])
|
Get the path to this Project's root folder for the given Sample.
:param attmap.PathExAttMap | Project prj: project with which sample is associated
:param Mapping sample: Sample or sample data for which to get root output
folder path.
:return str: this Project's root folder for the given Sample
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L379-L389
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Reduce text to letters (plus the generic-protocol marker), one case.

    :param str text: what to filter and homogenize
    :param bool lower: whether to convert to lowercase; default uppercase
    :return str: input filtered to just letters, with homogenized case
    """
    kept = [c for c in text if c.isalpha() or c == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Apply a Project's declared constants to a Sample.

    :param Sample sample: sample instance to update with the project's
        constants (no-op if the project declares none)
    :param Project project: Project whose constants are merged into the
        given Sample
    :return Sample: the same Sample instance, updated in place
    """
    constants = project.constants
    sample.update(constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    Shells out to ``samtools view`` and inspects the first ``o``
    alignment records.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): mapping from observed read
        length to its occurrence count, and the number of reads whose
        flag marked them as paired
    :raises OSError: if samtools cannot be launched (not on PATH)
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0: # Count down number of lines
            # SAM record: field 1 is the bitwise flag, field 9 the sequence.
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag: # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        p.kill()
    except OSError:
        # Re-raise with a user-facing explanation of the consequence.
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """
    Placeholder for fastq read type/length detection.

    :param str fastq: FASTQ file path
    :param int o: number of reads that would be inspected
    :raises NotImplementedError: always; fastq inspection is unsupported
    """
    raise NotImplementedError(
        "Detection of read type/length for "
        "fastq input is not yet implemented.")
def coll_like(c):
    """
    Decide whether an object is a non-string collection.

    :param object c: object to test as a collection
    :return bool: True for iterables other than strings, else False
    """
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Class decorator that adds a deep-copying ``copy`` method.

    :param type obj: class to augment with a ``copy()`` method
    :return type: the decorated class itself
    """
    from copy import deepcopy

    def copy(self):
        """ Copy self to a new object. """
        return deepcopy(self)

    setattr(obj, "copy", copy)
    return obj
def expandpath(path):
    """
    Expand user (~) and environment variables in a filesystem path.

    :param str path: path to expand
    :return str: expanded version of input path, with doubled
        separators collapsed
    """
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes; 0.0 for null input or
        when a referenced file does not exist.
    """
    if filename is None:
        return float(0)
    if isinstance(filename, list):
        # Recur on each entry; entries may themselves be
        # space-separated strings of paths.
        return float(sum([get_file_size(x) for x in filename]))
    try:
        # Drop empty tokens from repeated/trailing spaces. The original
        # used an identity comparison (`f is not ''`), which relies on
        # string interning; plain truthiness is the correct test.
        total_bytes = sum([float(os.stat(f).st_size)
                           for f in filename.split(" ") if f])
    except OSError:
        # File not found
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the
    other hand, if neither is provided, all of the Project's Samples are
    returned. If selector_include is specified, Samples without the
    attribute will be excluded, but if selector_exclude is specified,
    Samples lacking the attribute will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: attribute value(s) of
        interest; if specified, a Sample must match one of them
    :param Iterable[str] | str selector_exclude: attribute value(s) to
        reject; a Sample matching one of them is dropped
    :return list[Sample]: Collection of this Project's samples with
        the attribute value either matching one of those in
        selector_include, or either lacking the attribute or not matching
        one of those in selector_exclude
    :raise TypeError: if both selector_include and selector_exclude are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    :raise AttributeError: if no sample has the selection attribute
    """
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    # NOTE(review): despite its name and the comment above, this wraps a
    # lone string in a list and otherwise returns the argument as-is; it
    # is only ever used for membership tests, so that still works.
    def make_set(items):
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    Extract the Sample-independent sections of a Project.

    Samples within a Project are mutually independent, so each Sample
    needs only limited knowledge of its Project; this collects exactly
    the sample-independent sections for a Sample to carry along.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        if hasattr(prj, section):
            data[section] = getattr(prj, section)
        else:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
    return data
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    The module is registered under a random 20-character name to avoid
    clashing with any already-imported module.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        # Modern API: spec -> module -> exec.
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # Python 2: legacy imp module.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def infer_delimiter(filepath):
    """
    Infer a separated-values file's delimiter from its extension.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    _, raw_ext = os.path.splitext(filepath)
    ext = raw_ext.lstrip(".").lower()
    delimiters = {"txt": "\t", "tsv": "\t", "csv": ","}
    return delimiters.get(ext)
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    An object counts as null-like if it equals None or the empty string,
    or is an empty sized non-string collection.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    if x in [None, ""]:
        return True
    return coll_like(x) and isinstance(x, Sized) and len(x) == 0
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL (has a scheme)
    """
    parsed = urlparse(maybe_url)
    return bool(parsed.scheme)
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Determine filetype from a file path's extension.

    :param str input_file: path/name to check
    :return str: filetype ("bam" or "fastq")
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # str.endswith accepts a tuple of suffixes, replacing the chain of
    # boolean 'or' checks with a single call.
    if input_file.endswith((".fastq", ".fq", ".fq.gz", ".fastq.gz")):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data. This is intended to support
    multiple input argument types to core model constructors.

    :param str | collections.Iterable lines_or_path: path to a file to
        read, raw text to split, or an iterable of lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # BUGFIX: os.path.isfile raises TypeError for non-path arguments
    # (e.g. a list of lines), which made the documented iterable branch
    # unreachable; treat such arguments as "not a file" instead.
    try:
        from_file = os.path.isfile(lines_or_path)
    except (TypeError, ValueError):
        from_file = False
    if from_file:
        with open(lines_or_path, 'r') as f:
            return f.readlines()
    logging.getLogger(__name__).debug(
        "Not a file: '{}'".format(lines_or_path))
    if isinstance(lines_or_path, str):
        return lines_or_path.split(delimiter)
    elif isinstance(lines_or_path, Iterable):
        return lines_or_path
    else:
        raise ValueError("Unable to parse as data lines {} ({})".
                         format(lines_or_path, type(lines_or_path)))
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily route stdout and stderr through the given stream.

    Useful for capturing messages for easier inspection, or for
    discarding them by targeting something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = stream
    try:
        yield
    finally:
        sys.stdout, sys.stderr = saved
def warn_derived_cols():
    """ Warn that 'derived columns' phrasing is deprecated. """
    _warn_cols_to_attrs("derived")
def warn_implied_cols():
    """ Warn that 'implied columns' phrasing is deprecated. """
    _warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
""" Produce deprecation warning about 'columns' rather than 'attributes' """
warnings.warn("{pfx}_columns should be encoded and referenced "
"as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        """ Parse the config file and immediately validate the sections. """
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate; a lone string is treated
        # as a single section name.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status returns a *success* flag; the
                    # original bound it to a variable named 'failed' and
                    # logged the inverse status for every command.
                    succeeded = self._store_status(section=s, command=command,
                                                   name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if succeeded else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        NOTE(review): despite the name, this returns True when there are
        NO failures (as the docstring and code agree); callers rely on
        this documented behavior, so it is preserved as-is.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raises ValueError: if no commands were validated at all
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use shell builtin `command -v` to see if command is callable.
    # NOTE: os.system runs through the shell and `command` is
    # interpolated unescaped, so only pass trusted values.
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        # BUGFIX: the original passed (alias_value, command) to a format
        # string expecting (command, ...), logging the alias where the
        # command belongs.
        alias_value = " ('{}') ".format(name) if name else " "
        logging.getLogger(__name__).debug(
            "Command '{0}'{1}is not callable".format(command, alias_value))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
standard_stream_redirector
|
python
|
def standard_stream_redirector(stream):
import sys
genuine_stdout, genuine_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stream, stream
try:
yield
finally:
sys.stdout, sys.stderr = genuine_stdout, genuine_stderr
|
Temporarily redirect stdout and stderr to another stream.
This can be useful for capturing messages for easier inspection, or
for rerouting and essentially ignoring them, with the destination as
something like an opened os.devnull.
:param FileIO[str] stream: temporary proxy for standard streams
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L393-L409
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
"""
Filter text to just letters and homogenize case.
:param str text: what to filter and homogenize.
:param bool lower: whether to convert to lowercase; default uppercase.
:return str: input filtered to just letters, with homogenized case.
"""
text = "".join(filter(
lambda c: c.isalpha() or c == GENERIC_PROTOCOL_KEY, text))
return text.lower() if lower else text.upper()
def add_project_sample_constants(sample, project):
"""
Update a Sample with constants declared by a Project.
:param Sample sample: sample instance for which to update constants
based on Project
:param Project project: Project with which to update Sample; it
may or may not declare constants. If not, no update occurs.
:return Sample: Updates Sample instance, according to any and all
constants declared by the Project.
"""
sample.update(project.constants)
return sample
def check_bam(bam, o):
"""
Check reads in BAM file for read type and lengths.
:param str bam: BAM file path.
:param int o: Number of reads to look at for estimation.
"""
try:
p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
# Count paired alignments
paired = 0
read_lengths = defaultdict(int)
while o > 0: # Count down number of lines
line = p.stdout.readline().decode().split("\t")
flag = int(line[1])
read_lengths[len(line[9])] += 1
if 1 & flag: # check decimal flag contains 1 (paired)
paired += 1
o -= 1
p.kill()
except OSError:
reason = "Note (samtools not in path): For NGS inputs, " \
"pep needs samtools to auto-populate " \
"'read_length' and 'read_type' attributes; " \
"these attributes were not populated."
raise OSError(reason)
_LOGGER.debug("Read lengths: {}".format(read_lengths))
_LOGGER.debug("paired: {}".format(paired))
return read_lengths, paired
def check_fastq(fastq, o):
raise NotImplementedError("Detection of read type/length for "
"fastq input is not yet implemented.")
def coll_like(c):
"""
Determine whether an object is collection-like.
:param object c: Object to test as collection
:return bool: Whether the argument is a (non-string) collection
"""
return isinstance(c, Iterable) and not isinstance(c, str)
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
from copy import deepcopy
return deepcopy(self)
obj.copy = copy
return obj
def expandpath(path):
"""
Expand a filesystem path that may or may not contain user/env vars.
:param str path: path to expand
:return str: expanded version of input path
"""
return os.path.expandvars(os.path.expanduser(path)).replace("//", "/")
def get_file_size(filename):
"""
Get size of all files in gigabytes (Gb).
:param str | collections.Iterable[str] filename: A space-separated
string or list of space-separated strings of absolute file paths.
:return float: size of file(s), in gigabytes.
"""
if filename is None:
return float(0)
if type(filename) is list:
return float(sum([get_file_size(x) for x in filename]))
try:
total_bytes = sum([float(os.stat(f).st_size)
for f in filename.split(" ") if f is not ''])
except OSError:
# File not found
return 0.0
else:
return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
"""
Collect samples of particular protocol(s).
Protocols can't be both positively selected for and negatively
selected against. That is, it makes no sense and is not allowed to
specify both selector_include and selector_exclude protocols. On the other hand, if
neither is provided, all of the Project's Samples are returned.
If selector_include is specified, Samples without a protocol will be excluded,
but if selector_exclude is specified, protocol-less Samples will be included.
:param Project proj: the Project with Samples to fetch
:param Project str: the sample selector_attribute to select for
:param Iterable[str] | str selector_include: protocol(s) of interest;
if specified, a Sample must
:param Iterable[str] | str selector_exclude: protocol(s) to include
:return list[Sample]: Collection of this Project's samples with
protocol that either matches one of those in selector_include, or either
lacks a protocol or does not match one of those in selector_exclude
:raise TypeError: if both selector_include and selector_exclude protocols are
specified; TypeError since it's basically providing two arguments
when only one is accepted, so remain consistent with vanilla Python2
"""
if selector_attribute is None or (not selector_include and not selector_exclude):
# Simple; keep all samples. In this case, this function simply
# offers a list rather than an iterator.
return list(proj.samples)
# At least one of the samples has to have the specified attribute
if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
raise AttributeError("The Project samples do not have the attribute '{attr}'"
.format(attr=selector_attribute))
# Intersection between selector_include and selector_exclude is nonsense user error.
if selector_include and selector_exclude:
raise TypeError("Specify only selector_include or selector_exclude parameter, "
"not both.")
# Ensure that we're working with sets.
def make_set(items):
if isinstance(items, str):
items = [items]
return items
# Use the attr check here rather than exception block in case the
# hypothetical AttributeError would occur; we want such
# an exception to arise, not to catch it as if the Sample lacks "protocol"
if not selector_include:
# Loose; keep all samples not in the selector_exclude.
def keep(s):
return not hasattr(s, selector_attribute) or \
getattr(s, selector_attribute) not in make_set(selector_exclude)
else:
# Strict; keep only samples in the selector_include.
def keep(s):
return hasattr(s, selector_attribute) and \
getattr(s, selector_attribute) in make_set(selector_include)
return list(filter(keep, proj.samples))
def grab_project_data(prj):
"""
From the given Project, grab Sample-independent data.
There are some aspects of a Project of which it's beneficial for a Sample
to be aware, particularly for post-hoc analysis. Since Sample objects
within a Project are mutually independent, though, each doesn't need to
know about any of the others. A Project manages its, Sample instances,
so for each Sample knowledge of Project data is limited. This method
facilitates adoption of that conceptual model.
:param Project prj: Project from which to grab data
:return Mapping: Sample-independent data sections from given Project
"""
if not prj:
return {}
data = {}
for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
try:
data[section] = getattr(prj, section)
except AttributeError:
_LOGGER.debug("Project lacks section '%s', skipping", section)
return data
def has_null_value(k, m):
"""
Determine whether a mapping has a null value for a given key.
:param Hashable k: Key to test for null value
:param Mapping m: Mapping to test for null value for given key
:return bool: Whether given mapping contains given key with null value
"""
return k in m and is_null_like(m[k])
def import_from_source(module_filepath):
"""
Import a module from a particular filesystem location.
:param str module_filepath: path to the file that constitutes the module
to import
:return module: module imported from the given location, named as indicated
:raises ValueError: if path provided does not point to an extant file
"""
import sys
if not os.path.exists(module_filepath):
raise ValueError("Path to alleged module file doesn't point to an "
"extant file: '{}'".format(module_filepath))
# Randomly generate module name.
fname_chars = string.ascii_letters + string.digits
name = "".join(random.choice(fname_chars) for _ in range(20))
# Import logic is version-dependent.
if sys.version_info >= (3, 5):
from importlib import util as _il_util
modspec = _il_util.spec_from_file_location(
name, module_filepath)
mod = _il_util.module_from_spec(modspec)
modspec.loader.exec_module(mod)
elif sys.version_info < (3, 3):
import imp
mod = imp.load_source(name, module_filepath)
else:
# 3.3 or 3.4
from importlib import machinery as _il_mach
loader = _il_mach.SourceFileLoader(name, module_filepath)
mod = loader.load_module()
return mod
def infer_delimiter(filepath):
"""
From extension infer delimiter used in a separated values file.
:param str filepath: path to file about which to make inference
:return str | NoneType: extension if inference succeeded; else null
"""
ext = os.path.splitext(filepath)[1][1:].lower()
return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
def is_null_like(x):
"""
Determine whether an object is effectively null.
:param object x: Object for which null likeness is to be determined.
:return bool: Whether given object is effectively "null."
"""
return x in [None, ""] or \
(coll_like(x) and isinstance(x, Sized) and 0 == len(x))
def is_url(maybe_url):
"""
Determine whether a path is a URL.
:param str maybe_url: path to investigate as URL
:return bool: whether path appears to be a URL
"""
return urlparse(maybe_url).scheme != ""
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    if k not in m:
        return False
    return not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Checks determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    # str.endswith accepts a tuple, collapsing the chained 'or' checks.
    fastq_suffixes = (".fastq", ".fq", ".fq.gz", ".fastq.gz")
    if input_file.endswith(".bam"):
        return "bam"
    if input_file.endswith(fastq_suffixes):
        return "fastq"
    raise TypeError("Type of input file ends in neither '.bam' "
                    "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data.

    This is intended to support multiple input argument types to core
    model constructors.

    :param str | collections.Iterable lines_or_path: filepath to read, raw
        text to split, or an iterable of already-parsed lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # Check the type before probing the filesystem: os.path.isfile() raises
    # TypeError on non-string input, which previously made the documented
    # iterable (e.g. list-of-lines) input crash before reaching its branch.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    # Per-sample output lives under the project's results subdirectory,
    # keyed by the sample's name.
    results_folder = prj.metadata.results_subdir
    return os.path.join(results_folder, sample["sample_name"])
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    # Removed a stray @contextlib.contextmanager decorator: this function is
    # not a generator, so wrapping it made calls return a broken context
    # manager instead of emitting the deprecation warning. (In the original
    # module the decorator belongs to standard_stream_redirector.)
    _warn_cols_to_attrs("derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    # Delegate to the shared columns->attributes deprecation helper.
    _warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    # prefix: attribute-kind label ("derived" or "implied") interpolated into
    # both the old and new names in the message.
    warnings.warn("{pfx}_columns should be encoded and referenced "
                  "as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)    # Access by section.
        self.failures = set()    # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status() returns True on success; the
                    # log labels in this branch were previously inverted
                    # (a successful command was logged as FAILURE).
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raises ValueError: if no commands were validated
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
def is_command_callable(command, name=""):
    """
    Check if command can be called.

    :param str command: actual command to call
    :param str name: nickname/alias by which to reference the command, optional
    :return bool: whether given command's call succeeded
    """
    # Use `command -v` via the shell to see if command is callable; store
    # the exit code (0 means callable).
    code = os.system(
        "command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    if code != 0:
        alias_value = " ('{}') ".format(name) if name else " "
        # BUGFIX: format arguments were swapped — the alias padding was
        # quoted as the command and the command landed in the tail slot.
        _LOGGER.debug("Command{0}is not callable: '{1}'".
                      format(alias_value, command))
    return not bool(code)
|
pepkit/peppy
|
peppy/utils.py
|
is_command_callable
|
python
|
def is_command_callable(command, name=""):
# Use `command` to see if command is callable, store exit code
code = os.system(
"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
if code != 0:
alias_value = " ('{}') ".format(name) if name else " "
_LOGGER.debug("Command '{0}' is not callable: {1}".
format(alias_value, command))
return not bool(code)
|
Check if command can be called.
:param str command: actual command to call
:param str name: nickname/alias by which to reference the command, optional
:return bool: whether given command's call succeeded
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L540-L557
| null |
""" Helpers without an obvious logical home. """
from collections import defaultdict, Iterable
import contextlib
import logging
import os
import random
import string
import subprocess as sp
import sys
if sys.version_info < (3, 0):
from urlparse import urlparse
else:
from urllib.parse import urlparse
if sys.version_info < (3, 3):
from collections import Sized
else:
from collections.abc import Sized
import warnings
import yaml
from .const import GENERIC_PROTOCOL_KEY, SAMPLE_INDEPENDENT_PROJECT_SECTIONS
_LOGGER = logging.getLogger(__name__)
__all__ = [
"CommandChecker", "add_project_sample_constants", "check_bam", "check_fastq",
"get_file_size", "fetch_samples", "grab_project_data", "has_null_value",
"is_command_callable"
]
def alpha_cased(text, lower=False):
    """
    Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    def _keep(ch):
        # Letters pass through; the generic-protocol marker is preserved too.
        return ch.isalpha() or ch == GENERIC_PROTOCOL_KEY
    filtered = "".join(ch for ch in text if _keep(ch))
    return filtered.lower() if lower else filtered.upper()
def add_project_sample_constants(sample, project):
    """
    Update a Sample with constants declared by a Project.

    :param Sample sample: sample instance for which to update constants
        based on Project
    :param Project project: Project with which to update Sample; it
        may or may not declare constants. If not, no update occurs.
    :return Sample: Updates Sample instance, according to any and all
        constants declared by the Project.
    """
    # Merge the project's declared constants into the sample in place.
    declared_constants = project.constants
    sample.update(declared_constants)
    return sample
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return (defaultdict[int, int], int): mapping from observed read length
        to its count, and the number of paired alignments seen
    :raises OSError: if samtools is not available on PATH
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            # SAM columns: line[1] is FLAG, line[9] is SEQ (so its length is
            # the read length).
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Stop samtools once enough reads have been sampled.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)
    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
def check_fastq(fastq, o):
    """Read type/length detection for fastq input; not yet supported."""
    message = ("Detection of read type/length for "
               "fastq input is not yet implemented.")
    raise NotImplementedError(message)
def coll_like(c):
    """
    Determine whether an object is collection-like.

    :param object c: Object to test as collection
    :return bool: Whether the argument is a (non-string) collection
    """
    # Strings are iterable but are not considered collections here.
    if isinstance(c, str):
        return False
    return isinstance(c, Iterable)
def copy(obj):
    """
    Attach a deep-copying ``copy`` method to the given class.

    :param type obj: decorator target to augment with a ``copy`` method
    :return type: the same object, now carrying a ``copy`` method
    """
    def copy(self):
        """
        Copy self to a new object.
        """
        from copy import deepcopy
        return deepcopy(self)
    setattr(obj, "copy", copy)
    return obj
def expandpath(path):
    """
    Expand a filesystem path that may or may not contain user/env vars.

    :param str path: path to expand
    :return str: expanded version of input path
    """
    # Expand "~" first, then environment variables, then squash any
    # doubled separators the substitutions may have produced.
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return expanded.replace("//", "/")
def get_file_size(filename):
    """
    Get size of all files in gigabytes (Gb).

    :param str | collections.Iterable[str] filename: A space-separated
        string or list of space-separated strings of absolute file paths.
    :return float: size of file(s), in gigabytes.
    """
    if filename is None:
        return float(0)
    # isinstance rather than 'type(...) is list' so list subclasses work too.
    if isinstance(filename, list):
        return float(sum(get_file_size(x) for x in filename))
    try:
        # Skip empty tokens from repeated spaces. The original used
        # "f is not ''" — an identity check that only worked by CPython's
        # string-interning accident (and warns on modern interpreters).
        total_bytes = sum(float(os.stat(f).st_size)
                          for f in filename.split(" ") if f)
    except OSError:
        # File not found (or unreadable): report zero size.
        return 0.0
    else:
        return float(total_bytes) / (1024 ** 3)
def fetch_samples(proj, selector_attribute=None, selector_include=None, selector_exclude=None):
    """
    Collect samples of particular protocol(s).

    Protocols can't be both positively selected for and negatively
    selected against. That is, it makes no sense and is not allowed to
    specify both selector_include and selector_exclude protocols. On the other hand, if
    neither is provided, all of the Project's Samples are returned.
    If selector_include is specified, Samples without a protocol will be excluded,
    but if selector_exclude is specified, protocol-less Samples will be included.

    :param Project proj: the Project with Samples to fetch
    :param str selector_attribute: name of the sample attribute to select on
    :param Iterable[str] | str selector_include: protocol(s) of interest;
        if specified, a Sample must
    :param Iterable[str] | str selector_exclude: protocol(s) to include
    :return list[Sample]: Collection of this Project's samples with
        protocol that either matches one of those in selector_include, or either
        lacks a protocol or does not match one of those in selector_exclude
    :raise AttributeError: if no sample has the requested selector attribute
    :raise TypeError: if both selector_include and selector_exclude protocols are
        specified; TypeError since it's basically providing two arguments
        when only one is accepted, so remain consistent with vanilla Python2
    """
    # No attribute or no criteria: trivially keep every sample.
    if selector_attribute is None or (not selector_include and not selector_exclude):
        # Simple; keep all samples. In this case, this function simply
        # offers a list rather than an iterator.
        return list(proj.samples)
    # At least one of the samples has to have the specified attribute
    if proj.samples and not any([hasattr(i, selector_attribute) for i in proj.samples]):
        raise AttributeError("The Project samples do not have the attribute '{attr}'"
                             .format(attr=selector_attribute))
    # Intersection between selector_include and selector_exclude is nonsense user error.
    if selector_include and selector_exclude:
        raise TypeError("Specify only selector_include or selector_exclude parameter, "
                        "not both.")
    # Ensure that we're working with sets.
    def make_set(items):
        # Normalize a lone string to a one-element list so membership
        # tests operate on items, not characters.
        if isinstance(items, str):
            items = [items]
        return items
    # Use the attr check here rather than exception block in case the
    # hypothetical AttributeError would occur; we want such
    # an exception to arise, not to catch it as if the Sample lacks "protocol"
    if not selector_include:
        # Loose; keep all samples not in the selector_exclude.
        def keep(s):
            return not hasattr(s, selector_attribute) or \
                   getattr(s, selector_attribute) not in make_set(selector_exclude)
    else:
        # Strict; keep only samples in the selector_include.
        def keep(s):
            return hasattr(s, selector_attribute) and \
                   getattr(s, selector_attribute) in make_set(selector_include)
    return list(filter(keep, proj.samples))
def grab_project_data(prj):
    """
    From the given Project, grab Sample-independent data.

    There are some aspects of a Project of which it's beneficial for a Sample
    to be aware, particularly for post-hoc analysis. Since Sample objects
    within a Project are mutually independent, though, each doesn't need to
    know about any of the others. A Project manages its Sample instances,
    so for each Sample knowledge of Project data is limited. This method
    facilitates adoption of that conceptual model.

    :param Project prj: Project from which to grab data
    :return Mapping: Sample-independent data sections from given Project
    """
    if not prj:
        return {}
    grabbed = {}
    for section_name in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            grabbed[section_name] = getattr(prj, section_name)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section_name)
    return grabbed
def has_null_value(k, m):
    """
    Determine whether a mapping has a null value for a given key.

    :param Hashable k: Key to test for null value
    :param Mapping m: Mapping to test for null value for given key
    :return bool: Whether given mapping contains given key with null value
    """
    if k not in m:
        return False
    return is_null_like(m[k])
def import_from_source(module_filepath):
    """
    Import a module from a particular filesystem location.

    :param str module_filepath: path to the file that constitutes the module
        to import
    :return module: module imported from the given location, named as indicated
    :raises ValueError: if path provided does not point to an extant file
    """
    import sys
    if not os.path.exists(module_filepath):
        raise ValueError("Path to alleged module file doesn't point to an "
                         "extant file: '{}'".format(module_filepath))
    # Randomly generate module name.
    # A random name prevents collisions when the same file is imported twice.
    fname_chars = string.ascii_letters + string.digits
    name = "".join(random.choice(fname_chars) for _ in range(20))
    # Import logic is version-dependent.
    if sys.version_info >= (3, 5):
        from importlib import util as _il_util
        modspec = _il_util.spec_from_file_location(
            name, module_filepath)
        mod = _il_util.module_from_spec(modspec)
        modspec.loader.exec_module(mod)
    elif sys.version_info < (3, 3):
        # 'imp' is deprecated but is the only option on Python 2 / 3.2.
        import imp
        mod = imp.load_source(name, module_filepath)
    else:
        # 3.3 or 3.4
        from importlib import machinery as _il_mach
        loader = _il_mach.SourceFileLoader(name, module_filepath)
        mod = loader.load_module()
    return mod
def infer_delimiter(filepath):
    """
    From extension infer delimiter used in a separated values file.

    :param str filepath: path to file about which to make inference
    :return str | NoneType: delimiter if inference succeeded; else null
    """
    # splitext()[1] is the dotted extension; [1:] strips the leading dot.
    ext = os.path.splitext(filepath)[1][1:].lower()
    return {"txt": "\t", "tsv": "\t", "csv": ","}.get(ext)
def is_null_like(x):
    """
    Determine whether an object is effectively null.

    :param object x: Object for which null likeness is to be determined.
    :return bool: Whether given object is effectively "null."
    """
    # Null-like: None, empty string, or an empty sized (non-string) collection.
    return x in [None, ""] or \
        (coll_like(x) and isinstance(x, Sized) and 0 == len(x))
def is_url(maybe_url):
    """
    Determine whether a path is a URL.

    :param str maybe_url: path to investigate as URL
    :return bool: whether path appears to be a URL
    """
    # A non-empty scheme component (e.g. "http") marks the path as a URL.
    return urlparse(maybe_url).scheme != ""
def non_null_value(k, m):
    """
    Determine whether a mapping has a non-null value for a given key.

    :param Hashable k: Key to test for non-null value
    :param Mapping m: Mapping to test for non-null value for given key
    :return bool: Whether given mapping contains given key with non-null value
    """
    # Short-circuits on a missing key, so m[k] is only evaluated when present.
    return k in m and not is_null_like(m[k])
def parse_ftype(input_file):
    """
    Checks determine filetype from extension.

    :param str input_file: String to check.
    :return str: filetype (extension without dot prefix)
    :raises TypeError: if file does not appear of a supported type
    """
    if input_file.endswith(".bam"):
        return "bam"
    # Accept both plain and gzipped fastq spellings.
    elif input_file.endswith(".fastq") or \
            input_file.endswith(".fq") or \
            input_file.endswith(".fq.gz") or \
            input_file.endswith(".fastq.gz"):
        return "fastq"
    else:
        raise TypeError("Type of input file ends in neither '.bam' "
                        "nor '.fastq' [file: '" + input_file + "']")
def parse_text_data(lines_or_path, delimiter=os.linesep):
    """
    Interpret input argument as lines of data.

    This is intended to support multiple input argument types to core
    model constructors.

    :param str | collections.Iterable lines_or_path: filepath to read, raw
        text to split, or an iterable of already-parsed lines
    :param str delimiter: line separator used when parsing a raw string that's
        not a file
    :return collections.Iterable: lines of text data
    :raises ValueError: if primary data argument is neither a string nor
        another iterable
    """
    # Check the type before probing the filesystem: os.path.isfile() raises
    # TypeError on non-string input, which previously made the documented
    # iterable (e.g. list-of-lines) input crash before reaching its branch.
    if isinstance(lines_or_path, str):
        if os.path.isfile(lines_or_path):
            with open(lines_or_path, 'r') as f:
                return f.readlines()
        _LOGGER.debug("Not a file: '{}'".format(lines_or_path))
        return lines_or_path.split(delimiter)
    if isinstance(lines_or_path, Iterable):
        return lines_or_path
    raise ValueError("Unable to parse as data lines {} ({})".
                     format(lines_or_path, type(lines_or_path)))
def sample_folder(prj, sample):
    """
    Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is associated
    :param Mapping sample: Sample or sample data for which to get root output
        folder path.
    :return str: this Project's root folder for the given Sample
    """
    # Per-sample output lives under the project's results subdirectory,
    # keyed by the sample's name.
    return os.path.join(prj.metadata.results_subdir,
                        sample["sample_name"])
@contextlib.contextmanager
def standard_stream_redirector(stream):
    """
    Temporarily redirect stdout and stderr to another stream.

    This can be useful for capturing messages for easier inspection, or
    for rerouting and essentially ignoring them, with the destination as
    something like an opened os.devnull.

    :param FileIO[str] stream: temporary proxy for standard streams
    """
    import sys
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    sys.stdout = stream
    sys.stderr = stream
    try:
        yield
    finally:
        # Always restore the genuine streams, even if the body raised.
        sys.stdout = saved_stdout
        sys.stderr = saved_stderr
def warn_derived_cols():
    """ Produce deprecation warning about derived columns. """
    # Delegate to the shared columns->attributes deprecation helper.
    _warn_cols_to_attrs("derived")
def warn_implied_cols():
    """ Produce deprecation warning about implied columns. """
    # Delegate to the shared columns->attributes deprecation helper.
    _warn_cols_to_attrs("implied")
def _warn_cols_to_attrs(prefix):
    """ Produce deprecation warning about 'columns' rather than 'attributes' """
    # prefix: attribute-kind label ("derived" or "implied") interpolated into
    # both the old and new names in the message.
    warnings.warn("{pfx}_columns should be encoded and referenced "
                  "as {pfx}_attributes".format(pfx=prefix), DeprecationWarning)
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)    # Access by section.
        self.failures = set()    # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # BUGFIX: _store_status() returns True on success; the
                    # log labels in this branch were previously inverted
                    # (a successful command was logged as FAILURE).
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    def _store_status(self, section, command, name):
        """
        Based on new command execution attempt, update instance's
        data structures with information about the success/fail status.
        Return the result of the execution test.
        """
        succeeded = is_command_callable(command, name)
        # Store status regardless of its value in the instance's largest DS.
        self.section_to_status_by_command[section][command] = succeeded
        if not succeeded:
            # Only update the failure-specific structures conditionally.
            self.failures_by_section[section].append(command)
            self.failures.add(command)
        return succeeded

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raises ValueError: if no commands were validated
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
|
pepkit/peppy
|
peppy/utils.py
|
CommandChecker._store_status
|
python
|
def _store_status(self, section, command, name):
succeeded = is_command_callable(command, name)
# Store status regardless of its value in the instance's largest DS.
self.section_to_status_by_command[section][command] = succeeded
if not succeeded:
# Only update the failure-specific structures conditionally.
self.failures_by_section[section].append(command)
self.failures.add(command)
return succeeded
|
Based on new command execution attempt, update instance's
data structures with information about the success/fail status.
Return the result of the execution test.
|
train
|
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L508-L521
|
[
"def is_command_callable(command, name=\"\"):\n \"\"\"\n Check if command can be called.\n\n :param str command: actual command to call\n :param str name: nickname/alias by which to reference the command, optional\n :return bool: whether given command's call succeeded\n \"\"\"\n\n # Use `command` to see if command is callable, store exit code\n code = os.system(\n \"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}\".format(command))\n\n if code != 0:\n alias_value = \" ('{}') \".format(name) if name else \" \"\n _LOGGER.debug(\"Command '{0}' is not callable: {1}\".\n format(alias_value, command))\n return not bool(code)\n"
] |
class CommandChecker(object):
    """
    Validate PATH availability of executables referenced by a config file.

    :param str path_conf_file: path to configuration file with
        sections detailing executable tools to validate
    :param Iterable[str] sections_to_check: names of
        sections of the given configuration file that are relevant;
        optional, will default to all sections if not given, but some
        may be excluded via another optional parameter
    :param Iterable[str] sections_to_skip: analogous to
        the check names parameter, but for specific sections to skip.
    """

    # NOTE(review): this excerpt omits _store_status(), which __init__ calls;
    # it is defined with the full class elsewhere — confirm when merging.
    def __init__(self, path_conf_file,
                 sections_to_check=None, sections_to_skip=None):
        super(CommandChecker, self).__init__()
        self._logger = logging.getLogger(
            "{}.{}".format(__name__, self.__class__.__name__))
        # TODO: could provide parse strategy as parameter to supplement YAML.
        # TODO: could also derive parsing behavior from extension.
        self.path = path_conf_file
        with open(self.path, 'r') as conf_file:
            conf_data = yaml.safe_load(conf_file)
        # Determine which sections to validate.
        sections = {sections_to_check} if isinstance(sections_to_check, str) \
            else set(sections_to_check or conf_data.keys())
        excl = {sections_to_skip} if isinstance(sections_to_skip, str) \
            else set(sections_to_skip or [])
        sections -= excl
        self._logger.info("Validating %d sections: %s",
                          len(sections),
                          ", ".join(["'{}'".format(s) for s in sections]))
        # Store per-command mapping of status, nested under section.
        self.section_to_status_by_command = defaultdict(dict)
        # Store only information about the failures.
        self.failures_by_section = defaultdict(list)  # Access by section.
        self.failures = set()  # Access by command.
        for s in sections:
            # Fetch section data or skip.
            try:
                section_data = conf_data[s]
            except KeyError:
                _LOGGER.info("No section '%s' in file '%s', skipping",
                             s, self.path)
                continue
            # Test each of the section's commands.
            try:
                # Is section's data a mapping?
                commands_iter = section_data.items()
                self._logger.debug("Processing section '%s' data "
                                   "as mapping", s)
                for name, command in commands_iter:
                    # NOTE(review): _store_status() returns True on success
                    # in the sibling definitions, so the FAILURE/SUCCESS
                    # labels below appear inverted — confirm and fix.
                    failed = self._store_status(section=s, command=command,
                                                name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "FAILURE" if failed else "SUCCESS")
            except AttributeError:
                self._logger.debug("Processing section '%s' data as list", s)
                commands_iter = conf_data[s]
                for cmd_item in commands_iter:
                    # Item is K-V pair?
                    try:
                        name, command = cmd_item
                    except ValueError:
                        # Treat item as command itself.
                        name, command = "", cmd_item
                    success = self._store_status(section=s, command=command,
                                                 name=name)
                    self._logger.debug("Command '%s': %s", command,
                                       "SUCCESS" if success else "FAILURE")

    @property
    def failed(self):
        """
        Determine whether *every* command succeeded for *every* config file
        section that was validated during instance construction.

        :return bool: conjunction of execution success test result values,
            obtained by testing each executable in every validated section
        :raises ValueError: if no commands were validated
        """
        # This will raise exception even if validation was attempted,
        # but no sections were used. Effectively, delegate responsibility
        # to the caller to initiate validation only if doing so is relevant.
        if not self.section_to_status_by_command:
            raise ValueError("No commands validated")
        return 0 == len(self.failures)
|
BrewBlox/brewblox-service
|
brewblox_service/scheduler.py
|
create_task
|
python
|
async def create_task(app: web.Application,
coro: Coroutine,
*args, **kwargs
) -> asyncio.Task:
return await get_scheduler(app).create(coro, *args, **kwargs)
|
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L24-L58
|
[
"def get_scheduler(app: web.Application) -> 'TaskScheduler':\n return features.get(app, TaskScheduler)\n"
] |
"""
Background task scheduling.
"""
import asyncio
from contextlib import suppress
from typing import Any, Coroutine, Set
from aiohttp import web
from brewblox_service import features
CLEANUP_INTERVAL_S = 300
def setup(app: web.Application):
    # Register a TaskScheduler instance as an app-wide feature.
    features.add(app, TaskScheduler(app))
def get_scheduler(app: web.Application) -> 'TaskScheduler':
    # Look up the TaskScheduler feature previously registered via setup().
    return features.get(app, TaskScheduler)
async def cancel_task(app: web.Application,
                      task: asyncio.Task,
                      *args, **kwargs
                      ) -> Any:
    """
    Convenience function for calling `TaskScheduler.cancel(task)`

    This will use the default `TaskScheduler` to cancel the given task.
    Any extra positional/keyword arguments are forwarded to
    `TaskScheduler.cancel()`.

    Example:

        async def stop_after(app, task, duration):
            await asyncio.sleep(duration)
            await scheduler.cancel_task(app, task)
            print('stopped!')
    """
    scheduler = get_scheduler(app)
    return await scheduler.cancel(task, *args, **kwargs)
class TaskScheduler(features.ServiceFeature):
    """Creates, tracks, and cancels background asyncio tasks for the app."""

    def __init__(self, app: web.Application):
        super().__init__(app)
        self._tasks: Set[asyncio.Task] = set()

    async def startup(self, *_):
        # Start the periodic cleanup of completed tasks.
        await self.create(self._cleanup())

    async def shutdown(self, *_):
        # Plain loop instead of a side-effect list comprehension.
        for task in self._tasks:
            task.cancel()
        # BUGFIX: asyncio.wait() raises ValueError when given an empty
        # collection, so only await when there is something to wait for.
        if self._tasks:
            await asyncio.wait(self._tasks)
        self._tasks.clear()

    async def _cleanup(self):
        """
        Periodically removes completed tasks from the collection,
        allowing fire-and-forget tasks to be garbage collected.

        This does not delete the task object, it merely removes the
        reference in the scheduler.
        """
        while True:
            await asyncio.sleep(CLEANUP_INTERVAL_S)
            self._tasks = {t for t in self._tasks if not t.done()}

    async def create(self, coro: Coroutine) -> asyncio.Task:
        """
        Starts execution of a coroutine.

        The created asyncio.Task is returned, and added to managed tasks.
        The scheduler guarantees that it is cancelled during application shutdown,
        regardless of whether it was already cancelled manually.

        Args:
            coro (Coroutine):
                The coroutine to be wrapped in a task, and executed.

        Returns:
            asyncio.Task: An awaitable Task object.
            During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
            The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
        """
        task = asyncio.get_event_loop().create_task(coro)
        self._tasks.add(task)
        return task

    async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
        """
        Cancels and waits for an `asyncio.Task` to finish.
        Removes it from the collection of managed tasks.

        Args:
            task (asyncio.Task):
                The to be cancelled task.
                It is not required that the task was was created with `TaskScheduler.create_task()`.

            wait_for (bool, optional):
                Whether to wait for the task to finish execution.
                If falsey, this function returns immediately after cancelling the task.

        Returns:
            Any: The return value of `task`. None if `wait_for` is falsey.
        """
        if task is None:
            return
        task.cancel()
        with suppress(KeyError):
            self._tasks.remove(task)
        # Broad suppress: a cancelled task raises CancelledError when awaited.
        with suppress(Exception):
            return (await task) if wait_for else None
|
BrewBlox/brewblox-service
|
brewblox_service/scheduler.py
|
cancel_task
|
python
|
async def cancel_task(app: web.Application,
task: asyncio.Task,
*args, **kwargs
) -> Any:
return await get_scheduler(app).cancel(task, *args, **kwargs)
|
Convenience function for calling `TaskScheduler.cancel(task)`
This will use the default `TaskScheduler` to cancel the given task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def stop_after(app, task, duration):
await asyncio.sleep(duration)
await scheduler.cancel_task(app, task)
print('stopped!')
async def start(app):
# Start first task
task = await scheduler.create_task(app, current_time(interval=2))
# Start second task to stop the first
await scheduler.create_task(app, stop_after(app, task, duration=10))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L61-L104
|
[
"def get_scheduler(app: web.Application) -> 'TaskScheduler':\n return features.get(app, TaskScheduler)\n"
] |
"""
Background task scheduling.
"""
import asyncio
from contextlib import suppress
from typing import Any, Coroutine, Set
from aiohttp import web
from brewblox_service import features
CLEANUP_INTERVAL_S = 300
def setup(app: web.Application):
features.add(app, TaskScheduler(app))
def get_scheduler(app: web.Application) -> 'TaskScheduler':
return features.get(app, TaskScheduler)
async def create_task(app: web.Application,
coro: Coroutine,
*args, **kwargs
) -> asyncio.Task:
"""
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).create(coro, *args, **kwargs)
class TaskScheduler(features.ServiceFeature):
def __init__(self, app: web.Application):
super().__init__(app)
self._tasks: Set[asyncio.Task] = set()
async def startup(self, *_):
await self.create(self._cleanup())
async def shutdown(self, *_):
[task.cancel() for task in self._tasks]
await asyncio.wait(self._tasks)
self._tasks.clear()
async def _cleanup(self):
"""
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
"""
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
async def create(self, coro: Coroutine) -> asyncio.Task:
"""
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
"""
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
"""
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
"""
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
|
BrewBlox/brewblox-service
|
brewblox_service/scheduler.py
|
TaskScheduler._cleanup
|
python
|
async def _cleanup(self):
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
|
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L121-L130
| null |
class TaskScheduler(features.ServiceFeature):
def __init__(self, app: web.Application):
super().__init__(app)
self._tasks: Set[asyncio.Task] = set()
async def startup(self, *_):
await self.create(self._cleanup())
async def shutdown(self, *_):
[task.cancel() for task in self._tasks]
await asyncio.wait(self._tasks)
self._tasks.clear()
async def create(self, coro: Coroutine) -> asyncio.Task:
"""
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
"""
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
"""
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
"""
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
|
BrewBlox/brewblox-service
|
brewblox_service/scheduler.py
|
TaskScheduler.create
|
python
|
async def create(self, coro: Coroutine) -> asyncio.Task:
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
|
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L132-L151
| null |
class TaskScheduler(features.ServiceFeature):
def __init__(self, app: web.Application):
super().__init__(app)
self._tasks: Set[asyncio.Task] = set()
async def startup(self, *_):
await self.create(self._cleanup())
async def shutdown(self, *_):
[task.cancel() for task in self._tasks]
await asyncio.wait(self._tasks)
self._tasks.clear()
async def _cleanup(self):
"""
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
"""
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
"""
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
"""
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
|
BrewBlox/brewblox-service
|
brewblox_service/scheduler.py
|
TaskScheduler.cancel
|
python
|
async def cancel(self, task: asyncio.Task, wait_for: bool = True) -> Any:
if task is None:
return
task.cancel()
with suppress(KeyError):
self._tasks.remove(task)
with suppress(Exception):
return (await task) if wait_for else None
|
Cancels and waits for an `asyncio.Task` to finish.
Removes it from the collection of managed tasks.
Args:
task (asyncio.Task):
The to be cancelled task.
It is not required that the task was was created with `TaskScheduler.create_task()`.
wait_for (bool, optional):
Whether to wait for the task to finish execution.
If falsey, this function returns immediately after cancelling the task.
Returns:
Any: The return value of `task`. None if `wait_for` is falsey.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L153-L180
| null |
class TaskScheduler(features.ServiceFeature):
def __init__(self, app: web.Application):
super().__init__(app)
self._tasks: Set[asyncio.Task] = set()
async def startup(self, *_):
await self.create(self._cleanup())
async def shutdown(self, *_):
[task.cancel() for task in self._tasks]
await asyncio.wait(self._tasks)
self._tasks.clear()
async def _cleanup(self):
"""
Periodically removes completed tasks from the collection,
allowing fire-and-forget tasks to be garbage collected.
This does not delete the task object, it merely removes the reference in the scheduler.
"""
while True:
await asyncio.sleep(CLEANUP_INTERVAL_S)
self._tasks = {t for t in self._tasks if not t.done()}
async def create(self, coro: Coroutine) -> asyncio.Task:
"""
Starts execution of a coroutine.
The created asyncio.Task is returned, and added to managed tasks.
The scheduler guarantees that it is cancelled during application shutdown,
regardless of whether it was already cancelled manually.
Args:
coro (Coroutine):
The coroutine to be wrapped in a task, and executed.
Returns:
asyncio.Task: An awaitable Task object.
During Aiohttp shutdown, the scheduler will attempt to cancel and await this task.
The task can be safely cancelled manually, or using `TaskScheduler.cancel(task)`.
"""
task = asyncio.get_event_loop().create_task(coro)
self._tasks.add(task)
return task
|
BrewBlox/brewblox-service
|
brewblox_service/service.py
|
create_parser
|
python
|
def create_parser(default_name: str) -> argparse.ArgumentParser:
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')
argparser.add_argument('-H', '--host',
help='Host to which the app binds. [%(default)s]',
default='0.0.0.0')
argparser.add_argument('-p', '--port',
help='Port to which the app binds. [%(default)s]',
default=5000,
type=int)
argparser.add_argument('-o', '--output',
help='Logging output. [%(default)s]')
argparser.add_argument('-n', '--name',
help='Service name. This will be used as prefix for all endpoints. [%(default)s]',
default=default_name)
argparser.add_argument('--debug',
help='Run the app in debug mode. [%(default)s]',
action='store_true')
argparser.add_argument('--eventbus-host',
help='Hostname at which the eventbus can be reached [%(default)s]',
default='eventbus')
argparser.add_argument('--eventbus-port',
help='Port at which the eventbus can be reached [%(default)s]',
default=5672,
type=int)
return argparser
|
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/service.py#L68-L106
| null |
"""
Generic startup functions for a brewblox application.
Responsible for parsing user configuration, and creating top-level objects.
This module provides the framework to which service implementations can attach their features.
Example:
# Uses the default argument parser
# To add new commandline arguments, use create_parser()
app = service.create_app(default_name='my_service')
# (Placeholder names)
# All features (endpoints and async handlers) must be created and added to the app here
# The Aiohttp Application will freeze functionality once it has been started
feature_one.setup(app)
feature_two.setup(app)
# Modify added resources to conform to standards
service.furnish(app)
# Run the application
# This function blocks until the application is shut down
service.run(app)
"""
import argparse
import logging
# The argumentparser can't fall back to the default sys.argv if sys is not imported
import sys # noqa
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import List
import aiohttp_swagger
from aiohttp import web
from brewblox_service import brewblox_logger, cors_middleware, features
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
def _init_logging(args: argparse.Namespace):
level = logging.DEBUG if args.debug else logging.INFO
format = '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
if args.output:
handler = TimedRotatingFileHandler(
args.output,
when='d',
interval=1,
backupCount=7,
encoding='utf-8'
)
handler.setFormatter(logging.Formatter(format, datefmt))
handler.setLevel(level)
logging.getLogger().addHandler(handler)
if not args.debug:
logging.getLogger('aioamqp').setLevel(logging.WARN)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('aiohttp.access').setLevel(logging.WARN)
def create_app(
default_name: str = None,
parser: argparse.ArgumentParser = None,
raw_args: List[str] = None
) -> web.Application:
"""
Creates and configures an Aiohttp application.
Args:
default_name (str, optional):
Default value for the --name commandline argument.
This value is required if `parser` is not provided.
This value will be ignored if `parser` is provided.
parser (argparse.ArgumentParser, optional):
Application-specific parser.
If not provided, the return value of `create_parser()` will be used.
raw_args (list of str, optional):
Explicit commandline arguments.
Defaults to sys.argv[1:]
Returns:
web.Application: A configured Aiohttp Application object.
This Application must be furnished, and is not yet running.
"""
if parser is None:
assert default_name, 'Default service name is required'
parser = create_parser(default_name)
args = parser.parse_args(raw_args)
_init_logging(args)
LOGGER.info(f'Creating [{args.name}] application')
app = web.Application()
app['config'] = vars(args)
return app
def furnish(app: web.Application):
"""
Configures Application routes, readying it for running.
This function modifies routes and resources that were added by calling code,
and must be called immediately prior to `run(app)`.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
app_name = app['config']['name']
prefix = '/' + app_name.lstrip('/')
app.router.add_routes(routes)
cors_middleware.enable_cors(app)
# Configure CORS and prefixes on all endpoints.
known_resources = set()
for route in list(app.router.routes()):
if route.resource in known_resources:
continue
known_resources.add(route.resource)
route.resource.add_prefix(prefix)
# Configure swagger settings
# We set prefix explicitly here
aiohttp_swagger.setup_swagger(app,
swagger_url=prefix + '/api/doc',
description='',
title=f'Brewblox Service "{app_name}"',
api_version='0.0',
contact='development@brewpi.com')
LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
for route in app.router.routes():
LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
for name, impl in app.get(features.FEATURES_KEY, {}).items():
LOGGER.info(f'Feature [{name}] {impl}')
def run(app: web.Application):
"""
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
host = app['config']['host']
port = app['config']['port']
# starts app. run_app() will automatically start the async context.
web.run_app(app, host=host, port=port)
@routes.get('/_service/status')
async def healthcheck(request: web.Request) -> web.Response:
"""
---
tags:
- Service
summary: health check
description: Returns service health.
operationId: _service.status
produces:
- application/json
responses:
"200":
description: successful operation
"""
return web.json_response({'status': 'ok'})
|
BrewBlox/brewblox-service
|
brewblox_service/service.py
|
create_app
|
python
|
def create_app(
default_name: str = None,
parser: argparse.ArgumentParser = None,
raw_args: List[str] = None
) -> web.Application:
if parser is None:
assert default_name, 'Default service name is required'
parser = create_parser(default_name)
args = parser.parse_args(raw_args)
_init_logging(args)
LOGGER.info(f'Creating [{args.name}] application')
app = web.Application()
app['config'] = vars(args)
return app
|
Creates and configures an Aiohttp application.
Args:
default_name (str, optional):
Default value for the --name commandline argument.
This value is required if `parser` is not provided.
This value will be ignored if `parser` is provided.
parser (argparse.ArgumentParser, optional):
Application-specific parser.
If not provided, the return value of `create_parser()` will be used.
raw_args (list of str, optional):
Explicit commandline arguments.
Defaults to sys.argv[1:]
Returns:
web.Application: A configured Aiohttp Application object.
This Application must be furnished, and is not yet running.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/service.py#L109-L147
|
[
"def _init_logging(args: argparse.Namespace):\n level = logging.DEBUG if args.debug else logging.INFO\n format = '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'\n datefmt = '%Y/%m/%d %H:%M:%S'\n\n logging.basicConfig(level=level, format=format, datefmt=datefmt)\n\n if args.output:\n handler = TimedRotatingFileHandler(\n args.output,\n when='d',\n interval=1,\n backupCount=7,\n encoding='utf-8'\n )\n handler.setFormatter(logging.Formatter(format, datefmt))\n handler.setLevel(level)\n logging.getLogger().addHandler(handler)\n\n if not args.debug:\n logging.getLogger('aioamqp').setLevel(logging.WARN)\n logging.getLogger('asyncio').setLevel(logging.CRITICAL)\n logging.getLogger('aiohttp.access').setLevel(logging.WARN)\n",
"def create_parser(default_name: str) -> argparse.ArgumentParser:\n \"\"\"\n Creates the default brewblox_service ArgumentParser.\n Service-agnostic arguments are added.\n\n The parser allows calling code to add additional arguments before using it in create_app()\n\n Args:\n default_name (str):\n default value for the --name commandline argument.\n\n Returns:\n argparse.ArgumentParser: a Python ArgumentParser with defaults set.\n\n \"\"\"\n argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n argparser.add_argument('-H', '--host',\n help='Host to which the app binds. [%(default)s]',\n default='0.0.0.0')\n argparser.add_argument('-p', '--port',\n help='Port to which the app binds. [%(default)s]',\n default=5000,\n type=int)\n argparser.add_argument('-o', '--output',\n help='Logging output. [%(default)s]')\n argparser.add_argument('-n', '--name',\n help='Service name. This will be used as prefix for all endpoints. [%(default)s]',\n default=default_name)\n argparser.add_argument('--debug',\n help='Run the app in debug mode. [%(default)s]',\n action='store_true')\n argparser.add_argument('--eventbus-host',\n help='Hostname at which the eventbus can be reached [%(default)s]',\n default='eventbus')\n argparser.add_argument('--eventbus-port',\n help='Port at which the eventbus can be reached [%(default)s]',\n default=5672,\n type=int)\n return argparser\n"
] |
"""
Generic startup functions for a brewblox application.
Responsible for parsing user configuration, and creating top-level objects.
This module provides the framework to which service implementations can attach their features.
Example:
# Uses the default argument parser
# To add new commandline arguments, use create_parser()
app = service.create_app(default_name='my_service')
# (Placeholder names)
# All features (endpoints and async handlers) must be created and added to the app here
# The Aiohttp Application will freeze functionality once it has been started
feature_one.setup(app)
feature_two.setup(app)
# Modify added resources to conform to standards
service.furnish(app)
# Run the application
# This function blocks until the application is shut down
service.run(app)
"""
import argparse
import logging
# The argumentparser can't fall back to the default sys.argv if sys is not imported
import sys # noqa
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import List
import aiohttp_swagger
from aiohttp import web
from brewblox_service import brewblox_logger, cors_middleware, features
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
def _init_logging(args: argparse.Namespace):
level = logging.DEBUG if args.debug else logging.INFO
format = '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
if args.output:
handler = TimedRotatingFileHandler(
args.output,
when='d',
interval=1,
backupCount=7,
encoding='utf-8'
)
handler.setFormatter(logging.Formatter(format, datefmt))
handler.setLevel(level)
logging.getLogger().addHandler(handler)
if not args.debug:
logging.getLogger('aioamqp').setLevel(logging.WARN)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('aiohttp.access').setLevel(logging.WARN)
def create_parser(default_name: str) -> argparse.ArgumentParser:
"""
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
"""
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')
argparser.add_argument('-H', '--host',
help='Host to which the app binds. [%(default)s]',
default='0.0.0.0')
argparser.add_argument('-p', '--port',
help='Port to which the app binds. [%(default)s]',
default=5000,
type=int)
argparser.add_argument('-o', '--output',
help='Logging output. [%(default)s]')
argparser.add_argument('-n', '--name',
help='Service name. This will be used as prefix for all endpoints. [%(default)s]',
default=default_name)
argparser.add_argument('--debug',
help='Run the app in debug mode. [%(default)s]',
action='store_true')
argparser.add_argument('--eventbus-host',
help='Hostname at which the eventbus can be reached [%(default)s]',
default='eventbus')
argparser.add_argument('--eventbus-port',
help='Port at which the eventbus can be reached [%(default)s]',
default=5672,
type=int)
return argparser
def furnish(app: web.Application):
"""
Configures Application routes, readying it for running.
This function modifies routes and resources that were added by calling code,
and must be called immediately prior to `run(app)`.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
app_name = app['config']['name']
prefix = '/' + app_name.lstrip('/')
app.router.add_routes(routes)
cors_middleware.enable_cors(app)
# Configure CORS and prefixes on all endpoints.
known_resources = set()
for route in list(app.router.routes()):
if route.resource in known_resources:
continue
known_resources.add(route.resource)
route.resource.add_prefix(prefix)
# Configure swagger settings
# We set prefix explicitly here
aiohttp_swagger.setup_swagger(app,
swagger_url=prefix + '/api/doc',
description='',
title=f'Brewblox Service "{app_name}"',
api_version='0.0',
contact='development@brewpi.com')
LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
for route in app.router.routes():
LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
for name, impl in app.get(features.FEATURES_KEY, {}).items():
LOGGER.info(f'Feature [{name}] {impl}')
def run(app: web.Application):
"""
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
"""
host = app['config']['host']
port = app['config']['port']
# starts app. run_app() will automatically start the async context.
web.run_app(app, host=host, port=port)
@routes.get('/_service/status')
async def healthcheck(request: web.Request) -> web.Response:
"""
---
tags:
- Service
summary: health check
description: Returns service health.
operationId: _service.status
produces:
- application/json
responses:
"200":
description: successful operation
"""
return web.json_response({'status': 'ok'})
|
BrewBlox/brewblox-service
|
brewblox_service/service.py
|
furnish
|
python
|
def furnish(app: web.Application):
app_name = app['config']['name']
prefix = '/' + app_name.lstrip('/')
app.router.add_routes(routes)
cors_middleware.enable_cors(app)
# Configure CORS and prefixes on all endpoints.
known_resources = set()
for route in list(app.router.routes()):
if route.resource in known_resources:
continue
known_resources.add(route.resource)
route.resource.add_prefix(prefix)
# Configure swagger settings
# We set prefix explicitly here
aiohttp_swagger.setup_swagger(app,
swagger_url=prefix + '/api/doc',
description='',
title=f'Brewblox Service "{app_name}"',
api_version='0.0',
contact='development@brewpi.com')
LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
for route in app.router.routes():
LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
for name, impl in app.get(features.FEATURES_KEY, {}).items():
LOGGER.info(f'Feature [{name}] {impl}')
|
Configures Application routes, readying it for running.
This function modifies routes and resources that were added by calling code,
and must be called immediately prior to `run(app)`.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/service.py#L150-L189
|
[
"def enable_cors(app: web.Application):\n app.middlewares.append(cors_middleware)\n"
] |
"""
Generic startup functions for a brewblox application.
Responsible for parsing user configuration, and creating top-level objects.
This module provides the framework to which service implementations can attach their features.
Example:
# Uses the default argument parser
# To add new commandline arguments, use create_parser()
app = service.create_app(default_name='my_service')
# (Placeholder names)
# All features (endpoints and async handlers) must be created and added to the app here
# The Aiohttp Application will freeze functionality once it has been started
feature_one.setup(app)
feature_two.setup(app)
# Modify added resources to conform to standards
service.furnish(app)
# Run the application
# This function blocks until the application is shut down
service.run(app)
"""
import argparse
import logging
# The argumentparser can't fall back to the default sys.argv if sys is not imported
import sys # noqa
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import List
import aiohttp_swagger
from aiohttp import web
from brewblox_service import brewblox_logger, cors_middleware, features
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
def _init_logging(args: argparse.Namespace):
level = logging.DEBUG if args.debug else logging.INFO
format = '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
if args.output:
handler = TimedRotatingFileHandler(
args.output,
when='d',
interval=1,
backupCount=7,
encoding='utf-8'
)
handler.setFormatter(logging.Formatter(format, datefmt))
handler.setLevel(level)
logging.getLogger().addHandler(handler)
if not args.debug:
logging.getLogger('aioamqp').setLevel(logging.WARN)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('aiohttp.access').setLevel(logging.WARN)
def create_parser(default_name: str) -> argparse.ArgumentParser:
"""
Creates the default brewblox_service ArgumentParser.
Service-agnostic arguments are added.
The parser allows calling code to add additional arguments before using it in create_app()
Args:
default_name (str):
default value for the --name commandline argument.
Returns:
argparse.ArgumentParser: a Python ArgumentParser with defaults set.
"""
argparser = argparse.ArgumentParser(fromfile_prefix_chars='@')
argparser.add_argument('-H', '--host',
help='Host to which the app binds. [%(default)s]',
default='0.0.0.0')
argparser.add_argument('-p', '--port',
help='Port to which the app binds. [%(default)s]',
default=5000,
type=int)
argparser.add_argument('-o', '--output',
help='Logging output. [%(default)s]')
argparser.add_argument('-n', '--name',
help='Service name. This will be used as prefix for all endpoints. [%(default)s]',
default=default_name)
argparser.add_argument('--debug',
help='Run the app in debug mode. [%(default)s]',
action='store_true')
argparser.add_argument('--eventbus-host',
help='Hostname at which the eventbus can be reached [%(default)s]',
default='eventbus')
argparser.add_argument('--eventbus-port',
help='Port at which the eventbus can be reached [%(default)s]',
default=5672,
type=int)
return argparser
def create_app(
        default_name: str = None,
        parser: argparse.ArgumentParser = None,
        raw_args: List[str] = None
) -> web.Application:
    """
    Creates and configures an Aiohttp application.

    Args:
        default_name (str, optional):
            Default value for the --name commandline argument.
            Required when `parser` is not given; ignored otherwise.

        parser (argparse.ArgumentParser, optional):
            Application-specific parser.
            When omitted, `create_parser()` supplies the default.

        raw_args (list of str, optional):
            Explicit commandline arguments.
            Defaults to sys.argv[1:]

    Returns:
        web.Application: A configured Aiohttp Application object.
            This Application must be furnished, and is not yet running.
    """
    if parser is None:
        # A default parser can only be built when a default name is known
        assert default_name, 'Default service name is required'
        parser = create_parser(default_name)

    args = parser.parse_args(raw_args)
    _init_logging(args)

    config = vars(args)
    LOGGER.info(f'Creating [{args.name}] application')

    app = web.Application()
    app['config'] = config
    return app
def run(app: web.Application):
    """
    Starts the application inside an async context.

    Blocks until the application is shut down.

    Args:
        app (web.Application):
            The Aiohttp Application as created by `create_app()`
    """
    config = app['config']
    # run_app() creates the event loop and blocks until shutdown
    web.run_app(app, host=config['host'], port=config['port'])
@routes.get('/_service/status')
async def healthcheck(request: web.Request) -> web.Response:
    """
    ---
    tags:
    - Service
    summary: health check
    description: Returns service health.
    operationId: _service.status
    produces:
    - application/json
    responses:
        "200":
            description: successful operation
    """
    # NOTE: the docstring above is swagger YAML, parsed by aiohttp_swagger
    # to build the /api/doc entry for this endpoint -- do not edit casually.
    return web.json_response({'status': 'ok'})
|
BrewBlox/brewblox-service
|
brewblox_service/service.py
|
run
|
python
|
def run(app: web.Application):
host = app['config']['host']
port = app['config']['port']
# starts app. run_app() will automatically start the async context.
web.run_app(app, host=host, port=port)
|
Runs the application in an async context.
This function will block indefinitely until the application is shut down.
Args:
app (web.Application):
The Aiohttp Application as created by `create_app()`
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/service.py#L192-L205
| null |
"""
Generic startup functions for a brewblox application.
Responsible for parsing user configuration, and creating top-level objects.
This module provides the framework to which service implementations can attach their features.
Example:
# Uses the default argument parser
# To add new commandline arguments, use create_parser()
app = service.create_app(default_name='my_service')
# (Placeholder names)
# All features (endpoints and async handlers) must be created and added to the app here
# The Aiohttp Application will freeze functionality once it has been started
feature_one.setup(app)
feature_two.setup(app)
# Modify added resources to conform to standards
service.furnish(app)
# Run the application
# This function blocks until the application is shut down
service.run(app)
"""
import argparse
import logging
# The argumentparser can't fall back to the default sys.argv if sys is not imported
import sys # noqa
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import List
import aiohttp_swagger
from aiohttp import web
from brewblox_service import brewblox_logger, cors_middleware, features
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
def _init_logging(args: argparse.Namespace):
level = logging.DEBUG if args.debug else logging.INFO
format = '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
if args.output:
handler = TimedRotatingFileHandler(
args.output,
when='d',
interval=1,
backupCount=7,
encoding='utf-8'
)
handler.setFormatter(logging.Formatter(format, datefmt))
handler.setLevel(level)
logging.getLogger().addHandler(handler)
if not args.debug:
logging.getLogger('aioamqp').setLevel(logging.WARN)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.getLogger('aiohttp.access').setLevel(logging.WARN)
def create_parser(default_name: str) -> argparse.ArgumentParser:
    """
    Builds the default brewblox_service ArgumentParser.

    All service-agnostic commandline arguments are registered here.
    Calling code may add service-specific arguments to the returned
    parser before passing it to create_app().

    Args:
        default_name (str):
            default value for the --name commandline argument.

    Returns:
        argparse.ArgumentParser: a Python ArgumentParser with defaults set.
    """
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('-H', '--host',
                        default='0.0.0.0',
                        help='Host to which the app binds. [%(default)s]')
    parser.add_argument('-p', '--port',
                        type=int,
                        default=5000,
                        help='Port to which the app binds. [%(default)s]')
    parser.add_argument('-o', '--output',
                        help='Logging output. [%(default)s]')
    parser.add_argument('-n', '--name',
                        default=default_name,
                        help='Service name. This will be used as prefix for all endpoints. [%(default)s]')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Run the app in debug mode. [%(default)s]')
    parser.add_argument('--eventbus-host',
                        default='eventbus',
                        help='Hostname at which the eventbus can be reached [%(default)s]')
    parser.add_argument('--eventbus-port',
                        type=int,
                        default=5672,
                        help='Port at which the eventbus can be reached [%(default)s]')
    return parser
def create_app(
        default_name: str = None,
        parser: argparse.ArgumentParser = None,
        raw_args: List[str] = None
) -> web.Application:
    """
    Creates and configures an Aiohttp application.

    Args:
        default_name (str, optional):
            Default value for the --name commandline argument.
            Required when `parser` is not given; ignored otherwise.

        parser (argparse.ArgumentParser, optional):
            Application-specific parser.
            When omitted, `create_parser()` supplies the default.

        raw_args (list of str, optional):
            Explicit commandline arguments.
            Defaults to sys.argv[1:]

    Returns:
        web.Application: A configured Aiohttp Application object.
            This Application must be furnished, and is not yet running.
    """
    if parser is None:
        # A default parser can only be built when a default name is known
        assert default_name, 'Default service name is required'
        parser = create_parser(default_name)

    args = parser.parse_args(raw_args)
    _init_logging(args)

    config = vars(args)
    LOGGER.info(f'Creating [{args.name}] application')

    app = web.Application()
    app['config'] = config
    return app
def furnish(app: web.Application):
    """
    Configures Application routes, readying it for running.

    This function modifies routes and resources that were added by calling code,
    and must be called immediately prior to `run(app)`.

    Args:
        app (web.Application):
            The Aiohttp Application as created by `create_app()`
    """
    app_name = app['config']['name']
    # Normalize the service name into a single-leading-slash URL prefix
    prefix = '/' + app_name.lstrip('/')

    app.router.add_routes(routes)
    cors_middleware.enable_cors(app)

    # Configure CORS and prefixes on all endpoints.
    # Multiple routes can share one resource; each resource is prefixed once.
    known_resources = set()
    for route in list(app.router.routes()):
        if route.resource in known_resources:
            continue
        known_resources.add(route.resource)
        route.resource.add_prefix(prefix)

    # Configure swagger settings
    # We set prefix explicitly here
    aiohttp_swagger.setup_swagger(app,
                                  swagger_url=prefix + '/api/doc',
                                  description='',
                                  title=f'Brewblox Service "{app_name}"',
                                  api_version='0.0',
                                  contact='development@brewpi.com')

    # Log the final configuration for operator visibility
    LOGGER.info('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))
    for route in app.router.routes():
        LOGGER.info(f'Endpoint [{route.method}] {route.resource}')
    for name, impl in app.get(features.FEATURES_KEY, {}).items():
        LOGGER.info(f'Feature [{name}] {impl}')
@routes.get('/_service/status')
async def healthcheck(request: web.Request) -> web.Response:
    """
    ---
    tags:
    - Service
    summary: health check
    description: Returns service health.
    operationId: _service.status
    produces:
    - application/json
    responses:
        "200":
            description: successful operation
    """
    # NOTE: the docstring above is swagger YAML, parsed by aiohttp_swagger
    # to build the /api/doc entry for this endpoint -- do not edit casually.
    return web.json_response({'status': 'ok'})
|
BrewBlox/brewblox-service
|
brewblox_service/features.py
|
add
|
python
|
def add(app: web.Application,
feature: Any,
key: Hashable = None,
exist_ok: bool = False
):
if FEATURES_KEY not in app:
app[FEATURES_KEY] = dict()
key = key or type(feature)
if key in app[FEATURES_KEY]:
if exist_ok:
return
else:
raise KeyError(f'Feature "{key}" already registered')
app[FEATURES_KEY][key] = feature
|
Adds a new feature to the app.
Features can either be registered as the default feature for the class,
or be given an explicit name.
Args:
app (web.Application):
The current Aiohttp application.
feature (Any):
The new feature that should be registered.
It is recommended, but not required to use a `ServiceFeature`.
key (Hashable, optional):
The key under which the feature should be registered.
Defaults to `type(feature)`.
exist_ok (bool):
If truthy, this function will do nothing if a feature was already registered for `key`.
Otherwise, an exception is raised.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/features.py#L14-L53
| null |
"""
Registers and gets features added to Aiohttp by brewblox services.
"""
from abc import ABC, abstractmethod
from enum import Enum, auto
from typing import Any, Hashable, Type
from aiohttp import web
FEATURES_KEY = '#features'
def get(app: web.Application,
        feature_type: Type[Any] = None,
        key: Hashable = None
        ) -> Any:
    """
    Retrieves a previously registered feature.

    Features are identified by type, by an explicit key, or both.

    Args:
        app (web.Application):
            The current Aiohttp application.

        feature_type (Type[Any]):
            The Python type of the desired feature.
            If specified, the found feature is checked against it.

        key (Hashable):
            A specific identifier for the desired feature.
            Defaults to `feature_type`.

    Returns:
        Any: The feature found for the combination of `feature_type` and `key`
    """
    ident = key or feature_type
    if not ident:
        raise AssertionError('No feature identifier provided')

    # Sentinel distinguishes "missing" from a registered None-like value
    marker = object()
    found = app.get(FEATURES_KEY, {}).get(ident, marker)

    if found is marker:
        raise KeyError(f'No feature found for "{ident}"')

    if feature_type and not isinstance(found, feature_type):
        raise AssertionError(f'Found {found} did not match type "{feature_type}"')

    return found
class Startup(Enum):
    # Lifecycle-hook registration policy, consumed by ServiceFeature.__init__():
    # MANAGED    - always register Aiohttp lifecycle hooks
    # MANUAL     - never register; caller drives startup()/shutdown()
    # AUTODETECT - register only if the app has not been frozen yet
    MANAGED = auto()
    MANUAL = auto()
    AUTODETECT = auto()
class ServiceFeature(ABC):
    """Base class for long-lived Aiohttp handler classes.

    For classes with async functionality, the (synchronous) `__init__()` and
    `__del__()` functions may not be sufficient. Aiohttp offers comparable
    init/deinit hooks, but inside the context of a running event loop.

    ServiceFeature registers `self.startup(app)`, `self.before_shutdown(app)`,
    and `self.shutdown(app)` as lifecycle callbacks. They will be called by
    Aiohttp at the appropriate moment. By overriding these functions,
    subclasses can perform initialization/deinitialization that requires an
    event loop.

    Note: Aiohttp will not accept registration of new callbacks after it
    started running. Startup management can be adjusted by using the
    `startup` argument in `ServiceFeature.__init__()`.
    """

    def __init__(self, app: web.Application, startup=Startup.AUTODETECT):
        """
        ServiceFeature constructor.

        Args:
            app (web.Application):
                The Aiohttp application with which the feature should be associated.

            startup (Startup):
                How feature lifecycle management should be handled. Default is AUTODETECT.

                MANAGED: Feature always registers lifecycle hooks.
                    This will raise an exception when creating
                    the feature while the application is running.

                MANUAL: Feature will not register lifecycle hooks.
                    startup() and shutdown() must be called manually.

                AUTODETECT: Feature will register lifecycle hooks only if app is not running.
                    Behaves like MANAGED before application start,
                    and like MANUAL after application start.
        """
        self.__active_app: web.Application = app

        # `app.frozen` is True once Aiohttp has started; appending hooks
        # after that point would raise inside Aiohttp.
        if any([
            startup == Startup.MANAGED,
            startup == Startup.AUTODETECT and not app.frozen
        ]):
            app.on_startup.append(self.startup)
            app.on_shutdown.append(self.before_shutdown)
            app.on_cleanup.append(self.shutdown)

    @property
    def app(self) -> web.Application:
        """Currently active `web.Application`

        Returns:
            web.Application: The current app.
        """
        return self.__active_app

    @abstractmethod
    async def startup(self, app: web.Application):
        """Lifecycle hook for initializing the feature in an async context.

        Subclasses are expected to override this function.
        Depending on the `startup` argument in `__init__()`,
        `startup()` will be called when Aiohttp starts running.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover

    async def before_shutdown(self, app: web.Application):
        """Lifecycle hook for preparing to shut down the feature.

        Subclasses may override this function, but it is not mandatory.
        Depending on the `startup` argument in `__init__()`,
        `before_shutdown()` will be called when Aiohttp is closing.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover

    @abstractmethod
    async def shutdown(self, app: web.Application = None):
        """Lifecycle hook for shutting down the feature before the event loop is closed.

        Subclasses are expected to override this function.
        Depending on the `startup` argument in `__init__()`,
        `shutdown()` will be called when Aiohttp is closing.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover
|
BrewBlox/brewblox-service
|
brewblox_service/features.py
|
get
|
python
|
def get(app: web.Application,
feature_type: Type[Any] = None,
key: Hashable = None
) -> Any:
key = key or feature_type
if not key:
raise AssertionError('No feature identifier provided')
try:
found = app[FEATURES_KEY][key]
except KeyError:
raise KeyError(f'No feature found for "{key}"')
if feature_type and not isinstance(found, feature_type):
raise AssertionError(f'Found {found} did not match type "{feature_type}"')
return found
|
Finds declared feature.
Identification is done based on feature type and key.
Args:
app (web.Application):
The current Aiohttp application.
feature_type (Type[Any]):
The Python type of the desired feature.
If specified, it will be checked against the found feature.
key (Hashable):
A specific identifier for the desired feature.
Defaults to `feature_type`
Returns:
Any: The feature found for the combination of `feature_type` and `key`
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/features.py#L56-L92
| null |
"""
Registers and gets features added to Aiohttp by brewblox services.
"""
from abc import ABC, abstractmethod
from enum import Enum, auto
from typing import Any, Hashable, Type
from aiohttp import web
FEATURES_KEY = '#features'
def add(app: web.Application,
        feature: Any,
        key: Hashable = None,
        exist_ok: bool = False
        ):
    """
    Registers a new feature on the app.

    Features can either be registered as the default feature for their class,
    or under an explicit key.

    Args:
        app (web.Application):
            The current Aiohttp application.

        feature (Any):
            The new feature that should be registered.
            It is recommended, but not required to use a `ServiceFeature`.

        key (Hashable, optional):
            The key under which the feature should be registered.
            Defaults to `type(feature)`.

        exist_ok (bool):
            If truthy, registering an already-present key is a silent no-op.
            Otherwise, a KeyError is raised.
    """
    # Lazily create the registry dict on first registration
    registry = app.setdefault(FEATURES_KEY, dict())
    ident = key or type(feature)

    if ident in registry:
        if not exist_ok:
            raise KeyError(f'Feature "{ident}" already registered')
        return

    registry[ident] = feature
class Startup(Enum):
    # Lifecycle-hook registration policy, consumed by ServiceFeature.__init__():
    # MANAGED    - always register Aiohttp lifecycle hooks
    # MANUAL     - never register; caller drives startup()/shutdown()
    # AUTODETECT - register only if the app has not been frozen yet
    MANAGED = auto()
    MANUAL = auto()
    AUTODETECT = auto()
class ServiceFeature(ABC):
    """Base class for long-lived Aiohttp handler classes.

    For classes with async functionality, the (synchronous) `__init__()` and
    `__del__()` functions may not be sufficient. Aiohttp offers comparable
    init/deinit hooks, but inside the context of a running event loop.

    ServiceFeature registers `self.startup(app)`, `self.before_shutdown(app)`,
    and `self.shutdown(app)` as lifecycle callbacks. They will be called by
    Aiohttp at the appropriate moment. By overriding these functions,
    subclasses can perform initialization/deinitialization that requires an
    event loop.

    Note: Aiohttp will not accept registration of new callbacks after it
    started running. Startup management can be adjusted by using the
    `startup` argument in `ServiceFeature.__init__()`.
    """

    def __init__(self, app: web.Application, startup=Startup.AUTODETECT):
        """
        ServiceFeature constructor.

        Args:
            app (web.Application):
                The Aiohttp application with which the feature should be associated.

            startup (Startup):
                How feature lifecycle management should be handled. Default is AUTODETECT.

                MANAGED: Feature always registers lifecycle hooks.
                    This will raise an exception when creating
                    the feature while the application is running.

                MANUAL: Feature will not register lifecycle hooks.
                    startup() and shutdown() must be called manually.

                AUTODETECT: Feature will register lifecycle hooks only if app is not running.
                    Behaves like MANAGED before application start,
                    and like MANUAL after application start.
        """
        self.__active_app: web.Application = app

        # `app.frozen` is True once Aiohttp has started; appending hooks
        # after that point would raise inside Aiohttp.
        if any([
            startup == Startup.MANAGED,
            startup == Startup.AUTODETECT and not app.frozen
        ]):
            app.on_startup.append(self.startup)
            app.on_shutdown.append(self.before_shutdown)
            app.on_cleanup.append(self.shutdown)

    @property
    def app(self) -> web.Application:
        """Currently active `web.Application`

        Returns:
            web.Application: The current app.
        """
        return self.__active_app

    @abstractmethod
    async def startup(self, app: web.Application):
        """Lifecycle hook for initializing the feature in an async context.

        Subclasses are expected to override this function.
        Depending on the `startup` argument in `__init__()`,
        `startup()` will be called when Aiohttp starts running.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover

    async def before_shutdown(self, app: web.Application):
        """Lifecycle hook for preparing to shut down the feature.

        Subclasses may override this function, but it is not mandatory.
        Depending on the `startup` argument in `__init__()`,
        `before_shutdown()` will be called when Aiohttp is closing.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover

    @abstractmethod
    async def shutdown(self, app: web.Application = None):
        """Lifecycle hook for shutting down the feature before the event loop is closed.

        Subclasses are expected to override this function.
        Depending on the `startup` argument in `__init__()`,
        `shutdown()` will be called when Aiohttp is closing.

        Args:
            app (web.Application):
                Current Aiohttp application.
        """
        pass  # pragma: no cover
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
post_publish
|
python
|
async def post_publish(request):
args = await request.json()
try:
await get_publisher(request.app).publish(
args['exchange'],
args['routing'],
args['message']
)
return web.Response()
except Exception as ex:
warnings.warn(f'Unable to publish {args}: {ex}')
return web.Response(body='Event bus connection refused', status=500)
|
---
tags:
- Events
summary: Publish event.
description: Publish a new event message to the event bus.
operationId: events.publish
produces:
- text/plain
parameters:
-
in: body
name: body
description: Event message
required: true
schema:
type: object
properties:
exchange:
type: string
routing:
type: string
message:
type: object
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L462-L499
|
[
"def get_publisher(app: web.Application) -> 'EventPublisher':\n return features.get(app, EventPublisher)\n"
] |
"""
Offers event publishing and subscription for service implementations.
Example use:
from brewblox_service import scheduler, events
scheduler.setup(app)
events.setup(app)
async def on_message(subscription, key, message):
print(f'Message from {subscription}: {key} = {message} ({type(message)})')
listener = events.get_listener(app)
listener.subscribe('brewblox', 'controller', on_message=on_message)
listener.subscribe('brewblox', 'controller.*', on_message=on_message)
listener.subscribe('brewblox', 'controller.#', on_message=on_message)
publisher = events.get_publisher(app)
await publisher.publish('brewblox', 'controller.value', {'example': True})
"""
import asyncio
import json
import warnings
from datetime import timedelta
from typing import Callable, Coroutine, List, Union
import aioamqp
from aiohttp import web
from brewblox_service import brewblox_logger, features, scheduler, strex
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
EVENT_CALLBACK_ = Callable[['EventSubscription', str, Union[dict, str]], Coroutine]
ExchangeType_ = str
RECONNECT_INTERVAL = timedelta(seconds=1)
PENDING_WAIT_TIMEOUT = timedelta(seconds=5)
def setup(app: web.Application):
    """Registers the eventbus listener and publisher features, and adds the REST routes."""
    features.add(app, EventListener(app))
    features.add(app, EventPublisher(app))
    app.router.add_routes(routes)
def get_listener(app: web.Application) -> 'EventListener':
    """Returns the EventListener registered on `app` by `setup()`."""
    return features.get(app, EventListener)
def get_publisher(app: web.Application) -> 'EventPublisher':
    """Returns the EventPublisher registered on `app` by `setup()`."""
    return features.get(app, EventPublisher)
##############################################################################
# Incoming events
##############################################################################
async def _default_on_message(sub: 'EventSubscription', key: str, message: Union[dict, str]):
    # Fallback callback: used when a subscription is created without `on_message`.
    # Only logs the received event.
    LOGGER.info(f'Unhandled event: subscription={sub}, key={key}, message={message}')
class EventSubscription():
    """
    Subscription class for receiving AMQP messages.

    This class should not be instantiated directly.
    To subscribe to AMQP messages, use `EventListener.subscribe()`

    The `on_message` property can safely be changed while the subscription is active.
    It will be used for the next received message.
    """

    def __init__(self,
                 exchange_name: str,
                 routing: str,
                 exchange_type: ExchangeType_ = 'topic',
                 on_message: EVENT_CALLBACK_ = None):
        self._routing = routing
        self._exchange_name = exchange_name
        self._exchange_type = exchange_type
        self.on_message = on_message

    def __str__(self):
        return f'<{self._routing} @ {self._exchange_name}>'

    @property
    def on_message(self) -> EVENT_CALLBACK_:
        # Callback invoked for each received message
        return self._on_message

    @on_message.setter
    def on_message(self, f: EVENT_CALLBACK_):
        # Falsy callbacks fall back to the module-level logging default
        self._on_message = f if f else _default_on_message

    async def declare_on_remote(self, channel: aioamqp.channel.Channel):
        """Declares the exchange, an exclusive queue, and the consumer on `channel`."""
        LOGGER.info(f'Declaring event bus subscription {self} on {channel}')
        await channel.exchange_declare(
            exchange_name=self._exchange_name,
            type_name=self._exchange_type,
            auto_delete=True
        )
        # Server-named exclusive queue: private to this connection
        queue_info = await channel.queue_declare(exclusive=True)
        queue_name = queue_info['queue']
        await channel.queue_bind(
            queue_name=queue_name,
            exchange_name=self._exchange_name,
            routing_key=self._routing
        )
        await channel.basic_consume(
            callback=self._relay,
            queue_name=queue_name
        )

    async def _relay(self,
                     channel: aioamqp.channel.Channel,
                     body: str,
                     envelope: aioamqp.envelope.Envelope,
                     properties: aioamqp.properties.Properties):
        """Relays incoming messages between the queue and the user callback"""
        # Message is acked before the callback runs, and callback errors are
        # swallowed after logging: delivery is at-most-once.
        try:
            await channel.basic_client_ack(envelope.delivery_tag)
            await self.on_message(self, envelope.routing_key, json.loads(body))
        except Exception as ex:
            LOGGER.error(f'Exception relaying message in {self}: {ex}')
class EventListener(features.ServiceFeature):
    """
    Allows subscribing to AMQP messages published to a central event bus.

    `EventListener` will maintain a persistent connection to the AMQP host,
    and ensures that all subscriptions remain valid if the connection is lost and reestablished.
    """

    def __init__(self,
                 app: web.Application,
                 host: str = None,
                 port: int = None
                 ):
        super().__init__(app)

        self._host: str = host or app['config']['eventbus_host']
        self._port: int = port or app['config']['eventbus_port']

        # Asyncio queues need a context loop
        # We'll initialize self._pending when we have one
        self._pending_pre_async: List[EventSubscription] = []
        self._pending: asyncio.Queue = None
        self._subscriptions: List[EventSubscription] = []

        self._loop: asyncio.BaseEventLoop = None
        self._task: asyncio.Task = None

    def __str__(self):
        return f'<{type(self).__name__} for "{self._host}">'

    @property
    def running(self):
        # True while the background listen task exists and has not finished
        return bool(self._task and not self._task.done())

    def _lazy_listen(self):
        """
        Ensures that the listener task only runs when actually needed.
        This function is a no-op if any of the preconditions is not met.

        Preconditions are:
        * The application is running (self._loop is set)
        * The task is not already running
        * There are subscriptions: either pending, or active
        """
        if all([
            self._loop,
            not self.running,
            self._subscriptions or (self._pending and not self._pending.empty()),
        ]):
            self._task = self._loop.create_task(self._listen())

    async def _listen(self):
        # Background task: connects to the bus, redeclares known subscriptions,
        # then consumes new subscriptions from the pending queue.
        # Reconnects forever until cancelled.
        LOGGER.info(f'{self} now listening')
        retrying = False
        while True:
            try:
                transport, protocol = await aioamqp.connect(
                    host=self._host,
                    port=self._port,
                )
                channel = await protocol.channel()
                LOGGER.info(f'Connected {self}')
                # Declare all current subscriptions if reconnecting
                [await sub.declare_on_remote(channel) for sub in self._subscriptions]
                while True:
                    subscription = None
                    try:
                        await protocol.ensure_open()
                        retrying = False
                        subscription = await asyncio.wait_for(
                            self._pending.get(),
                            timeout=PENDING_WAIT_TIMEOUT.seconds
                        )
                    except asyncio.CancelledError:
                        # Exiting task
                        raise
                    except asyncio.TimeoutError:  # pragma: no cover
                        # Timeout ensures that connection state is checked at least once per timeout
                        continue
                    try:
                        await protocol.ensure_open()
                        await subscription.declare_on_remote(channel)
                        self._subscriptions.append(subscription)
                    except Exception:
                        # Put subscription back in queue
                        # We'll declare it after reconnect
                        self._pending.put_nowait(subscription)
                        raise
            except asyncio.CancelledError:
                LOGGER.info(f'Cancelled {self}')
                break
            except Exception as ex:
                # Warn only on the first failure of a retry streak
                if not retrying:
                    warnings.warn(f'Connection error in {self}: {strex(ex)}')
                    retrying = True
                await asyncio.sleep(RECONNECT_INTERVAL.seconds)
                continue
            finally:
                try:
                    await protocol.close()
                    transport.close()
                except Exception:  # pragma: no cover
                    pass

    async def startup(self, app: web.Application):
        await self.shutdown(app)

        # Initialize the async queue now we know which loop we're using
        self._loop = asyncio.get_event_loop()
        self._pending = asyncio.Queue()

        # Transfer all subscriptions that were made before the event loop started
        [self._pending.put_nowait(s) for s in self._pending_pre_async]
        # We won't be needing this anymore
        self._pending_pre_async = None

        self._lazy_listen()

    async def shutdown(self, app: web.Application):
        LOGGER.info(f'Closing {self}')
        await scheduler.cancel_task(app, self._task)
        self._loop = None
        self._task = None

    def subscribe(self,
                  exchange_name: str,
                  routing: str,
                  exchange_type: ExchangeType_ = 'topic',
                  on_message: EVENT_CALLBACK_ = None
                  ) -> EventSubscription:
        """Adds a new event subscription to the listener.

        Actual queue declaration to the remote message server is done when connected.
        If the listener is not currently connected, it defers declaration.

        All existing subscriptions are redeclared on the remote if `EventListener`
        loses and recreates the connection.

        Args:
            exchange_name (str):
                Name of the AMQP exchange. Messages are always published to a specific exchange.

            routing (str):
                Filter messages passing through the exchange.
                A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.

            exchange_type (ExchangeType_, optional):
                If the exchange does not yet exist, it will be created with this type.
                Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.

            on_message (EVENT_CALLBACK_, optional):
                The function to be called when a new message is received.
                If `on_message` is none, it will default to logging the message.

        Returns:
            EventSubscription:
                The newly created subscription.
                This value can safely be discarded: EventListener keeps its own reference.
        """
        sub = EventSubscription(
            exchange_name,
            routing,
            exchange_type,
            on_message=on_message
        )

        if self._pending is not None:
            self._pending.put_nowait(sub)
        else:
            # No event loop yet: park the subscription until startup()
            self._pending_pre_async.append(sub)
            LOGGER.info(f'Deferred event bus subscription: [{sub}]')

        self._lazy_listen()
        return sub
##############################################################################
# Outgoing events
##############################################################################
class EventPublisher(features.ServiceFeature):
    """
    Allows publishing AMQP messages to a central eventbus.

    `EventPublisher` is associated with a single eventbus address,
    but will only create a connection when attempting to publish.
    Connections are re-used for subsequent `publish()` calls.
    """

    def __init__(self,
                 app: web.Application,
                 host: str = None,
                 port: int = None
                 ):
        super().__init__(app)

        self._host: str = host or app['config']['eventbus_host']
        self._port: int = port or app['config']['eventbus_port']
        self._reset()

    @property
    def connected(self):
        # Truthy only when transport, protocol, and channel all exist
        return self._transport and self._protocol and self._channel

    def __str__(self):
        return f'<{type(self).__name__} for "{self._host}">'

    def _reset(self):
        # Clear all connection state; a new channel is made on demand
        self._transport: asyncio.Transport = None
        self._protocol: aioamqp.AmqpProtocol = None
        self._channel: aioamqp.channel.Channel = None

    async def _close(self):
        LOGGER.info(f'Closing {self}')
        try:
            await self._protocol.close()
            self._transport.close()
        except Exception:
            pass
        finally:
            self._reset()

    async def _ensure_channel(self):
        # Lazily (re)establish the connection and channel, then verify it is open
        if not self.connected:
            self._transport, self._protocol = await aioamqp.connect(
                host=self._host,
                port=self._port,
                loop=self.app.loop
            )
            self._channel = await self._protocol.channel()
        try:
            await self._protocol.ensure_open()
        except aioamqp.exceptions.AioamqpException:
            await self._close()
            raise

    async def startup(self, *_):
        pass  # Connections are created when attempting to publish

    async def shutdown(self, *_):
        await self._close()

    async def publish(self,
                      exchange: str,
                      routing: str,
                      message: Union[str, dict],
                      exchange_type: ExchangeType_ = 'topic'):
        """
        Publish a new event message.

        Connections are created automatically when calling `publish()`,
        and will attempt to reconnect if connection was lost.

        For more information on publishing AMQP messages,
        see https://www.rabbitmq.com/tutorials/tutorial-three-python.html

        Args:
            exchange (str):
                The AMQP message exchange to publish the message to.
                A new exchange will be created if it does not yet exist.

            routing (str):
                The routing identification with which the message should be published.
                Subscribers use routing information for fine-grained filtering.
                Routing can be expressed as a '.'-separated path.

            message (Union[str, dict]):
                The message body. It will be serialized before transmission.

            exchange_type (ExchangeType_, optional):
                When publishing to a previously undeclared exchange, it will be created.
                `exchange_type` defines how the exchange distributes messages between subscribers.
                The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.

        Raises:
            aioamqp.exceptions.AioamqpException:
                * Failed to connect to AMQP host
                * Failed to send message
                * `exchange` already exists, but has a different `exchange_type`
        """
        try:
            await self._ensure_channel()
        except Exception:
            # If server has restarted since our last attempt, ensure channel will fail (old connection invalid)
            # Retry once to check whether a new connection can be made
            await self._ensure_channel()

        # json.dumps() also correctly handles strings
        data = json.dumps(message).encode()

        await self._channel.exchange_declare(
            exchange_name=exchange,
            type_name=exchange_type,
            auto_delete=True
        )

        await self._channel.basic_publish(
            payload=data,
            exchange_name=exchange,
            routing_key=routing
        )
##############################################################################
# REST endpoints
##############################################################################
@routes.post('/_debug/publish')
@routes.post('/_debug/subscribe')
async def post_subscribe(request):
"""
---
tags:
- Events
summary: Subscribe to events.
operationId: events.subscribe
produces:
- text/plain
parameters:
-
in: body
name: body
description: Event message
required: true
schema:
type: object
properties:
exchange:
type: string
routing:
type: string
"""
args = await request.json()
get_listener(request.app).subscribe(
args['exchange'],
args['routing']
)
return web.Response()
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
post_subscribe
|
python
|
async def post_subscribe(request):
args = await request.json()
get_listener(request.app).subscribe(
args['exchange'],
args['routing']
)
return web.Response()
|
---
tags:
- Events
summary: Subscribe to events.
operationId: events.subscribe
produces:
- text/plain
parameters:
-
in: body
name: body
description: Event message
required: true
schema:
type: object
properties:
exchange:
type: string
routing:
type: string
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L503-L531
|
[
"def get_listener(app: web.Application) -> 'EventListener':\n return features.get(app, EventListener)\n"
] |
"""
Offers event publishing and subscription for service implementations.
Example use:
from brewblox_service import scheduler, events
scheduler.setup(app)
events.setup(app)
async def on_message(subscription, key, message):
print(f'Message from {subscription}: {key} = {message} ({type(message)})')
listener = events.get_listener(app)
listener.subscribe('brewblox', 'controller', on_message=on_message)
listener.subscribe('brewblox', 'controller.*', on_message=on_message)
listener.subscribe('brewblox', 'controller.#', on_message=on_message)
publisher = events.get_publisher(app)
await publisher.publish('brewblox', 'controller.value', {'example': True})
"""
import asyncio
import json
import warnings
from datetime import timedelta
from typing import Callable, Coroutine, List, Union
import aioamqp
from aiohttp import web
from brewblox_service import brewblox_logger, features, scheduler, strex
LOGGER = brewblox_logger(__name__)
routes = web.RouteTableDef()
EVENT_CALLBACK_ = Callable[['EventSubscription', str, Union[dict, str]], Coroutine]
ExchangeType_ = str
RECONNECT_INTERVAL = timedelta(seconds=1)
PENDING_WAIT_TIMEOUT = timedelta(seconds=5)
def setup(app: web.Application):
features.add(app, EventListener(app))
features.add(app, EventPublisher(app))
app.router.add_routes(routes)
def get_listener(app: web.Application) -> 'EventListener':
return features.get(app, EventListener)
def get_publisher(app: web.Application) -> 'EventPublisher':
return features.get(app, EventPublisher)
##############################################################################
# Incoming events
##############################################################################
async def _default_on_message(sub: 'EventSubscription', key: str, message: Union[dict, str]):
LOGGER.info(f'Unhandled event: subscription={sub}, key={key}, message={message}')
class EventSubscription():
"""
Subscription class for receiving AMQP messages.
This class should not be instantiated directly.
To subscribe to AMQP messages, use `EventListener.subscribe()`
The `on_message` property can safely be changed while the subscription is active.
It will be used for the next received message.
"""
def __init__(self,
exchange_name: str,
routing: str,
exchange_type: ExchangeType_ = 'topic',
on_message: EVENT_CALLBACK_ = None):
self._routing = routing
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self.on_message = on_message
def __str__(self):
return f'<{self._routing} @ {self._exchange_name}>'
@property
def on_message(self) -> EVENT_CALLBACK_:
return self._on_message
@on_message.setter
def on_message(self, f: EVENT_CALLBACK_):
self._on_message = f if f else _default_on_message
async def declare_on_remote(self, channel: aioamqp.channel.Channel):
LOGGER.info(f'Declaring event bus subscription {self} on {channel}')
await channel.exchange_declare(
exchange_name=self._exchange_name,
type_name=self._exchange_type,
auto_delete=True
)
queue_info = await channel.queue_declare(exclusive=True)
queue_name = queue_info['queue']
await channel.queue_bind(
queue_name=queue_name,
exchange_name=self._exchange_name,
routing_key=self._routing
)
await channel.basic_consume(
callback=self._relay,
queue_name=queue_name
)
async def _relay(self,
channel: aioamqp.channel.Channel,
body: str,
envelope: aioamqp.envelope.Envelope,
properties: aioamqp.properties.Properties):
"""Relays incoming messages between the queue and the user callback"""
try:
await channel.basic_client_ack(envelope.delivery_tag)
await self.on_message(self, envelope.routing_key, json.loads(body))
except Exception as ex:
LOGGER.error(f'Exception relaying message in {self}: {ex}')
class EventListener(features.ServiceFeature):
"""
Allows subscribing to AMQP messages published to a central event bus.
`EventListener` will maintain a persistent connection to the AMQP host,
and ensures that all subscriptions remain valid if the connection is lost and reestablished.
"""
def __init__(self,
app: web.Application,
host: str = None,
port: int = None
):
super().__init__(app)
self._host: str = host or app['config']['eventbus_host']
self._port: int = port or app['config']['eventbus_port']
# Asyncio queues need a context loop
# We'll initialize self._pending when we have one
self._pending_pre_async: List[EventSubscription] = []
self._pending: asyncio.Queue = None
self._subscriptions: List[EventSubscription] = []
self._loop: asyncio.BaseEventLoop = None
self._task: asyncio.Task = None
def __str__(self):
return f'<{type(self).__name__} for "{self._host}">'
@property
def running(self):
return bool(self._task and not self._task.done())
def _lazy_listen(self):
"""
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
"""
if all([
self._loop,
not self.running,
self._subscriptions or (self._pending and not self._pending.empty()),
]):
self._task = self._loop.create_task(self._listen())
async def _listen(self):
LOGGER.info(f'{self} now listening')
retrying = False
while True:
try:
transport, protocol = await aioamqp.connect(
host=self._host,
port=self._port,
)
channel = await protocol.channel()
LOGGER.info(f'Connected {self}')
# Declare all current subscriptions if reconnecting
[await sub.declare_on_remote(channel) for sub in self._subscriptions]
while True:
subscription = None
try:
await protocol.ensure_open()
retrying = False
subscription = await asyncio.wait_for(
self._pending.get(),
timeout=PENDING_WAIT_TIMEOUT.seconds
)
except asyncio.CancelledError:
# Exiting task
raise
except asyncio.TimeoutError: # pragma: no cover
# Timeout ensures that connection state is checked at least once per timeout
continue
try:
await protocol.ensure_open()
await subscription.declare_on_remote(channel)
self._subscriptions.append(subscription)
except Exception:
# Put subscription back in queue
# We'll declare it after reconnect
self._pending.put_nowait(subscription)
raise
except asyncio.CancelledError:
LOGGER.info(f'Cancelled {self}')
break
except Exception as ex:
if not retrying:
warnings.warn(f'Connection error in {self}: {strex(ex)}')
retrying = True
await asyncio.sleep(RECONNECT_INTERVAL.seconds)
continue
finally:
try:
await protocol.close()
transport.close()
except Exception: # pragma: no cover
pass
async def startup(self, app: web.Application):
await self.shutdown(app)
# Initialize the async queue now we know which loop we're using
self._loop = asyncio.get_event_loop()
self._pending = asyncio.Queue()
# Transfer all subscriptions that were made before the event loop started
[self._pending.put_nowait(s) for s in self._pending_pre_async]
# We won't be needing this anymore
self._pending_pre_async = None
self._lazy_listen()
async def shutdown(self, app: web.Application):
LOGGER.info(f'Closing {self}')
await scheduler.cancel_task(app, self._task)
self._loop = None
self._task = None
def subscribe(self,
exchange_name: str,
routing: str,
exchange_type: ExchangeType_ = 'topic',
on_message: EVENT_CALLBACK_ = None
) -> EventSubscription:
"""Adds a new event subscription to the listener.
Actual queue declaration to the remote message server is done when connected.
If the listener is not currently connected, it defers declaration.
All existing subscriptions are redeclared on the remote if `EventListener`
loses and recreates the connection.
Args:
exchange_name (str):
Name of the AMQP exchange. Messages are always published to a specific exchange.
routing (str):
Filter messages passing through the exchange.
A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.
exchange_type (ExchangeType_, optional):
If the exchange does not yet exist, it will be created with this type.
Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.
on_message (EVENT_CALLBACK_, optional):
The function to be called when a new message is received.
If `on_message` is none, it will default to logging the message.
Returns:
EventSubscription:
The newly created subscription.
This value can safely be discarded: EventListener keeps its own reference.
"""
sub = EventSubscription(
exchange_name,
routing,
exchange_type,
on_message=on_message
)
if self._pending is not None:
self._pending.put_nowait(sub)
else:
self._pending_pre_async.append(sub)
LOGGER.info(f'Deferred event bus subscription: [{sub}]')
self._lazy_listen()
return sub
##############################################################################
# Outgoing events
##############################################################################
class EventPublisher(features.ServiceFeature):
"""
Allows publishing AMQP messages to a central eventbus.
`EventPublisher` is associated with a single eventbus address,
but will only create a connection when attempting to publish.
Connections are re-used for subsequent `publish()` calls.
"""
def __init__(self,
app: web.Application,
host: str = None,
port: int = None
):
super().__init__(app)
self._host: str = host or app['config']['eventbus_host']
self._port: int = port or app['config']['eventbus_port']
self._reset()
@property
def connected(self):
return self._transport and self._protocol and self._channel
def __str__(self):
return f'<{type(self).__name__} for "{self._host}">'
def _reset(self):
self._transport: asyncio.Transport = None
self._protocol: aioamqp.AmqpProtocol = None
self._channel: aioamqp.channel.Channel = None
async def _close(self):
LOGGER.info(f'Closing {self}')
try:
await self._protocol.close()
self._transport.close()
except Exception:
pass
finally:
self._reset()
async def _ensure_channel(self):
if not self.connected:
self._transport, self._protocol = await aioamqp.connect(
host=self._host,
port=self._port,
loop=self.app.loop
)
self._channel = await self._protocol.channel()
try:
await self._protocol.ensure_open()
except aioamqp.exceptions.AioamqpException:
await self._close()
raise
async def startup(self, *_):
pass # Connections are created when attempting to publish
async def shutdown(self, *_):
await self._close()
async def publish(self,
exchange: str,
routing: str,
message: Union[str, dict],
exchange_type: ExchangeType_ = 'topic'):
"""
Publish a new event message.
Connections are created automatically when calling `publish()`,
and will attempt to reconnect if connection was lost.
For more information on publishing AMQP messages,
see https://www.rabbitmq.com/tutorials/tutorial-three-python.html
Args:
exchange (str):
The AMQP message exchange to publish the message to.
A new exchange will be created if it does not yet exist.
routing (str):
The routing identification with which the message should be published.
Subscribers use routing information for fine-grained filtering.
Routing can be expressed as a '.'-separated path.
message (Union[str, dict]):
The message body. It will be serialized before transmission.
exchange_type (ExchangeType_, optional):
When publishing to a previously undeclared exchange, it will be created.
`exchange_type` defines how the exchange distributes messages between subscribers.
The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.
Raises:
aioamqp.exceptions.AioamqpException:
* Failed to connect to AMQP host
* Failed to send message
* `exchange` already exists, but has a different `exchange_type`
"""
try:
await self._ensure_channel()
except Exception:
# If server has restarted since our last attempt, ensure channel will fail (old connection invalid)
# Retry once to check whether a new connection can be made
await self._ensure_channel()
# json.dumps() also correctly handles strings
data = json.dumps(message).encode()
await self._channel.exchange_declare(
exchange_name=exchange,
type_name=exchange_type,
auto_delete=True
)
await self._channel.basic_publish(
payload=data,
exchange_name=exchange,
routing_key=routing
)
##############################################################################
# REST endpoints
##############################################################################
@routes.post('/_debug/publish')
async def post_publish(request):
"""
---
tags:
- Events
summary: Publish event.
description: Publish a new event message to the event bus.
operationId: events.publish
produces:
- text/plain
parameters:
-
in: body
name: body
description: Event message
required: true
schema:
type: object
properties:
exchange:
type: string
routing:
type: string
message:
type: object
"""
args = await request.json()
try:
await get_publisher(request.app).publish(
args['exchange'],
args['routing'],
args['message']
)
return web.Response()
except Exception as ex:
warnings.warn(f'Unable to publish {args}: {ex}')
return web.Response(body='Event bus connection refused', status=500)
@routes.post('/_debug/subscribe')
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
EventSubscription._relay
|
python
|
async def _relay(self,
channel: aioamqp.channel.Channel,
body: str,
envelope: aioamqp.envelope.Envelope,
properties: aioamqp.properties.Properties):
try:
await channel.basic_client_ack(envelope.delivery_tag)
await self.on_message(self, envelope.routing_key, json.loads(body))
except Exception as ex:
LOGGER.error(f'Exception relaying message in {self}: {ex}')
|
Relays incoming messages between the queue and the user callback
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L122-L132
| null |
class EventSubscription():
"""
Subscription class for receiving AMQP messages.
This class should not be instantiated directly.
To subscribe to AMQP messages, use `EventListener.subscribe()`
The `on_message` property can safely be changed while the subscription is active.
It will be used for the next received message.
"""
def __init__(self,
exchange_name: str,
routing: str,
exchange_type: ExchangeType_ = 'topic',
on_message: EVENT_CALLBACK_ = None):
self._routing = routing
self._exchange_name = exchange_name
self._exchange_type = exchange_type
self.on_message = on_message
def __str__(self):
return f'<{self._routing} @ {self._exchange_name}>'
@property
def on_message(self) -> EVENT_CALLBACK_:
return self._on_message
@on_message.setter
def on_message(self, f: EVENT_CALLBACK_):
self._on_message = f if f else _default_on_message
async def declare_on_remote(self, channel: aioamqp.channel.Channel):
LOGGER.info(f'Declaring event bus subscription {self} on {channel}')
await channel.exchange_declare(
exchange_name=self._exchange_name,
type_name=self._exchange_type,
auto_delete=True
)
queue_info = await channel.queue_declare(exclusive=True)
queue_name = queue_info['queue']
await channel.queue_bind(
queue_name=queue_name,
exchange_name=self._exchange_name,
routing_key=self._routing
)
await channel.basic_consume(
callback=self._relay,
queue_name=queue_name
)
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
EventListener._lazy_listen
|
python
|
def _lazy_listen(self):
if all([
self._loop,
not self.running,
self._subscriptions or (self._pending and not self._pending.empty()),
]):
self._task = self._loop.create_task(self._listen())
|
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L169-L184
| null |
class EventListener(features.ServiceFeature):
"""
Allows subscribing to AMQP messages published to a central event bus.
`EventListener` will maintain a persistent connection to the AMQP host,
and ensures that all subscriptions remain valid if the connection is lost and reestablished.
"""
def __init__(self,
app: web.Application,
host: str = None,
port: int = None
):
super().__init__(app)
self._host: str = host or app['config']['eventbus_host']
self._port: int = port or app['config']['eventbus_port']
# Asyncio queues need a context loop
# We'll initialize self._pending when we have one
self._pending_pre_async: List[EventSubscription] = []
self._pending: asyncio.Queue = None
self._subscriptions: List[EventSubscription] = []
self._loop: asyncio.BaseEventLoop = None
self._task: asyncio.Task = None
def __str__(self):
return f'<{type(self).__name__} for "{self._host}">'
@property
def running(self):
return bool(self._task and not self._task.done())
async def _listen(self):
LOGGER.info(f'{self} now listening')
retrying = False
while True:
try:
transport, protocol = await aioamqp.connect(
host=self._host,
port=self._port,
)
channel = await protocol.channel()
LOGGER.info(f'Connected {self}')
# Declare all current subscriptions if reconnecting
[await sub.declare_on_remote(channel) for sub in self._subscriptions]
while True:
subscription = None
try:
await protocol.ensure_open()
retrying = False
subscription = await asyncio.wait_for(
self._pending.get(),
timeout=PENDING_WAIT_TIMEOUT.seconds
)
except asyncio.CancelledError:
# Exiting task
raise
except asyncio.TimeoutError: # pragma: no cover
# Timeout ensures that connection state is checked at least once per timeout
continue
try:
await protocol.ensure_open()
await subscription.declare_on_remote(channel)
self._subscriptions.append(subscription)
except Exception:
# Put subscription back in queue
# We'll declare it after reconnect
self._pending.put_nowait(subscription)
raise
except asyncio.CancelledError:
LOGGER.info(f'Cancelled {self}')
break
except Exception as ex:
if not retrying:
warnings.warn(f'Connection error in {self}: {strex(ex)}')
retrying = True
await asyncio.sleep(RECONNECT_INTERVAL.seconds)
continue
finally:
try:
await protocol.close()
transport.close()
except Exception: # pragma: no cover
pass
async def startup(self, app: web.Application):
await self.shutdown(app)
# Initialize the async queue now we know which loop we're using
self._loop = asyncio.get_event_loop()
self._pending = asyncio.Queue()
# Transfer all subscriptions that were made before the event loop started
[self._pending.put_nowait(s) for s in self._pending_pre_async]
# We won't be needing this anymore
self._pending_pre_async = None
self._lazy_listen()
async def shutdown(self, app: web.Application):
LOGGER.info(f'Closing {self}')
await scheduler.cancel_task(app, self._task)
self._loop = None
self._task = None
def subscribe(self,
exchange_name: str,
routing: str,
exchange_type: ExchangeType_ = 'topic',
on_message: EVENT_CALLBACK_ = None
) -> EventSubscription:
"""Adds a new event subscription to the listener.
Actual queue declaration to the remote message server is done when connected.
If the listener is not currently connected, it defers declaration.
All existing subscriptions are redeclared on the remote if `EventListener`
loses and recreates the connection.
Args:
exchange_name (str):
Name of the AMQP exchange. Messages are always published to a specific exchange.
routing (str):
Filter messages passing through the exchange.
A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.
exchange_type (ExchangeType_, optional):
If the exchange does not yet exist, it will be created with this type.
Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.
on_message (EVENT_CALLBACK_, optional):
The function to be called when a new message is received.
If `on_message` is none, it will default to logging the message.
Returns:
EventSubscription:
The newly created subscription.
This value can safely be discarded: EventListener keeps its own reference.
"""
sub = EventSubscription(
exchange_name,
routing,
exchange_type,
on_message=on_message
)
if self._pending is not None:
self._pending.put_nowait(sub)
else:
self._pending_pre_async.append(sub)
LOGGER.info(f'Deferred event bus subscription: [{sub}]')
self._lazy_listen()
return sub
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
EventListener.subscribe
|
python
|
def subscribe(self,
exchange_name: str,
routing: str,
exchange_type: ExchangeType_ = 'topic',
on_message: EVENT_CALLBACK_ = None
) -> EventSubscription:
sub = EventSubscription(
exchange_name,
routing,
exchange_type,
on_message=on_message
)
if self._pending is not None:
self._pending.put_nowait(sub)
else:
self._pending_pre_async.append(sub)
LOGGER.info(f'Deferred event bus subscription: [{sub}]')
self._lazy_listen()
return sub
|
Adds a new event subscription to the listener.
Actual queue declaration to the remote message server is done when connected.
If the listener is not currently connected, it defers declaration.
All existing subscriptions are redeclared on the remote if `EventListener`
loses and recreates the connection.
Args:
exchange_name (str):
Name of the AMQP exchange. Messages are always published to a specific exchange.
routing (str):
Filter messages passing through the exchange.
A routing key is a '.'-separated string, and accepts '#' and '*' wildcards.
exchange_type (ExchangeType_, optional):
If the exchange does not yet exist, it will be created with this type.
Default is `topic`, acceptable values are `topic`, `fanout`, or `direct`.
on_message (EVENT_CALLBACK_, optional):
The function to be called when a new message is received.
If `on_message` is none, it will default to logging the message.
Returns:
EventSubscription:
The newly created subscription.
This value can safely be discarded: EventListener keeps its own reference.
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L275-L324
|
[
"def _lazy_listen(self):\n \"\"\"\n Ensures that the listener task only runs when actually needed.\n This function is a no-op if any of the preconditions is not met.\n\n Preconditions are:\n * The application is running (self._loop is set)\n * The task is not already running\n * There are subscriptions: either pending, or active\n \"\"\"\n if all([\n self._loop,\n not self.running,\n self._subscriptions or (self._pending and not self._pending.empty()),\n ]):\n self._task = self._loop.create_task(self._listen())\n"
] |
class EventListener(features.ServiceFeature):
"""
Allows subscribing to AMQP messages published to a central event bus.
`EventListener` will maintain a persistent connection to the AMQP host,
and ensures that all subscriptions remain valid if the connection is lost and reestablished.
"""
def __init__(self,
app: web.Application,
host: str = None,
port: int = None
):
super().__init__(app)
self._host: str = host or app['config']['eventbus_host']
self._port: int = port or app['config']['eventbus_port']
# Asyncio queues need a context loop
# We'll initialize self._pending when we have one
self._pending_pre_async: List[EventSubscription] = []
self._pending: asyncio.Queue = None
self._subscriptions: List[EventSubscription] = []
self._loop: asyncio.BaseEventLoop = None
self._task: asyncio.Task = None
def __str__(self):
return f'<{type(self).__name__} for "{self._host}">'
@property
def running(self):
return bool(self._task and not self._task.done())
def _lazy_listen(self):
"""
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
"""
if all([
self._loop,
not self.running,
self._subscriptions or (self._pending and not self._pending.empty()),
]):
self._task = self._loop.create_task(self._listen())
async def _listen(self):
LOGGER.info(f'{self} now listening')
retrying = False
while True:
try:
transport, protocol = await aioamqp.connect(
host=self._host,
port=self._port,
)
channel = await protocol.channel()
LOGGER.info(f'Connected {self}')
# Declare all current subscriptions if reconnecting
[await sub.declare_on_remote(channel) for sub in self._subscriptions]
while True:
subscription = None
try:
await protocol.ensure_open()
retrying = False
subscription = await asyncio.wait_for(
self._pending.get(),
timeout=PENDING_WAIT_TIMEOUT.seconds
)
except asyncio.CancelledError:
# Exiting task
raise
except asyncio.TimeoutError: # pragma: no cover
# Timeout ensures that connection state is checked at least once per timeout
continue
try:
await protocol.ensure_open()
await subscription.declare_on_remote(channel)
self._subscriptions.append(subscription)
except Exception:
# Put subscription back in queue
# We'll declare it after reconnect
self._pending.put_nowait(subscription)
raise
except asyncio.CancelledError:
LOGGER.info(f'Cancelled {self}')
break
except Exception as ex:
if not retrying:
warnings.warn(f'Connection error in {self}: {strex(ex)}')
retrying = True
await asyncio.sleep(RECONNECT_INTERVAL.seconds)
continue
finally:
try:
await protocol.close()
transport.close()
except Exception: # pragma: no cover
pass
async def startup(self, app: web.Application):
await self.shutdown(app)
# Initialize the async queue now we know which loop we're using
self._loop = asyncio.get_event_loop()
self._pending = asyncio.Queue()
# Transfer all subscriptions that were made before the event loop started
[self._pending.put_nowait(s) for s in self._pending_pre_async]
# We won't be needing this anymore
self._pending_pre_async = None
self._lazy_listen()
async def shutdown(self, app: web.Application):
LOGGER.info(f'Closing {self}')
await scheduler.cancel_task(app, self._task)
self._loop = None
self._task = None
|
BrewBlox/brewblox-service
|
brewblox_service/events.py
|
EventPublisher.publish
|
python
|
async def publish(self,
exchange: str,
routing: str,
message: Union[str, dict],
exchange_type: ExchangeType_ = 'topic'):
try:
await self._ensure_channel()
except Exception:
# If server has restarted since our last attempt, ensure channel will fail (old connection invalid)
# Retry once to check whether a new connection can be made
await self._ensure_channel()
# json.dumps() also correctly handles strings
data = json.dumps(message).encode()
await self._channel.exchange_declare(
exchange_name=exchange,
type_name=exchange_type,
auto_delete=True
)
await self._channel.basic_publish(
payload=data,
exchange_name=exchange,
routing_key=routing
)
|
Publish a new event message.
Connections are created automatically when calling `publish()`,
and will attempt to reconnect if connection was lost.
For more information on publishing AMQP messages,
see https://www.rabbitmq.com/tutorials/tutorial-three-python.html
Args:
exchange (str):
The AMQP message exchange to publish the message to.
A new exchange will be created if it does not yet exist.
routing (str):
The routing identification with which the message should be published.
Subscribers use routing information for fine-grained filtering.
Routing can be expressed as a '.'-separated path.
message (Union[str, dict]):
The message body. It will be serialized before transmission.
exchange_type (ExchangeType_, optional):
When publishing to a previously undeclared exchange, it will be created.
`exchange_type` defines how the exchange distributes messages between subscribers.
The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'.
Raises:
aioamqp.exceptions.AioamqpException:
* Failed to connect to AMQP host
* Failed to send message
* `exchange` already exists, but has a different `exchange_type`
|
train
|
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L396-L454
|
[
"async def _ensure_channel(self):\n if not self.connected:\n self._transport, self._protocol = await aioamqp.connect(\n host=self._host,\n port=self._port,\n loop=self.app.loop\n )\n self._channel = await self._protocol.channel()\n\n try:\n await self._protocol.ensure_open()\n except aioamqp.exceptions.AioamqpException:\n await self._close()\n raise\n"
] |
class EventPublisher(features.ServiceFeature):
"""
Allows publishing AMQP messages to a central eventbus.
`EventPublisher` is associated with a single eventbus address,
but will only create a connection when attempting to publish.
Connections are re-used for subsequent `publish()` calls.
"""
def __init__(self,
app: web.Application,
host: str = None,
port: int = None
):
super().__init__(app)
self._host: str = host or app['config']['eventbus_host']
self._port: int = port or app['config']['eventbus_port']
self._reset()
@property
def connected(self):
return self._transport and self._protocol and self._channel
def __str__(self):
return f'<{type(self).__name__} for "{self._host}">'
def _reset(self):
self._transport: asyncio.Transport = None
self._protocol: aioamqp.AmqpProtocol = None
self._channel: aioamqp.channel.Channel = None
async def _close(self):
LOGGER.info(f'Closing {self}')
try:
await self._protocol.close()
self._transport.close()
except Exception:
pass
finally:
self._reset()
async def _ensure_channel(self):
if not self.connected:
self._transport, self._protocol = await aioamqp.connect(
host=self._host,
port=self._port,
loop=self.app.loop
)
self._channel = await self._protocol.channel()
try:
await self._protocol.ensure_open()
except aioamqp.exceptions.AioamqpException:
await self._close()
raise
async def startup(self, *_):
pass # Connections are created when attempting to publish
async def shutdown(self, *_):
await self._close()
|
aloetesting/aloe_django
|
aloe_django/management/commands/harvest.py
|
Command.run_from_argv
|
python
|
def run_from_argv(self, argv):
self.test_runner = test_runner_class
super(Command, self).run_from_argv(argv)
|
Set the default Gherkin test runner for its options to be parsed.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/management/commands/harvest.py#L25-L31
| null |
class Command(TestCommand):
"""Django command: harvest"""
help = "Run Gherkin tests"
requires_system_checks = False
def handle(self, *test_labels, **options):
"""
Set the default Gherkin test runner.
"""
if not options.get('testrunner', None):
options['testrunner'] = test_runner_class
return super(Command, self).handle(*test_labels, **options)
|
aloetesting/aloe_django
|
aloe_django/management/commands/harvest.py
|
Command.handle
|
python
|
def handle(self, *test_labels, **options):
if not options.get('testrunner', None):
options['testrunner'] = test_runner_class
return super(Command, self).handle(*test_labels, **options)
|
Set the default Gherkin test runner.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/management/commands/harvest.py#L33-L40
| null |
class Command(TestCommand):
"""Django command: harvest"""
help = "Run Gherkin tests"
requires_system_checks = False
def run_from_argv(self, argv):
"""
Set the default Gherkin test runner for its options to be parsed.
"""
self.test_runner = test_runner_class
super(Command, self).run_from_argv(argv)
|
aloetesting/aloe_django
|
aloe_django/__init__.py
|
django_url
|
python
|
def django_url(step, url=None):
base_url = step.test.live_server_url
if url:
return urljoin(base_url, url)
else:
return base_url
|
The URL for a page from the test server.
:param step: A Gherkin step
:param url: If specified, the relative URL to append.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/__init__.py#L38-L51
| null |
# -*- coding: utf-8 -*-
"""
Django integration for Aloe
"""
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin # pylint:disable=import-error
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.staticfiles.testing import (
StaticLiveServerTestCase as LiveServerTestCase)
from aloe.testclass import TestCase as AloeTestCase
# pylint:disable=abstract-method
# Pylint cannot infer methods dynamically added by Aloe
class TestCase(LiveServerTestCase, AloeTestCase):
"""
Base test class for Django Gherkin tests.
Inherits from both :class:`aloe.testclass.TestCase` and
:class:`django.test.LiveServerTestCase`.
"""
pass
except (ImproperlyConfigured, ImportError):
# Probably running tests for Aloe-Django and Django isn't configured
pass
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_models_generator
|
python
|
def _models_generator():
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
|
Build a hash of model verbose names to models
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L29-L37
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
get_model
|
python
|
def get_model(name):
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
|
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L166-L176
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
reset_sequence
|
python
|
def reset_sequence(model):
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
|
Reset the ID sequence for a model.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L179-L185
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
      + r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
    """
    Test for the existence of a model matching the given data.

    Column names are included in a query to the database. To check model
    attributes that are not database columns (i.e. properties) prepend
    the column with an ``@`` sign.

    Example:

    .. code-block:: gherkin

        Then foos should be present in the database:
            | name   | @bar |
            | badger | baz  |

    See :func:`tests_existence`.
    """
    # Thin wrapper: delegate with should_exist=True.
    return _model_exists_step(self, model, True)
@step(STEP_PREFIX
      + r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
    """
    Assert that no model matching the step's hash table exists.

    Column names are queried against the database directly; prefix a
    column with ``@`` to compare a non-column attribute (a property)
    instead.

    Example:

    .. code-block:: gherkin

        Then foos should not be present in the database:
            | name   | @bar |
            | badger | baz  |

    See :func:`tests_existence`.
    """
    should_exist = False
    return _model_exists_step(self, model, should_exist)
def write_models(model, data, field):
    """
    :param model: a Django model class
    :param data: a list of hashes to build models from
    :param field: a field name to match models on, or None
    :returns: a list of models written

    Create or update models for each data hash.

    `field` is the field that is used to get the existing models out of
    the database to update them; otherwise, if ``field=None``, new
    models are created.

    Useful when registering custom tests with :func:`writes_models`.
    """
    written = []
    for hash_ in data:
        if not field:
            # No lookup field: every hash describes a brand-new row.
            instance = model.objects.create(**hash_)
        else:
            if field not in hash_:
                raise KeyError('The "%s" field is required for all update '
                               'operations' % field)
            # Fetch the existing row by the lookup field, then overwrite
            # its attributes with the values from the hash.
            instance = model.objects.get(**{field: hash_[field]})
            for attr, value in hash_.items():
                setattr(instance, attr, value)
            instance.save()
        written.append(instance)
    # Inserting rows with explicit PKs can leave the auto-increment
    # sequence behind the data, so reset it afterwards.
    reset_sequence(model)
    return written
def _write_models_step(self, model, field=None):
    """
    Write or update a model.

    Dispatches to a model-specific writer registered via
    :func:`writes_models`, falling back to the generic
    :func:`write_models`.
    """
    model = get_model(model)
    data = guess_types(self.hashes)
    try:
        func = _WRITE_MODEL[model]
    except KeyError:
        # No custom writer registered: bind the model class so the
        # generic writer has the same (data, field) signature.
        func = partial(write_models, model)
    func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
    """
    Create models in the database.

    Syntax:

        I have `model` in the database:

    Example:

    .. code-block:: gherkin

        And I have foos in the database:
            | name | bar  |
            | Baz  | Quux |

    See :func:`writes_models`.
    """
    # field defaults to None -> create new rows, never update.
    return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
      'in the database:')
def _write_models_step_update(self, model, field):
    """
    Update existing models in the database, specifying a column to match on.

    Syntax:

        I update `model` by `key` in the database:

    Example:

    .. code-block:: gherkin

        And I update existing foos by pk in the database:
            | pk | name |
            | 1  | Bar  |

    See :func:`writes_models`.
    """
    # The captured column name selects which existing rows to update.
    return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
      + r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
                                     rel_key, rel_value, model):
    """
    Create a new model linked to the given model.

    Syntax:

        And `model` with `field` "`value`" has `new model` in the database:

    Example:

    .. code-block:: gherkin

        And project with name "Ball Project" has goals in the database:
            | description                              |
            | To have fun playing with balls of twine  |
    """
    model = get_model(model)
    lookup = {rel_key: rel_value}
    rel_model = get_model(rel_model_name).objects.get(**lookup)
    data = guess_types(self.hashes)
    for hash_ in data:
        # Attach the related instance under its verbose-name key so the
        # writer receives it like any other field value.
        # (The regex group is already a str; the previous
        # "'%s' % rel_model_name" formatting was a no-op.)
        hash_[rel_model_name] = rel_model
    try:
        func = _WRITE_MODEL[model]
    except KeyError:
        func = partial(write_models, model)
    func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
      + r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
                           rel_key, rel_value, relation_name):
    """
    Link many-to-many models together.

    Syntax:

        And `model` with `field` "`value`" is linked to `other model` in
        the database:

    Example:

    .. code-block:: gherkin

        And article with name "Guidelines" is linked to tags in the database:
            | name   |
            | coding |
            | style  |
    """
    lookup = {rel_key: rel_value}
    rel_model = get_model(rel_model_name).objects.get(**lookup)
    relation = None
    # First resolve the relation among the model's declared many-to-many
    # fields, matching either the field name or its verbose name.
    for m2m in rel_model._meta.many_to_many:
        if relation_name in (m2m.name, m2m.verbose_name):
            relation = getattr(rel_model, m2m.name)
            break
    if not relation:
        # Fall back to a plain attribute lookup — presumably to cover
        # reverse M2M accessors; TODO confirm against callers.
        try:
            relation = getattr(rel_model, relation_name)
        except AttributeError:
            pass
    assert relation, \
        "%s does not have a many-to-many relation named '%s'" % (
            rel_model._meta.verbose_name.capitalize(),
            relation_name,
        )
    m2m_model = relation.model
    # Each hash must identify an existing related object to link;
    # objects.get() raises if it does not exist.
    for hash_ in self.hashes:
        relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
    """
    Count the number of models in the database.

    Example:

    .. code-block:: gherkin

        Then there should be 0 goals in the database
    """
    model = get_model(model)
    expected, actual = int(count), model.objects.count()
    message = "Expected %d %s, found %d." % (
        expected, model._meta.verbose_name_plural, actual)
    assert actual == expected, message
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_dump_model
|
python
|
def _dump_model(model, attrs=None):
    """
    Dump the model fields for debugging.

    Prints ``field=value`` pairs for every concrete field, any extra
    attributes requested via *attrs*, and each many-to-many relation as
    a comma-separated value list followed by its count.
    """
    fields = []
    for field in model._meta.fields:
        fields.append((field.name, str(getattr(model, field.name))))
    if attrs is not None:
        # Extra (non-column) attributes, e.g. properties.
        for attr in attrs:
            fields.append((attr, str(getattr(model, attr))))
    for field in model._meta.many_to_many:
        vals = getattr(model, field.name)
        fields.append((field.name, '{val} ({count})'.format(
            val=', '.join(map(str, vals.all())),
            count=vals.count(),
        )))
    print(', '.join(
        '{0}={1}'.format(field, value)
        for field, value in fields
    ))
|
Dump the model fields for debugging.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L188-L212
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
    """
    Yield (verbose name, model) pairs for every installed model,
    under both the singular and plural verbose names.
    """
    for config in apps.get_app_configs():
        for model_cls in config.get_models():
            meta = model_cls._meta
            yield str(meta.verbose_name).lower(), model_cls
            yield str(meta.verbose_name_plural).lower(), model_cls
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
    """
    Register a model-specific create and update function.

    This can then be accessed via the steps:

    .. code-block:: gherkin

        And I have foos in the database:
            | name | bar  |
            | Baz  | Quux |

        And I update existing foos by pk in the database:
            | pk | name |
            | 1  | Bar  |

    A method for a specific model can define a function
    ``write_badgers(data, field)``, which creates and updates the Badger
    model, decorating it with the ``writes_models(model_class)``
    decorator:

    .. code-block:: python

        @writes_models(Profile)
        def write_profile(data, field):
            '''Creates a Profile model'''
            for hash_ in data:
                if field:
                    profile = Profile.objects.get(**{field: hash_[field]})
                else:
                    profile = Profile()
                ...
            reset_sequence(Profile)

    The function must accept a list of data hashes and a field name. If
    field is not None, it is the field that must be used to get the
    existing objects out of the database to update them; otherwise, new
    objects must be created for each data hash.

    Follow up model creation with a call to :func:`reset_sequence` to
    update the database sequences.

    If you only want to modify the hash, you can make modifications and
    then pass it on to :func:`write_models`.

    .. code-block:: python

        @writes_models(Profile)
        def write_profile(data, field):
            '''Creates a Profile model'''
            for hash_ in data:
                # modify hash
            return write_models(Profile, data, field)
    """
    def decorated(func):
        """
        Decorator for the creation function.
        """
        # Register the writer keyed by model class; looked up by
        # _write_models_step and the relation-creation steps.
        _WRITE_MODEL[model] = func
        return func
    return decorated
_TEST_MODEL = {}
def tests_existence(model):
    """
    Register a model-specific existence test.

    This can then be accessed via the steps:

    .. code-block:: gherkin

        Then foos should be present in the database:
            | name   | bar |
            | badger | baz |

        Then foos should not be present in the database:
            | name   | bar |
            | badger | baz |

    A method for a specific model can define a function
    ``test_badgers(queryset, data)``, decorating it with the
    ``tests_existence(model_class)`` decorator:

    .. code-block:: python

        @tests_existence(Profile)
        def test_profile(queryset, data):
            '''Test a Profile model'''
            # modify data ...
            return test_existence(queryset, data)

    If you only want to modify the hash, you can make modifications then
    pass it on to test_existence().
    """
    def decorated(func):
        """
        Decorator for the existence function.
        """
        # Register the checker keyed by model class; looked up by
        # _model_exists_step.
        _TEST_MODEL[model] = func
        return func
    return decorated
def get_model(name):
    """
    Look up a model class by its verbose name (singular or plural),
    which is how models are referred to in steps.
    """
    key = name.lower()
    found = MODELS.get(key)
    assert found, "Could not locate model by name '%s'" % name
    return found
def reset_sequence(model):
    """
    Reset the ID sequence for a model.

    Useful after inserting rows with explicit primary keys, which can
    leave the database's auto-increment sequence behind the data.
    """
    # NOTE(review): uses the default database connection only — confirm
    # whether multi-database setups need to be supported here.
    sql = connection.ops.sequence_reset_sql(no_style(), [model])
    for cmd in sql:
        connection.cursor().execute(cmd)
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_model_exists_step
|
python
|
def _model_exists_step(self, model, should_exist):
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
|
Test for the existence of a model matching the given data.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L247-L289
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
write_models
|
python
|
def write_models(model, data, field):
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
|
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L338-L375
|
[
"def reset_sequence(model):\n \"\"\"\n Reset the ID sequence for a model.\n \"\"\"\n sql = connection.ops.sequence_reset_sql(no_style(), [model])\n for cmd in sql:\n connection.cursor().execute(cmd)\n"
] |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_write_models_step
|
python
|
def _write_models_step(self, model, field=None):
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
|
Write or update a model.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L378-L391
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_create_models_for_relation_step
|
python
|
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
|
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L441-L473
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_create_m2m_links_step
|
python
|
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
|
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L478-L518
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
def _model_count_step(self, count, model):
"""
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
"""
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
aloetesting/aloe_django
|
aloe_django/steps/models.py
|
_model_count_step
|
python
|
def _model_count_step(self, count, model):
model = get_model(model)
expected = int(count)
found = model.objects.count()
assert found == expected, "Expected %d %s, found %d." % \
(expected, model._meta.verbose_name_plural, found)
|
Count the number of models in the database.
Example:
.. code-block:: gherkin
Then there should be 0 goals in the database
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/models.py#L522-L538
| null |
"""
Step definitions and utilities for working with Django models.
"""
from __future__ import print_function
from __future__ import unicode_literals
# pylint:disable=redefined-builtin
from builtins import str
# pylint:disable=redefined-builtin
import warnings
from functools import partial
from django.apps import apps
from django.core.management.color import no_style
from django.db import connection
from aloe import step
from aloe.tools import guess_types
__all__ = ('writes_models', 'write_models',
'tests_existence', 'test_existence',
'reset_sequence')
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for app in apps.get_app_configs():
for model in app.get_models():
yield (str(model._meta.verbose_name).lower(), model)
yield (str(model._meta.verbose_name_plural).lower(), model)
try:
MODELS = dict(_models_generator())
except: # pylint:disable=bare-except
warnings.warn("Models not loaded!")
_WRITE_MODEL = {}
def writes_models(model):
"""
Register a model-specific create and update function.
This can then be accessed via the steps:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
A method for a specific model can define a function ``write_badgers(data,
field)``, which creates and updates the Badger model and decorating it with
the ``writes_models(model_class)`` decorator:
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
if field:
profile = Profile.objects.get(**{field: hash_[field]})
else:
profile = Profile()
...
reset_sequence(Profile)
The function must accept a list of data hashes and a field name. If field
is not None, it is the field that must be used to get the existing objects
out of the database to update them; otherwise, new objects must be created
for each data hash.
Follow up model creation with a call to :func:`reset_sequence` to
update the database sequences.
If you only want to modify the hash, you can make modifications and then
pass it on to :func:`write_models`.
.. code-block:: python
@writes_models(Profile)
def write_profile(data, field):
'''Creates a Profile model'''
for hash_ in data:
# modify hash
return write_models(Profile, data, field)
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
_TEST_MODEL = {}
def tests_existence(model):
"""
Register a model-specific existence test.
This can then be accessed via the steps:
.. code-block:: gherkin
Then foos should be present in the database:
| name | bar |
| badger | baz |
Then foos should not be present in the database:
| name | bar |
| badger | baz |
A method for a specific model can define a function
``test_badgers(queryset, data)`` and decorating it with the
``tests_existence(model_class)`` decorator:
.. code-block:: python
@tests_existence(Profile)
def test_profile(queryset, data):
'''Test a Profile model'''
# modify data ...
return test_existence(queryset, data)
If you only want to modify the hash, you can make modifications then pass
it on to test_existence().
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_TEST_MODEL[model] = func
return func
return decorated
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
fields = []
for field in model._meta.fields:
fields.append((field.name, str(getattr(model, field.name))))
if attrs is not None:
for attr in attrs:
fields.append((attr, str(getattr(model, attr))))
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
fields.append((field.name, '{val} ({count})'.format(
val=', '.join(map(str, vals.all())),
count=vals.count(),
)))
print(', '.join(
'{0}={1}'.format(field, value)
for field, value in fields
))
def test_existence(queryset, data):
"""
:param queryset: a Django queryset
:param data: a single model to check for
:returns: True if the model exists
Test existence of a given hash in a `queryset` (or among all model
instances if a model is given).
Useful when registering custom tests with :func:`tests_existence`.
"""
fields = {}
extra_attrs = {}
for key, value in data.items():
if key.startswith('@'):
# this is an attribute
extra_attrs[key[1:]] = value
else:
fields[key] = value
filtered = queryset.filter(**fields)
if filtered.exists():
return any(
all(getattr(obj, k) == v for k, v in extra_attrs.items())
for obj in filtered.all()
)
return False
def _model_exists_step(self, model, should_exist):
"""
Test for the existence of a model matching the given data.
"""
model = get_model(model)
data = guess_types(self.hashes)
queryset = model.objects
try:
existence_check = _TEST_MODEL[model]
except KeyError:
existence_check = test_existence
failed = 0
try:
for hash_ in data:
match = existence_check(queryset, hash_)
if should_exist:
assert match, \
"%s does not exist: %s" % (model.__name__, hash_)
else:
assert not match, \
"%s exists: %s" % (model.__name__, hash_)
except AssertionError as exc:
print(exc)
failed += 1
if failed:
print("Rows in DB are:")
for existing_model in queryset.all():
_dump_model(existing_model,
attrs=[k[1:]
for k in data[0].keys()
if k.startswith('@')])
if should_exist:
raise AssertionError("%i rows missing" % failed)
else:
raise AssertionError("%i rows found" % failed)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should be present in the database')
def _model_exists_positive_step(self, model):
"""
Test for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties) prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, True)
@step(STEP_PREFIX
+ r'(?:an? )?([A-Z][a-z0-9_ ]*) should not be present in the database')
def _model_exists_negative_step(self, model):
"""
Tests for the existence of a model matching the given data.
Column names are included in a query to the database. To check model
attributes that are not database columns (i.e. properties). Prepend the
column with an ``@`` sign.
Example:
.. code-block:: gherkin
Then foos should not be present in the database:
| name | @bar |
| badger | baz |
See :func:`tests_existence`.
"""
return _model_exists_step(self, model, False)
def write_models(model, data, field):
"""
:param model: a Django model class
:param data: a list of hashes to build models from
:param field: a field name to match models on, or None
:returns: a list of models written
Create or update models for each data hash.
`field` is the field that is used to get the existing models out of
the database to update them; otherwise, if ``field=None``, new models are
created.
Useful when registering custom tests with :func:`writes_models`.
"""
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
def _write_models_step(self, model, field=None):
"""
Write or update a model.
"""
model = get_model(model)
data = guess_types(self.hashes)
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, field)
@step(r'I have(?: an?)? ([a-z][a-z0-9_ ]*) in the database:')
def _write_models_step_new(self, model):
"""
Create models in the database.
Syntax:
I have `model` in the database:
Example:
.. code-block:: gherkin
And I have foos in the database:
| name | bar |
| Baz | Quux |
See :func:`writes_models`.
"""
return _write_models_step(self, model)
@step(r'I update(?: an?)? existing ([a-z][a-z0-9_ ]*) by ([a-z][a-z0-9_]*) '
'in the database:')
def _write_models_step_update(self, model, field):
"""
Update existing models in the database, specifying a column to match on.
Syntax:
I update `model` by `key` in the database:
Example:
.. code-block:: gherkin
And I update existing foos by pk in the database:
| pk | name |
| 1 | Bar |
See :func:`writes_models`.
"""
return _write_models_step(self, model, field=field)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' has(?: an?)? ([A-Z][a-z0-9_ ]*) in the database:')
def _create_models_for_relation_step(self, rel_model_name,
rel_key, rel_value, model):
"""
Create a new model linked to the given model.
Syntax:
And `model` with `field` "`value`" has `new model` in the database:
Example:
.. code-block:: gherkin
And project with name "Ball Project" has goals in the database:
| description |
| To have fun playing with balls of twine |
"""
model = get_model(model)
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
data = guess_types(self.hashes)
for hash_ in data:
hash_['%s' % rel_model_name] = rel_model
try:
func = _WRITE_MODEL[model]
except KeyError:
func = partial(write_models, model)
func(data, None)
@step(STEP_PREFIX + r'([A-Z][a-z0-9_ ]*) with ([a-z]+) "([^"]*)"'
+ r' is linked to ([A-Z][a-z0-9_ ]*) in the database:')
def _create_m2m_links_step(self, rel_model_name,
rel_key, rel_value, relation_name):
"""
Link many-to-many models together.
Syntax:
And `model` with `field` "`value`" is linked to `other model` in the
database:
Example:
.. code-block:: gherkin
And article with name "Guidelines" is linked to tags in the database:
| name |
| coding |
| style |
"""
lookup = {rel_key: rel_value}
rel_model = get_model(rel_model_name).objects.get(**lookup)
relation = None
for m2m in rel_model._meta.many_to_many:
if relation_name in (m2m.name, m2m.verbose_name):
relation = getattr(rel_model, m2m.name)
break
if not relation:
try:
relation = getattr(rel_model, relation_name)
except AttributeError:
pass
assert relation, \
"%s does not have a many-to-many relation named '%s'" % (
rel_model._meta.verbose_name.capitalize(),
relation_name,
)
m2m_model = relation.model
for hash_ in self.hashes:
relation.add(m2m_model.objects.get(**hash_))
@step(r'There should be (\d+) ([a-z][a-z0-9_ ]*) in the database')
|
aloetesting/aloe_django
|
aloe_django/steps/mail.py
|
mail_sent_count
|
python
|
def mail_sent_count(self, count):
expected = int(count)
actual = len(mail.outbox)
assert expected == actual, \
"Expected to send {0} email(s), got {1}.".format(expected, actual)
|
Test that `count` mails have been sent.
Syntax:
I have sent `count` emails
Example:
.. code-block:: gherkin
Then I have sent 2 emails
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/mail.py#L25-L42
| null |
"""
Step definitions for working with Django email.
"""
from __future__ import print_function
from smtplib import SMTPException
from django.core import mail
from django.test.html import parse_html
from nose.tools import assert_in # pylint:disable=no-name-in-module
from aloe import step
__all__ = ()
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
@step(r'I have not sent any emails')
def mail_not_sent(self):
"""
Test no emails have been sent.
Example:
.. code-block:: gherkin
Then I have not sent any emails
"""
return mail_sent_count(self, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_sent_content(self, text, part):
"""
Test an email contains (assert text in) the given text in the relevant
message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have sent an email with "pandas" in the body
"""
if not any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"No email contained expected text in the {0}.".format(part))
@step(CHECK_PREFIX + (r'I have not sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_not_sent_content(self, text, part):
"""
Test an email does not contain (assert text not in) the given text in the
relevant message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have not sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have not sent an email with "pandas" in the body
"""
if any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"An email contained unexpected text in the {0}.".format(part))
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(self):
"""
Test the body of an email contains (assert text in) the given multiline
string.
This step strictly applies whitespace.
Example:
.. code-block:: gherkin
Then I have sent an email with the following in the body:
\"\"\"
Dear Mr. Panda,
\"\"\"
"""
return mail_sent_content(self, self.multiline, 'body')
@step(CHECK_PREFIX
+ r'I have sent an email with the following HTML alternative:')
def mail_sent_contains_html(self):
"""
Test that an email contains the HTML (assert HTML in) in the multiline as
one of its MIME alternatives.
The HTML is normalised by passing through Django's
:func:`django.test.html.parse_html`.
Example:
.. code-block:: gherkin
And I have sent an email with the following HTML alternative:
\"\"\"
<p><strong>Name:</strong> Sir Panda</p>
<p><strong>Phone:</strong> 0400000000</p>
<p><strong>Email:</strong> sir.panda@pand.as</p>
\"\"\"
"""
for email in mail.outbox:
try:
html = next(content for content, mime in email.alternatives
if mime == 'text/html')
dom1 = parse_html(html)
dom2 = parse_html(self.multiline)
assert_in(dom1, dom2)
except AssertionError as exc:
print("Email did not match", exc)
# we intentionally eat the exception
continue
return True
raise AssertionError("No email contained the HTML")
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(self):
"""
Clear the email outbox.
Example:
.. code-block:: gherkin
Given I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by aloe_django")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(self):
"""
Cause sending email to raise an exception.
This allows simulating email failure.
Example:
.. code-block:: gherkin
Given sending email does not work
"""
mail.EmailMessage.send = broken_send
def dump_emails(part):
"""Show the sent emails' tested parts, to aid in debugging."""
print("Sent emails:")
for email in mail.outbox:
print(getattr(email, part))
|
aloetesting/aloe_django
|
aloe_django/steps/mail.py
|
mail_sent_content
|
python
|
def mail_sent_content(self, text, part):
if not any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"No email contained expected text in the {0}.".format(part))
|
Test an email contains (assert text in) the given text in the relevant
message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have sent an email with "pandas" in the body
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/mail.py#L61-L81
|
[
"def dump_emails(part):\n \"\"\"Show the sent emails' tested parts, to aid in debugging.\"\"\"\n\n print(\"Sent emails:\")\n for email in mail.outbox:\n print(getattr(email, part))\n"
] |
"""
Step definitions for working with Django email.
"""
from __future__ import print_function
from smtplib import SMTPException
from django.core import mail
from django.test.html import parse_html
from nose.tools import assert_in # pylint:disable=no-name-in-module
from aloe import step
__all__ = ()
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(self, count):
"""
Test that `count` mails have been sent.
Syntax:
I have sent `count` emails
Example:
.. code-block:: gherkin
Then I have sent 2 emails
"""
expected = int(count)
actual = len(mail.outbox)
assert expected == actual, \
"Expected to send {0} email(s), got {1}.".format(expected, actual)
@step(r'I have not sent any emails')
def mail_not_sent(self):
"""
Test no emails have been sent.
Example:
.. code-block:: gherkin
Then I have not sent any emails
"""
return mail_sent_count(self, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
@step(CHECK_PREFIX + (r'I have not sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_not_sent_content(self, text, part):
"""
Test an email does not contain (assert text not in) the given text in the
relevant message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have not sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have not sent an email with "pandas" in the body
"""
if any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"An email contained unexpected text in the {0}.".format(part))
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(self):
"""
Test the body of an email contains (assert text in) the given multiline
string.
This step strictly applies whitespace.
Example:
.. code-block:: gherkin
Then I have sent an email with the following in the body:
\"\"\"
Dear Mr. Panda,
\"\"\"
"""
return mail_sent_content(self, self.multiline, 'body')
@step(CHECK_PREFIX
+ r'I have sent an email with the following HTML alternative:')
def mail_sent_contains_html(self):
"""
Test that an email contains the HTML (assert HTML in) in the multiline as
one of its MIME alternatives.
The HTML is normalised by passing through Django's
:func:`django.test.html.parse_html`.
Example:
.. code-block:: gherkin
And I have sent an email with the following HTML alternative:
\"\"\"
<p><strong>Name:</strong> Sir Panda</p>
<p><strong>Phone:</strong> 0400000000</p>
<p><strong>Email:</strong> sir.panda@pand.as</p>
\"\"\"
"""
for email in mail.outbox:
try:
html = next(content for content, mime in email.alternatives
if mime == 'text/html')
dom1 = parse_html(html)
dom2 = parse_html(self.multiline)
assert_in(dom1, dom2)
except AssertionError as exc:
print("Email did not match", exc)
# we intentionally eat the exception
continue
return True
raise AssertionError("No email contained the HTML")
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(self):
"""
Clear the email outbox.
Example:
.. code-block:: gherkin
Given I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by aloe_django")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(self):
"""
Cause sending email to raise an exception.
This allows simulating email failure.
Example:
.. code-block:: gherkin
Given sending email does not work
"""
mail.EmailMessage.send = broken_send
def dump_emails(part):
"""Show the sent emails' tested parts, to aid in debugging."""
print("Sent emails:")
for email in mail.outbox:
print(getattr(email, part))
|
aloetesting/aloe_django
|
aloe_django/steps/mail.py
|
mail_sent_contains_html
|
python
|
def mail_sent_contains_html(self):
for email in mail.outbox:
try:
html = next(content for content, mime in email.alternatives
if mime == 'text/html')
dom1 = parse_html(html)
dom2 = parse_html(self.multiline)
assert_in(dom1, dom2)
except AssertionError as exc:
print("Email did not match", exc)
# we intentionally eat the exception
continue
return True
raise AssertionError("No email contained the HTML")
|
Test that an email contains the HTML (assert HTML in) in the multiline as
one of its MIME alternatives.
The HTML is normalised by passing through Django's
:func:`django.test.html.parse_html`.
Example:
.. code-block:: gherkin
And I have sent an email with the following HTML alternative:
\"\"\"
<p><strong>Name:</strong> Sir Panda</p>
<p><strong>Phone:</strong> 0400000000</p>
<p><strong>Email:</strong> sir.panda@pand.as</p>
\"\"\"
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/mail.py#L131-L167
| null |
"""
Step definitions for working with Django email.
"""
from __future__ import print_function
from smtplib import SMTPException
from django.core import mail
from django.test.html import parse_html
from nose.tools import assert_in # pylint:disable=no-name-in-module
from aloe import step
__all__ = ()
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(self, count):
"""
Test that `count` mails have been sent.
Syntax:
I have sent `count` emails
Example:
.. code-block:: gherkin
Then I have sent 2 emails
"""
expected = int(count)
actual = len(mail.outbox)
assert expected == actual, \
"Expected to send {0} email(s), got {1}.".format(expected, actual)
@step(r'I have not sent any emails')
def mail_not_sent(self):
"""
Test no emails have been sent.
Example:
.. code-block:: gherkin
Then I have not sent any emails
"""
return mail_sent_count(self, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_sent_content(self, text, part):
"""
Test an email contains (assert text in) the given text in the relevant
message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have sent an email with "pandas" in the body
"""
if not any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"No email contained expected text in the {0}.".format(part))
@step(CHECK_PREFIX + (r'I have not sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_not_sent_content(self, text, part):
"""
Test an email does not contain (assert text not in) the given text in the
relevant message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have not sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have not sent an email with "pandas" in the body
"""
if any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"An email contained unexpected text in the {0}.".format(part))
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(self):
"""
Test the body of an email contains (assert text in) the given multiline
string.
This step strictly applies whitespace.
Example:
.. code-block:: gherkin
Then I have sent an email with the following in the body:
\"\"\"
Dear Mr. Panda,
\"\"\"
"""
return mail_sent_content(self, self.multiline, 'body')
@step(CHECK_PREFIX
+ r'I have sent an email with the following HTML alternative:')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(self):
"""
Clear the email outbox.
Example:
.. code-block:: gherkin
Given I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by aloe_django")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(self):
"""
Cause sending email to raise an exception.
This allows simulating email failure.
Example:
.. code-block:: gherkin
Given sending email does not work
"""
mail.EmailMessage.send = broken_send
def dump_emails(part):
"""Show the sent emails' tested parts, to aid in debugging."""
print("Sent emails:")
for email in mail.outbox:
print(getattr(email, part))
|
aloetesting/aloe_django
|
aloe_django/steps/mail.py
|
dump_emails
|
python
|
def dump_emails(part):
print("Sent emails:")
for email in mail.outbox:
print(getattr(email, part))
|
Show the sent emails' tested parts, to aid in debugging.
|
train
|
https://github.com/aloetesting/aloe_django/blob/672eac97c97644bfe334e70696a6dc5ddf4ced02/aloe_django/steps/mail.py#L208-L213
| null |
"""
Step definitions for working with Django email.
"""
from __future__ import print_function
from smtplib import SMTPException
from django.core import mail
from django.test.html import parse_html
from nose.tools import assert_in # pylint:disable=no-name-in-module
from aloe import step
__all__ = ()
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(self, count):
"""
Test that `count` mails have been sent.
Syntax:
I have sent `count` emails
Example:
.. code-block:: gherkin
Then I have sent 2 emails
"""
expected = int(count)
actual = len(mail.outbox)
assert expected == actual, \
"Expected to send {0} email(s), got {1}.".format(expected, actual)
@step(r'I have not sent any emails')
def mail_not_sent(self):
"""
Test no emails have been sent.
Example:
.. code-block:: gherkin
Then I have not sent any emails
"""
return mail_sent_count(self, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_sent_content(self, text, part):
"""
Test an email contains (assert text in) the given text in the relevant
message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have sent an email with "pandas" in the body
"""
if not any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"No email contained expected text in the {0}.".format(part))
@step(CHECK_PREFIX + (r'I have not sent an email with "([^"]*)" in the ({0})')
.format('|'.join(EMAIL_PARTS)))
def mail_not_sent_content(self, text, part):
"""
Test an email does not contain (assert text not in) the given text in the
relevant message part (accessible as an attribute on the email object).
This step strictly applies whitespace.
Syntax:
I have not sent an email with "`text`" in the `part`
Example:
.. code-block:: gherkin
Then I have not sent an email with "pandas" in the body
"""
if any(text in getattr(email, part) for email in mail.outbox):
dump_emails(part)
raise AssertionError(
"An email contained unexpected text in the {0}.".format(part))
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(self):
"""
Test the body of an email contains (assert text in) the given multiline
string.
This step strictly applies whitespace.
Example:
.. code-block:: gherkin
Then I have sent an email with the following in the body:
\"\"\"
Dear Mr. Panda,
\"\"\"
"""
return mail_sent_content(self, self.multiline, 'body')
@step(CHECK_PREFIX
+ r'I have sent an email with the following HTML alternative:')
def mail_sent_contains_html(self):
"""
Test that an email contains the HTML (assert HTML in) in the multiline as
one of its MIME alternatives.
The HTML is normalised by passing through Django's
:func:`django.test.html.parse_html`.
Example:
.. code-block:: gherkin
And I have sent an email with the following HTML alternative:
\"\"\"
<p><strong>Name:</strong> Sir Panda</p>
<p><strong>Phone:</strong> 0400000000</p>
<p><strong>Email:</strong> sir.panda@pand.as</p>
\"\"\"
"""
for email in mail.outbox:
try:
html = next(content for content, mime in email.alternatives
if mime == 'text/html')
dom1 = parse_html(html)
dom2 = parse_html(self.multiline)
assert_in(dom1, dom2)
except AssertionError as exc:
print("Email did not match", exc)
# we intentionally eat the exception
continue
return True
raise AssertionError("No email contained the HTML")
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(self):
"""
Clear the email outbox.
Example:
.. code-block:: gherkin
Given I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by aloe_django")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(self):
"""
Cause sending email to raise an exception.
This allows simulating email failure.
Example:
.. code-block:: gherkin
Given sending email does not work
"""
mail.EmailMessage.send = broken_send
|
sci-bots/serial-device
|
serial_device/__init__.py
|
_comports
|
python
|
def _comports():
'''
Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4").
'''
return (pd.DataFrame(list(map(list, serial.tools.list_ports.comports())),
columns=['port', 'descriptor', 'hardware_id'])
.set_index('port'))
|
Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4").
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/__init__.py#L34-L44
| null |
'''
Copyright 2014 Christian Fobel
Copyright 2011 Ryan Fobel
This file is part of serial_device.
serial_device is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
serial_device is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with serial_device. If not, see <http://www.gnu.org/licenses/>.
'''
from time import sleep
import itertools
import os
import pandas as pd
import path_helpers as ph
import serial.tools.list_ports
import six.moves
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def comports(vid_pid=None, include_all=False, check_available=True,
only_available=False):
'''
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
'''
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = (df_comports.hardware_id.str.lower().str
.extract('vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)',
expand=True))
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = (df_comports.loc[no_id_mask, 'hardware_id']
.str.lower().str
.extract('vid:pid=(?P<vid>[0-9a-f]+):'
'(?P<pid>[0-9a-f]+)', expand=True))
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' +
df_comports.pid).isin(map(str.lower,
vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = (df_comports.sort_values('include', ascending=False)
.drop('include', axis=1))
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = (df_comports.loc[df_comports.include]
.drop('include', axis=1))
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for name_i, port_info_i in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True)
except serial.SerialException:
available.append(False)
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available]
if not check_available:
del df_comports['available']
return df_comports
def get_serial_ports():
if os.name == 'nt':
ports = _get_serial_ports_windows()
else:
ports = itertools.chain(ph.path('/dev').walk('ttyUSB*'),
ph.path('/dev').walk('ttyACM*'),
ph.path('/dev').walk('tty.usb*'))
# sort list alphabetically
ports_ = [port for port in ports]
ports_.sort()
for port in ports_:
yield port
def _get_serial_ports_windows():
'''
Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
'''
import six.moves.winreg as winreg
reg_path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path)
except WindowsError:
# No serial ports. Return empty generator.
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
class ConnectionError(Exception):
pass
class SerialDevice(object):
'''
This class provides a base interface for encapsulating interaction with a
device connected through a serial-port.
It provides methods to automatically resolve a port based on an
implementation-defined connection-test, which is applied to all available
serial-ports until a successful connection is made.
Notes
=====
This class intends to be cross-platform and has been verified to work on
Windows and Ubuntu.
'''
def __init__(self):
self.port = None
def get_port(self, baud_rate):
'''
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
'''
self.port = None
for test_port in get_serial_ports():
if self.test_connection(test_port, baud_rate):
self.port = test_port
break
sleep(0.1)
if self.port is None:
raise ConnectionError('Could not connect to serial device.')
return self.port
def test_connection(self, port, baud_rate):
'''
Test connection to device using the specified port and baud-rate.
If the connection is successful, return `True`.
Otherwise, return `False`.
'''
raise NotImplementedError
|
sci-bots/serial-device
|
serial_device/__init__.py
|
comports
|
python
|
def comports(vid_pid=None, include_all=False, check_available=True,
only_available=False):
'''
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
'''
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = (df_comports.hardware_id.str.lower().str
.extract('vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)',
expand=True))
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = (df_comports.loc[no_id_mask, 'hardware_id']
.str.lower().str
.extract('vid:pid=(?P<vid>[0-9a-f]+):'
'(?P<pid>[0-9a-f]+)', expand=True))
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' +
df_comports.pid).isin(map(str.lower,
vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = (df_comports.sort_values('include', ascending=False)
.drop('include', axis=1))
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = (df_comports.loc[df_comports.include]
.drop('include', axis=1))
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for name_i, port_info_i in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True)
except serial.SerialException:
available.append(False)
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available]
if not check_available:
del df_comports['available']
return df_comports
|
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/__init__.py#L47-L143
|
[
"def _comports():\n '''\n Returns\n -------\n pandas.DataFrame\n Table containing descriptor, and hardware ID of each available COM\n port, indexed by port (e.g., \"COM4\").\n '''\n return (pd.DataFrame(list(map(list, serial.tools.list_ports.comports())),\n columns=['port', 'descriptor', 'hardware_id'])\n .set_index('port'))\n"
] |
'''
Copyright 2014 Christian Fobel
Copyright 2011 Ryan Fobel
This file is part of serial_device.
serial_device is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
serial_device is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with serial_device. If not, see <http://www.gnu.org/licenses/>.
'''
from time import sleep
import itertools
import os
import pandas as pd
import path_helpers as ph
import serial.tools.list_ports
import six.moves
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def _comports():
'''
Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4").
'''
return (pd.DataFrame(list(map(list, serial.tools.list_ports.comports())),
columns=['port', 'descriptor', 'hardware_id'])
.set_index('port'))
def get_serial_ports():
if os.name == 'nt':
ports = _get_serial_ports_windows()
else:
ports = itertools.chain(ph.path('/dev').walk('ttyUSB*'),
ph.path('/dev').walk('ttyACM*'),
ph.path('/dev').walk('tty.usb*'))
# sort list alphabetically
ports_ = [port for port in ports]
ports_.sort()
for port in ports_:
yield port
def _get_serial_ports_windows():
'''
Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
'''
import six.moves.winreg as winreg
reg_path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path)
except WindowsError:
# No serial ports. Return empty generator.
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
class ConnectionError(Exception):
pass
class SerialDevice(object):
'''
This class provides a base interface for encapsulating interaction with a
device connected through a serial-port.
It provides methods to automatically resolve a port based on an
implementation-defined connection-test, which is applied to all available
serial-ports until a successful connection is made.
Notes
=====
This class intends to be cross-platform and has been verified to work on
Windows and Ubuntu.
'''
def __init__(self):
self.port = None
def get_port(self, baud_rate):
'''
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
'''
self.port = None
for test_port in get_serial_ports():
if self.test_connection(test_port, baud_rate):
self.port = test_port
break
sleep(0.1)
if self.port is None:
raise ConnectionError('Could not connect to serial device.')
return self.port
def test_connection(self, port, baud_rate):
'''
Test connection to device using the specified port and baud-rate.
If the connection is successful, return `True`.
Otherwise, return `False`.
'''
raise NotImplementedError
|
sci-bots/serial-device
|
serial_device/__init__.py
|
_get_serial_ports_windows
|
python
|
def _get_serial_ports_windows():
'''
Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
'''
import six.moves.winreg as winreg
reg_path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path)
except WindowsError:
# No serial ports. Return empty generator.
return
for i in itertools.count():
try:
val = winreg.EnumValue(key, i)
yield str(val[1])
except EnvironmentError:
break
|
Uses the Win32 registry to return a iterator of serial (COM) ports existing
on this computer.
See http://stackoverflow.com/questions/1205383/listing-serial-com-ports-on-windows
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/__init__.py#L160-L181
| null |
'''
Copyright 2014 Christian Fobel
Copyright 2011 Ryan Fobel
This file is part of serial_device.
serial_device is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
serial_device is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with serial_device. If not, see <http://www.gnu.org/licenses/>.
'''
from time import sleep
import itertools
import os
import pandas as pd
import path_helpers as ph
import serial.tools.list_ports
import six.moves
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def _comports():
'''
Returns
-------
pandas.DataFrame
Table containing descriptor, and hardware ID of each available COM
port, indexed by port (e.g., "COM4").
'''
return (pd.DataFrame(list(map(list, serial.tools.list_ports.comports())),
columns=['port', 'descriptor', 'hardware_id'])
.set_index('port'))
def comports(vid_pid=None, include_all=False, check_available=True,
only_available=False):
'''
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
'''
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = (df_comports.hardware_id.str.lower().str
.extract('vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)',
expand=True))
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = (df_comports.loc[no_id_mask, 'hardware_id']
.str.lower().str
.extract('vid:pid=(?P<vid>[0-9a-f]+):'
'(?P<pid>[0-9a-f]+)', expand=True))
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' +
df_comports.pid).isin(map(str.lower,
vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = (df_comports.sort_values('include', ascending=False)
.drop('include', axis=1))
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = (df_comports.loc[df_comports.include]
.drop('include', axis=1))
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for name_i, port_info_i in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True)
except serial.SerialException:
available.append(False)
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available]
if not check_available:
del df_comports['available']
return df_comports
def get_serial_ports():
if os.name == 'nt':
ports = _get_serial_ports_windows()
else:
ports = itertools.chain(ph.path('/dev').walk('ttyUSB*'),
ph.path('/dev').walk('ttyACM*'),
ph.path('/dev').walk('tty.usb*'))
# sort list alphabetically
ports_ = [port for port in ports]
ports_.sort()
for port in ports_:
yield port
class ConnectionError(Exception):
pass
class SerialDevice(object):
'''
This class provides a base interface for encapsulating interaction with a
device connected through a serial-port.
It provides methods to automatically resolve a port based on an
implementation-defined connection-test, which is applied to all available
serial-ports until a successful connection is made.
Notes
=====
This class intends to be cross-platform and has been verified to work on
Windows and Ubuntu.
'''
def __init__(self):
self.port = None
def get_port(self, baud_rate):
'''
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
'''
self.port = None
for test_port in get_serial_ports():
if self.test_connection(test_port, baud_rate):
self.port = test_port
break
sleep(0.1)
if self.port is None:
raise ConnectionError('Could not connect to serial device.')
return self.port
def test_connection(self, port, baud_rate):
'''
Test connection to device using the specified port and baud-rate.
If the connection is successful, return `True`.
Otherwise, return `False`.
'''
raise NotImplementedError
|
sci-bots/serial-device
|
serial_device/__init__.py
|
SerialDevice.get_port
|
python
|
def get_port(self, baud_rate):
'''
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
'''
self.port = None
for test_port in get_serial_ports():
if self.test_connection(test_port, baud_rate):
self.port = test_port
break
sleep(0.1)
if self.port is None:
raise ConnectionError('Could not connect to serial device.')
return self.port
|
Using the specified baud-rate, attempt to connect to each available
serial port. If the `test_connection()` method returns `True` for a
port, update the `port` attribute and return the port.
In the case where the `test_connection()` does not return `True` for
any of the evaluated ports, raise a `ConnectionError`.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/__init__.py#L206-L226
|
[
"def get_serial_ports():\n if os.name == 'nt':\n ports = _get_serial_ports_windows()\n else:\n ports = itertools.chain(ph.path('/dev').walk('ttyUSB*'),\n ph.path('/dev').walk('ttyACM*'),\n ph.path('/dev').walk('tty.usb*'))\n # sort list alphabetically\n ports_ = [port for port in ports]\n ports_.sort()\n for port in ports_:\n yield port\n",
"def test_connection(self, port, baud_rate):\n '''\n Test connection to device using the specified port and baud-rate.\n\n If the connection is successful, return `True`.\n Otherwise, return `False`.\n '''\n raise NotImplementedError\n"
] |
class SerialDevice(object):
'''
This class provides a base interface for encapsulating interaction with a
device connected through a serial-port.
It provides methods to automatically resolve a port based on an
implementation-defined connection-test, which is applied to all available
serial-ports until a successful connection is made.
Notes
=====
This class intends to be cross-platform and has been verified to work on
Windows and Ubuntu.
'''
def __init__(self):
self.port = None
def test_connection(self, port, baud_rate):
'''
Test connection to device using the specified port and baud-rate.
If the connection is successful, return `True`.
Otherwise, return `False`.
'''
raise NotImplementedError
|
sci-bots/serial-device
|
serial_device/or_event.py
|
orify
|
python
|
def orify(event, changed_callback):
'''
Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
----------
'''
event.changed = changed_callback
if not hasattr(event, '_set'):
# `set`/`clear` methods have not been overridden on event yet.
# Override methods to call `changed_callback` after performing default
# action.
event._set = event.set
event._clear = event.clear
event.set = lambda: or_set(event)
event.clear = lambda: or_clear(event)
|
Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
----------
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/or_event.py#L19-L36
| null |
'''
Wait on multiple :class:`threading.Event` instances.
Based on code from: https://stackoverflow.com/questions/12317940/python-threading-can-i-sleep-on-two-threading-events-simultaneously/12320352#12320352
'''
import threading
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def OrEvent(*events):
'''
Parameters
----------
events : list(threading.Event)
List of events.
Returns
-------
threading.Event
Event that is set when **at least one** of the events in :data:`events`
is set.
'''
or_event = threading.Event()
def changed():
'''
Set ``or_event`` if any of the specified events have been set.
'''
bools = [event_i.is_set() for event_i in events]
if any(bools):
or_event.set()
else:
or_event.clear()
for event_i in events:
# Override ``set`` and ``clear`` methods on event to update state of
# `or_event` after performing default behaviour.
orify(event_i, changed)
# Set initial state of `or_event`.
changed()
return or_event
|
sci-bots/serial-device
|
serial_device/or_event.py
|
OrEvent
|
python
|
def OrEvent(*events):
'''
Parameters
----------
events : list(threading.Event)
List of events.
Returns
-------
threading.Event
Event that is set when **at least one** of the events in :data:`events`
is set.
'''
or_event = threading.Event()
def changed():
'''
Set ``or_event`` if any of the specified events have been set.
'''
bools = [event_i.is_set() for event_i in events]
if any(bools):
or_event.set()
else:
or_event.clear()
for event_i in events:
# Override ``set`` and ``clear`` methods on event to update state of
# `or_event` after performing default behaviour.
orify(event_i, changed)
# Set initial state of `or_event`.
changed()
return or_event
|
Parameters
----------
events : list(threading.Event)
List of events.
Returns
-------
threading.Event
Event that is set when **at least one** of the events in :data:`events`
is set.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/or_event.py#L39-L70
|
[
"def orify(event, changed_callback):\n '''\n Override ``set`` and ``clear`` methods on event to call specified callback\n function after performing default behaviour.\n\n Parameters\n ----------\n\n '''\n event.changed = changed_callback\n if not hasattr(event, '_set'):\n # `set`/`clear` methods have not been overridden on event yet.\n # Override methods to call `changed_callback` after performing default\n # action.\n event._set = event.set\n event._clear = event.clear\n event.set = lambda: or_set(event)\n event.clear = lambda: or_clear(event)\n",
"def changed():\n '''\n Set ``or_event`` if any of the specified events have been set.\n '''\n bools = [event_i.is_set() for event_i in events]\n if any(bools):\n or_event.set()\n else:\n or_event.clear()\n"
] |
'''
Wait on multiple :class:`threading.Event` instances.
Based on code from: https://stackoverflow.com/questions/12317940/python-threading-can-i-sleep-on-two-threading-events-simultaneously/12320352#12320352
'''
import threading
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def orify(event, changed_callback):
'''
Override ``set`` and ``clear`` methods on event to call specified callback
function after performing default behaviour.
Parameters
----------
'''
event.changed = changed_callback
if not hasattr(event, '_set'):
# `set`/`clear` methods have not been overridden on event yet.
# Override methods to call `changed_callback` after performing default
# action.
event._set = event.set
event._clear = event.clear
event.set = lambda: or_set(event)
event.clear = lambda: or_clear(event)
|
sci-bots/serial-device
|
serial_device/threaded.py
|
request
|
python
|
def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
'''
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
device.write(payload)
if poll:
# Polling enabled. Wait for response in busy loop.
start = dt.datetime.now()
while not response_queue.qsize():
if (dt.datetime.now() - start).total_seconds() > timeout_s:
raise queue.Empty('No response received.')
return response_queue.get()
else:
# Polling disabled. Use blocking `Queue.get()` method to wait for
# response.
return response_queue.get(timeout=timeout_s)
|
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/threaded.py#L237-L272
|
[
"def write(self, data, timeout_s=None):\n '''\n Write to serial port.\n\n Waits for serial connection to be established before writing.\n\n Parameters\n ----------\n data : str or bytes\n Data to write to serial port.\n timeout_s : float, optional\n Maximum number of seconds to wait for serial connection to be\n established.\n\n By default, block until serial connection is ready.\n '''\n self.connected.wait(timeout_s)\n self.protocol.transport.write(data)\n"
] |
import queue
import logging
import platform
import threading
import datetime as dt
import serial
import serial.threaded
import serial_device
from .or_event import OrEvent
logger = logging.getLogger(__name__)
# Flag to indicate whether queues should be polled.
# XXX Note that polling performance may vary by platform.
POLL_QUEUES = (platform.system() == 'Windows')
class EventProtocol(serial.threaded.Protocol):
def __init__(self):
self.transport = None
self.connected = threading.Event()
self.disconnected = threading.Event()
self.port = None
def connection_made(self, transport):
"""Called when reader thread is started"""
self.port = transport.serial.port
logger.debug('connection_made: `%s` `%s`', self.port, transport)
self.transport = transport
self.connected.set()
self.disconnected.clear()
def data_received(self, data):
"""Called with snippets received from the serial port"""
raise NotImplementedError
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.debug('Connection to port `%s` lost: %s', self.port,
exception)
else:
logger.debug('Connection to port `%s` closed', self.port)
self.connected.clear()
self.disconnected.set()
class KeepAliveReader(threading.Thread):
'''
Keep a serial connection alive (as much as possible).
Parameters
----------
state : dict
State dictionary to share ``protocol`` object reference.
comport : str
Name of com port to connect to.
default_timeout_s : float, optional
Default time to wait for serial operation (e.g., connect).
By default, block (i.e., no time out).
**kwargs
Keyword arguments passed to ``serial_for_url`` function, e.g.,
``baudrate``, etc.
'''
def __init__(self, protocol_class, comport, **kwargs):
super(KeepAliveReader, self).__init__()
self.daemon = True
self.protocol_class = protocol_class
self.comport = comport
self.kwargs = kwargs
self.protocol = None
self.default_timeout_s = kwargs.pop('default_timeout_s', None)
# Event to indicate serial connection has been established.
self.connected = threading.Event()
# Event to request a break from the run loop.
self.close_request = threading.Event()
# Event to indicate thread has been closed.
self.closed = threading.Event()
# Event to indicate an exception has occurred.
self.error = threading.Event()
# Event to indicate that the thread has connected to the specified port
# **at least once**.
self.has_connected = threading.Event()
@property
def alive(self):
return not self.closed.is_set()
def run(self):
# Verify requested serial port is available.
try:
if self.comport not in (serial_device
.comports(only_available=True).index):
raise NameError('Port `%s` not available. Available ports: '
'`%s`' % (self.comport,
', '.join(serial_device.comports()
.index)))
except NameError as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
while True:
# Wait for requested serial port to become available.
while self.comport not in (serial_device
.comports(only_available=True).index):
# Assume serial port was disconnected temporarily. Wait and
# periodically check again.
self.close_request.wait(2)
if self.close_request.is_set():
# No connection is open, so nothing to close. Just quit.
self.closed.set()
return
try:
# Try to open serial device and monitor connection status.
logger.debug('Open `%s` and monitor connection status',
self.comport)
device = serial.serial_for_url(self.comport, **self.kwargs)
except serial.SerialException as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
except Exception as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
else:
with serial.threaded.ReaderThread(device, self
.protocol_class) as protocol:
self.protocol = protocol
connected_event = OrEvent(protocol.connected,
self.close_request)
disconnected_event = OrEvent(protocol.disconnected,
self.close_request)
# Wait for connection.
connected_event.wait(None if self.has_connected.is_set()
else self.default_timeout_s)
if self.close_request.is_set():
# Quit run loop. Serial connection will be closed by
# `ReaderThread` context manager.
self.closed.set()
return
self.connected.set()
self.has_connected.set()
# Wait for disconnection.
disconnected_event.wait()
if self.close_request.is_set():
# Quit run loop.
self.closed.set()
return
self.connected.clear()
# Loop to try to reconnect to serial device.
def write(self, data, timeout_s=None):
'''
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
'''
self.connected.wait(timeout_s)
self.protocol.transport.write(data)
def request(self, response_queue, payload, timeout_s=None,
poll=POLL_QUEUES):
'''
Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
self.connected.wait(timeout_s)
return request(self, response_queue, payload, timeout_s=timeout_s,
poll=poll)
def close(self):
self.close_request.set()
# - - context manager, returns protocol
def __enter__(self):
"""\
Enter context handler. May raise RuntimeError in case the connection
could not be created.
"""
self.start()
# Wait for protocol to connect.
event = OrEvent(self.connected, self.closed)
event.wait(self.default_timeout_s)
return self
def __exit__(self, *args):
"""Leave context: close port"""
self.close()
self.closed.wait()
|
sci-bots/serial-device
|
serial_device/threaded.py
|
EventProtocol.connection_made
|
python
|
def connection_made(self, transport):
self.port = transport.serial.port
logger.debug('connection_made: `%s` `%s`', self.port, transport)
self.transport = transport
self.connected.set()
self.disconnected.clear()
|
Called when reader thread is started
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/threaded.py#L28-L34
| null |
class EventProtocol(serial.threaded.Protocol):
def __init__(self):
self.transport = None
self.connected = threading.Event()
self.disconnected = threading.Event()
self.port = None
def data_received(self, data):
"""Called with snippets received from the serial port"""
raise NotImplementedError
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.debug('Connection to port `%s` lost: %s', self.port,
exception)
else:
logger.debug('Connection to port `%s` closed', self.port)
self.connected.clear()
self.disconnected.set()
|
sci-bots/serial-device
|
serial_device/threaded.py
|
EventProtocol.connection_lost
|
python
|
def connection_lost(self, exception):
if isinstance(exception, Exception):
logger.debug('Connection to port `%s` lost: %s', self.port,
exception)
else:
logger.debug('Connection to port `%s` closed', self.port)
self.connected.clear()
self.disconnected.set()
|
\
Called when the serial port is closed or the reader loop terminated
otherwise.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/threaded.py#L40-L51
| null |
class EventProtocol(serial.threaded.Protocol):
def __init__(self):
self.transport = None
self.connected = threading.Event()
self.disconnected = threading.Event()
self.port = None
def connection_made(self, transport):
"""Called when reader thread is started"""
self.port = transport.serial.port
logger.debug('connection_made: `%s` `%s`', self.port, transport)
self.transport = transport
self.connected.set()
self.disconnected.clear()
def data_received(self, data):
"""Called with snippets received from the serial port"""
raise NotImplementedError
|
sci-bots/serial-device
|
serial_device/threaded.py
|
KeepAliveReader.write
|
python
|
def write(self, data, timeout_s=None):
'''
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
'''
self.connected.wait(timeout_s)
self.protocol.transport.write(data)
|
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/threaded.py#L167-L184
| null |
class KeepAliveReader(threading.Thread):
'''
Keep a serial connection alive (as much as possible).
Parameters
----------
state : dict
State dictionary to share ``protocol`` object reference.
comport : str
Name of com port to connect to.
default_timeout_s : float, optional
Default time to wait for serial operation (e.g., connect).
By default, block (i.e., no time out).
**kwargs
Keyword arguments passed to ``serial_for_url`` function, e.g.,
``baudrate``, etc.
'''
def __init__(self, protocol_class, comport, **kwargs):
super(KeepAliveReader, self).__init__()
self.daemon = True
self.protocol_class = protocol_class
self.comport = comport
self.kwargs = kwargs
self.protocol = None
self.default_timeout_s = kwargs.pop('default_timeout_s', None)
# Event to indicate serial connection has been established.
self.connected = threading.Event()
# Event to request a break from the run loop.
self.close_request = threading.Event()
# Event to indicate thread has been closed.
self.closed = threading.Event()
# Event to indicate an exception has occurred.
self.error = threading.Event()
# Event to indicate that the thread has connected to the specified port
# **at least once**.
self.has_connected = threading.Event()
@property
def alive(self):
return not self.closed.is_set()
def run(self):
# Verify requested serial port is available.
try:
if self.comport not in (serial_device
.comports(only_available=True).index):
raise NameError('Port `%s` not available. Available ports: '
'`%s`' % (self.comport,
', '.join(serial_device.comports()
.index)))
except NameError as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
while True:
# Wait for requested serial port to become available.
while self.comport not in (serial_device
.comports(only_available=True).index):
# Assume serial port was disconnected temporarily. Wait and
# periodically check again.
self.close_request.wait(2)
if self.close_request.is_set():
# No connection is open, so nothing to close. Just quit.
self.closed.set()
return
try:
# Try to open serial device and monitor connection status.
logger.debug('Open `%s` and monitor connection status',
self.comport)
device = serial.serial_for_url(self.comport, **self.kwargs)
except serial.SerialException as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
except Exception as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
else:
with serial.threaded.ReaderThread(device, self
.protocol_class) as protocol:
self.protocol = protocol
connected_event = OrEvent(protocol.connected,
self.close_request)
disconnected_event = OrEvent(protocol.disconnected,
self.close_request)
# Wait for connection.
connected_event.wait(None if self.has_connected.is_set()
else self.default_timeout_s)
if self.close_request.is_set():
# Quit run loop. Serial connection will be closed by
# `ReaderThread` context manager.
self.closed.set()
return
self.connected.set()
self.has_connected.set()
# Wait for disconnection.
disconnected_event.wait()
if self.close_request.is_set():
# Quit run loop.
self.closed.set()
return
self.connected.clear()
# Loop to try to reconnect to serial device.
def request(self, response_queue, payload, timeout_s=None,
poll=POLL_QUEUES):
'''
Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
self.connected.wait(timeout_s)
return request(self, response_queue, payload, timeout_s=timeout_s,
poll=poll)
def close(self):
self.close_request.set()
# - - context manager, returns protocol
def __enter__(self):
"""\
Enter context handler. May raise RuntimeError in case the connection
could not be created.
"""
self.start()
# Wait for protocol to connect.
event = OrEvent(self.connected, self.closed)
event.wait(self.default_timeout_s)
return self
def __exit__(self, *args):
"""Leave context: close port"""
self.close()
self.closed.wait()
|
sci-bots/serial-device
|
serial_device/threaded.py
|
KeepAliveReader.request
|
python
|
def request(self, response_queue, payload, timeout_s=None,
poll=POLL_QUEUES):
'''
Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
self.connected.wait(timeout_s)
return request(self, response_queue, payload, timeout_s=timeout_s,
poll=poll)
|
Send
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/threaded.py#L186-L213
|
[
"def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):\n '''\n Send payload to serial device and wait for response.\n\n Parameters\n ----------\n device : serial.Serial\n Serial instance.\n response_queue : Queue.Queue\n Queue to wait for response on.\n payload : str or bytes\n Payload to send.\n timeout_s : float, optional\n Maximum time to wait (in seconds) for response.\n\n By default, block until response is ready.\n poll : bool, optional\n If ``True``, poll response queue in a busy loop until response is\n ready (or timeout occurs).\n\n Polling is much more processor intensive, but (at least on Windows)\n results in faster response processing. On Windows, polling is\n enabled by default.\n '''\n device.write(payload)\n if poll:\n # Polling enabled. Wait for response in busy loop.\n start = dt.datetime.now()\n while not response_queue.qsize():\n if (dt.datetime.now() - start).total_seconds() > timeout_s:\n raise queue.Empty('No response received.')\n return response_queue.get()\n else:\n # Polling disabled. Use blocking `Queue.get()` method to wait for\n # response.\n return response_queue.get(timeout=timeout_s)\n"
] |
class KeepAliveReader(threading.Thread):
'''
Keep a serial connection alive (as much as possible).
Parameters
----------
state : dict
State dictionary to share ``protocol`` object reference.
comport : str
Name of com port to connect to.
default_timeout_s : float, optional
Default time to wait for serial operation (e.g., connect).
By default, block (i.e., no time out).
**kwargs
Keyword arguments passed to ``serial_for_url`` function, e.g.,
``baudrate``, etc.
'''
def __init__(self, protocol_class, comport, **kwargs):
super(KeepAliveReader, self).__init__()
self.daemon = True
self.protocol_class = protocol_class
self.comport = comport
self.kwargs = kwargs
self.protocol = None
self.default_timeout_s = kwargs.pop('default_timeout_s', None)
# Event to indicate serial connection has been established.
self.connected = threading.Event()
# Event to request a break from the run loop.
self.close_request = threading.Event()
# Event to indicate thread has been closed.
self.closed = threading.Event()
# Event to indicate an exception has occurred.
self.error = threading.Event()
# Event to indicate that the thread has connected to the specified port
# **at least once**.
self.has_connected = threading.Event()
@property
def alive(self):
return not self.closed.is_set()
def run(self):
# Verify requested serial port is available.
try:
if self.comport not in (serial_device
.comports(only_available=True).index):
raise NameError('Port `%s` not available. Available ports: '
'`%s`' % (self.comport,
', '.join(serial_device.comports()
.index)))
except NameError as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
while True:
# Wait for requested serial port to become available.
while self.comport not in (serial_device
.comports(only_available=True).index):
# Assume serial port was disconnected temporarily. Wait and
# periodically check again.
self.close_request.wait(2)
if self.close_request.is_set():
# No connection is open, so nothing to close. Just quit.
self.closed.set()
return
try:
# Try to open serial device and monitor connection status.
logger.debug('Open `%s` and monitor connection status',
self.comport)
device = serial.serial_for_url(self.comport, **self.kwargs)
except serial.SerialException as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
except Exception as exception:
self.error.exception = exception
self.error.set()
self.closed.set()
return
else:
with serial.threaded.ReaderThread(device, self
.protocol_class) as protocol:
self.protocol = protocol
connected_event = OrEvent(protocol.connected,
self.close_request)
disconnected_event = OrEvent(protocol.disconnected,
self.close_request)
# Wait for connection.
connected_event.wait(None if self.has_connected.is_set()
else self.default_timeout_s)
if self.close_request.is_set():
# Quit run loop. Serial connection will be closed by
# `ReaderThread` context manager.
self.closed.set()
return
self.connected.set()
self.has_connected.set()
# Wait for disconnection.
disconnected_event.wait()
if self.close_request.is_set():
# Quit run loop.
self.closed.set()
return
self.connected.clear()
# Loop to try to reconnect to serial device.
def write(self, data, timeout_s=None):
'''
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
'''
self.connected.wait(timeout_s)
self.protocol.transport.write(data)
def close(self):
self.close_request.set()
# - - context manager, returns protocol
def __enter__(self):
"""\
Enter context handler. May raise RuntimeError in case the connection
could not be created.
"""
self.start()
# Wait for protocol to connect.
event = OrEvent(self.connected, self.closed)
event.wait(self.default_timeout_s)
return self
def __exit__(self, *args):
"""Leave context: close port"""
self.close()
self.closed.wait()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager.on_connect
|
python
|
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
|
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L57-L106
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
# serial_device/<port>/close # Request to close connection
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager.on_message
|
python
|
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
|
Callback for when a ``PUBLISH`` message is received from the broker.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L108-L138
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
# serial_device/<port>/close # Request to close connection
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager._publish_status
|
python
|
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
|
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L142-L160
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
# serial_device/<port>/close # Request to close connection
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager._serial_close
|
python
|
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
|
Handle close request.
Parameters
----------
port : str
Device name/port.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L162-L180
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
# serial_device/<port>/close # Request to close connection
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager._serial_connect
|
python
|
def _serial_connect(self, port, request):
    '''
    Handle connection request: open the requested serial port and start a
    reader thread that forwards received bytes to the
    ``serial_device/<port>/received`` MQTT topic.

    Parameters
    ----------
    port : str
        Device name/port.
    request : dict
        Connection parameters:

        - ``baudrate`` (required): int, e.g. 9600 or 115200.
        - ``bytesize``: name of a :mod:`serial` constant
          (``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
          ``'EIGHTBITS'``).  Default: ``'EIGHTBITS'``.
        - ``parity``: name of a :mod:`serial` constant
          (``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
          ``'PARITY_MARK'``, ``'PARITY_SPACE'``).
          Default: ``'PARITY_NONE'``.
        - ``stopbits``: name of a :mod:`serial` constant
          (``'STOPBITS_ONE'``, ``'STOPBITS_ONE_POINT_FIVE'``,
          ``'STOPBITS_TWO'``).  Default: ``'STOPBITS_ONE'``.
        - ``xonxoff``, ``rtscts``, ``dsrdtr``: bool flow-control flags.
          Default: ``False``.
    '''
    command = 'connect'
    if port in self.open_devices:
        logger.debug('Already connected to: `%s`', port)
        self._publish_status(port)
        return
    # TODO Write JSON schema definition for valid connect request.
    if 'baudrate' not in request:
        logger.error('Invalid `%s` request: `baudrate` must be '
                     'specified.', command)
        return

    # Sentinel distinguishing "validation failed" from any real serial
    # constant value.
    _ERROR = object()

    def _lookup(key, supported, default):
        # Resolve a named serial constant (e.g. request['parity'] ==
        # 'PARITY_NONE' -> serial.PARITY_NONE), validating it against the
        # values supported on the current platform.  Returns `default`
        # when the key is absent and `_ERROR` (after logging) on failure.
        if key not in request:
            return default
        try:
            value = getattr(serial, request[key])
        except (AttributeError, TypeError):
            logger.error('`%s` request: invalid `%s`, `%s`', command, key,
                         request[key])
            return _ERROR
        if value not in supported:
            logger.error('`%s` request: `%s` `%s` not available on '
                         'current platform.', command, key, request[key])
            return _ERROR
        return value

    bytesize = _lookup('bytesize', serial.Serial.BYTESIZES,
                       serial.EIGHTBITS)
    if bytesize is _ERROR:
        return
    parity = _lookup('parity', serial.Serial.PARITIES, serial.PARITY_NONE)
    if parity is _ERROR:
        return
    stopbits = _lookup('stopbits', serial.Serial.STOPBITS,
                       serial.STOPBITS_ONE)
    if stopbits is _ERROR:
        return
    try:
        baudrate = int(request['baudrate'])
        xonxoff = bool(request.get('xonxoff'))
        rtscts = bool(request.get('rtscts'))
        dsrdtr = bool(request.get('dsrdtr'))
    except (TypeError, ValueError) as exception:
        # int() raises ValueError for non-numeric strings (e.g. "fast");
        # the previous code caught only TypeError, letting such requests
        # crash the handler.
        logger.error('`%s` request: %s', command, exception)
        return
    try:
        device = serial.serial_for_url(port, baudrate=baudrate,
                                       bytesize=bytesize, parity=parity,
                                       stopbits=stopbits, xonxoff=xonxoff,
                                       rtscts=rtscts, dsrdtr=dsrdtr)
        parent = self

        class PassThroughProtocol(serial.threaded.Protocol):
            PORT = port

            def connection_made(self, transport):
                """Called when reader thread is started"""
                parent.open_devices[port] = transport
                parent._publish_status(self.PORT)

            def data_received(self, data):
                """Called with snippets received from the serial port"""
                parent.mqtt_client.publish(topic='serial_device/%s/received'
                                           % self.PORT, payload=data)

            def connection_lost(self, exception):
                """\
                Called when the serial port is closed or the reader loop terminated
                otherwise.
                """
                if isinstance(exception, Exception):
                    logger.error('Connection to port `%s` lost: %s',
                                 self.PORT, exception)
                del parent.open_devices[self.PORT]
                parent._publish_status(self.PORT)

        reader_thread = serial.threaded.ReaderThread(device,
                                                     PassThroughProtocol)
        reader_thread.start()
        reader_thread.connect()
    except Exception as exception:
        logger.error('`%s` request: %s', command, exception)
        return
|
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L182-L324
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
# serial_device/<port>/close # Request to close connection
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
def _serial_send(self, port, payload):
'''
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
'''
if port not in self.open_devices:
# Not connected to device.
logger.error('Error sending data: `%s` not connected', port)
self._publish_status(port)
else:
try:
device = self.open_devices[port]
device.write(payload)
logger.debug('Sent data to `%s`', port)
except Exception as exception:
logger.error('Error sending data to `%s`: %s', port, exception)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
sci-bots/serial-device
|
serial_device/mqtt.py
|
SerialDeviceManager._serial_send
|
python
|
def _serial_send(self, port, payload):
    '''
    Send data to connected device.

    Parameters
    ----------
    port : str
        Device name/port.
    payload : bytes
        Payload to send to device.
    '''
    # Guard clause: nothing to write to if the port was never opened.
    if port not in self.open_devices:
        logger.error('Error sending data: `%s` not connected', port)
        self._publish_status(port)
        return
    try:
        transport = self.open_devices[port]
        transport.write(payload)
        logger.debug('Sent data to `%s`', port)
    except Exception as exception:
        logger.error('Error sending data to `%s`: %s', port, exception)
|
Send data to connected device.
Parameters
----------
port : str
Device name/port.
payload : bytes
Payload to send to device.
|
train
|
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L326-L347
| null |
class SerialDeviceManager(pmh.BaseMqttReactor):
def __init__(self, *args, **kwargs):
super(SerialDeviceManager, self).__init__(*args, **kwargs)
# Open devices.
self.open_devices = {}
def refresh_comports(self):
# Query list of available serial ports
comports = _comports().T.to_dict()
comports_json = json.dumps(comports)
# Publish list of available serial communication ports.
self.mqtt_client.publish('serial_device/comports',
payload=comports_json, retain=True)
# Publish current status of each port.
for port_i in comports:
self._publish_status(port_i)
###########################################################################
# MQTT client handlers
# ====================
def on_connect(self, client, userdata, flags, rc):
'''
Callback for when the client receives a ``CONNACK`` response from the
broker.
Parameters
----------
client : paho.mqtt.client.Client
The client instance for this callback.
userdata : object
The private user data as set in :class:`paho.mqtt.client.Client`
constructor or :func:`paho.mqtt.client.Client.userdata_set`.
flags : dict
Response flags sent by the broker.
The flag ``flags['session present']`` is useful for clients that
are using clean session set to 0 only.
If a client with clean session=0, that reconnects to a broker that
it has previously connected to, this flag indicates whether the
broker still has the session information for the client.
If 1, the session still exists.
rc : int
The connection result.
The value of rc indicates success or not:
- 0: Connection successful
- 1: Connection refused - incorrect protocol version
- 2: Connection refused - invalid client identifier
- 3: Connection refused - server unavailable
- 4: Connection refused - bad username or password
- 5: Connection refused - not authorised
- 6-255: Currently unused.
Notes
-----
Subscriptions should be defined in this method to ensure subscriptions
will be renewed upon reconnecting after a loss of connection.
'''
super(SerialDeviceManager, self).on_connect(client, userdata, flags, rc)
if rc == 0:
self.mqtt_client.subscribe('serial_device/+/connect')
self.mqtt_client.subscribe('serial_device/+/send')
self.mqtt_client.subscribe('serial_device/+/close')
self.mqtt_client.subscribe('serial_device/refresh_comports')
self.refresh_comports()
def on_message(self, client, userdata, msg):
'''
Callback for when a ``PUBLISH`` message is received from the broker.
'''
if msg.topic == 'serial_device/refresh_comports':
self.refresh_comports()
return
match = CRE_MANAGER.match(msg.topic)
if match is None:
logger.debug('Topic NOT matched: `%s`', msg.topic)
else:
logger.debug('Topic matched: `%s`', msg.topic)
# Message topic matches command. Handle request.
command = match.group('command')
port = match.group('port')
# serial_device/<port>/send # Bytes to send
if command == 'send':
self._serial_send(port, msg.payload)
elif command == 'connect':
# serial_device/<port>/connect # Request connection
try:
request = json.loads(msg.payload)
except ValueError as exception:
logger.error('Error decoding "%s (%s)" request: %s',
command, port, exception)
return
self._serial_connect(port, request)
elif command == 'close':
self._serial_close(port)
# serial_device/<port>/close # Request to close connection
def _publish_status(self, port):
'''
Publish status for specified port.
Parameters
----------
port : str
Device name/port.
'''
if port not in self.open_devices:
status = {}
else:
device = self.open_devices[port].serial
properties = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits',
'timeout', 'xonxoff', 'rtscts', 'dsrdtr')
status = {k: getattr(device, k) for k in properties}
status_json = json.dumps(status)
self.mqtt_client.publish(topic='serial_device/%s/status' % port,
payload=status_json, retain=True)
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return
def _serial_connect(self, port, request):
'''
Handle connection request.
Parameters
----------
port : str
Device name/port.
request : dict
'''
# baudrate : int
# Baud rate such as 9600 or 115200 etc.
# bytesize : str, optional
# Number of data bits.
#
# Possible values: ``'FIVEBITS'``, ``'SIXBITS'``, ``'SEVENBITS'``,
# ``'EIGHTBITS'``.
#
# Default: ``'EIGHTBITS'``
# parity : str, optional
# Enable parity checking.
#
# Possible values: ``'PARITY_NONE'``, ``'PARITY_EVEN'``, ``'PARITY_ODD'``,
# ``'PARITY_MARK'``, ``'PARITY_SPACE'``.
#
# Default: ``'PARITY_NONE'``
# stopbits : str, optional
# Number of stop bits.
#
# Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO
# xonxoff : bool, optional
# Enable software flow control.
#
# Default: ``False``
# rtscts : bool, optional
# Enable hardware (RTS/CTS) flow control.
#
# Default: ``False``
# dsrdtr : bool, optional
# Enable hardware (DSR/DTR) flow control.
#
# Default: ``False``
command = 'connect'
if port in self.open_devices:
logger.debug('Already connected to: `%s`', port)
self._publish_status(port)
return
# TODO Write JSON schema definition for valid connect request.
if 'baudrate' not in request:
logger.error('Invalid `%s` request: `baudrate` must be '
'specified.', command)
return
if 'bytesize' in request:
try:
bytesize = getattr(serial, request['bytesize'])
if not bytesize in serial.Serial.BYTESIZES:
logger.error('`%s` request: `bytesize` `%s` not '
'available on current platform.', command,
request['bytesize'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `bytesize`, `%s`', command,
request['bytesize'])
return
else:
bytesize = serial.EIGHTBITS
if 'parity' in request:
try:
parity = getattr(serial, request['parity'])
if not parity in serial.Serial.PARITIES:
logger.error('`%s` request: `parity` `%s` not available '
'on current platform.', command,
request['parity'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `parity`, `%s`', command,
request['parity'])
return
else:
parity = serial.PARITY_NONE
if 'stopbits' in request:
try:
stopbits = getattr(serial, request['stopbits'])
if not stopbits in serial.Serial.STOPBITS:
logger.error('`%s` request: `stopbits` `%s` not '
'available on current platform.', command,
request['stopbits'])
return
except AttributeError as exception:
logger.error('`%s` request: invalid `stopbits`, `%s`', command,
request['stopbits'])
return
else:
stopbits = serial.STOPBITS_ONE
try:
baudrate = int(request['baudrate'])
xonxoff = bool(request.get('xonxoff'))
rtscts = bool(request.get('rtscts'))
dsrdtr = bool(request.get('dsrdtr'))
except TypeError as exception:
logger.error('`%s` request: %s', command, exception)
return
try:
device = serial.serial_for_url(port, baudrate=baudrate,
bytesize=bytesize, parity=parity,
stopbits=stopbits, xonxoff=xonxoff,
rtscts=rtscts, dsrdtr=dsrdtr)
parent = self
class PassThroughProtocol(serial.threaded.Protocol):
PORT = port
def connection_made(self, transport):
"""Called when reader thread is started"""
parent.open_devices[port] = transport
parent._publish_status(self.PORT)
def data_received(self, data):
"""Called with snippets received from the serial port"""
parent.mqtt_client.publish(topic='serial_device/%s/received'
% self.PORT, payload=data)
def connection_lost(self, exception):
"""\
Called when the serial port is closed or the reader loop terminated
otherwise.
"""
if isinstance(exception, Exception):
logger.error('Connection to port `%s` lost: %s',
self.PORT, exception)
del parent.open_devices[self.PORT]
parent._publish_status(self.PORT)
reader_thread = serial.threaded.ReaderThread(device,
PassThroughProtocol)
reader_thread.start()
reader_thread.connect()
except Exception as exception:
logger.error('`%s` request: %s', command, exception)
return
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info('Shutting down, closing all open ports.')
for port_i in list(self.open_devices.keys()):
self._serial_close(port_i)
super(SerialDeviceManager, self).stop()
|
matthewgilbert/mapping
|
mapping/plot.py
|
plot_composition
|
python
|
def plot_composition(df, intervals, axes=None):
    """
    Plot each generic time series in *df* on its own axis, shading and
    labelling the date intervals during which each underlying contract is
    in use.

    Parameters
    ----------
    df : pd.DataFrame
        Time series to plot; each column is one generic.
    intervals : pd.DataFrame
        Contract usage info with columns "generic", "start_date",
        "end_date" and "contract".
    axes : list of matplotlib.axes.Axes, optional
        Axes to draw on; must have the same length as ``df.columns``.
        When None, a new single column of subplots is created, one per
        generic.

    Returns
    -------
    The list (or array) of axes drawn on.

    Raises
    ------
    ValueError
        If ``axes`` is given but its length differs from
        ``len(df.columns)``.
    """
    generics = df.columns
    if (axes is not None) and (len(axes) != len(generics)):
        raise ValueError("If 'axes' is not None then it must be the same "
                         "length as 'df.columns'")
    if axes is None:
        _, axes = plt.subplots(nrows=len(generics), ncols=1)
        # plt.subplots returns a bare Axes (not a sequence) when nrows == 1
        if len(generics) == 1:
            axes = [axes]
    for ax, generic in zip(axes, generics):
        ax.plot(df.loc[:, generic], label=generic)
        # no legend line to avoid clutter
        ax.legend(loc='center right', handlelength=0)
        # restrict interval info to the generic plotted on this axis
        dates = intervals.loc[intervals.loc[:, "generic"] == generic,
                              ["start_date", "end_date", "contract"]]
        date_ticks = set(
            dates.loc[:, "start_date"].tolist() +
            dates.loc[:, "end_date"].tolist()
        )
        # tick positions are ordinal day numbers, labelled as ISO dates
        xticks = [ts.toordinal() for ts in date_ticks]
        xlabels = [ts.strftime("%Y-%m-%d") for ts in date_ticks]
        ax.set_xticks(xticks)
        ax.set_xticklabels(xlabels)
        y_top = ax.get_ylim()[1]
        count = 0
        # label and colour each underlying, alternating colours so that
        # overlapping (roll) periods stay distinguishable
        for _, dt1, dt2, instr in dates.itertuples():
            if count % 2:
                fc = "b"
            else:
                fc = "r"
            count += 1
            ax.axvspan(dt1, dt2, facecolor=fc, alpha=0.2)
            x_mid = dt1 + (dt2 - dt1) / 2
            ax.text(x_mid, y_top, instr, rotation=45)
    return axes
|
Plot time series of generics and label underlying instruments which
these series are composed of.
Parameters:
-----------
df: pd.DataFrame
DataFrame of time series to be plotted. Each column is a generic time
series.
intervals: pd.DataFrame
A DataFrame including information for when each contract is used in the
generic series.
Columns are ['contract', 'generic', 'start_date', 'end_date']
axes: list
List of matplotlib.axes.Axes
Example
-------
>>> import mapping.plot as mplot
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> idx = pd.date_range("2017-01-01", "2017-01-15")
>>> rets_data = pd.np.random.randn(len(idx))
>>> rets = pd.DataFrame({"CL1": rets_data, "CL2": rets_data}, index=idx)
>>> intervals = pd.DataFrame(
... [(TS("2017-01-01"), TS("2017-01-05"), "2017_CL_F", "CL1"),
... (TS("2017-01-05"), TS("2017-01-15"), "2017_CL_G", "CL1"),
... (TS("2017-01-01"), TS("2017-01-12"), "2017_CL_G", "CL2"),
... (TS("2017-01-10"), TS("2017-01-15"), "2017_CL_H", "CL2")],
... columns=["start_date", "end_date", "contract", "generic"])
>>> mplot.plot_composition(rets, intervals)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/plot.py#L5-L76
| null |
import matplotlib.pyplot as plt
import pandas as pd
def intervals(weights):
"""
Extract intervals where generics are composed of different tradeable
instruments.
Parameters
----------
weights: DataFrame or dict
A DataFrame or dictionary of DataFrames with columns representing
generics and a MultiIndex of date and contract. Values represent
weights on tradeables for each generic.
Returns
-------
A DataFrame with [columns]
['contract', 'generic', 'start_date', 'end_date']
"""
intrvls = []
if isinstance(weights, dict):
for root in weights:
wts = weights[root]
intrvls.append(_intervals(wts))
intrvls = pd.concat(intrvls, axis=0)
else:
intrvls = _intervals(weights)
intrvls = intrvls.reset_index(drop=True)
return intrvls
def _intervals(weights):
# since weights denote weightings for returns, not holdings. To determine
# the previous day we look at the index since lagging would depend on the
# calendar. As a kludge we omit the first date since impossible to
# know
dates = weights.index.get_level_values(0)
date_lookup = dict(zip(dates[1:], dates[:-1]))
weights = weights.stack()
weights.index.names = ["date", "contract", "generic"]
weights.name = "weight"
weights = weights.reset_index()
grps = (weights.loc[weights.weight != 0, :].drop("weight", axis=1)
.groupby(["contract", "generic"]))
intrvls = pd.concat([
grps.min().rename({"date": "start_date"}, axis=1),
grps.max().rename({"date": "end_date"}, axis=1)],
axis=1)
intrvls = intrvls.reset_index().sort_values(["generic", "start_date"])
# start date should be the previous trading day since returns are from
# t-1 to t therefore position established at time t-1
intrvls.loc[:, "start_date"] = (
intrvls.loc[:, "start_date"].apply(lambda x: date_lookup.get(x, x))
)
intrvls = intrvls.loc[:, ['contract', 'generic', 'start_date', 'end_date']]
return intrvls
|
matthewgilbert/mapping
|
mapping/plot.py
|
intervals
|
python
|
def intervals(weights):
    """
    Extract intervals where generics are composed of different tradeable
    instruments.

    Parameters
    ----------
    weights : DataFrame or dict
        A DataFrame (or dict of DataFrames keyed by root generic, e.g.
        'CL') with columns representing generics and a MultiIndex of date
        and contract; values are weights on tradeables for each generic.

    Returns
    -------
    A DataFrame with columns
    ['contract', 'generic', 'start_date', 'end_date'].
    """
    if isinstance(weights, dict):
        # One interval table per root generic, stacked together.
        pieces = [_intervals(weights[root]) for root in weights]
        result = pd.concat(pieces, axis=0)
    else:
        result = _intervals(weights)
    return result.reset_index(drop=True)
|
Extract intervals where generics are composed of different tradeable
instruments.
Parameters
----------
weights: DataFrame or dict
A DataFrame or dictionary of DataFrames with columns representing
generics and a MultiIndex of date and contract. Values represent
weights on tradeables for each generic.
Returns
-------
A DataFrame with [columns]
['contract', 'generic', 'start_date', 'end_date']
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/plot.py#L79-L106
|
[
"def _intervals(weights):\n # since weights denote weightings for returns, not holdings. To determine\n # the previous day we look at the index since lagging would depend on the\n # calendar. As a kludge we omit the first date since impossible to\n # know\n dates = weights.index.get_level_values(0)\n date_lookup = dict(zip(dates[1:], dates[:-1]))\n\n weights = weights.stack()\n weights.index.names = [\"date\", \"contract\", \"generic\"]\n weights.name = \"weight\"\n weights = weights.reset_index()\n grps = (weights.loc[weights.weight != 0, :].drop(\"weight\", axis=1)\n .groupby([\"contract\", \"generic\"]))\n\n intrvls = pd.concat([\n grps.min().rename({\"date\": \"start_date\"}, axis=1),\n grps.max().rename({\"date\": \"end_date\"}, axis=1)],\n axis=1)\n\n intrvls = intrvls.reset_index().sort_values([\"generic\", \"start_date\"])\n # start date should be the previous trading day since returns are from\n # t-1 to t therefore position established at time t-1\n intrvls.loc[:, \"start_date\"] = (\n intrvls.loc[:, \"start_date\"].apply(lambda x: date_lookup.get(x, x))\n )\n intrvls = intrvls.loc[:, ['contract', 'generic', 'start_date', 'end_date']]\n return intrvls\n"
] |
import matplotlib.pyplot as plt
import pandas as pd
def plot_composition(df, intervals, axes=None):
"""
Plot time series of generics and label underlying instruments which
these series are composed of.
Parameters:
-----------
df: pd.DataFrame
DataFrame of time series to be plotted. Each column is a generic time
series.
intervals: pd.DataFrame
A DataFrame including information for when each contract is used in the
generic series.
Columns are ['contract', 'generic', 'start_date', 'end_date']
axes: list
List of matplotlib.axes.Axes
Example
-------
>>> import mapping.plot as mplot
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> idx = pd.date_range("2017-01-01", "2017-01-15")
>>> rets_data = pd.np.random.randn(len(idx))
>>> rets = pd.DataFrame({"CL1": rets_data, "CL2": rets_data}, index=idx)
>>> intervals = pd.DataFrame(
... [(TS("2017-01-01"), TS("2017-01-05"), "2017_CL_F", "CL1"),
... (TS("2017-01-05"), TS("2017-01-15"), "2017_CL_G", "CL1"),
... (TS("2017-01-01"), TS("2017-01-12"), "2017_CL_G", "CL2"),
... (TS("2017-01-10"), TS("2017-01-15"), "2017_CL_H", "CL2")],
... columns=["start_date", "end_date", "contract", "generic"])
>>> mplot.plot_composition(rets, intervals)
"""
generics = df.columns
if (axes is not None) and (len(axes) != len(generics)):
raise ValueError("If 'axes' is not None then it must be the same "
"length as 'df.columns'")
if axes is None:
_, axes = plt.subplots(nrows=len(generics), ncols=1)
if len(generics) == 1:
axes = [axes]
for ax, generic in zip(axes, generics):
ax.plot(df.loc[:, generic], label=generic)
# no legend line to avoid clutter
ax.legend(loc='center right', handlelength=0)
dates = intervals.loc[intervals.loc[:, "generic"] == generic,
["start_date", "end_date", "contract"]]
date_ticks = set(
dates.loc[:, "start_date"].tolist() +
dates.loc[:, "end_date"].tolist()
)
xticks = [ts.toordinal() for ts in date_ticks]
xlabels = [ts.strftime("%Y-%m-%d") for ts in date_ticks]
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
y_top = ax.get_ylim()[1]
count = 0
# label and colour each underlying
for _, dt1, dt2, instr in dates.itertuples():
if count % 2:
fc = "b"
else:
fc = "r"
count += 1
ax.axvspan(dt1, dt2, facecolor=fc, alpha=0.2)
x_mid = dt1 + (dt2 - dt1) / 2
ax.text(x_mid, y_top, instr, rotation=45)
return axes
def _intervals(weights):
    """
    Compute, for one weights DataFrame, the first and last dates on which
    each (contract, generic) pair carries a non-zero weight.

    Parameters
    ----------
    weights : pd.DataFrame
        Instrument weights with a (date, contract) MultiIndex and one
        column per generic.

    Returns
    -------
    pd.DataFrame with columns ['contract', 'generic', 'start_date',
    'end_date'], sorted by generic then start_date.
    """
    # since weights denote weightings for returns, not holdings. To determine
    # the previous day we look at the index since lagging would depend on the
    # calendar. As a kludge we omit the first date since impossible to
    # know
    dates = weights.index.get_level_values(0)
    date_lookup = dict(zip(dates[1:], dates[:-1]))
    # Long format: one row per (date, contract, generic) weight.
    weights = weights.stack()
    weights.index.names = ["date", "contract", "generic"]
    weights.name = "weight"
    weights = weights.reset_index()
    # Group the non-zero rows so min/max of "date" give each pair's span.
    grps = (weights.loc[weights.weight != 0, :].drop("weight", axis=1)
            .groupby(["contract", "generic"]))
    intrvls = pd.concat([
        grps.min().rename({"date": "start_date"}, axis=1),
        grps.max().rename({"date": "end_date"}, axis=1)],
        axis=1)
    intrvls = intrvls.reset_index().sort_values(["generic", "start_date"])
    # start date should be the previous trading day since returns are from
    # t-1 to t therefore position established at time t-1
    intrvls.loc[:, "start_date"] = (
        intrvls.loc[:, "start_date"].apply(lambda x: date_lookup.get(x, x))
    )
    intrvls = intrvls.loc[:, ['contract', 'generic', 'start_date', 'end_date']]
    return intrvls
|
matthewgilbert/mapping
|
mapping/util.py
|
read_price_data
|
python
|
def read_price_data(files, name_func=None):
    """
    Convenience function for reading in pricing data from csv files.

    Parameters
    ----------
    files: list
        List of strings refering to csv files to read data in from, first
        column should be dates.
    name_func: func
        A function to apply to the file strings to infer the instrument
        name, used in the second level of the MultiIndex index. Default is
        the file name excluding the pathname and file ending,
        e.g. /path/to/file/name.csv -> name

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex where the top level is
    pandas.Timestamps and the second level is instrument names. Columns
    are given by the csv file columns.

    Raises
    ------
    ValueError
        If ``files`` is empty.
    """
    if not files:
        # Fail fast with a clear message instead of letting pd.concat
        # raise its generic "No objects to concatenate" error.
        raise ValueError("files must contain at least one file to read")
    if name_func is None:
        # Default: strip directory and extension, "/path/name.csv" -> "name".
        def name_func(x):
            return os.path.split(x)[1].split(".")[0]
    dfs = []
    for f in files:
        name = name_func(f)
        df = pd.read_csv(f, index_col=0, parse_dates=True)
        df.sort_index(inplace=True)
        # Tag every row with the instrument name as a second index level.
        df.index = pd.MultiIndex.from_product([df.index, [name]],
                                              names=["date", "contract"])
        dfs.append(df)
    return pd.concat(dfs, axis=0, sort=False).sort_index()
|
Convenience function for reading in pricing data from csv files
Parameters
----------
files: list
List of strings refering to csv files to read data in from, first
column should be dates
name_func: func
A function to apply to the file strings to infer the instrument name,
used in the second level of the MultiIndex index. Default is the file
name excluding the pathname and file ending,
e.g. /path/to/file/name.csv -> name
Returns
-------
A pandas.DataFrame with a pandas.MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Columns are
given by the csv file columns.
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L6-L40
|
[
"def name_func(fstr):\n file_name = os.path.split(fstr)[-1]\n name = file_name.split('-')[1].split('.')[0]\n return name[-4:] + name[:3]\n",
"def name_func(x):\n return os.path.split(x)[1].split(\".\")[0]\n"
] |
import pandas as pd
import numpy as np
import os
def flatten(weights):
    """
    Flatten weights into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is
        instrument names. The columns consist of generic names. If dict
        is given this should be a dict of pandas.DataFrame in the above
        format, with keys for different root generics, e.g. 'CL'.

    Returns
    -------
    A long DataFrame of weights, where columns are "date", "contract",
    "generic" and "weight". If a dictionary is passed, the DataFrame
    contains an additional column "key" holding the key value, and rows
    are ordered by that key.

    Raises
    ------
    ValueError
        If ``weights`` is neither a DataFrame nor a dict.
    """
    def _flatten_one(frame):
        # Long format: one row per (date, contract, generic) weight.
        flat = frame.stack().reset_index()
        flat.columns = ["date", "contract", "generic", "weight"]
        return flat

    if isinstance(weights, pd.DataFrame):
        return _flatten_one(weights)
    if isinstance(weights, dict):
        pieces = []
        # Sort keys so the output row order is deterministic.
        for key in sorted(weights):
            piece = _flatten_one(weights[key])
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def unflatten(flat_weights):
    """
    Pivot weights from a long DataFrame into a weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If a "key" column is
        present a dictionary of unflattened DataFrames is returned, with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing the rows for that key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If a dict is
    returned the dict keys correspond to the "key" column of the input.

    See also: calc_rets()
    """
    # NOTE: Index.contains() was removed from pandas (deprecated in 0.25);
    # the `in` operator is the supported, behaviorally identical membership
    # test on the column index.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # drop the redundant "weight" level so columns are generic names
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, whose weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level
        is pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all Timestamps and instruments provided in weights.
        If dict is given this should be a dict of pandas.Series in the above
        format, with keys which are a subset of the keys given in weights.
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names. The columns consist of generic names. If dict is given this
        should be a dict of pandas.DataFrame in the above format, with keys
        for different root generics, e.g. 'CL'.

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns are generic names, corresponding to
    weights.columns.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """  # NOQA
    # Normalize the non-dict calling convention to a single anonymous root.
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # Generic column names must be globally unique across roots because they
    # become the column keys of the concatenated result below.
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time: sum the weighted instrument returns per date,
            # propagating NaNs (skipna=False) rather than treating them as 0
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        # one column label per series appended above (one per generic)
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that returns cover the weights for every root generic.

    Both arguments are dicts keyed by root generic; the single key "" denotes
    the non-dict calling convention of calc_rets(). Raises ValueError or
    KeyError with a message phrased for the calling convention in use.
    """
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    if list(returns.keys()) == [""]:
        # anonymous root: word the errors without mentioning dict keys
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices such that when instrument level returns are
    calculated they are compatible with a pd.MultiIndex of instrument weights
    in calc_rets(). This amounts to reindexing the series by an augmented
    version of index which includes the preceding date for the first
    appearance of each instrument. Missing values are filled forward with the
    previous price up to some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward; 0 disables forward filling.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    Raises
    ------
    ValueError
        If index is not unique, or prices has no date preceding the first
        date in index.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    # sort_values() returns a new MultiIndex, so the caller's object is not
    # mutated by the names assignment below
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # dates with prices strictly before the first index date; the last of
    # these seeds returns for instruments present on the very first day
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    # map each instrument's first index date to the preceding calendar date
    # so a return can be computed on that first day
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # fillna(method="ffill") is deprecated in modern pandas; ffill() is
        # the behaviorally identical replacement
        prices = prices.groupby(level=1).ffill(limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional
    holdings based on prevailing prices and a mapping from generics to
    tradeable instruments. Differences between current holdings and desired
    holdings are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics.
        Index is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index.
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        prices. Extra instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and values
        are the multiplier associated with the contract. multipliers.index
        should be a superset of the mapped desired_holdings instruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts().

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.

    Raises
    ------
    ValueError
        If desired_holdings.index contains generics absent from the union of
        trade_weights columns.
    """
    # Normalize the non-dict calling convention to a single anonymous root.
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # only the generics the caller actually wants to hold for this root
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # desired notional per instrument = sum over generics of
        # (generic notional * instrument loading)
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    # instruments absent on either side are treated as 0 holdings
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Convert numbers of contracts of tradeable instruments into notional
    values, optionally expressed in a desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Instrument holdings; index is instrument name, values are numbers of
        contracts.
    prices: pandas.Series
        Instrument prices; index is instrument name. Should cover
        instruments.index, otherwise NaN is returned for unpriced
        instruments.
    multipliers: pandas.Series
        Contract multipliers; index is instrument name. Should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None, no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination per instrument; index should match
        prices.index, values are three letter currency codes.
    fx_rates: pandas.Series
        Exchange rates used for conversion; index is FX pair strings such as
        'AUDUSD' or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional amounts indexed by instrument name.
    """
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional amounts of tradeable instruments into integer numbers of
    contracts, rounding to the nearest contract.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amounts; index is instrument name.
    prices: pandas.Series
        Instrument prices; index is instrument name and should cover
        instruments.index.
    multipliers: pandas.Series
        Contract multipliers; index is instrument name and should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None, no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination per instrument; index should match
        prices.index.
    fx_rates: pandas.Series
        Exchange rates used for conversion; index is FX pair strings such as
        'AUDUSD' or 'USDCAD'.
    rounder: function
        Callable used to round the pd.Series of raw contract counts; defaults
        to pd.Series.round.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    raw = _instr_conv(instruments, prices, multipliers, False,
                      desired_ccy, instr_fx, fx_rates)
    round_func = pd.Series.round if rounder is None else rounder
    return round_func(raw).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
                instr_fx, fx_rates):
    """Shared engine for to_notional()/to_contracts().

    When ``to_notional`` is True, converts contract counts to notional
    amounts; otherwise converts notionals to (unrounded) contract counts.
    When ``desired_ccy`` is truthy, prices are first converted into that
    currency via instr_fx/fx_rates.
    """
    if not instruments.index.is_unique:
        raise ValueError("'instruments' must have unique index")
    if not prices.index.is_unique:
        raise ValueError("'prices' must have unique index")
    if not multipliers.index.is_unique:
        raise ValueError("'multipliers' must have unique index")
    if desired_ccy:
        if not instr_fx.index.is_unique:
            raise ValueError("'instr_fx' must have unique index")
        if not fx_rates.index.is_unique:
            raise ValueError("'fx_rates' must have unique index")
        # align prices to the instruments we have fx denominations for
        prices = prices.loc[instr_fx.index]
        conv_rate = []
        for ccy in instr_fx.values:
            conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
        fx_adj_prices = prices * np.array(conv_rate)
    else:
        fx_adj_prices = prices
    if to_notional:
        amounts = instruments * fx_adj_prices * multipliers
    else:
        amounts = (instruments / fx_adj_prices) / multipliers
    # restrict the (index-aligned) result back to the requested instruments
    amounts = amounts.loc[instruments.index]
    return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multipliers based on generic asset
    multipliers and a weights mapping from generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns are integers referring
        to generic number indexed from 0, e.g. [0, 1], and the index is
        strings representing instrument names. If dict is given keys should
        be generic instrument names, e.g. 'CL', and values should be
        pandas.DataFrames of loadings. The union of all indexes should be a
        superset of the instruments.index.
    root_generic_multiplier: pandas.Series
        Series of multipliers for generic instruments lexicographically
        sorted. If a dictionary of weights is given,
        root_generic_multiplier.index should correspond to the weights keys.

    Returns
    -------
    A pandas.Series of multipliers for tradeable instruments, sorted by
    instrument name.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.iteritems() was removed in pandas 2.0; items() is the
    # behaviorally identical replacement.
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every instrument of a root shares that root's multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.

    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names. The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values.

    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.

    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """  # NOQA
    cols = weights.columns
    # move the instrument level into a column so it can be mapped to expiries;
    # the remaining index level is the date
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # days from each row's date (the index) to its contract's expiry
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    weights = weights.loc[:, cols]
    # weight each row's day count, then aggregate per date
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
flatten
|
python
|
def flatten(weights):
"""
Flatten weights into a long DataFrame.
Parameters
----------
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A long DataFrame of weights, where columns are "date", "contract",
"generic" and "weight". If a dictionary is passed, DataFrame will contain
additional column "key" containing the key value and be sorted according to
this key value.
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLH5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
>>> util.flatten(weights)
""" # NOQA
if isinstance(weights, pd.DataFrame):
wts = weights.stack().reset_index()
wts.columns = ["date", "contract", "generic", "weight"]
elif isinstance(weights, dict):
wts = []
for key in sorted(weights.keys()):
wt = weights[key].stack().reset_index()
wt.columns = ["date", "contract", "generic", "weight"]
wt.loc[:, "key"] = key
wts.append(wt)
wts = pd.concat(wts, axis=0).reset_index(drop=True)
else:
raise ValueError("weights must be pd.DataFrame or dict")
return wts
|
Flatten weights into a long DataFrame.
Parameters
----------
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas. Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A long DataFrame of weights, where columns are "date", "contract",
"generic" and "weight". If a dictionary is passed, DataFrame will contain
additional colum "key" containing the key value and be sorted according to
this key value.
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLH5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
>>> util.flatten(weights)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L43-L89
| null |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Convenience function for reading pricing data from csv files.

    Parameters
    ----------
    files: list
        List of strings referring to csv files to read data in from; the
        first column should contain dates.
    name_func: func
        A function applied to each file string to infer the instrument name
        used in the second level of the MultiIndex. Defaults to the file name
        without its path or extension, e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex where the top level is
    pandas.Timestamps and the second level is instrument names. Columns are
    given by the csv file columns.
    """
    if name_func is None:
        def name_func(x):
            return os.path.split(x)[1].split(".")[0]

    def _load(fstr):
        frame = pd.read_csv(fstr, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [name_func(fstr)]], names=["date", "contract"])
        return frame

    frames = [_load(fstr) for fstr in files]
    return pd.concat(frames, axis=0, sort=False).sort_index()
def unflatten(flat_weights):
    """
    Pivot weights from a long DataFrame into a weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If a "key" column is
        present a dictionary of unflattened DataFrames is returned, with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing the rows for that key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If a dict is
    returned the dict keys correspond to the "key" column of the input.

    See also: calc_rets()
    """
    # NOTE: Index.contains() was removed from pandas (deprecated in 0.25);
    # the `in` operator is the supported, behaviorally identical membership
    # test on the column index.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # drop the redundant "weight" level so columns are generic names
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, whose weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level
        is pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all Timestamps and instruments provided in weights.
        If dict is given this should be a dict of pandas.Series in the above
        format, with keys which are a subset of the keys given in weights.
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names. The columns consist of generic names. If dict is given this
        should be a dict of pandas.DataFrame in the above format, with keys
        for different root generics, e.g. 'CL'.

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns are generic names, corresponding to
    weights.columns.
    """
    # Normalize the non-dict calling convention to a single anonymous root.
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # Generic column names must be globally unique across roots because they
    # become the column keys of the concatenated result below.
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time: sum the weighted instrument returns per date,
            # propagating NaNs (skipna=False) rather than treating them as 0
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        # one column label per series appended above (one per generic)
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that returns cover the weights for every root generic.

    Both arguments are dicts keyed by root generic; the single key "" denotes
    the non-dict calling convention of calc_rets(). Raises ValueError or
    KeyError with a message phrased for the calling convention in use.
    """
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    if list(returns.keys()) == [""]:
        # anonymous root: word the errors without mentioning dict keys
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices so that instrument level returns computed
    from it are compatible with a pd.MultiIndex of instrument weights in
    calc_rets(). This amounts to reindexing the series by an augmented
    version of index which also includes, for each instrument, the date
    immediately preceding its first appearance. Missing values are filled
    forward with the previous price up to some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level
        is pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward; 0 disables filling.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dates = prices.sort_index().index.unique(level=0)
    target_dates = index.unique(level=0)
    # dates with prices strictly before the first target date
    leading_dates = price_dates[price_dates < target_dates[0]]
    if len(leading_dates) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    # map each target date to its predecessor; built explicitly instead of
    # just lagging in order to preserve the calendar
    shifted_dates = [leading_dates[-1]] + target_dates.tolist()
    previous_date = dict(zip(target_dates, shifted_dates))
    # one row per instrument, taken at its first appearance in index
    first_seen = index.to_frame(index=False)
    first_seen = first_seen.drop_duplicates(subset=["instrument"],
                                            keep="first")
    first_seen.loc[:, "prev_date"] = (
        first_seen.loc[:, "date"].apply(lambda dt: previous_date[dt])
    )
    extra_entries = pd.MultiIndex.from_tuples(
        first_seen.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented = index.union(extra_entries).sort_values()
    prices = prices.reindex(augmented)
    if limit != 0:
        prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional holdings
    based on prevailing prices and mapping from generics to tradeable
    instruments. Differences between current holdings and desired holdings
    are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics. Index
        is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        number of contracts. Extra instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of mapped desired_holdings
        instruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts()

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
    >>> current_contracts = pd.Series([0, 1, 0],
    ...                               index=['CLX16', 'CLZ16', 'CLF17'])
    >>> prices = pd.Series([50.32, 50.41, 50.48],
    ...                    index=['CLX16', 'CLZ16', 'CLF17'])
    >>> multipliers = pd.Series([100, 100, 100],
    ...                         index=['CLX16', 'CLZ16', 'CLF17'])
    >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
    ...                           prices, multipliers)
    """
    # normalize to the dict calling convention with "" as the only root
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    # every desired generic must be mappable to at least one tradeable
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # only the generics under this root that are actually desired
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # spread each generic's notional across its instruments, then total
        # the notional per instrument
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    # missing holdings on either side count as 0 contracts
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Convert number of contracts of tradeable instruments to notional value of
    tradeable instruments in a desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Instrument holdings; index is instrument name, values are numbers of
        contracts.
    prices: pandas.Series
        Instrument prices; index is instrument name. prices.index should be a
        superset of instruments.index, otherwise NaN is returned for
        instruments without prices.
    multipliers: pandas.Series
        Contract multipliers; index is instrument name and should be a
        superset of instruments.index.
    desired_ccy: str
        Three letter currency code to convert notional values to, e.g. 'USD'.
        If None, currency conversion is skipped.
    instr_fx: pandas.Series
        Instrument fx denominations; index is instrument name, values are
        three letter currency codes. instr_fx.index should match prices.index.
    fx_rates: pandas.Series
        FX rates used for conversion to desired_ccy; index is pair strings
        such as 'AUDUSD' or 'USDCAD', values are the exchange rates.

    Returns
    -------
    pandas.Series of notional amounts of instruments with an Index of
    instrument names.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
    >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
    >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
    >>> ntln = util.to_notional(current_contracts, prices, multipliers)
    """
    # True selects the contracts -> notional direction in the shared engine
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional amount of tradeable instruments to number of instrument
    contracts, rounding to the nearest integer number of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Instrument holdings; index is instrument name, values are notional
        amounts on the instrument.
    prices: pandas.Series
        Instrument prices; index is instrument name. prices.index should be a
        superset of instruments.index.
    multipliers: pandas.Series
        Contract multipliers; index is instrument name and should be a
        superset of instruments.index.
    desired_ccy: str
        Three letter currency code to convert notional values to, e.g. 'USD'.
        If None, currency conversion is skipped.
    instr_fx: pandas.Series
        Instrument fx denominations; index is instrument name, values are
        three letter currency codes. instr_fx.index should match prices.index.
    fx_rates: pandas.Series
        FX rates used for conversion to desired_ccy; index is pair strings
        such as 'AUDUSD' or 'USDCAD', values are the exchange rates.
    rounder: function
        Function used to round the pd.Series of contracts to integers;
        defaults to pd.Series.round.

    Returns
    -------
    pandas.Series of contract numbers of instruments with an Index of
    instrument names.
    """
    if rounder is None:
        rounder = pd.Series.round
    # False selects the notional -> contracts direction in the shared engine
    raw_contracts = _instr_conv(instruments, prices, multipliers, False,
                                desired_ccy, instr_fx, fx_rates)
    return rounder(raw_contracts).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
                instr_fx, fx_rates):
    """Shared engine for to_notional() and to_contracts().

    When ``to_notional`` is True converts contract counts to notional
    values; otherwise converts notional values to (unrounded) contract
    counts. Currency conversion is applied only when ``desired_ccy`` is
    truthy.
    """
    # all lookups below rely on unambiguous label-based indexing
    if not instruments.index.is_unique:
        raise ValueError("'instruments' must have unique index")
    if not prices.index.is_unique:
        raise ValueError("'prices' must have unique index")
    if not multipliers.index.is_unique:
        raise ValueError("'multipliers' must have unique index")
    if desired_ccy:
        if not instr_fx.index.is_unique:
            raise ValueError("'instr_fx' must have unique index")
        if not fx_rates.index.is_unique:
            raise ValueError("'fx_rates' must have unique index")
        # align prices to the instruments with known fx denominations
        prices = prices.loc[instr_fx.index]
        conv_rate = []
        for ccy in instr_fx.values:
            conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
        # prices expressed in desired_ccy
        fx_adj_prices = prices * np.array(conv_rate)
    else:
        fx_adj_prices = prices
    if to_notional:
        amounts = instruments * fx_adj_prices * multipliers
    else:
        amounts = (instruments / fx_adj_prices) / multipliers
    # restrict output to the requested instruments (extra prices are ignored)
    amounts = amounts.loc[instruments.index]
    return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multiplier based on generic asset
    multipliers and weights mapping from generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns are integers refering to
        generic number indexed from 0, e.g. [0, 1], and the index is strings
        representing instrument names. If dict is given keys should be generic
        instrument names, e.g. 'CL', and values should be pandas.DataFrames of
        loadings. The union of all indexes should be a superset of the
        instruments.index
    root_generic_multiplier: pandas.Series
        Series of multipliers for generic instruments lexicographically
        sorted. If a dictionary of weights is given,
        root_generic_multiplier.index should correspond to the weights keys.

    Returns
    -------
    A pandas.Series of multipliers for tradeable instruments.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.items() replaces Series.iteritems(), which was deprecated in
    # pandas 1.5 and removed in pandas 2.0; behavior is identical.
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every instrument mapping to this root generic shares its multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.

    Parameters
    ----------
    weights: pandas.DataFrame
        Instrument weights with a MultiIndex where the top level contains
        pandas.Timestamps and the second level is instrument names; columns
        are generic names.
    contract_dates: pandas.Series
        Index of tradeable contract names, values are pandas.Timestamps
        representing the last date of the roll.

    Returns
    -------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration of the underlying
    contracts.
    """
    generic_cols = weights.columns
    # move the instrument level of the index into a regular column
    frame = weights.reset_index(level=-1)
    expiry_map = contract_dates.to_dict()
    # after reset_index the first column holds the instrument names
    frame.loc[:, "expiry"] = frame.iloc[:, 0].apply(expiry_map.__getitem__)
    days_to_expiry = (pd.DatetimeIndex(frame.expiry)
                      - pd.Series(frame.index, frame.index)).apply(
                          lambda delta: delta.days)
    frame = frame.loc[:, generic_cols]
    # weight each contract's days-to-expiry and aggregate per date
    return frame.mul(days_to_expiry, axis=0).groupby(level=0).sum()
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
unflatten
|
python
|
def unflatten(flat_weights):
    """
    Pivot weights from long DataFrame into weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If "key" column is
        present a dictionary of unflattened DataFrames is returned with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing rows for this key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If dict is
    returned the dict keys correspond to the "key" column of the input.

    Example
    -------
    >>> import pandas as pd
    >>> from pandas import Timestamp as TS
    >>> import mapping.util as util
    >>> long_wts = pd.DataFrame(
    ...     {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
    ...      "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
    ...      "generic": ["CL1", "CL2"] * 4,
    ...      "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
    ... ).loc[:, ["date", "contract", "generic", "weight"]]
    >>> util.unflatten(long_wts)

    See also: calc_rets()
    """  # NOQA
    # membership test replaces Index.contains(), which was deprecated in
    # pandas 0.25 and removed in pandas 1.0; behavior is identical.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # drop the constant "weight" level so columns are generic names
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
|
Pivot weights from long DataFrame into weighting matrix.
Parameters
----------
flat_weights: pandas.DataFrame
A long DataFrame of weights, where columns are "date", "contract",
"generic", "weight" and optionally "key". If "key" column is
present a dictionary of unflattened DataFrames is returned with the
dictionary keys corresponding to the "key" column and each sub
DataFrame containing rows for this key.
Returns
-------
A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
where the top level contains pandas.Timestamps and the second level is
instrument names. The columns consist of generic names. If dict is returned
the dict keys correspond to the "key" column of the input.
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> long_wts = pd.DataFrame(
... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
... "generic": ["CL1", "CL2"] * 4,
... "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
... ).loc[:, ["date", "contract", "generic", "weight"]]
>>> util.unflatten(long_wts)
See also: calc_rets()
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L92-L143
| null |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Convenience function for reading in pricing data from csv files.

    Parameters
    ----------
    files: list
        List of strings refering to csv files to read data in from; the
        first column should be dates.
    name_func: func
        A function applied to each file string to infer the instrument name
        used in the second level of the MultiIndex. Defaults to the file
        name excluding path and extension,
        e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex where the top level is
    pandas.Timestamps and the second level is instrument names. Columns are
    given by the csv file columns.
    """
    def _default_name(path):
        # basename without its extension
        return os.path.split(path)[1].split(".")[0]

    if name_func is None:
        name_func = _default_name
    frames = []
    for path in files:
        frame = pd.read_csv(path, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [name_func(path)]], names=["date", "contract"]
        )
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
def flatten(weights):
    """
    Flatten weights into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names; columns are generic names. If dict is given this should be a
        dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'.

    Returns
    -------
    A long DataFrame of weights with columns "date", "contract", "generic"
    and "weight". If a dictionary is passed, the DataFrame contains an
    additional column "key" with the key value and is sorted by key.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
    >>> util.flatten(weights)
    """  # NOQA
    def _melt(frame):
        # one row per (date, contract, generic) with its weight
        long_frame = frame.stack().reset_index()
        long_frame.columns = ["date", "contract", "generic", "weight"]
        return long_frame

    if isinstance(weights, pd.DataFrame):
        return _melt(weights)
    if isinstance(weights, dict):
        pieces = []
        for key in sorted(weights.keys()):
            piece = _melt(weights[key])
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, whose weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level
        is pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in
        the above format, with keys which are a subset of the keys given in
        weights

    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names. The columns consist of generic names. If dict is given this
        should be a dict of pandas.DataFrame in the above format, with keys
        for different root generics, e.g. 'CL'

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """  # NOQA
    # normalize both arguments to the dict calling convention; "" marks the
    # single root generic case (see _check_indices)
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # duplicate generic names across roots would collide in the output columns
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time
            group_rets = (rets * gnrc_wts).groupby(level=0)
            # NOTE(review): pd.DataFrame.sum is applied to Series groups here;
            # this works via duck typing and skipna=False propagates NaNs
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that the dicts of returns and weights indices line up.

    Raises ValueError when returns contain dates missing from weights
    (check 1) and KeyError when non-zero-weighted instrument entries are
    missing from the returns index (check 2).
    """
    # dictionaries of returns and weights
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    # the "" key marks the non-dict (single root generic) calling convention,
    # so the error messages omit the root key in that case
    if list(returns.keys()) == [""]:
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        # top index level holds the dates in both structures
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices so that instrument level returns computed
    from it are compatible with a pd.MultiIndex of instrument weights in
    calc_rets(). This amounts to reindexing the series by an augmented
    version of index which also includes, for each instrument, the date
    immediately preceding its first appearance. Missing values are filled
    forward with the previous price up to some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level
        is pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward; 0 disables filling.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dates = prices.sort_index().index.unique(level=0)
    target_dates = index.unique(level=0)
    # dates with prices strictly before the first target date
    leading_dates = price_dates[price_dates < target_dates[0]]
    if len(leading_dates) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    # map each target date to its predecessor; built explicitly instead of
    # just lagging in order to preserve the calendar
    shifted_dates = [leading_dates[-1]] + target_dates.tolist()
    previous_date = dict(zip(target_dates, shifted_dates))
    # one row per instrument, taken at its first appearance in index
    first_seen = index.to_frame(index=False)
    first_seen = first_seen.drop_duplicates(subset=["instrument"],
                                            keep="first")
    first_seen.loc[:, "prev_date"] = (
        first_seen.loc[:, "date"].apply(lambda dt: previous_date[dt])
    )
    extra_entries = pd.MultiIndex.from_tuples(
        first_seen.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented = index.union(extra_entries).sort_values()
    prices = prices.reindex(augmented)
    if limit != 0:
        prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional holdings
    based on prevailing prices and mapping from generics to tradeable
    instruments. Differences between current holdings and desired holdings
    are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics. Index
        is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        number of contracts. Extra instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of mapped desired_holdings
        instruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts()

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
    >>> current_contracts = pd.Series([0, 1, 0],
    ...                               index=['CLX16', 'CLZ16', 'CLF17'])
    >>> prices = pd.Series([50.32, 50.41, 50.48],
    ...                    index=['CLX16', 'CLZ16', 'CLF17'])
    >>> multipliers = pd.Series([100, 100, 100],
    ...                         index=['CLX16', 'CLZ16', 'CLF17'])
    >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
    ...                           prices, multipliers)
    """
    # normalize to the dict calling convention with "" as the only root
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    # every desired generic must be mappable to at least one tradeable
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # only the generics under this root that are actually desired
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # spread each generic's notional across its instruments, then total
        # the notional per instrument
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    # missing holdings on either side count as 0 contracts
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Translate contract counts for tradeable instruments into notional
    values, optionally converted into a single desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Contract counts per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name. Should cover
        instruments.index; instruments without a price yield NaN.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name. Should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument, indexed
        by instrument name. instr_fx.index should match prices.index.
    fx_rates: pandas.Series
        Exchange rates indexed by currency pair strings such as 'AUDUSD'
        or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional values indexed by instrument name.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
    >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
    >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
    >>> ntln = util.to_notional(current_contracts, prices, multipliers)
    """
    # Delegate to the shared conversion engine in notional mode.
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Translate notional holdings of tradeable instruments into whole
    contract counts, rounding to the nearest integer.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amount per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name; should cover
        instruments.index.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument; index
        should match prices.index.
    fx_rates: pandas.Series
        Exchange rates indexed by currency pair strings such as 'AUDUSD'
        or 'USDCAD'.
    rounder: function
        Callable used to round the fractional contract Series to whole
        numbers; pd.Series.round is used when None.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    # Shared engine in contract mode yields fractional contract counts.
    fractional = _instr_conv(instruments, prices, multipliers, False,
                             desired_ccy, instr_fx, fx_rates)
    if rounder is None:
        rounder = pd.Series.round
    return rounder(fractional).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multiplier based on generic asset
    multipliers and weights mapping from generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns are integers refering to
        generic number indexed from 0, e.g. [0, 1], and the index is strings
        representing instrument names. If dict is given keys should be generic
        instrument names, e.g. 'CL', and values should be pandas.DataFrames of
        loadings. The union of all indexes should be a superset of the
        instruments.index
    root_generic_multiplier: pandas.Series
        Series of multipliers for generic instruments lexigraphically sorted.
        If a dictionary of weights is given, root_generic_multiplier.index
        should correspond to the weights keys.

    Returns
    -------
    A pandas.Series of multipliers for tradeable instruments,
    lexicographically sorted by instrument name.

    Raises
    ------
    ValueError
        If multiple root generics are given but weights is not a dict.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.iteritems() was removed in pandas 2.0; items() behaves
    # identically and works on both old and new pandas versions.
    for ast, multiplier in root_generic_multiplier.items():
        # Every instrument mapped from a root generic inherits its
        # multiplier.
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.

    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values

    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.

    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """  # NOQA
    cols = weights.columns
    # Move the contract level out of the index so each row can be mapped
    # to an expiry date; the remaining index level is the observation date.
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # First column after reset_index is the contract name.
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # Calendar days from each observation date to that row's expiry.
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    # Restore the original generic columns before weighting.
    weights = weights.loc[:, cols]
    # Weighted average days-to-expiry per date for each generic.
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
calc_rets
|
python
|
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, whose weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in the
        above format, with keys which are a subset of the keys given in weights
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names. If dict is given this should be
        a dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """  # NOQA
    # Normalize plain Series/DataFrame inputs to the dict form so a single
    # code path handles both call styles.
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    # Generic names must be globally unique across roots since they become
    # the output columns.
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    # Validate that returns exist for every non-zero weight entry.
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time; skipna=False so a missing instrument return
            # propagates NaN into the generic return for that date
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        # one entry per generic was appended above, so extend cols by this
        # root's full column list to keep concat keys aligned
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
|
Calculate continuous return series for futures instruments. These consist
of weighted underlying instrument returns, whose weights can vary over
time.
Parameters
----------
returns: pandas.Series or dict
A Series of instrument returns with a MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Values
correspond to one period instrument returns. returns should be
available for all Timestamps and instruments provided in
weights. If dict is given this should be a dict of pandas.Series in the
above format, with keys which are a subset of the keys given in weights
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A pandas.DataFrame of continuous returns for generics. The index is
pandas.Timestamps and the columns is generic names, corresponding to
weights.columns
Examples
--------
>>> import pandas as pd
>>> import mapping.util as util
>>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
>>> vals = [1, 1/2, 1/2, 1]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
>>> irets = price.groupby(level=-1).pct_change()
>>> util.calc_rets(irets, weights)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L146-L225
|
[
"def _check_indices(returns, weights):\n # dictionaries of returns and weights\n\n # check 1: ensure that all non zero instrument weights have associated\n # returns, see https://github.com/matthewgilbert/mapping/issues/3\n\n # check 2: ensure that returns are not dropped if reindexed from weights,\n # see https://github.com/matthewgilbert/mapping/issues/8\n\n if list(returns.keys()) == [\"\"]:\n msg1 = (\"'returns.index.get_level_values(0)' must contain dates which \"\n \"are a subset of 'weights.index.get_level_values(0)'\"\n \"\\nExtra keys: {1}\")\n msg2 = (\"{0} from the non zero elements of \"\n \"'weights.loc[:, '{2}'].index' are not in 'returns.index'\")\n else:\n msg1 = (\"'returns['{0}'].index.get_level_values(0)' must contain \"\n \"dates which are a subset of \"\n \"'weights['{0}'].index.get_level_values(0)'\"\n \"\\nExtra keys: {1}\")\n msg2 = (\"{0} from the non zero elements of \"\n \"'weights['{1}'].loc[:, '{2}'].index' are not in \"\n \"'returns['{1}'].index'\")\n\n for root in returns:\n wts = weights[root]\n rets = returns[root]\n\n dts_rets = rets.index.get_level_values(0)\n dts_wts = wts.index.get_level_values(0)\n # check 1\n if not dts_rets.isin(dts_wts).all():\n missing_dates = dts_rets.difference(dts_wts).tolist()\n raise ValueError(msg1.format(root, _stringify(missing_dates)))\n # check 2\n for generic in wts.columns:\n gnrc_wts = wts.loc[:, generic]\n # drop generics where weight is 0, this avoids potential KeyError\n # in later indexing of rets even when ret has weight of 0\n gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]\n # necessary instead of missing_keys.any() to support MultiIndex\n if not gnrc_wts.index.isin(rets.index).all():\n # as list instead of MultiIndex for legibility when stack trace\n missing_keys = (gnrc_wts.index.difference(rets.index).tolist())\n msg2 = msg2.format(_stringify(missing_keys), root, generic)\n raise KeyError(msg2)\n"
] |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Load pricing data from a collection of csv files into one DataFrame.

    Parameters
    ----------
    files: list
        Strings naming csv files to load; the first column of each file
        must contain dates.
    name_func: func
        Callable mapping a file string to an instrument name, used for
        the second index level. Defaults to the bare file name without
        path or extension, e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    A pandas.DataFrame whose MultiIndex has pandas.Timestamps on the top
    level ("date") and instrument names on the second level ("contract");
    columns are taken from the csv files.
    """
    if name_func is None:
        def name_func(fname):
            return os.path.split(fname)[1].split(".")[0]
    frames = []
    for fname in files:
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame = frame.sort_index()
        # Tag every row of this file with its instrument name.
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [name_func(fname)]], names=["date", "contract"]
        )
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
def flatten(weights):
    """
    Convert a weighting matrix (or dict of matrices) into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Instrument weights with a MultiIndex of (pandas.Timestamp,
        instrument name) rows and generic names as columns. A dict maps
        root generic names, e.g. 'CL', to such DataFrames.

    Returns
    -------
    A long DataFrame with columns "date", "contract", "generic" and
    "weight". For dict input an extra "key" column holds the dict key and
    rows are ordered by key.

    Raises
    ------
    ValueError
        If weights is neither a pandas.DataFrame nor a dict.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
    >>> util.flatten(weights)
    """  # NOQA
    if isinstance(weights, pd.DataFrame):
        flat = weights.stack().reset_index()
        flat.columns = ["date", "contract", "generic", "weight"]
        return flat
    if isinstance(weights, dict):
        pieces = []
        # Sort keys so output ordering is deterministic.
        for key in sorted(weights.keys()):
            piece = weights[key].stack().reset_index()
            piece.columns = ["date", "contract", "generic", "weight"]
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def unflatten(flat_weights):
    """
    Pivot weights from long DataFrame into weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If "key" column is
        present a dictionary of unflattened DataFrames is returned with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing rows for this key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If dict is
    returned the dict keys correspond to the "key" column of the input.

    See also: calc_rets()
    """
    # Index.contains() was deprecated and removed in modern pandas; the
    # supported membership test is the `in` operator on the columns.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # pivot_table nests columns under "weight"; flatten that level.
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def _stringify(xs):
    """Compact repr of a sequence: long lists show only the end points."""
    if len(xs) > 2:
        return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
    return repr(xs)
def _check_indices(returns, weights):
    """
    Validate that instrument returns and weights are mutually consistent.

    Both arguments are dicts keyed by root generic (or the single key "")
    mapping to a pandas.Series of returns and a pandas.DataFrame of
    weights respectively. Raises ValueError when return dates are not a
    subset of weight dates, or KeyError when a non zero weight has no
    corresponding return; returns None on success.
    """
    # dictionaries of returns and weights

    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3

    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8

    # Pick message templates matching the call style: the "" key means the
    # caller passed plain (non-dict) inputs, so omit key names in errors.
    if list(returns.keys()) == [""]:
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")

    for root in returns:
        wts = weights[root]
        rets = returns[root]

        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices such that when instrument level returns are
    calculated they are compatible with a pd.MultiIndex of instrument weights
    in calc_rets(). This amounts to reindexing the series by an augmented
    version of index which includes the preceding date for the first
    appearance of each instrument. Fill forward missing values with previous
    price up to some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward; 0 disables filling.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    Raises
    ------
    ValueError
        If index is not unique, or prices contains no date preceding the
        first date in index.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # Dates with price data strictly before the first weighting date; the
    # last of these anchors the first instrument appearances.
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    first_instr = index.to_frame(index=False)
    # First row per instrument == its first appearance in index.
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # GroupBy.fillna(method="ffill") is deprecated in modern pandas;
        # ffill() is the supported equivalent and fills forward within
        # each instrument group.
        prices = prices.groupby(level=1).ffill(limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional holdings
    based on prevailing prices and mapping from generics to tradeable
    instruments. Differences between current holdings and desired holdings
    are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics.
        Index is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index
    prices: pandas.Series
        Series of instrument prices. Index is instrument name. Extra
        instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of mapped desired_holdings
        instruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts()

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
    >>> current_contracts = pd.Series([0, 1, 0],
    ...                               index=['CLX16', 'CLZ16', 'CLF17'])
    >>> prices = pd.Series([50.32, 50.41, 50.48],
    ...                    index=['CLX16', 'CLZ16', 'CLF17'])
    >>> multipliers = pd.Series([100, 100, 100],
    ...                         index=['CLX16', 'CLZ16', 'CLF17'])
    >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
    ...                           prices, multipliers)
    """
    # Normalize the single-DataFrame form to the dict form so one code
    # path handles both call styles.
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    # Every desired generic must be mappable to tradeable instruments.
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # Only consider generics for this root that are actually desired.
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # Spread each generic's notional across its instruments, then sum
        # per instrument across generics.
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    # fill_value=0 treats one-sided labels as zero holdings.
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Translate contract counts for tradeable instruments into notional
    values, optionally converted into a single desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Contract counts per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name. Should cover
        instruments.index; instruments without a price yield NaN.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name. Should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument, indexed
        by instrument name. instr_fx.index should match prices.index.
    fx_rates: pandas.Series
        Exchange rates indexed by currency pair strings such as 'AUDUSD'
        or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional values indexed by instrument name.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
    >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
    >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
    >>> ntln = util.to_notional(current_contracts, prices, multipliers)
    """
    # Delegate to the shared conversion engine in notional mode.
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Translate notional holdings of tradeable instruments into whole
    contract counts, rounding to the nearest integer.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amount per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name; should cover
        instruments.index.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; should cover
        instruments.index.
    desired_ccy: str
        Three letter currency code to convert notionals into, e.g. 'USD'.
        When None no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument; index
        should match prices.index.
    fx_rates: pandas.Series
        Exchange rates indexed by currency pair strings such as 'AUDUSD'
        or 'USDCAD'.
    rounder: function
        Callable used to round the fractional contract Series to whole
        numbers; pd.Series.round is used when None.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    # Shared engine in contract mode yields fractional contract counts.
    fractional = _instr_conv(instruments, prices, multipliers, False,
                             desired_ccy, instr_fx, fx_rates)
    if rounder is None:
        rounder = pd.Series.round
    return rounder(fractional).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multipliers based on generic asset
    multipliers and the weights mapping generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments **for a given
        date**. Columns are integer generic numbers indexed from 0, the index
        is instrument names. If a dict, keys are root generic names (e.g.
        'CL') and values are such DataFrames; the union of all indexes should
        cover the instruments of interest.
    root_generic_multiplier: pandas.Series
        Multipliers for generic instruments, lexicographically sorted. When
        ``weights`` is a dict, its keys must match this index.

    Returns
    -------
    pandas.Series of multipliers for tradeable instruments, sorted by
    instrument name.

    Raises
    ------
    ValueError
        If multiple generics are given but ``weights`` is not a dict.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.items() replaces Series.iteritems(), which was removed in
    # pandas 2.0
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every tradeable under this root generic inherits its multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.
    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values
    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.
    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """ # NOQA
    cols = weights.columns
    # move the instrument level into a column so rows are indexed by date only
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # the first column after reset_index holds the instrument names; map each
    # instrument to its roll date (KeyError here means a contract is missing
    # from contract_dates)
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # days from each row's date (the index) to that instrument's roll date
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    weights = weights.loc[:, cols]
    # weight the per-instrument days-to-expiry and aggregate by date
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
reindex
|
python
|
def reindex(prices, index, limit):
    """
    Reindex a price Series onto ``index`` augmented with, for each
    instrument, the last available price date preceding its first
    appearance, forward filling missing prices up to ``limit`` periods
    (0 disables filling). This makes instrument-level returns computed
    from the result compatible with calc_rets().

    Parameters
    ----------
    prices: pandas.Series
        Instrument prices with a MultiIndex of (pandas.Timestamp,
        instrument name).
    index: pandas.MultiIndex
        Target (pandas.Timestamp, instrument name) index; must be unique.
    limit: int
        Maximum number of periods to forward fill per instrument.

    Returns
    -------
    pandas.Series of prices on the augmented index.

    Raises
    ------
    ValueError
        If ``index`` is not unique or ``prices`` has no date preceding
        the first date in ``index``.
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # map each index date to its preceding date; done explicitly (instead of
    # just lagging) to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    # first appearance of each instrument, paired with the preceding date
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # GroupBy.ffill() replaces fillna(method="ffill"), whose `method`
        # keyword was deprecated in pandas 2.1 and removed in 3.0
        prices = prices.groupby(level=1).ffill(limit=limit)
    return prices
|
Reindex a pd.Series of prices such that when instrument level returns are
calculated they are compatible with a pd.MultiIndex of instrument weights
in calc_rets(). This amount to reindexing the series by an augmented
version of index which includes the preceding date for the first appearance
of each instrument. Fill forward missing values with previous price up to
some limit.
Parameters
----------
prices: pandas.Series
A Series of instrument prices with a MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names.
index: pandas.MultiIndex
A MultiIndex where the top level contains pandas.Timestamps and the
second level is instrument names.
limit: int
Number of periods to fill prices forward.
Returns
-------
A pandas.Series of reindexed prices where the top level is
pandas.Timestamps and the second level is instrument names.
See also: calc_rets()
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
... (TS('2015-01-05'), 'CLF5'),
... (TS('2015-01-05'), 'CLH5'),
... (TS('2015-01-06'), 'CLF5'),
... (TS('2015-01-06'), 'CLH5'),
... (TS('2015-01-07'), 'CLF5'),
... (TS('2015-01-07'), 'CLH5')])
>>> prices = pd.Series([100.12, 101.50, 102.51, 103.51, 102.73, 102.15,
... 104.37], index=idx)
>>> widx = pd.MultiIndex.from_tuples([(TS('2015-01-05'), 'CLF5'),
... (TS('2015-01-05'), 'CLH5'),
... (TS('2015-01-07'), 'CLF5'),
... (TS('2015-01-07'), 'CLH5')])
>>> util.reindex(prices, widx, limit=0)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L282-L362
| null |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Read instrument pricing data from csv files into one indexed DataFrame.

    Parameters
    ----------
    files: list
        csv file paths to load; the first column of each file must contain
        dates.
    name_func: func
        Callable mapping a file path to the instrument name used in the
        second index level. Defaults to the bare file name without path or
        extension, e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    pandas.DataFrame with a MultiIndex of (pandas.Timestamp, instrument
    name) and columns taken from the csv files.
    """
    if name_func is None:
        def name_func(fname):
            return os.path.split(fname)[1].split(".")[0]
    frames = []
    for fname in files:
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [name_func(fname)]], names=["date", "contract"]
        )
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
def flatten(weights):
    """
    Flatten a weights DataFrame (or dict of them) into long format.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Instrument weights with a MultiIndex of (pandas.Timestamp,
        instrument name) and columns of generic names. If a dict, keys are
        root generic names (e.g. 'CL') and values are such DataFrames.

    Returns
    -------
    A long DataFrame with columns "date", "contract", "generic" and
    "weight". When a dict is passed, an additional "key" column holds the
    dict key and rows are sorted by that key.

    Raises
    ------
    ValueError
        If ``weights`` is neither a DataFrame nor a dict.
    """
    if isinstance(weights, pd.DataFrame):
        long_wts = weights.stack().reset_index()
        long_wts.columns = ["date", "contract", "generic", "weight"]
        return long_wts
    if isinstance(weights, dict):
        pieces = []
        for key in sorted(weights.keys()):
            piece = weights[key].stack().reset_index()
            piece.columns = ["date", "contract", "generic", "weight"]
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def unflatten(flat_weights):
    """
    Pivot long-format weights back into weighting matrices.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        Long DataFrame of weights with columns "date", "contract",
        "generic", "weight" and optionally "key". When "key" is present a
        dict of unflattened DataFrames is returned, keyed by the "key"
        column values.

    Returns
    -------
    A DataFrame (or dict of DataFrames keyed by the "key" column) of
    instrument weights with a MultiIndex of (pandas.Timestamp, instrument
    name) and columns of generic names.

    See also: calc_rets()
    """
    # membership test replaces Index.contains(), which was removed in
    # pandas 1.0
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # drop the constant "weight" level so columns are generic names
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, who's weights can vary over
    time.
    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in the
        above format, with keys which are a subset of the keys given in weights
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names. If dict is given this should be
        a dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'
    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns
    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """ # NOQA
    # normalize the single-Series/-DataFrame case to the dict form so one
    # code path handles both
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # generic names become the output columns, so they must be unique across
    # all root generics
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    # validate that weights and returns indexes are mutually consistent
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time; skipna=False so a missing instrument return
            # propagates NaN into the generic return instead of being dropped
            group_rets = (rets * gnrc_wts).groupby(level=0)
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
# dictionaries of returns and weights
# check 1: ensure that all non zero instrument weights have associated
# returns, see https://github.com/matthewgilbert/mapping/issues/3
# check 2: ensure that returns are not dropped if reindexed from weights,
# see https://github.com/matthewgilbert/mapping/issues/8
if list(returns.keys()) == [""]:
msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
"are a subset of 'weights.index.get_level_values(0)'"
"\nExtra keys: {1}")
msg2 = ("{0} from the non zero elements of "
"'weights.loc[:, '{2}'].index' are not in 'returns.index'")
else:
msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
"dates which are a subset of "
"'weights['{0}'].index.get_level_values(0)'"
"\nExtra keys: {1}")
msg2 = ("{0} from the non zero elements of "
"'weights['{1}'].loc[:, '{2}'].index' are not in "
"'returns['{1}'].index'")
for root in returns:
wts = weights[root]
rets = returns[root]
dts_rets = rets.index.get_level_values(0)
dts_wts = wts.index.get_level_values(0)
# check 1
if not dts_rets.isin(dts_wts).all():
missing_dates = dts_rets.difference(dts_wts).tolist()
raise ValueError(msg1.format(root, _stringify(missing_dates)))
# check 2
for generic in wts.columns:
gnrc_wts = wts.loc[:, generic]
# drop generics where weight is 0, this avoids potential KeyError
# in later indexing of rets even when ret has weight of 0
gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
# necessary instead of missing_keys.any() to support MultiIndex
if not gnrc_wts.index.isin(rets.index).all():
# as list instead of MultiIndex for legibility when stack trace
missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
msg2 = msg2.format(_stringify(missing_keys), root, generic)
raise KeyError(msg2)
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Compute the tradeable-contract orders required to rebalance from current
    contract holdings to desired generic notional holdings, given prevailing
    prices and the mapping from generics to tradeable instruments. Any
    difference between current and desired holdings is treated as 0 when
    missing, and zero trades are dropped from the result.

    Parameters
    ----------
    current_contracts: pandas.Series
        Current number of contracts held per tradeable instrument. 0 may be
        passed if all holdings are 0.
    desired_holdings: pandas.Series
        Desired holdings in base notional currency per generic. Index is
        generic names matching those in trade_weights.
    trade_weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments **for a given
        date**; columns are generic names, index is instrument names. If a
        dict, keys are root generic names (e.g. 'CL') and values are such
        DataFrames; the union of all columns must cover
        desired_holdings.index.
    prices: pandas.Series
        Instrument prices indexed by instrument name; extra instruments are
        ignored.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; must cover all
        instruments mapped from desired_holdings.
    kwargs: key word arguments
        Passed through to to_contracts().

    Returns
    -------
    pandas.Series of nonzero instrument contract trades, lexicographically
    sorted.

    Raises
    ------
    ValueError
        If desired_holdings contains generics absent from trade_weights.
    """
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for root_key in trade_weights:
        generics.extend(trade_weights[root_key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    per_root_contracts = []
    for root_key in trade_weights:
        root_weights = trade_weights[root_key]
        generic_cols = root_weights.columns.intersection(
            desired_holdings.index
        )
        root_holdings = desired_holdings.loc[generic_cols]
        root_weights = root_weights.loc[:, generic_cols]
        # drop instruments whose only nonzero weights were in the columns
        # dropped above
        root_weights = root_weights.loc[~(root_weights == 0).all(axis=1)]
        # desired notional per instrument, summed across generics
        instr_notional = (root_holdings * root_weights).sum(axis=1)
        instr_prices = prices.loc[instr_notional.index]
        per_root_contracts.append(to_contracts(instr_notional, instr_prices,
                                               multipliers, **kwargs))
    desired_contracts = pd.concat(per_root_contracts, axis=0)
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    return trades.sort_index()
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Convert contract counts of tradeable instruments into notional values,
    optionally expressed in a desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Number of contracts held per instrument; index is instrument names.
    prices: pandas.Series
        Instrument prices; index should be a superset of instruments.index,
        otherwise NaN is returned for instruments without prices.
    multipliers: pandas.Series
        Contract multipliers; index should be a superset of
        instruments.index.
    desired_ccy: str
        Three letter currency code (e.g. 'USD') to convert notionals into.
        If None, no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument; index
        should match prices.index.
    fx_rates: pandas.Series
        FX rates keyed by pair name, e.g. 'AUDUSD' or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional amounts indexed by instrument name.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
    >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
    >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
    >>> ntln = util.to_notional(current_contracts, prices, multipliers)
    """
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional amounts of tradeable instruments into whole numbers of
    contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amount held per instrument; index is instrument names.
    prices: pandas.Series
        Instrument prices; index must be a superset of instruments.index.
    multipliers: pandas.Series
        Contract multipliers; index must be a superset of instruments.index.
    desired_ccy: str
        Three letter currency code (e.g. 'USD') the notionals are expressed
        in. If None, no currency conversion is performed.
    instr_fx: pandas.Series
        Currency denomination (three letter code) per instrument; index
        should match prices.index.
    fx_rates: pandas.Series
        FX rates keyed by pair name, e.g. 'AUDUSD' or 'USDCAD'.
    rounder: function
        Callable rounding the contract Series to integers; defaults to
        pd.Series.round.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    fractional = _instr_conv(instruments, prices, multipliers, False,
                             desired_ccy, instr_fx, fx_rates)
    if rounder is None:
        rounder = pd.Series.round
    return rounder(fractional).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multipliers based on generic asset
    multipliers and the weights mapping generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments **for a given
        date**. Columns are integer generic numbers indexed from 0, the index
        is instrument names. If a dict, keys are root generic names (e.g.
        'CL') and values are such DataFrames; the union of all indexes should
        cover the instruments of interest.
    root_generic_multiplier: pandas.Series
        Multipliers for generic instruments, lexicographically sorted. When
        ``weights`` is a dict, its keys must match this index.

    Returns
    -------
    pandas.Series of multipliers for tradeable instruments, sorted by
    instrument name.

    Raises
    ------
    ValueError
        If multiple generics are given but ``weights`` is not a dict.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.items() replaces Series.iteritems(), which was removed in
    # pandas 2.0
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every tradeable under this root generic inherits its multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.
    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values
    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.
    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """ # NOQA
    cols = weights.columns
    # move the instrument level into a column so rows are indexed by date only
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # the first column after reset_index holds the instrument names; map each
    # instrument to its roll date (KeyError here means a contract is missing
    # from contract_dates)
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # days from each row's date (the index) to that instrument's roll date
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    weights = weights.loc[:, cols]
    # weight the per-instrument days-to-expiry and aggregate by date
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
calc_trades
|
python
|
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Compute the nonzero tradeable-contract orders required to rebalance
    from ``current_contracts`` to ``desired_holdings`` (generic notionals),
    using ``trade_weights`` to map generics onto instruments and passing
    ``kwargs`` through to to_contracts(). Result is sorted by instrument.
    """
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for root_key in trade_weights:
        generics.extend(trade_weights[root_key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    per_root_contracts = []
    for root_key in trade_weights:
        root_weights = trade_weights[root_key]
        generic_cols = root_weights.columns.intersection(
            desired_holdings.index
        )
        root_holdings = desired_holdings.loc[generic_cols]
        root_weights = root_weights.loc[:, generic_cols]
        # drop instruments whose only nonzero weights were in the columns
        # dropped above
        root_weights = root_weights.loc[~(root_weights == 0).all(axis=1)]
        # desired notional per instrument, summed across generics
        instr_notional = (root_holdings * root_weights).sum(axis=1)
        instr_prices = prices.loc[instr_notional.index]
        per_root_contracts.append(to_contracts(instr_notional, instr_prices,
                                               multipliers, **kwargs))
    desired_contracts = pd.concat(per_root_contracts, axis=0)
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    return trades.sort_index()
|
Calculate the number of tradeable contracts for rebalancing from a set
of current contract holdings to a set of desired generic notional holdings
based on prevailing prices and mapping from generics to tradeable
instruments. Differences between current holdings and desired holdings
are treated as 0. Zero trades are dropped.
Parameters
----------
current_contracts: pandas.Series
Series of current number of contracts held for tradeable instruments.
Can pass 0 if all holdings are 0.
desired_holdings: pandas.Series
Series of desired holdings in base notional currency of generics. Index
is generic contracts, these should be the same generics as in
trade_weights.
trade_weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments **for a given date**. The columns refer to generic
contracts and the index is strings representing instrument names.
If dict is given keys should be root generic names, e.g. 'CL', and
values should be pandas.DataFrames of loadings. The union of all
columns should be a superset of the desired_holdings.index
prices: pandas.Series
Series of instrument prices. Index is instrument name and values are
number of contracts. Extra instrument prices will be ignored.
multipliers: pandas.Series
Series of instrument multipliers. Index is instrument name and
values are the multiplier associated with the contract.
multipliers.index should be a superset of mapped desired_holdings
instruments.
kwargs: key word arguments
Key word arguments to be passed to to_contracts()
Returns
-------
A pandas.Series of instrument contract trades, lexigraphically sorted.
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=["CL1", "CL2"])
>>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
>>> current_contracts = pd.Series([0, 1, 0],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> prices = pd.Series([50.32, 50.41, 50.48],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> multipliers = pd.Series([100, 100, 100],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
... prices, multipliers)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L365-L458
|
[
"def to_contracts(instruments, prices, multipliers, desired_ccy=None,\n instr_fx=None, fx_rates=None, rounder=None):\n \"\"\"\n Convert notional amount of tradeable instruments to number of instrument\n contracts, rounding to nearest integer number of contracts.\n\n Parameters\n ----------\n instruments: pandas.Series\n Series of instrument holdings. Index is instrument name and values are\n notional amount on instrument.\n prices: pandas.Series\n Series of instrument prices. Index is instrument name and values are\n instrument prices. prices.index should be a superset of\n instruments.index\n multipliers: pandas.Series\n Series of instrument multipliers. Index is instrument name and\n values are the multiplier associated with the contract.\n multipliers.index should be a superset of instruments.index\n desired_ccy: str\n Three letter string representing desired currency to convert notional\n values to, e.g. 'USD'. If None is given currency conversion is ignored.\n instr_fx: pandas.Series\n Series of instrument fx denominations. Index is instrument name and\n values are three letter strings representing the currency the\n instrument is denominated in. instr_fx.index should match prices.index\n fx_rates: pandas.Series\n Series of fx rates used for conversion to desired_ccy. Index is strings\n representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the\n corresponding exchange rates.\n rounder: function\n Function to round pd.Series contracts to integers, if None default\n pd.Series.round is used.\n\n Returns\n -------\n pandas.Series of contract numbers of instruments with Index of instruments\n names\n \"\"\"\n contracts = _instr_conv(instruments, prices, multipliers, False,\n desired_ccy, instr_fx, fx_rates)\n if rounder is None:\n rounder = pd.Series.round\n\n contracts = rounder(contracts)\n contracts = contracts.astype(int)\n return contracts\n"
] |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
"""
Convenience function for reading in pricing data from csv files
Parameters
----------
files: list
List of strings refering to csv files to read data in from, first
column should be dates
name_func: func
A function to apply to the file strings to infer the instrument name,
used in the second level of the MultiIndex index. Default is the file
name excluding the pathname and file ending,
e.g. /path/to/file/name.csv -> name
Returns
-------
A pandas.DataFrame with a pandas.MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Columns are
given by the csv file columns.
"""
if name_func is None:
def name_func(x):
return os.path.split(x)[1].split(".")[0]
dfs = []
for f in files:
name = name_func(f)
df = pd.read_csv(f, index_col=0, parse_dates=True)
df.sort_index(inplace=True)
df.index = pd.MultiIndex.from_product([df.index, [name]],
names=["date", "contract"])
dfs.append(df)
return pd.concat(dfs, axis=0, sort=False).sort_index()
def flatten(weights):
"""
Flatten weights into a long DataFrame.
Parameters
----------
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas. Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A long DataFrame of weights, where columns are "date", "contract",
"generic" and "weight". If a dictionary is passed, DataFrame will contain
additional colum "key" containing the key value and be sorted according to
this key value.
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLH5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
>>> util.flatten(weights)
""" # NOQA
if isinstance(weights, pd.DataFrame):
wts = weights.stack().reset_index()
wts.columns = ["date", "contract", "generic", "weight"]
elif isinstance(weights, dict):
wts = []
for key in sorted(weights.keys()):
wt = weights[key].stack().reset_index()
wt.columns = ["date", "contract", "generic", "weight"]
wt.loc[:, "key"] = key
wts.append(wt)
wts = pd.concat(wts, axis=0).reset_index(drop=True)
else:
raise ValueError("weights must be pd.DataFrame or dict")
return wts
def unflatten(flat_weights):
"""
Pivot weights from long DataFrame into weighting matrix.
Parameters
----------
flat_weights: pandas.DataFrame
A long DataFrame of weights, where columns are "date", "contract",
"generic", "weight" and optionally "key". If "key" column is
present a dictionary of unflattened DataFrames is returned with the
dictionary keys corresponding to the "key" column and each sub
DataFrame containing rows for this key.
Returns
-------
A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
where the top level contains pandas.Timestamps and the second level is
instrument names. The columns consist of generic names. If dict is returned
the dict keys correspond to the "key" column of the input.
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> long_wts = pd.DataFrame(
... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
... "generic": ["CL1", "CL2"] * 4,
... "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
... ).loc[:, ["date", "contract", "generic", "weight"]]
>>> util.unflatten(long_wts)
See also: calc_rets()
""" # NOQA
if flat_weights.columns.contains("key"):
weights = {}
for key in flat_weights.loc[:, "key"].unique():
flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
flt_wts = flt_wts.drop(labels="key", axis=1)
wts = flt_wts.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
wts.columns = wts.columns.droplevel(0)
weights[key] = wts
else:
weights = flat_weights.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
weights.columns = weights.columns.droplevel(0)
return weights
def calc_rets(returns, weights):
"""
Calculate continuous return series for futures instruments. These consist
of weighted underlying instrument returns, who's weights can vary over
time.
Parameters
----------
returns: pandas.Series or dict
A Series of instrument returns with a MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Values
correspond to one period instrument returns. returns should be
available for all for all Timestamps and instruments provided in
weights. If dict is given this should be a dict of pandas.Series in the
above format, with keys which are a subset of the keys given in weights
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A pandas.DataFrame of continuous returns for generics. The index is
pandas.Timestamps and the columns is generic names, corresponding to
weights.columns
Examples
--------
>>> import pandas as pd
>>> import mapping.util as util
>>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
>>> vals = [1, 1/2, 1/2, 1]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
>>> irets = price.groupby(level=-1).pct_change()
>>> util.calc_rets(irets, weights)
""" # NOQA
if not isinstance(returns, dict):
returns = {"": returns}
if not isinstance(weights, dict):
weights = {"": weights}
generic_superset = []
for root in weights:
generic_superset.extend(weights[root].columns.tolist())
if len(set(generic_superset)) != len(generic_superset):
raise ValueError("Columns for weights must all be unique")
_check_indices(returns, weights)
grets = []
cols = []
for root in returns:
root_wts = weights[root]
root_rets = returns[root]
for generic in root_wts.columns:
gnrc_wts = root_wts.loc[:, generic]
# drop generics where weight is 0, this avoids potential KeyError
# in later indexing of rets even when ret has weight of 0
gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
rets = root_rets.loc[gnrc_wts.index]
# groupby time
group_rets = (rets * gnrc_wts).groupby(level=0)
grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
cols.extend(root_wts.columns.tolist())
rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
# dictionaries of returns and weights
# check 1: ensure that all non zero instrument weights have associated
# returns, see https://github.com/matthewgilbert/mapping/issues/3
# check 2: ensure that returns are not dropped if reindexed from weights,
# see https://github.com/matthewgilbert/mapping/issues/8
if list(returns.keys()) == [""]:
msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
"are a subset of 'weights.index.get_level_values(0)'"
"\nExtra keys: {1}")
msg2 = ("{0} from the non zero elements of "
"'weights.loc[:, '{2}'].index' are not in 'returns.index'")
else:
msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
"dates which are a subset of "
"'weights['{0}'].index.get_level_values(0)'"
"\nExtra keys: {1}")
msg2 = ("{0} from the non zero elements of "
"'weights['{1}'].loc[:, '{2}'].index' are not in "
"'returns['{1}'].index'")
for root in returns:
wts = weights[root]
rets = returns[root]
dts_rets = rets.index.get_level_values(0)
dts_wts = wts.index.get_level_values(0)
# check 1
if not dts_rets.isin(dts_wts).all():
missing_dates = dts_rets.difference(dts_wts).tolist()
raise ValueError(msg1.format(root, _stringify(missing_dates)))
# check 2
for generic in wts.columns:
gnrc_wts = wts.loc[:, generic]
# drop generics where weight is 0, this avoids potential KeyError
# in later indexing of rets even when ret has weight of 0
gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
# necessary instead of missing_keys.any() to support MultiIndex
if not gnrc_wts.index.isin(rets.index).all():
# as list instead of MultiIndex for legibility when stack trace
missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
msg2 = msg2.format(_stringify(missing_keys), root, generic)
raise KeyError(msg2)
def reindex(prices, index, limit):
"""
Reindex a pd.Series of prices such that when instrument level returns are
calculated they are compatible with a pd.MultiIndex of instrument weights
in calc_rets(). This amount to reindexing the series by an augmented
version of index which includes the preceding date for the first appearance
of each instrument. Fill forward missing values with previous price up to
some limit.
Parameters
----------
prices: pandas.Series
A Series of instrument prices with a MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names.
index: pandas.MultiIndex
A MultiIndex where the top level contains pandas.Timestamps and the
second level is instrument names.
limt: int
Number of periods to fill prices forward.
Returns
-------
A pandas.Series of reindexed prices where the top level is
pandas.Timestamps and the second level is instrument names.
See also: calc_rets()
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
... (TS('2015-01-05'), 'CLF5'),
... (TS('2015-01-05'), 'CLH5'),
... (TS('2015-01-06'), 'CLF5'),
... (TS('2015-01-06'), 'CLH5'),
... (TS('2015-01-07'), 'CLF5'),
... (TS('2015-01-07'), 'CLH5')])
>>> prices = pd.Series([100.12, 101.50, 102.51, 103.51, 102.73, 102.15,
... 104.37], index=idx)
>>> widx = pd.MultiIndex.from_tuples([(TS('2015-01-05'), 'CLF5'),
... (TS('2015-01-05'), 'CLH5'),
... (TS('2015-01-07'), 'CLF5'),
... (TS('2015-01-07'), 'CLH5')])
>>> util.reindex(prices, widx, limit=0)
"""
if not index.is_unique:
raise ValueError("'index' must be unique")
index = index.sort_values()
index.names = ["date", "instrument"]
price_dts = prices.sort_index().index.unique(level=0)
index_dts = index.unique(level=0)
mask = price_dts < index_dts[0]
leading_price_dts = price_dts[mask]
if len(leading_price_dts) == 0:
raise ValueError("'prices' must have a date preceding first date in "
"'index'")
prev_dts = index_dts.tolist()
prev_dts.insert(0, leading_price_dts[-1])
# avoid just lagging to preserve the calendar
previous_date = dict(zip(index_dts, prev_dts))
first_instr = index.to_frame(index=False)
first_instr = (
first_instr.drop_duplicates(subset=["instrument"], keep="first")
)
first_instr.loc[:, "prev_date"] = (
first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
)
additional_indices = pd.MultiIndex.from_tuples(
first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
)
augmented_index = index.union(additional_indices).sort_values()
prices = prices.reindex(augmented_index)
if limit != 0:
prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
return prices
def to_notional(instruments, prices, multipliers, desired_ccy=None,
instr_fx=None, fx_rates=None):
"""
Convert number of contracts of tradeable instruments to notional value of
tradeable instruments in a desired currency.
Parameters
----------
instruments: pandas.Series
Series of instrument holdings. Index is instrument name and values are
number of contracts.
prices: pandas.Series
Series of instrument prices. Index is instrument name and values are
instrument prices. prices.index should be a superset of
instruments.index otherwise NaN returned for instruments without prices
multipliers: pandas.Series
Series of instrument multipliers. Index is instrument name and
values are the multiplier associated with the contract.
multipliers.index should be a superset of instruments.index
desired_ccy: str
Three letter string representing desired currency to convert notional
values to, e.g. 'USD'. If None is given currency conversion is ignored.
instr_fx: pandas.Series
Series of instrument fx denominations. Index is instrument name and
values are three letter strings representing the currency the
instrument is denominated in. instr_fx.index should match prices.index
fx_rates: pandas.Series
Series of fx rates used for conversion to desired_ccy. Index is strings
representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
corresponding exchange rates.
Returns
-------
pandas.Series of notional amounts of instruments with Index of instruments
names
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
>>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
>>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
>>> ntln = util.to_notional(current_contracts, prices, multipliers)
"""
notionals = _instr_conv(instruments, prices, multipliers, True,
desired_ccy, instr_fx, fx_rates)
return notionals
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
instr_fx=None, fx_rates=None, rounder=None):
"""
Convert notional amount of tradeable instruments to number of instrument
contracts, rounding to nearest integer number of contracts.
Parameters
----------
instruments: pandas.Series
Series of instrument holdings. Index is instrument name and values are
notional amount on instrument.
prices: pandas.Series
Series of instrument prices. Index is instrument name and values are
instrument prices. prices.index should be a superset of
instruments.index
multipliers: pandas.Series
Series of instrument multipliers. Index is instrument name and
values are the multiplier associated with the contract.
multipliers.index should be a superset of instruments.index
desired_ccy: str
Three letter string representing desired currency to convert notional
values to, e.g. 'USD'. If None is given currency conversion is ignored.
instr_fx: pandas.Series
Series of instrument fx denominations. Index is instrument name and
values are three letter strings representing the currency the
instrument is denominated in. instr_fx.index should match prices.index
fx_rates: pandas.Series
Series of fx rates used for conversion to desired_ccy. Index is strings
representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
corresponding exchange rates.
rounder: function
Function to round pd.Series contracts to integers, if None default
pd.Series.round is used.
Returns
-------
pandas.Series of contract numbers of instruments with Index of instruments
names
"""
contracts = _instr_conv(instruments, prices, multipliers, False,
desired_ccy, instr_fx, fx_rates)
if rounder is None:
rounder = pd.Series.round
contracts = rounder(contracts)
contracts = contracts.astype(int)
return contracts
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def get_multiplier(weights, root_generic_multiplier):
"""
Determine tradeable instrument multiplier based on generic asset
multipliers and weights mapping from generics to tradeables.
Parameters
----------
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments **for a given date**. The columns are integers refering to
generic number indexed from 0, e.g. [0, 1], and the index is strings
representing instrument names. If dict is given keys should be generic
instrument names, e.g. 'CL', and values should be pandas.DataFrames of
loadings. The union of all indexes should be a superset of the
instruments.index
root_generic_multiplier: pandas.Series
Series of multipliers for generic instruments lexigraphically sorted.
If a dictionary of weights is given, root_generic_multiplier.index
should correspond to the weights keys.
Returns
-------
A pandas.Series of multipliers for tradeable instruments.
Examples
--------
>>> import pandas as pd
>>> import mapping.util as util
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=[0, 1])
>>> ast_mult = pd.Series([1000], index=["CL"])
>>> util.get_multiplier(wts, ast_mult)
"""
if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
raise ValueError("For multiple generic instruments weights must be a "
"dictionary")
mults = []
intrs = []
for ast, multiplier in root_generic_multiplier.iteritems():
if isinstance(weights, dict):
weights_ast = weights[ast].index
else:
weights_ast = weights.index
mults.extend(np.repeat(multiplier, len(weights_ast)))
intrs.extend(weights_ast)
imults = pd.Series(mults, intrs)
imults = imults.sort_index()
return imults
def weighted_expiration(weights, contract_dates):
"""
Calculate the days to expiration for generic futures, weighted by the
composition of the underlying tradeable instruments.
Parameters:
-----------
weights: pandas.DataFrame
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names.
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values
Returns:
--------
A pandas.DataFrame with columns of generic futures and index of dates.
Values are the weighted average of days to expiration for the underlying
contracts.
Examples:
---------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
... (pd.Timestamp('2015-01-03'), 'CLG15'),
... (pd.Timestamp('2015-01-04'), 'CLF15'),
... (pd.Timestamp('2015-01-04'), 'CLG15'),
... (pd.Timestamp('2015-01-04'), 'CLH15'),
... (pd.Timestamp('2015-01-05'), 'CLG15'),
... (pd.Timestamp('2015-01-05'), 'CLH15')])
>>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
>>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
... pd.Timestamp('2015-02-21'),
... pd.Timestamp('2015-03-20')],
... index=['CLF15', 'CLG15', 'CLH15'])
>>> util.weighted_expiration(weights, contract_dates)
""" # NOQA
cols = weights.columns
weights = weights.reset_index(level=-1)
expiries = contract_dates.to_dict()
weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
diffs = (pd.DatetimeIndex(weights.expiry)
- pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
weights = weights.loc[:, cols]
wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
to_notional
|
python
|
def to_notional(instruments, prices, multipliers, desired_ccy=None,
instr_fx=None, fx_rates=None):
notionals = _instr_conv(instruments, prices, multipliers, True,
desired_ccy, instr_fx, fx_rates)
return notionals
|
Convert number of contracts of tradeable instruments to notional value of
tradeable instruments in a desired currency.
Parameters
----------
instruments: pandas.Series
Series of instrument holdings. Index is instrument name and values are
number of contracts.
prices: pandas.Series
Series of instrument prices. Index is instrument name and values are
instrument prices. prices.index should be a superset of
instruments.index otherwise NaN returned for instruments without prices
multipliers: pandas.Series
Series of instrument multipliers. Index is instrument name and
values are the multiplier associated with the contract.
multipliers.index should be a superset of instruments.index
desired_ccy: str
Three letter string representing desired currency to convert notional
values to, e.g. 'USD'. If None is given currency conversion is ignored.
instr_fx: pandas.Series
Series of instrument fx denominations. Index is instrument name and
values are three letter strings representing the currency the
instrument is denominated in. instr_fx.index should match prices.index
fx_rates: pandas.Series
Series of fx rates used for conversion to desired_ccy. Index is strings
representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
corresponding exchange rates.
Returns
-------
pandas.Series of notional amounts of instruments with Index of instruments
names
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
>>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
>>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
>>> ntln = util.to_notional(current_contracts, prices, multipliers)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L461-L508
|
[
"def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,\n instr_fx, fx_rates):\n\n if not instruments.index.is_unique:\n raise ValueError(\"'instruments' must have unique index\")\n if not prices.index.is_unique:\n raise ValueError(\"'prices' must have unique index\")\n if not multipliers.index.is_unique:\n raise ValueError(\"'multipliers' must have unique index\")\n\n if desired_ccy:\n if not instr_fx.index.is_unique:\n raise ValueError(\"'instr_fx' must have unique index\")\n if not fx_rates.index.is_unique:\n raise ValueError(\"'fx_rates' must have unique index\")\n prices = prices.loc[instr_fx.index]\n conv_rate = []\n for ccy in instr_fx.values:\n conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))\n fx_adj_prices = prices * np.array(conv_rate)\n else:\n fx_adj_prices = prices\n\n if to_notional:\n amounts = instruments * fx_adj_prices * multipliers\n else:\n amounts = (instruments / fx_adj_prices) / multipliers\n\n amounts = amounts.loc[instruments.index]\n\n return amounts\n"
] |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
"""
Convenience function for reading in pricing data from csv files
Parameters
----------
files: list
List of strings refering to csv files to read data in from, first
column should be dates
name_func: func
A function to apply to the file strings to infer the instrument name,
used in the second level of the MultiIndex index. Default is the file
name excluding the pathname and file ending,
e.g. /path/to/file/name.csv -> name
Returns
-------
A pandas.DataFrame with a pandas.MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Columns are
given by the csv file columns.
"""
if name_func is None:
def name_func(x):
return os.path.split(x)[1].split(".")[0]
dfs = []
for f in files:
name = name_func(f)
df = pd.read_csv(f, index_col=0, parse_dates=True)
df.sort_index(inplace=True)
df.index = pd.MultiIndex.from_product([df.index, [name]],
names=["date", "contract"])
dfs.append(df)
return pd.concat(dfs, axis=0, sort=False).sort_index()
def flatten(weights):
"""
Flatten weights into a long DataFrame.
Parameters
----------
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas. Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A long DataFrame of weights, where columns are "date", "contract",
"generic" and "weight". If a dictionary is passed, DataFrame will contain
additional colum "key" containing the key value and be sorted according to
this key value.
Example
-------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLH5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
>>> util.flatten(weights)
""" # NOQA
if isinstance(weights, pd.DataFrame):
wts = weights.stack().reset_index()
wts.columns = ["date", "contract", "generic", "weight"]
elif isinstance(weights, dict):
wts = []
for key in sorted(weights.keys()):
wt = weights[key].stack().reset_index()
wt.columns = ["date", "contract", "generic", "weight"]
wt.loc[:, "key"] = key
wts.append(wt)
wts = pd.concat(wts, axis=0).reset_index(drop=True)
else:
raise ValueError("weights must be pd.DataFrame or dict")
return wts
def unflatten(flat_weights):
"""
Pivot weights from long DataFrame into weighting matrix.
Parameters
----------
flat_weights: pandas.DataFrame
A long DataFrame of weights, where columns are "date", "contract",
"generic", "weight" and optionally "key". If "key" column is
present a dictionary of unflattened DataFrames is returned with the
dictionary keys corresponding to the "key" column and each sub
DataFrame containing rows for this key.
Returns
-------
A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
where the top level contains pandas.Timestamps and the second level is
instrument names. The columns consist of generic names. If dict is returned
the dict keys correspond to the "key" column of the input.
Example
-------
>>> import pandas as pd
>>> from pandas import Timestamp as TS
>>> import mapping.util as util
>>> long_wts = pd.DataFrame(
... {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
... "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
... "generic": ["CL1", "CL2"] * 4,
... "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
... ).loc[:, ["date", "contract", "generic", "weight"]]
>>> util.unflatten(long_wts)
See also: calc_rets()
""" # NOQA
if flat_weights.columns.contains("key"):
weights = {}
for key in flat_weights.loc[:, "key"].unique():
flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
flt_wts = flt_wts.drop(labels="key", axis=1)
wts = flt_wts.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
wts.columns = wts.columns.droplevel(0)
weights[key] = wts
else:
weights = flat_weights.pivot_table(index=["date", "contract"],
columns=["generic"],
values=["weight"])
weights.columns = weights.columns.droplevel(0)
return weights
def calc_rets(returns, weights):
"""
Calculate continuous return series for futures instruments. These consist
of weighted underlying instrument returns, who's weights can vary over
time.
Parameters
----------
returns: pandas.Series or dict
A Series of instrument returns with a MultiIndex where the top level is
pandas.Timestamps and the second level is instrument names. Values
correspond to one period instrument returns. returns should be
available for all for all Timestamps and instruments provided in
weights. If dict is given this should be a dict of pandas.Series in the
above format, with keys which are a subset of the keys given in weights
weights: pandas.DataFrame or dict
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names. If dict is given this should be
a dict of pandas.DataFrame in the above format, with keys for different
root generics, e.g. 'CL'
Returns
-------
A pandas.DataFrame of continuous returns for generics. The index is
pandas.Timestamps and the columns is generic names, corresponding to
weights.columns
Examples
--------
>>> import pandas as pd
>>> import mapping.util as util
>>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-03'), 'CLG5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
>>> vals = [1, 1/2, 1/2, 1]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLF5'),
... (pd.Timestamp('2015-01-04'), 'CLG5'),
... (pd.Timestamp('2015-01-05'), 'CLG5')])
>>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
>>> irets = price.groupby(level=-1).pct_change()
>>> util.calc_rets(irets, weights)
""" # NOQA
if not isinstance(returns, dict):
returns = {"": returns}
if not isinstance(weights, dict):
weights = {"": weights}
generic_superset = []
for root in weights:
generic_superset.extend(weights[root].columns.tolist())
if len(set(generic_superset)) != len(generic_superset):
raise ValueError("Columns for weights must all be unique")
_check_indices(returns, weights)
grets = []
cols = []
for root in returns:
root_wts = weights[root]
root_rets = returns[root]
for generic in root_wts.columns:
gnrc_wts = root_wts.loc[:, generic]
# drop generics where weight is 0, this avoids potential KeyError
# in later indexing of rets even when ret has weight of 0
gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
rets = root_rets.loc[gnrc_wts.index]
# groupby time
group_rets = (rets * gnrc_wts).groupby(level=0)
grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
cols.extend(root_wts.columns.tolist())
rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that returns fully cover weights; raise otherwise.

    Both arguments are dicts keyed by root generic. calc_rets() wraps bare
    (non dict) inputs as {"": value}, so a key list of [""] indicates the
    non dict call form and selects error messages without key names.

    Raises
    ------
    ValueError
        If returns contain dates not present in weights (check 1).
    KeyError
        If a non zero weight refers to a (date, instrument) entry with no
        associated return (check 2).
    """
    # dictionaries of returns and weights
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    if list(returns.keys()) == [""]:
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1: every return date must appear among the weight dates
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2: every non zero weight entry must have a matching return
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices such that when instrument level returns are
    calculated they are compatible with a pd.MultiIndex of instrument weights
    in calc_rets(). This amounts to reindexing the series by an augmented
    version of index which includes the preceding date for the first appearance
    of each instrument. Fill forward missing values with previous price up to
    some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    See also: calc_rets()

    Example
    -------
    >>> import pandas as pd
    >>> from pandas import Timestamp as TS
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLH5'),
    ...                                  (TS('2015-01-06'), 'CLF5'),
    ...                                  (TS('2015-01-06'), 'CLH5'),
    ...                                  (TS('2015-01-07'), 'CLF5'),
    ...                                  (TS('2015-01-07'), 'CLH5')])
    >>> prices = pd.Series([100.12, 101.50, 102.51, 103.51, 102.73, 102.15,
    ...                     104.37], index=idx)
    >>> widx = pd.MultiIndex.from_tuples([(TS('2015-01-05'), 'CLF5'),
    ...                                   (TS('2015-01-05'), 'CLH5'),
    ...                                   (TS('2015-01-07'), 'CLF5'),
    ...                                   (TS('2015-01-07'), 'CLH5')])
    >>> util.reindex(prices, widx, limit=0)
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # a price dated strictly before the first weight date is required so the
    # earliest first-appearance can be given a preceding observation
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    # for each instrument's first appearance in index, add the preceding
    # calendar date so a return can be computed on that first date
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # forward fill per instrument, at most `limit` consecutive periods
        prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional holdings
    based on prevailing prices and mapping from generics to tradeable
    instruments. Differences between current holdings and desired holdings
    are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics. Index
        is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        instrument prices. Extra instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of mapped desired_holdings
        instruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts()

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
    >>> current_contracts = pd.Series([0, 1, 0],
    ...                               index=['CLX16', 'CLZ16', 'CLF17'])
    >>> prices = pd.Series([50.32, 50.41, 50.48],
    ...                    index=['CLX16', 'CLZ16', 'CLF17'])
    >>> multipliers = pd.Series([100, 100, 100],
    ...                         index=['CLX16', 'CLZ16', 'CLF17'])
    >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
    ...                           prices, multipliers)
    """
    if not isinstance(trade_weights, dict):
        # normalize the single-root call form to the dict form
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # restrict this root's weights to the generics actually requested
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # per instrument notional = sum over generics of weight * notional
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional amounts of tradeable instruments into integer numbers
    of contracts, rounding to the nearest whole contract.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amount held per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices indexed by instrument name; its index should be a
        superset of instruments.index.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; its index should be
        a superset of instruments.index.
    desired_ccy: str
        Optional three letter currency code, e.g. 'USD', to convert notional
        values into. When None no FX conversion is performed.
    instr_fx: pandas.Series
        Per instrument three letter currency denominations; its index should
        match prices.index. Only used when desired_ccy is given.
    fx_rates: pandas.Series
        FX rates indexed by pair name, e.g. 'AUDUSD' or 'USDCAD'. Only used
        when desired_ccy is given.
    rounder: function
        Optional function used to round the raw pd.Series of contract counts;
        pd.Series.round is used by default.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    raw_counts = _instr_conv(instruments, prices, multipliers, False,
                             desired_ccy, instr_fx, fx_rates)
    round_func = pd.Series.round if rounder is None else rounder
    return round_func(raw_counts).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
                instr_fx, fx_rates):
    # Shared engine behind to_notional()/to_contracts(): converts between
    # contract counts and notional amounts, optionally FX-adjusting prices
    # into desired_ccy first. Result is aligned to instruments.index.
    for series, label in ((instruments, 'instruments'), (prices, 'prices'),
                          (multipliers, 'multipliers')):
        if not series.index.is_unique:
            raise ValueError("'%s' must have unique index" % label)

    if desired_ccy:
        for series, label in ((instr_fx, 'instr_fx'),
                              (fx_rates, 'fx_rates')):
            if not series.index.is_unique:
                raise ValueError("'%s' must have unique index" % label)
        aligned_prices = prices.loc[instr_fx.index]
        rates = np.array([_get_fx_conversions(fx_rates, ccy, desired_ccy)
                          for ccy in instr_fx.values])
        fx_adj_prices = aligned_prices * rates
    else:
        fx_adj_prices = prices

    if to_notional:
        amounts = instruments * fx_adj_prices * multipliers
    else:
        amounts = (instruments / fx_adj_prices) / multipliers

    return amounts.loc[instruments.index]
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multiplier based on generic asset
    multipliers and weights mapping from generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns are integers refering to
        generic number indexed from 0, e.g. [0, 1], and the index is strings
        representing instrument names. If dict is given keys should be generic
        instrument names, e.g. 'CL', and values should be pandas.DataFrames of
        loadings. The union of all indexes should be a superset of the
        instruments.index
    root_generic_multiplier: pandas.Series
        Series of multipliers for generic instruments lexigraphically sorted.
        If a dictionary of weights is given, root_generic_multiplier.index
        should correspond to the weights keys.

    Returns
    -------
    A pandas.Series of multipliers for tradeable instruments.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # NOTE: Series.iteritems() was removed in pandas 2.0; items() is the
    # equivalent and is available in all supported pandas versions.
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every instrument mapped by this root inherits the root's multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.

    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values

    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.

    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """  # NOQA
    cols = weights.columns
    # move the instrument level out of the index; after reset_index the first
    # column holds the instrument names and the index is the date level only
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # days from each observation date (the remaining index level) to the
    # instrument's expiry; the Series is indexed by date so the subtraction
    # aligns row-wise
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    # restore the original generic columns, dropping the helper columns
    weights = weights.loc[:, cols]
    # weight each instrument's days-to-expiry and aggregate per date
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
    # Return the multiplicative rate converting amounts denominated in `ccy`
    # into `desired_ccy`. `fx_rates` is a Series whose index holds pair names
    # such as 'AUDUSD' or 'USDCAD'; quotes in either direction are accepted
    # and the inverse pair is inverted.
    if ccy == desired_ccy:
        return 1.0
    direct_pair = ccy + desired_ccy
    if direct_pair in fx_rates:
        return fx_rates.loc[direct_pair]
    inverse_pair = desired_ccy + ccy
    if inverse_pair in fx_rates:
        return 1 / fx_rates.loc[inverse_pair]
    raise ValueError("Cannot convert from {0} to {1} with any of "
                     "rates:\n{2}".format(ccy, desired_ccy, fx_rates))
|
matthewgilbert/mapping
|
mapping/util.py
|
to_contracts
|
python
|
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
instr_fx=None, fx_rates=None, rounder=None):
contracts = _instr_conv(instruments, prices, multipliers, False,
desired_ccy, instr_fx, fx_rates)
if rounder is None:
rounder = pd.Series.round
contracts = rounder(contracts)
contracts = contracts.astype(int)
return contracts
|
Convert notional amount of tradeable instruments to number of instrument
contracts, rounding to nearest integer number of contracts.
Parameters
----------
instruments: pandas.Series
Series of instrument holdings. Index is instrument name and values are
notional amount on instrument.
prices: pandas.Series
Series of instrument prices. Index is instrument name and values are
instrument prices. prices.index should be a superset of
instruments.index
multipliers: pandas.Series
Series of instrument multipliers. Index is instrument name and
values are the multiplier associated with the contract.
multipliers.index should be a superset of instruments.index
desired_ccy: str
Three letter string representing desired currency to convert notional
values to, e.g. 'USD'. If None is given currency conversion is ignored.
instr_fx: pandas.Series
Series of instrument fx denominations. Index is instrument name and
values are three letter strings representing the currency the
instrument is denominated in. instr_fx.index should match prices.index
fx_rates: pandas.Series
Series of fx rates used for conversion to desired_ccy. Index is strings
representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
corresponding exchange rates.
rounder: function
Function to round pd.Series contracts to integers, if None default
pd.Series.round is used.
Returns
-------
pandas.Series of contract numbers of instruments with Index of instruments
names
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L511-L557
|
[
"def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,\n instr_fx, fx_rates):\n\n if not instruments.index.is_unique:\n raise ValueError(\"'instruments' must have unique index\")\n if not prices.index.is_unique:\n raise ValueError(\"'prices' must have unique index\")\n if not multipliers.index.is_unique:\n raise ValueError(\"'multipliers' must have unique index\")\n\n if desired_ccy:\n if not instr_fx.index.is_unique:\n raise ValueError(\"'instr_fx' must have unique index\")\n if not fx_rates.index.is_unique:\n raise ValueError(\"'fx_rates' must have unique index\")\n prices = prices.loc[instr_fx.index]\n conv_rate = []\n for ccy in instr_fx.values:\n conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))\n fx_adj_prices = prices * np.array(conv_rate)\n else:\n fx_adj_prices = prices\n\n if to_notional:\n amounts = instruments * fx_adj_prices * multipliers\n else:\n amounts = (instruments / fx_adj_prices) / multipliers\n\n amounts = amounts.loc[instruments.index]\n\n return amounts\n"
] |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Read instrument pricing data in from a collection of csv files.

    Parameters
    ----------
    files: list
        Strings naming the csv files to load; the first column of each file
        should contain dates.
    name_func: func
        Callable mapping a file string to the instrument name used in the
        second level of the MultiIndex. When omitted, the bare file name
        without its path and file ending is used,
        e.g. /path/to/file/name.csv -> name

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex where the top level is
    pandas.Timestamps and the second level is instrument names. Columns are
    given by the csv file columns.
    """
    def _default_name(path):
        # file name without directory and without anything past the first dot
        return os.path.split(path)[1].split(".")[0]

    namer = name_func if name_func is not None else _default_name

    frames = []
    for fname in files:
        frame = pd.read_csv(fname, index_col=0, parse_dates=True).sort_index()
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [namer(fname)]], names=["date", "contract"]
        )
        frames.append(frame)

    return pd.concat(frames, axis=0, sort=False).sort_index()
def flatten(weights):
    """
    Flatten a weighting matrix (or dict of them) into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top
        level contains pandas.Timestamps and the second level is instrument
        names; columns are generic names. A dict maps root generic names,
        e.g. 'CL', to DataFrames of that shape.

    Returns
    -------
    A long DataFrame with columns "date", "contract", "generic" and
    "weight". For dict input an extra "key" column holds the dict key and
    rows are ordered by sorted key.
    """
    def _melt(frame):
        long_frame = frame.stack().reset_index()
        long_frame.columns = ["date", "contract", "generic", "weight"]
        return long_frame

    if isinstance(weights, pd.DataFrame):
        return _melt(weights)
    if isinstance(weights, dict):
        pieces = []
        for key in sorted(weights):
            piece = _melt(weights[key])
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def unflatten(flat_weights):
    """
    Pivot weights from long DataFrame into weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If "key" column is
        present a dictionary of unflattened DataFrames is returned with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing rows for this key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If dict is returned
    the dict keys correspond to the "key" column of the input.

    See also: flatten()
    """
    def _pivot(frame):
        # one pivot helper shared by both branches keeps them in sync
        wide = frame.pivot_table(index=["date", "contract"],
                                 columns=["generic"],
                                 values=["weight"])
        # drop the constant "weight" level so columns are the generic names
        wide.columns = wide.columns.droplevel(0)
        return wide

    # NOTE: 'Index.contains' was removed in pandas 1.0; the 'in' operator is
    # the supported membership test on columns.
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            weights[key] = _pivot(flt_wts)
    else:
        weights = _pivot(flat_weights)
    return weights
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, whose weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in the
        above format, with keys which are a subset of the keys given in weights
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names. If dict is given this should be
        a dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """  # NOQA
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    # generic names must be globally unique across roots since they become
    # the output columns
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time
            group_rets = (rets * gnrc_wts).groupby(level=0)
            # sum with skipna=False so a missing instrument return yields NaN
            # for the generic on that date rather than being silently dropped
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
    # Compact repr of a sequence for error messages: sequences of up to two
    # elements are shown in full, longer ones are abbreviated to their first
    # and last items.
    if len(xs) <= 2:
        return repr(xs)
    return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that returns fully cover weights before computing rets.

    Both arguments are dicts keyed by root generic. calc_rets() wraps bare
    (non dict) inputs as {"": value}, so a key list of [""] signals the non
    dict call form and selects error messages without key names.

    Raises
    ------
    ValueError
        If returns contain dates not present in weights (check 1).
    KeyError
        If a non zero weight refers to a (date, instrument) entry with no
        associated return (check 2).
    """
    # dictionaries of returns and weights
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    if list(returns.keys()) == [""]:
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1: every return date must appear among the weight dates
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2: every non zero weight entry must have a matching return
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices such that when instrument level returns are
    calculated they are compatible with a pd.MultiIndex of instrument weights
    in calc_rets(). This amounts to reindexing the series by an augmented
    version of index which includes the preceding date for the first appearance
    of each instrument. Fill forward missing values with previous price up to
    some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    See also: calc_rets()

    Example
    -------
    >>> import pandas as pd
    >>> from pandas import Timestamp as TS
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLH5'),
    ...                                  (TS('2015-01-06'), 'CLF5'),
    ...                                  (TS('2015-01-06'), 'CLH5'),
    ...                                  (TS('2015-01-07'), 'CLF5'),
    ...                                  (TS('2015-01-07'), 'CLH5')])
    >>> prices = pd.Series([100.12, 101.50, 102.51, 103.51, 102.73, 102.15,
    ...                     104.37], index=idx)
    >>> widx = pd.MultiIndex.from_tuples([(TS('2015-01-05'), 'CLF5'),
    ...                                   (TS('2015-01-05'), 'CLH5'),
    ...                                   (TS('2015-01-07'), 'CLF5'),
    ...                                   (TS('2015-01-07'), 'CLH5')])
    >>> util.reindex(prices, widx, limit=0)
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # a price dated strictly before the first weight date is required so the
    # earliest first-appearance can be given a preceding observation
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    # for each instrument's first appearance in index, add the preceding
    # calendar date so a return can be computed on that first date
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # forward fill per instrument, at most `limit` consecutive periods.
        # GroupBy.ffill replaces fillna(method="ffill"), which is deprecated
        # (removed in pandas 3.0); the behavior is identical.
        prices = prices.groupby(level=1).ffill(limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Compute the tradeable-contract orders required to move from the current
    contract holdings to a set of desired generic notional holdings, given
    prevailing prices and a generic-to-tradeable weighting. Instruments
    missing on either side are treated as holdings of 0, and zero-sized
    trades are dropped from the result.

    Parameters
    ----------
    current_contracts: pandas.Series
        Current number of contracts held per tradeable instrument. May be 0
        if nothing is held.
    desired_holdings: pandas.Series
        Desired holdings in base notional currency, indexed by generic name;
        the generics must appear among the trade_weights columns.
    trade_weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments **for a given
        date**: columns are generics, index is instrument names. A dict maps
        root generic names, e.g. 'CL', to such DataFrames; the union of all
        columns must be a superset of desired_holdings.index.
    prices: pandas.Series
        Instrument prices indexed by instrument name. Extra instrument
        prices will be ignored.
    multipliers: pandas.Series
        Contract multipliers indexed by instrument name; must cover all
        instruments mapped from desired_holdings.
    kwargs: key word arguments
        Forwarded to to_contracts().

    Returns
    -------
    A pandas.Series of instrument contract trades, lexicographically sorted.
    """
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}

    # every requested generic must be mapped by some root's weights
    mapped_generics = []
    for root in trade_weights:
        mapped_generics.extend(trade_weights[root].columns)
    if not set(desired_holdings.index).issubset(set(mapped_generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(mapped_generics)))

    per_root_contracts = []
    for root in trade_weights:
        root_weights = trade_weights[root]
        relevant = root_weights.columns.intersection(desired_holdings.index)
        root_holdings = desired_holdings.loc[relevant]
        root_weights = root_weights.loc[:, relevant]
        # drop instruments whose only non zero loadings were on generics
        # removed above, so they contribute no notional
        nonzero_rows = ~(root_weights == 0).all(axis=1)
        root_weights = root_weights.loc[nonzero_rows]

        # per-instrument notional = sum over generics of weight * notional
        instrument_notionals = (root_holdings * root_weights).sum(axis=1)
        instrument_prices = prices.loc[instrument_notionals.index]
        per_root_contracts.append(
            to_contracts(instrument_notionals, instrument_prices,
                         multipliers, **kwargs)
        )

    target_contracts = pd.concat(per_root_contracts, axis=0)
    trades = target_contracts.subtract(current_contracts, fill_value=0)
    return trades.loc[trades != 0].sort_index()
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """Convert contract counts of tradeable instruments into notional values,
    optionally translated into ``desired_ccy``.

    Parameters
    ----------
    instruments: pandas.Series
        Number of contracts held, indexed by instrument name.
    prices: pandas.Series
        Instrument prices; index should be a superset of
        ``instruments.index`` (instruments without prices come back NaN).
    multipliers: pandas.Series
        Contract multipliers; index should be a superset of
        ``instruments.index``.
    desired_ccy: str, optional
        Three letter currency code, e.g. 'USD'. None skips fx conversion.
    instr_fx: pandas.Series, optional
        Instrument denomination currencies; index should match
        ``prices.index``.
    fx_rates: pandas.Series, optional
        Exchange rates indexed by pair strings such as 'AUDUSD' or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional amounts indexed by instrument name.
    """
    # Delegates to the shared conversion engine with to_notional=True.
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def get_multiplier(weights, root_generic_multiplier):
    """Determine tradeable instrument multipliers from generic asset
    multipliers and the weights mapping generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments for a given
        date, with instrument names as index. A dict maps root generic names
        (e.g. 'CL') to such frames and is required when more than one root
        generic multiplier is given.
    root_generic_multiplier: pandas.Series
        Multipliers for root generic instruments, lexicographically sorted.
        For a dict of weights its index should correspond to the dict keys.

    Returns
    -------
    pandas.Series of multipliers for tradeable instruments, sorted by
    instrument name.

    Raises
    ------
    ValueError
        If multiple root generics are given but weights is not a dict.
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.items() replaces iteritems(), which was removed in pandas 2.0
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every instrument under this root shares the root's multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    return imults.sort_index()
def weighted_expiration(weights, contract_dates):
    """Days to expiration of generic futures, averaged over their underlying
    tradeable contracts using the prevailing weights.

    Parameters
    ----------
    weights: pandas.DataFrame
        Instrument weights with a (date, instrument) MultiIndex and generic
        names as columns.
    contract_dates: pandas.Series
        Maps tradeable contract names to the pandas.Timestamp of the last
        date of the roll.

    Returns
    -------
    pandas.DataFrame indexed by date with one column per generic, holding
    the weight-averaged days to expiration.
    """
    generic_cols = weights.columns
    flat = weights.reset_index(level=-1)
    expiry_of = contract_dates.to_dict()
    # after reset_index the first column holds the instrument names
    flat.loc[:, "expiry"] = flat.iloc[:, 0].apply(lambda c: expiry_of[c])
    days_left = (pd.DatetimeIndex(flat.expiry)
                 - pd.Series(flat.index, flat.index)).apply(lambda d: d.days)
    flat = flat.loc[:, generic_cols]
    return flat.mul(days_left, axis=0).groupby(level=0).sum()
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
get_multiplier
|
python
|
def get_multiplier(weights, root_generic_multiplier):
    """Determine tradeable instrument multipliers from generic multipliers
    and the weights mapping generics to tradeables.

    A dict of weights (root generic name -> DataFrame of loadings) is
    required when more than one root generic multiplier is given. Returns a
    pandas.Series of instrument multipliers sorted by instrument name.
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # Series.items() replaces iteritems(), which was removed in pandas 2.0
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every instrument under this root shares the root's multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    return imults.sort_index()
|
Determine tradeable instrument multiplier based on generic asset
multipliers and weights mapping from generics to tradeables.
Parameters
----------
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments **for a given date**. The columns are integers refering to
generic number indexed from 0, e.g. [0, 1], and the index is strings
representing instrument names. If dict is given keys should be generic
instrument names, e.g. 'CL', and values should be pandas.DataFrames of
loadings. The union of all indexes should be a superset of the
instruments.index
root_generic_multiplier: pandas.Series
Series of multipliers for generic instruments lexigraphically sorted.
If a dictionary of weights is given, root_generic_multiplier.index
should correspond to the weights keys.
Returns
-------
A pandas.Series of multipliers for tradeable instruments.
Examples
--------
>>> import pandas as pd
>>> import mapping.util as util
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=[0, 1])
>>> ast_mult = pd.Series([1000], index=["CL"])
>>> util.get_multiplier(wts, ast_mult)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L593-L643
| null |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """Read per-contract csv price files into one MultiIndexed DataFrame.

    Parameters
    ----------
    files: list
        Strings naming csv files to load; the first column of each file must
        contain dates.
    name_func: func, optional
        Maps a file string to an instrument name used in the second index
        level. Defaults to the bare file name without path or extension,
        e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    pandas.DataFrame with a (date, contract) MultiIndex of pandas.Timestamps
    and instrument names; columns are taken from the csv files.
    """
    if name_func is None:
        def name_func(path):
            return os.path.split(path)[1].split(".")[0]
    frames = []
    for fname in files:
        contract = name_func(fname)
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        # tag every row of this file with its contract name
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [contract]], names=["date", "contract"])
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
def flatten(weights):
    """Flatten weighting matrices into a single long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        Instrument weights with a (date, instrument) MultiIndex and generic
        names as columns; a dict maps root generics (e.g. 'CL') to such
        frames.

    Returns
    -------
    Long DataFrame with columns "date", "contract", "generic" and "weight".
    For a dict input an extra "key" column holds the dict key, and rows are
    ordered by sorted key.

    Raises
    ------
    ValueError
        If weights is neither a pandas.DataFrame nor a dict.
    """
    col_names = ["date", "contract", "generic", "weight"]
    if isinstance(weights, pd.DataFrame):
        flat = weights.stack().reset_index()
        flat.columns = col_names
        return flat
    if not isinstance(weights, dict):
        raise ValueError("weights must be pd.DataFrame or dict")
    frames = []
    for root in sorted(weights):
        frame = weights[root].stack().reset_index()
        frame.columns = col_names
        frame.loc[:, "key"] = root
        frames.append(frame)
    return pd.concat(frames, axis=0).reset_index(drop=True)
def unflatten(flat_weights):
    """Pivot a long DataFrame of weights back into weighting matrices.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        Long DataFrame with columns "date", "contract", "generic", "weight"
        and optionally "key". When "key" is present a dict of weighting
        DataFrames is returned, one per distinct key value, each built from
        the rows carrying that key.

    Returns
    -------
    A DataFrame (or dict of DataFrames keyed by the "key" column) of weights
    with a (date, contract) MultiIndex and generic names as columns.

    See also: flatten(), calc_rets()
    """
    # '"key" in columns' replaces Index.contains(), which was deprecated in
    # pandas 0.25 and removed in pandas 1.0
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # drop the redundant "weight" level left by pivot_table
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def calc_rets(returns, weights):
    """Build continuous generic return series from instrument returns.

    Each generic's return on a date is the weighted sum of its underlying
    instruments' returns on that date, with weights that can vary over time.

    Parameters
    ----------
    returns: pandas.Series or dict
        Instrument returns with a (date, instrument) MultiIndex; returns must
        exist for every timestamp/instrument carrying a non-zero weight. A
        dict maps root generics (e.g. 'CL') to such Series, with keys a
        subset of the keys of ``weights``.
    weights: pandas.DataFrame or dict
        Instrument weights with a (date, instrument) MultiIndex and generic
        names as columns; a dict maps root generics to such frames.

    Returns
    -------
    pandas.DataFrame of continuous generic returns indexed by date, one
    column per generic name, columns sorted lexicographically.
    """
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    all_generics = []
    for root in weights:
        all_generics.extend(weights[root].columns.tolist())
    if len(set(all_generics)) != len(all_generics):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    generic_rets = []
    generic_names = []
    for root in returns:
        root_weights = weights[root]
        root_returns = returns[root]
        for generic in root_weights.columns:
            wts = root_weights.loc[:, generic]
            # drop zero weights so instruments without returns but with zero
            # weight cannot raise on lookup below
            wts = wts.loc[wts != 0]
            instr_rets = root_returns.loc[wts.index]
            # aggregate the weighted instrument returns per date
            by_date = (instr_rets * wts).groupby(level=0)
            generic_rets.append(by_date.apply(pd.DataFrame.sum, skipna=False))
        generic_names.extend(root_weights.columns.tolist())
    out = pd.concat(generic_rets, axis=1, keys=generic_names)
    return out.sort_index(axis=1)
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """Validate that ``returns`` and ``weights`` dicts are index-compatible.

    Both arguments are dicts keyed by root generic name (a bare Series/frame
    is wrapped under key "" by the caller, calc_rets). Raises ValueError when
    returns cover dates missing from weights, and KeyError when a non-zero
    weight has no matching return entry. The messages are phrased without the
    dict key when the single "" key is used.
    """
    # dictionaries of returns and weights
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    if list(returns.keys()) == [""]:
        # single anonymous root: messages omit the dict-key indexing
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """Reindex prices so instrument-level returns computed from them align
    with a MultiIndex of weights in calc_rets().

    The index is augmented with, for each instrument, the price date that
    immediately precedes the instrument's first appearance, so a first-day
    return can be computed. Missing prices are then forward filled within
    each instrument, up to ``limit`` periods.

    Parameters
    ----------
    prices: pandas.Series
        Instrument prices with a (date, instrument) MultiIndex.
    index: pandas.MultiIndex
        (date, instrument) MultiIndex to reindex to; must be unique.
    limit: int
        Number of periods to forward fill; 0 disables filling.

    Returns
    -------
    pandas.Series of prices reindexed to the augmented index.

    Raises
    ------
    ValueError
        If ``index`` is not unique, or ``prices`` has no date preceding the
        first date in ``index``.

    See also: calc_rets()
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # map each index date to its preceding date explicitly instead of just
    # lagging, to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    first_instr = index.to_frame(index=False)
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # GroupBy.fillna(method="ffill") was removed in pandas 2.x;
        # GroupBy.ffill is the supported equivalent
        prices = prices.groupby(level=1).ffill(limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """Compute the tradeable-contract orders that move current holdings to a
    set of desired generic notional holdings.

    Differences between current and desired holdings with no counterpart on
    the other side are treated as 0, and zero-sized trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Current contract counts per tradeable instrument (0 if flat).
    desired_holdings: pandas.Series
        Desired holdings in base notional currency, indexed by generic name.
    trade_weights: pandas.DataFrame or dict
        Loadings of generics (columns) on tradeable instruments (index) for
        a given date; a dict maps root generic names to such frames. The
        union of all columns must cover ``desired_holdings.index``.
    prices: pandas.Series
        Instrument prices; extra instruments are ignored.
    multipliers: pandas.Series
        Contract multipliers; index must cover the mapped instruments.
    kwargs: key word arguments
        Passed through to to_contracts().

    Returns
    -------
    pandas.Series of instrument contract trades, lexicographically sorted.

    Raises
    ------
    ValueError
        If some desired holding cannot be mapped to tradeables.
    """
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    known_generics = []
    for root in trade_weights:
        known_generics.extend(trade_weights[root].columns)
    if not set(desired_holdings.index).issubset(set(known_generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(known_generics)))
    per_root = []
    for root in trade_weights:
        wts = trade_weights[root]
        generics = wts.columns.intersection(desired_holdings.index)
        root_holdings = desired_holdings.loc[generics]
        wts = wts.loc[:, generics]
        # drop instruments whose only non-zero weights sat in columns that
        # were dropped above
        wts = wts.loc[~(wts == 0).all(axis=1)]
        instr_notional = (root_holdings * wts).sum(axis=1)
        instr_prices = prices.loc[instr_notional.index]
        per_root.append(to_contracts(instr_notional, instr_prices,
                                     multipliers, **kwargs))
    desired_contracts = pd.concat(per_root, axis=0)
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    return trades.sort_index()
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """Convert contract counts of tradeable instruments into notional values,
    optionally translated into ``desired_ccy``.

    Parameters
    ----------
    instruments: pandas.Series
        Number of contracts held, indexed by instrument name.
    prices: pandas.Series
        Instrument prices; index should be a superset of
        ``instruments.index`` (instruments without prices come back NaN).
    multipliers: pandas.Series
        Contract multipliers; index should be a superset of
        ``instruments.index``.
    desired_ccy: str, optional
        Three letter currency code, e.g. 'USD'. None skips fx conversion.
    instr_fx: pandas.Series, optional
        Instrument denomination currencies; index should match
        ``prices.index``.
    fx_rates: pandas.Series, optional
        Exchange rates indexed by pair strings such as 'AUDUSD' or 'USDCAD'.

    Returns
    -------
    pandas.Series of notional amounts indexed by instrument name.
    """
    # Delegates to the shared conversion engine with to_notional=True.
    return _instr_conv(instruments, prices, multipliers, True,
                       desired_ccy, instr_fx, fx_rates)
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """Convert notional amounts of tradeable instruments into integer
    contract counts.

    Parameters
    ----------
    instruments: pandas.Series
        Notional amount per instrument, indexed by instrument name.
    prices: pandas.Series
        Instrument prices; index should be a superset of
        ``instruments.index``.
    multipliers: pandas.Series
        Contract multipliers; index should be a superset of
        ``instruments.index``.
    desired_ccy: str, optional
        Three letter currency code, e.g. 'USD'. None skips fx conversion.
    instr_fx: pandas.Series, optional
        Instrument denomination currencies; index should match
        ``prices.index``.
    fx_rates: pandas.Series, optional
        Exchange rates indexed by pair strings such as 'AUDUSD' or 'USDCAD'.
    rounder: function, optional
        Rounds the fractional contract Series to integers; defaults to
        pd.Series.round.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    raw_contracts = _instr_conv(instruments, prices, multipliers, False,
                                desired_ccy, instr_fx, fx_rates)
    round_fn = pd.Series.round if rounder is None else rounder
    return round_fn(raw_contracts).astype(int)
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
instr_fx, fx_rates):
if not instruments.index.is_unique:
raise ValueError("'instruments' must have unique index")
if not prices.index.is_unique:
raise ValueError("'prices' must have unique index")
if not multipliers.index.is_unique:
raise ValueError("'multipliers' must have unique index")
if desired_ccy:
if not instr_fx.index.is_unique:
raise ValueError("'instr_fx' must have unique index")
if not fx_rates.index.is_unique:
raise ValueError("'fx_rates' must have unique index")
prices = prices.loc[instr_fx.index]
conv_rate = []
for ccy in instr_fx.values:
conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
fx_adj_prices = prices * np.array(conv_rate)
else:
fx_adj_prices = prices
if to_notional:
amounts = instruments * fx_adj_prices * multipliers
else:
amounts = (instruments / fx_adj_prices) / multipliers
amounts = amounts.loc[instruments.index]
return amounts
def weighted_expiration(weights, contract_dates):
    """Days to expiration of generic futures, averaged over their underlying
    tradeable contracts using the prevailing weights.

    Parameters
    ----------
    weights: pandas.DataFrame
        Instrument weights with a (date, instrument) MultiIndex and generic
        names as columns.
    contract_dates: pandas.Series
        Maps tradeable contract names to the pandas.Timestamp of the last
        date of the roll.

    Returns
    -------
    pandas.DataFrame indexed by date with one column per generic, holding
    the weight-averaged days to expiration.
    """
    generic_cols = weights.columns
    flat = weights.reset_index(level=-1)
    expiry_of = contract_dates.to_dict()
    # after reset_index the first column holds the instrument names
    flat.loc[:, "expiry"] = flat.iloc[:, 0].apply(lambda c: expiry_of[c])
    days_left = (pd.DatetimeIndex(flat.expiry)
                 - pd.Series(flat.index, flat.index)).apply(lambda d: d.days)
    flat = flat.loc[:, generic_cols]
    return flat.mul(days_left, axis=0).groupby(level=0).sum()
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
# return rate to multiply through by to convert from instrument ccy to
# desired ccy
# fx_rates is a series of fx rates with index names of the form AUDUSD,
# USDCAD, etc. ccy is a st
ccy_pair1 = ccy + desired_ccy
ccy_pair2 = desired_ccy + ccy
if ccy == desired_ccy:
conv_rate = 1.0
elif ccy_pair1 in fx_rates:
conv_rate = fx_rates.loc[ccy_pair1]
elif ccy_pair2 in fx_rates:
conv_rate = 1 / fx_rates.loc[ccy_pair2]
else:
raise ValueError("Cannot convert from {0} to {1} with any of "
"rates:\n{2}".format(ccy, desired_ccy, fx_rates))
return conv_rate
|
matthewgilbert/mapping
|
mapping/util.py
|
weighted_expiration
|
python
|
def weighted_expiration(weights, contract_dates):
    """
    Calculate the days to expiration for generic futures, weighted by the
    composition of the underlying tradeable instruments.

    Parameters:
    -----------
    weights: pandas.DataFrame
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names.
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values

    Returns:
    --------
    A pandas.DataFrame with columns of generic futures and index of dates.
    Values are the weighted average of days to expiration for the underlying
    contracts.

    Examples:
    ---------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-03'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLH15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG15'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLH15')])
    >>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
    >>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
    ...                             pd.Timestamp('2015-02-21'),
    ...                             pd.Timestamp('2015-03-20')],
    ...                            index=['CLF15', 'CLG15', 'CLH15'])
    >>> util.weighted_expiration(weights, contract_dates)
    """  # NOQA
    cols = weights.columns
    # move the contract level of the MultiIndex into a column so each row
    # corresponds to one (date, instrument) observation
    weights = weights.reset_index(level=-1)
    expiries = contract_dates.to_dict()
    # iloc[:, 0] is the contract-name column produced by reset_index above
    weights.loc[:, "expiry"] = weights.iloc[:, 0].apply(lambda x: expiries[x])
    # calendar days from each observation date (the remaining index level)
    # to the instrument's expiry
    diffs = (pd.DatetimeIndex(weights.expiry)
             - pd.Series(weights.index, weights.index)).apply(lambda x: x.days)
    weights = weights.loc[:, cols]
    # per-date weighted average; relies on weights summing to 1 per generic
    # per date (as in the docstring examples) for the result to be an average
    wexp = weights.mul(diffs, axis=0).groupby(level=0).sum()
    return wexp
|
Calculate the days to expiration for generic futures, weighted by the
composition of the underlying tradeable instruments.
Parameters:
-----------
weights: pandas.DataFrame
A DataFrame of instrument weights with a MultiIndex where the top level
contains pandas.Timestamps and the second level is instrument names.
The columns consist of generic names.
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values
Returns:
--------
A pandas.DataFrame with columns of generic futures and index of dates.
Values are the weighted average of days to expiration for the underlying
contracts.
Examples:
---------
>>> import pandas as pd
>>> import mapping.util as util
>>> vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
>>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF15'),
... (pd.Timestamp('2015-01-03'), 'CLG15'),
... (pd.Timestamp('2015-01-04'), 'CLF15'),
... (pd.Timestamp('2015-01-04'), 'CLG15'),
... (pd.Timestamp('2015-01-04'), 'CLH15'),
... (pd.Timestamp('2015-01-05'), 'CLG15'),
... (pd.Timestamp('2015-01-05'), 'CLH15')])
>>> weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=widx)
>>> contract_dates = pd.Series([pd.Timestamp('2015-01-20'),
... pd.Timestamp('2015-02-21'),
... pd.Timestamp('2015-03-20')],
... index=['CLF15', 'CLG15', 'CLH15'])
>>> util.weighted_expiration(weights, contract_dates)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/util.py#L646-L694
| null |
import pandas as pd
import numpy as np
import os
def read_price_data(files, name_func=None):
    """
    Convenience function for reading in pricing data from csv files

    Parameters
    ----------
    files: list
        List of strings referring to csv files to read data in from, first
        column should be dates
    name_func: func
        A function to apply to the file strings to infer the instrument name,
        used in the second level of the MultiIndex index. Default is the file
        name excluding the pathname and file ending,
        e.g. /path/to/file/name.csv -> name

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex where the top level is
    pandas.Timestamps and the second level is instrument names. Columns are
    given by the csv file columns.
    """
    if name_func is None:
        # default: strip the directory and file extension,
        # e.g. /path/to/CLX16.csv -> CLX16
        def name_func(x):
            return os.path.split(x)[1].split(".")[0]
    dfs = []
    for f in files:
        name = name_func(f)
        df = pd.read_csv(f, index_col=0, parse_dates=True)
        df.sort_index(inplace=True)
        # tag every row of this file with its instrument name as a second
        # index level so files can be concatenated into one frame
        df.index = pd.MultiIndex.from_product([df.index, [name]],
                                              names=["date", "contract"])
        dfs.append(df)
    # sort=False preserves column order across files; final sort orders rows
    # by (date, contract)
    return pd.concat(dfs, axis=0, sort=False).sort_index()
def flatten(weights):
    """
    Flatten a weights mapping into a long DataFrame.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex whose top level
        contains pandas.Timestamps and whose second level is instrument
        names; columns are generic names. Alternatively a dict of such
        DataFrames keyed by root generic, e.g. 'CL'.

    Returns
    -------
    A long DataFrame with columns "date", "contract", "generic" and
    "weight". For dict input an additional "key" column holds the dict key
    and rows are ordered by sorted key.
    """
    def _to_long(frame):
        # stack generics into rows, then name the resulting columns
        long_frame = frame.stack().reset_index()
        long_frame.columns = ["date", "contract", "generic", "weight"]
        return long_frame

    if isinstance(weights, pd.DataFrame):
        return _to_long(weights)
    if isinstance(weights, dict):
        pieces = []
        for key in sorted(weights.keys()):
            piece = _to_long(weights[key])
            piece.loc[:, "key"] = key
            pieces.append(piece)
        return pd.concat(pieces, axis=0).reset_index(drop=True)
    raise ValueError("weights must be pd.DataFrame or dict")
def unflatten(flat_weights):
    """
    Pivot weights from long DataFrame into weighting matrix.

    Parameters
    ----------
    flat_weights: pandas.DataFrame
        A long DataFrame of weights, where columns are "date", "contract",
        "generic", "weight" and optionally "key". If "key" column is
        present a dictionary of unflattened DataFrames is returned with the
        dictionary keys corresponding to the "key" column and each sub
        DataFrame containing rows for this key.

    Returns
    -------
    A DataFrame or dict of DataFrames of instrument weights with a MultiIndex
    where the top level contains pandas.Timestamps and the second level is
    instrument names. The columns consist of generic names. If dict is
    returned the dict keys correspond to the "key" column of the input.

    Example
    -------
    >>> import pandas as pd
    >>> from pandas import Timestamp as TS
    >>> import mapping.util as util
    >>> long_wts = pd.DataFrame(
    ...     {"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
    ...      "contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
    ...      "generic": ["CL1", "CL2"] * 4,
    ...      "weight": [1, 0, 0, 1, 1, 0, 0, 1]}
    ... ).loc[:, ["date", "contract", "generic", "weight"]]
    >>> util.unflatten(long_wts)

    See also: calc_rets()
    """  # NOQA
    # BUGFIX: Index.contains() was deprecated in pandas 0.25 and removed in
    # 1.0; ``in`` is the supported, equivalent membership test
    if "key" in flat_weights.columns:
        weights = {}
        for key in flat_weights.loc[:, "key"].unique():
            flt_wts = flat_weights.loc[flat_weights.loc[:, "key"] == key, :]
            flt_wts = flt_wts.drop(labels="key", axis=1)
            wts = flt_wts.pivot_table(index=["date", "contract"],
                                      columns=["generic"],
                                      values=["weight"])
            # pivot_table produces a ("weight", generic) column MultiIndex;
            # keep only the generic level
            wts.columns = wts.columns.droplevel(0)
            weights[key] = wts
    else:
        weights = flat_weights.pivot_table(index=["date", "contract"],
                                           columns=["generic"],
                                           values=["weight"])
        weights.columns = weights.columns.droplevel(0)
    return weights
def calc_rets(returns, weights):
    """
    Calculate continuous return series for futures instruments. These consist
    of weighted underlying instrument returns, who's weights can vary over
    time.

    Parameters
    ----------
    returns: pandas.Series or dict
        A Series of instrument returns with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names. Values
        correspond to one period instrument returns. returns should be
        available for all for all Timestamps and instruments provided in
        weights. If dict is given this should be a dict of pandas.Series in the
        above format, with keys which are a subset of the keys given in weights
    weights: pandas.DataFrame or dict
        A DataFrame of instrument weights with a MultiIndex where the top level
        contains pandas.Timestamps and the second level is instrument names.
        The columns consist of generic names. If dict is given this should be
        a dict of pandas.DataFrame in the above format, with keys for different
        root generics, e.g. 'CL'

    Returns
    -------
    A pandas.DataFrame of continuous returns for generics. The index is
    pandas.Timestamps and the columns is generic names, corresponding to
    weights.columns

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-02'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-03'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                  (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                  (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> price = pd.Series([45.63, 45.85, 46.13, 46.05, 46.25, 46.20], index=idx)
    >>> vals = [1, 1/2, 1/2, 1]
    >>> widx = pd.MultiIndex.from_tuples([(pd.Timestamp('2015-01-03'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLF5'),
    ...                                   (pd.Timestamp('2015-01-04'), 'CLG5'),
    ...                                   (pd.Timestamp('2015-01-05'), 'CLG5')])
    >>> weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
    >>> irets = price.groupby(level=-1).pct_change()
    >>> util.calc_rets(irets, weights)
    """  # NOQA
    # normalize both arguments to the dict form so one code path handles both
    if not isinstance(returns, dict):
        returns = {"": returns}
    if not isinstance(weights, dict):
        weights = {"": weights}
    generic_superset = []
    for root in weights:
        generic_superset.extend(weights[root].columns.tolist())
    # duplicate generic names across roots would collide as concat keys below
    if len(set(generic_superset)) != len(generic_superset):
        raise ValueError("Columns for weights must all be unique")
    _check_indices(returns, weights)
    grets = []
    cols = []
    for root in returns:
        root_wts = weights[root]
        root_rets = returns[root]
        for generic in root_wts.columns:
            gnrc_wts = root_wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            rets = root_rets.loc[gnrc_wts.index]
            # groupby time
            group_rets = (rets * gnrc_wts).groupby(level=0)
            # skipna=False so a missing instrument return propagates NaN into
            # the generic's return rather than being silently dropped
            grets.append(group_rets.apply(pd.DataFrame.sum, skipna=False))
        # one concat key per generic of this root, in the same order the
        # inner loop appended to grets
        cols.extend(root_wts.columns.tolist())
    rets = pd.concat(grets, axis=1, keys=cols).sort_index(axis=1)
    return rets
def _stringify(xs):
if len(xs) <= 2:
return repr(xs)
return '[{!r}, ..., {!r}]'.format(xs[0], xs[-1])
def _check_indices(returns, weights):
    """
    Validate that every root's returns/weights indices are mutually
    consistent; raise ValueError or KeyError with a descriptive message
    otherwise.
    """
    # dictionaries of returns and weights
    # check 1: ensure that all non zero instrument weights have associated
    # returns, see https://github.com/matthewgilbert/mapping/issues/3
    # check 2: ensure that returns are not dropped if reindexed from weights,
    # see https://github.com/matthewgilbert/mapping/issues/8
    # the "" key marks the non-dict calling convention; tailor messages so
    # they quote the expressions the caller actually wrote
    if list(returns.keys()) == [""]:
        msg1 = ("'returns.index.get_level_values(0)' must contain dates which "
                "are a subset of 'weights.index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights.loc[:, '{2}'].index' are not in 'returns.index'")
    else:
        msg1 = ("'returns['{0}'].index.get_level_values(0)' must contain "
                "dates which are a subset of "
                "'weights['{0}'].index.get_level_values(0)'"
                "\nExtra keys: {1}")
        msg2 = ("{0} from the non zero elements of "
                "'weights['{1}'].loc[:, '{2}'].index' are not in "
                "'returns['{1}'].index'")
    for root in returns:
        wts = weights[root]
        rets = returns[root]
        dts_rets = rets.index.get_level_values(0)
        dts_wts = wts.index.get_level_values(0)
        # check 1
        if not dts_rets.isin(dts_wts).all():
            missing_dates = dts_rets.difference(dts_wts).tolist()
            raise ValueError(msg1.format(root, _stringify(missing_dates)))
        # check 2
        for generic in wts.columns:
            gnrc_wts = wts.loc[:, generic]
            # drop generics where weight is 0, this avoids potential KeyError
            # in later indexing of rets even when ret has weight of 0
            gnrc_wts = gnrc_wts.loc[gnrc_wts != 0]
            # necessary instead of missing_keys.any() to support MultiIndex
            if not gnrc_wts.index.isin(rets.index).all():
                # as list instead of MultiIndex for legibility when stack trace
                missing_keys = (gnrc_wts.index.difference(rets.index).tolist())
                # rebinding msg2 is safe: we raise immediately afterwards
                msg2 = msg2.format(_stringify(missing_keys), root, generic)
                raise KeyError(msg2)
def reindex(prices, index, limit):
    """
    Reindex a pd.Series of prices such that when instrument level returns are
    calculated they are compatible with a pd.MultiIndex of instrument weights
    in calc_rets(). This amount to reindexing the series by an augmented
    version of index which includes the preceding date for the first appearance
    of each instrument. Fill forward missing values with previous price up to
    some limit.

    Parameters
    ----------
    prices: pandas.Series
        A Series of instrument prices with a MultiIndex where the top level is
        pandas.Timestamps and the second level is instrument names.
    index: pandas.MultiIndex
        A MultiIndex where the top level contains pandas.Timestamps and the
        second level is instrument names.
    limit: int
        Number of periods to fill prices forward.

    Returns
    -------
    A pandas.Series of reindexed prices where the top level is
    pandas.Timestamps and the second level is instrument names.

    See also: calc_rets()

    Example
    -------
    >>> import pandas as pd
    >>> from pandas import Timestamp as TS
    >>> import mapping.util as util
    >>> idx = pd.MultiIndex.from_tuples([(TS('2015-01-04'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLF5'),
    ...                                  (TS('2015-01-05'), 'CLH5'),
    ...                                  (TS('2015-01-06'), 'CLF5'),
    ...                                  (TS('2015-01-06'), 'CLH5'),
    ...                                  (TS('2015-01-07'), 'CLF5'),
    ...                                  (TS('2015-01-07'), 'CLH5')])
    >>> prices = pd.Series([100.12, 101.50, 102.51, 103.51, 102.73, 102.15,
    ...                     104.37], index=idx)
    >>> widx = pd.MultiIndex.from_tuples([(TS('2015-01-05'), 'CLF5'),
    ...                                   (TS('2015-01-05'), 'CLH5'),
    ...                                   (TS('2015-01-07'), 'CLF5'),
    ...                                   (TS('2015-01-07'), 'CLH5')])
    >>> util.reindex(prices, widx, limit=0)
    """
    if not index.is_unique:
        raise ValueError("'index' must be unique")
    index = index.sort_values()
    index.names = ["date", "instrument"]
    price_dts = prices.sort_index().index.unique(level=0)
    index_dts = index.unique(level=0)
    # price dates strictly before the first index date; the last of these is
    # the "preceding date" needed for the earliest return calculation
    mask = price_dts < index_dts[0]
    leading_price_dts = price_dts[mask]
    if len(leading_price_dts) == 0:
        raise ValueError("'prices' must have a date preceding first date in "
                         "'index'")
    prev_dts = index_dts.tolist()
    prev_dts.insert(0, leading_price_dts[-1])
    # avoid just lagging to preserve the calendar
    previous_date = dict(zip(index_dts, prev_dts))
    first_instr = index.to_frame(index=False)
    # keep only each instrument's first appearance in the (sorted) index
    first_instr = (
        first_instr.drop_duplicates(subset=["instrument"], keep="first")
    )
    first_instr.loc[:, "prev_date"] = (
        first_instr.loc[:, "date"].apply(lambda x: previous_date[x])
    )
    additional_indices = pd.MultiIndex.from_tuples(
        first_instr.loc[:, ["prev_date", "instrument"]].values.tolist()
    )
    augmented_index = index.union(additional_indices).sort_values()
    prices = prices.reindex(augmented_index)
    if limit != 0:
        # forward fill within each instrument only
        # NOTE(review): fillna(method="ffill") is deprecated in pandas >= 2.1
        # in favour of .ffill(limit=...) — confirm target pandas version
        prices = prices.groupby(level=1).fillna(method="ffill", limit=limit)
    return prices
def calc_trades(current_contracts, desired_holdings, trade_weights, prices,
                multipliers, **kwargs):
    """
    Calculate the number of tradeable contracts for rebalancing from a set
    of current contract holdings to a set of desired generic notional holdings
    based on prevailing prices and mapping from generics to tradeable
    instruments. Differences between current holdings and desired holdings
    are treated as 0. Zero trades are dropped.

    Parameters
    ----------
    current_contracts: pandas.Series
        Series of current number of contracts held for tradeable instruments.
        Can pass 0 if all holdings are 0.
    desired_holdings: pandas.Series
        Series of desired holdings in base notional currency of generics. Index
        is generic contracts, these should be the same generics as in
        trade_weights.
    trade_weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns refer to generic
        contracts and the index is strings representing instrument names.
        If dict is given keys should be root generic names, e.g. 'CL', and
        values should be pandas.DataFrames of loadings. The union of all
        columns should be a superset of the desired_holdings.index
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        number of contracts. Extra instrument prices will be ignored.
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of mapped desired_holdings
        intruments.
    kwargs: key word arguments
        Key word arguments to be passed to to_contracts()

    Returns
    -------
    A pandas.Series of instrument contract trades, lexigraphically sorted.

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> desired_holdings = pd.Series([200000, -50000], index=["CL1", "CL2"])
    >>> current_contracts = pd.Series([0, 1, 0],
    ...                               index=['CLX16', 'CLZ16', 'CLF17'])
    >>> prices = pd.Series([50.32, 50.41, 50.48],
    ...                    index=['CLX16', 'CLZ16', 'CLF17'])
    >>> multipliers = pd.Series([100, 100, 100],
    ...                         index=['CLX16', 'CLZ16', 'CLF17'])
    >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,
    ...                           prices, multipliers)
    """
    # normalize to the dict form so one code path handles both
    if not isinstance(trade_weights, dict):
        trade_weights = {"": trade_weights}
    generics = []
    for key in trade_weights:
        generics.extend(trade_weights[key].columns)
    if not set(desired_holdings.index).issubset(set(generics)):
        raise ValueError("'desired_holdings.index' contains values which "
                         "cannot be mapped to tradeables.\n"
                         "Received: 'desired_holdings.index'\n {0}\n"
                         "Expected in 'trade_weights' set of columns:\n {1}\n"
                         .format(sorted(desired_holdings.index),
                                 sorted(generics)))
    desired_contracts = []
    for root_key in trade_weights:
        gnrc_weights = trade_weights[root_key]
        # restrict to the generics actually being traded under this root
        subset = gnrc_weights.columns.intersection(desired_holdings.index)
        gnrc_des_hlds = desired_holdings.loc[subset]
        gnrc_weights = gnrc_weights.loc[:, subset]
        # drop indexes where all non zero weights were in columns dropped above
        gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]
        # notional desired per instrument = sum over generics of
        # (generic notional * instrument loading)
        instr_des_hlds = gnrc_des_hlds * gnrc_weights
        instr_des_hlds = instr_des_hlds.sum(axis=1)
        wprices = prices.loc[instr_des_hlds.index]
        desired_contracts.append(to_contracts(instr_des_hlds, wprices,
                                              multipliers, **kwargs))
    desired_contracts = pd.concat(desired_contracts, axis=0)
    # instruments absent from either side are treated as 0 holdings
    trades = desired_contracts.subtract(current_contracts, fill_value=0)
    trades = trades.loc[trades != 0]
    trades = trades.sort_index()
    return trades
def to_notional(instruments, prices, multipliers, desired_ccy=None,
                instr_fx=None, fx_rates=None):
    """
    Convert number of contracts of tradeable instruments to notional value of
    tradeable instruments in a desired currency.

    Parameters
    ----------
    instruments: pandas.Series
        Series of instrument holdings. Index is instrument name and values are
        number of contracts.
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        instrument prices. prices.index should be a superset of
        instruments.index otherwise NaN returned for instruments without prices
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of instruments.index
    desired_ccy: str
        Three letter string representing desired currency to convert notional
        values to, e.g. 'USD'. If None is given currency conversion is ignored.
    instr_fx: pandas.Series
        Series of instrument fx denominations. Index is instrument name and
        values are three letter strings representing the currency the
        instrument is denominated in. instr_fx.index should match prices.index
    fx_rates: pandas.Series
        Series of fx rates used for conversion to desired_ccy. Index is strings
        representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
        corresponding exchange rates.

    Returns
    -------
    pandas.Series of notional amounts of instruments with Index of instruments
    names

    Example
    -------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> current_contracts = pd.Series([-1, 1], index=['CLX16', 'CLZ16'])
    >>> prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
    >>> multipliers = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
    >>> ntln = util.to_notional(current_contracts, prices, multipliers)
    """
    # thin wrapper: to_notional=True selects the contracts -> notional
    # direction in the shared conversion engine
    notionals = _instr_conv(instruments, prices, multipliers, True,
                            desired_ccy, instr_fx, fx_rates)
    return notionals
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional amount of tradeable instruments to number of instrument
    contracts, rounding to nearest integer number of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Series of instrument holdings. Index is instrument name and values are
        notional amount on instrument.
    prices: pandas.Series
        Series of instrument prices. Index is instrument name and values are
        instrument prices. prices.index should be a superset of
        instruments.index
    multipliers: pandas.Series
        Series of instrument multipliers. Index is instrument name and
        values are the multiplier associated with the contract.
        multipliers.index should be a superset of instruments.index
    desired_ccy: str
        Three letter string representing desired currency to convert notional
        values to, e.g. 'USD'. If None is given currency conversion is ignored.
    instr_fx: pandas.Series
        Series of instrument fx denominations. Index is instrument name and
        values are three letter strings representing the currency the
        instrument is denominated in. instr_fx.index should match prices.index
    fx_rates: pandas.Series
        Series of fx rates used for conversion to desired_ccy. Index is strings
        representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the
        corresponding exchange rates.
    rounder: function
        Function to round pd.Series contracts to integers, if None default
        pd.Series.round is used.

    Returns
    -------
    pandas.Series of contract numbers of instruments with Index of instruments
    names
    """
    # to_notional=False selects the notional -> contracts direction in the
    # shared conversion engine
    contracts = _instr_conv(instruments, prices, multipliers, False,
                            desired_ccy, instr_fx, fx_rates)
    if rounder is None:
        rounder = pd.Series.round
    contracts = rounder(contracts)
    # NOTE: int cast raises on NaN, i.e. on instruments missing a price
    contracts = contracts.astype(int)
    return contracts
def _instr_conv(instruments, prices, multipliers, to_notional, desired_ccy,
                instr_fx, fx_rates):
    """
    Shared engine for to_notional()/to_contracts(): validate index
    uniqueness, optionally FX-adjust prices into desired_ccy, then convert
    contracts -> notional (to_notional=True) or notional -> contracts
    (to_notional=False).
    """
    if not instruments.index.is_unique:
        raise ValueError("'instruments' must have unique index")
    if not prices.index.is_unique:
        raise ValueError("'prices' must have unique index")
    if not multipliers.index.is_unique:
        raise ValueError("'multipliers' must have unique index")
    if desired_ccy:
        if not instr_fx.index.is_unique:
            raise ValueError("'instr_fx' must have unique index")
        if not fx_rates.index.is_unique:
            raise ValueError("'fx_rates' must have unique index")
        # align prices to the instruments we have currency info for
        prices = prices.loc[instr_fx.index]
        conv_rate = []
        for ccy in instr_fx.values:
            conv_rate.append(_get_fx_conversions(fx_rates, ccy, desired_ccy))
        fx_adj_prices = prices * np.array(conv_rate)
    else:
        fx_adj_prices = prices
    if to_notional:
        amounts = instruments * fx_adj_prices * multipliers
    else:
        amounts = (instruments / fx_adj_prices) / multipliers
    # restrict to requested instruments (prices/multipliers may be supersets)
    amounts = amounts.loc[instruments.index]
    return amounts
def get_multiplier(weights, root_generic_multiplier):
    """
    Determine tradeable instrument multiplier based on generic asset
    multipliers and weights mapping from generics to tradeables.

    Parameters
    ----------
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments **for a given date**. The columns are integers refering to
        generic number indexed from 0, e.g. [0, 1], and the index is strings
        representing instrument names. If dict is given keys should be generic
        instrument names, e.g. 'CL', and values should be pandas.DataFrames of
        loadings. The union of all indexes should be a superset of the
        instruments.index
    root_generic_multiplier: pandas.Series
        Series of multipliers for generic instruments lexigraphically sorted.
        If a dictionary of weights is given, root_generic_multiplier.index
        should correspond to the weights keys.

    Returns
    -------
    A pandas.Series of multipliers for tradeable instruments.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.util as util
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=[0, 1])
    >>> ast_mult = pd.Series([1000], index=["CL"])
    >>> util.get_multiplier(wts, ast_mult)
    """
    if len(root_generic_multiplier) > 1 and not isinstance(weights, dict):
        raise ValueError("For multiple generic instruments weights must be a "
                         "dictionary")
    mults = []
    intrs = []
    # BUGFIX: Series.iteritems() was deprecated and removed in pandas 2.0;
    # Series.items() is the long-standing equivalent
    for ast, multiplier in root_generic_multiplier.items():
        if isinstance(weights, dict):
            weights_ast = weights[ast].index
        else:
            weights_ast = weights.index
        # every tradeable under this root generic shares its multiplier
        mults.extend(np.repeat(multiplier, len(weights_ast)))
        intrs.extend(weights_ast)
    imults = pd.Series(mults, intrs)
    imults = imults.sort_index()
    return imults
def _get_fx_conversions(fx_rates, ccy, desired_ccy):
    """Return the rate that converts an amount in ``ccy`` into
    ``desired_ccy``."""
    # return rate to multiply through by to convert from instrument ccy to
    # desired ccy
    # fx_rates is a series of fx rates with index names of the form AUDUSD,
    # USDCAD, etc. ccy is a str
    ccy_pair1 = ccy + desired_ccy
    ccy_pair2 = desired_ccy + ccy
    if ccy == desired_ccy:
        conv_rate = 1.0
    elif ccy_pair1 in fx_rates:
        # direct quote available
        conv_rate = fx_rates.loc[ccy_pair1]
    elif ccy_pair2 in fx_rates:
        # only the inverted quote available: reciprocate it
        conv_rate = 1 / fx_rates.loc[ccy_pair2]
    else:
        raise ValueError("Cannot convert from {0} to {1} with any of "
                         "rates:\n{2}".format(ccy, desired_ccy, fx_rates))
    return conv_rate
|
matthewgilbert/mapping
|
mapping/mappings.py
|
bdom_roll_date
|
python
|
def bdom_roll_date(sd, ed, bdom, months, holidays=None):
    """
    Return the ``bdom``-th business day of each month in ``months`` between
    ``sd`` and ``ed`` (inclusive), excluding ``holidays``. Useful for
    generating business-day derived contract roll dates.

    Parameters
    ----------
    sd: str
        String representing start date, %Y%m%d
    ed: str
        String representing end date, %Y%m%d
    bdom: int
        Integer indicating business day of month (1 == first)
    months: dict
        Dictionary where key is integer representation of month [1-12] and
        value is the month code [FGHJKMNQUVXZ]
    holidays: list, optional
        List of holidays to exclude from business days

    Returns
    -------
    A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
    """
    if not isinstance(bdom, int):
        raise ValueError("'bdom' must be integer")
    # BUGFIX: avoid a mutable default argument; None is the sentinel
    holidays = [] if holidays is None else holidays
    sd = pd.Timestamp(sd)
    ed = pd.Timestamp(ed)
    # widen to whole months so business-day counting starts at month begin
    t1 = sd
    if not t1.is_month_start:
        t1 = t1 - pd.offsets.MonthBegin(1)
    t2 = ed
    if not t2.is_month_end:
        t2 = t2 + pd.offsets.MonthEnd(1)
    dates = pd.date_range(t1, t2, freq="b")
    dates = dates.difference(holidays)
    date_data = pd.DataFrame({"date": dates, "year": dates.year,
                              "month": dates.month, "bdom": 1})
    # cumulative count of business days within each (year, month)
    date_data.loc[:, "bdom"] = (
        date_data.groupby(by=["year", "month"])["bdom"].cumsum()
    )
    date_data = date_data.loc[date_data.bdom == bdom, :]
    # Series.isin on a dict tests membership against its keys
    date_data = date_data.loc[date_data.month.isin(months), :]
    date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
    # clip back to the requested [sd, ed] window
    idx = (date_data.date >= sd) & (date_data.date <= ed)
    order = ['date', 'year', 'month', 'bdom', 'month_code']
    date_data = (date_data.loc[idx, order]
                 .reset_index(drop=True))
    return date_data
|
Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")])
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L18-L78
| null |
import pandas as pd
import numpy as np
import cvxpy
import sys
# deal with API change from cvxpy version 0.4 to 1.0
if hasattr(cvxpy, "sum_entries"):
    # cvxpy < 1.0 named the summation helper ``sum_entries``
    CVX_SUM = getattr(cvxpy, "sum_entries")
else:
    # cvxpy >= 1.0 renamed it to ``sum``
    CVX_SUM = getattr(cvxpy, "sum")
# lookups between month number and futures month-code letter (F=Jan ... Z=Dec)
TO_MONTH_CODE = dict(zip(range(1, 13), "FGHJKMNQUVXZ"))
FROM_MONTH_CODE = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """
    Calculate weight allocations to tradeable instruments for generic futures
    at a set of timestamps for a given root generic.

    Parameters
    ---------
    timestamps: iterable
        Sorted iterable of of pandas.Timestamps to calculate weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values, sorted by values.
        Index must be unique and values must be strictly monotonic.
    get_weights: function
        A function which takes in a timestamp, contract_dates, validate_inputs
        and **kwargs. Returns a list of tuples consisting of the generic
        instrument name, the tradeable contract as a string, the weight on this
        contract as a float and the date as a pandas.Timestamp.
    kwargs: keyword arguments
        Arguments to pass to get_weights

    Returns
    ------
    A pandas.DataFrame with columns representing generics and a MultiIndex of
    date and contract. Values represent weights on tradeables for each generic.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                       [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
    ...                        pd.Timestamp('2016-10-19'),
    ...                        pd.Timestamp('2016-10-19')])
    >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
    ...                       transition=trans)
    """
    timestamps = sorted(timestamps)
    contract_dates = contract_dates.sort_values()
    _check_contract_dates(contract_dates)
    weights = []
    # for loop speedup only validate inputs the first function call to
    # get_weights()
    validate_inputs = True
    # NOTE(review): assumes at least one timestamp; timestamps[0] raises
    # IndexError on empty input
    ts = timestamps[0]
    weights.extend(get_weights(ts, contract_dates,
                               validate_inputs=validate_inputs, **kwargs))
    validate_inputs = False
    for ts in timestamps[1:]:
        weights.extend(get_weights(ts, contract_dates,
                                   validate_inputs=validate_inputs, **kwargs))
    weights = aggregate_weights(weights)
    return weights
def aggregate_weights(weights, drop_date=False):
    """
    Transforms list of tuples of weights into pandas.DataFrame of weights.

    Parameters:
    -----------
    weights: list
        A list of tuples consisting of the generic instrument name,
        the tradeable contract as a string, the weight on this contract as a
        float and the date as a pandas.Timestamp.
    drop_date: boolean
        Whether to drop the date from the multiIndex

    Returns
    -------
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instrument names and
    the index is strings representing instrument names.
    """
    dwts = pd.DataFrame(weights,
                        columns=["generic", "contract", "weight", "date"])
    # missing (generic, contract) combinations get an explicit 0 weight
    dwts = dwts.pivot_table(index=['date', 'contract'],
                            columns=['generic'], values='weight', fill_value=0)
    dwts = dwts.astype(float)
    dwts = dwts.sort_index()
    if drop_date:
        # NOTE(review): index.levels[-1] is the sorted unique contract level;
        # this matches row order only when there is a single date — confirm
        # drop_date is never used with multi-date input
        dwts.index = dwts.index.levels[-1]
    return dwts
def static_transition(timestamp, contract_dates, transition, holidays=None,
validate_inputs=True):
"""
An implementation of *get_weights* parameter in roller().
Return weights to tradeable instruments for a given date based on a
transition DataFrame which indicates how to roll through the roll period.
Parameters
----------
timestamp: pandas.Timestamp
The timestamp to return instrument weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
transition: pandas.DataFrame
A DataFrame with a index of integers representing business day offsets
from the last roll date and a column which is a MultiIndex where the
top level is generic instruments and the second level is
['front', 'back'] which refer to the front month contract and the back
month contract of the roll. Note that for different generics, e.g. CL1,
CL2, the front and back month contract during a roll would refer to
different underlying instruments. The values represent the fraction of
the roll on each day during the roll period. The first row of the
transition period should be completely allocated to the front contract
and the last row should be completely allocated to the back contract.
holidays: array_like of datetime64[D]
Holidays to exclude when calculating business day offsets from the last
roll date. See numpy.busday_count.
validate_inputs: Boolean
Whether or not to validate ordering of contract_dates and transition.
**Caution** this is provided for speed however if this is set to False
and inputs are not defined properly algorithm may return incorrect
data.
Returns
-------
A list of tuples consisting of the generic instrument name, the tradeable
contract as a string, the weight on this contract as a float and the date
as a pandas.Timestamp.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]],
... index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.Timestamp('2016-10-19')
>>> wts = mappings.static_transition(ts, contract_dates, transition)
"""
if validate_inputs:
# required for MultiIndex slicing
_check_static(transition.sort_index(axis=1))
# the algorithm below will return invalid results if contract_dates is
# not as expected so better to fail explicitly
_check_contract_dates(contract_dates)
if not holidays:
holidays = []
# further speedup can be obtained using contract_dates.loc[timestamp:]
# but this requires swapping contract_dates index and values
after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
contracts = after_contract_dates.index
front_expiry_dt = after_contract_dates.iloc[0]
days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
holidays=holidays)
name2num = dict(zip(transition.columns.levels[0],
range(len(transition.columns.levels[0]))))
if days_to_expiry in transition.index:
weights_iter = transition.loc[days_to_expiry].iteritems()
# roll hasn't started yet
elif days_to_expiry < transition.index.min():
# provides significant speedup over transition.iloc[0].iteritems()
vals = transition.values[0]
weights_iter = zip(transition.columns.tolist(), vals)
# roll is finished
else:
vals = transition.values[-1]
weights_iter = zip(transition.columns.tolist(), vals)
cwts = []
for idx_tuple, weighting in weights_iter:
gen_name, position = idx_tuple
if weighting != 0:
if position == "front":
cntrct_idx = name2num[gen_name]
elif position == "back":
cntrct_idx = name2num[gen_name] + 1
try:
cntrct_name = contracts[cntrct_idx]
except IndexError as e:
raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} "
"resulting from {3} mapping")
.format(cntrct_idx, after_contract_dates,
timestamp, idx_tuple)
).with_traceback(sys.exc_info()[2])
cwts.append((gen_name, cntrct_name, weighting, timestamp))
return cwts
def _check_contract_dates(contract_dates):
if not contract_dates.index.is_unique:
raise ValueError("'contract_dates.index' must be unique")
if not contract_dates.is_unique:
raise ValueError("'contract_dates' must be unique")
# since from above we know this is unique if not monotonic means not
# strictly monotonic if we know it is sorted
if not contract_dates.is_monotonic_increasing:
raise ValueError("'contract_dates' must be strictly monotonic "
"increasing")
def _check_static(transition):
if set(transition.columns.levels[-1]) != {"front", "back"}:
raise ValueError("transition.columns.levels[-1] must consist of"
"'front' and 'back'")
generic_row_sums = transition.groupby(level=0, axis=1).sum()
if not (generic_row_sums == 1).all().all():
raise ValueError("transition rows for each generic must sum to"
" 1\n %s" % transition)
if not transition.loc[:, (slice(None), "front")].apply(lambda x: np.all(np.diff(x.values) <= 0)).all(): # NOQA
raise ValueError("'front' columns must be monotonically decreasing and"
" 'back' columns must be monotonically increasing,"
" invalid transtion:\n %s" % transition)
return
def to_generics(instruments, weights):
"""
Map tradeable instruments to generics given weights and tradeable
instrument holdings. This is solving the equation Ax = b where A is the
weights, and b is the instrument holdings. When Ax = b has no solution we
solve for x' such that Ax' is closest to b in the least squares sense with
the additional constraint that sum(x') = sum(instruments).
Scenarios with exact solutions and non exact solutions are depicted below
+------------+-----+-----+ Instruments
| contract | CL1 | CL2 | ------------------------------------
|------------+-----+-----| Scenario 1 | Scenario 2 | Scenario 3
| CLX16 | 0.5 | 0 | 10 | 10 | 10
| CLZ16 | 0.5 | 0.5 | 20 | 20 | 25
| CLF17 | 0 | 0.5 | 10 | 11 | 11
+------------+-----+-----+
In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].
NOTE: Integer solutions are not guruanteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.
Parameters
----------
instruments: pandas.Series
Series of tradeable instrument holdings where the index is the name of
the tradeable instrument and the value is the number of that instrument
held.
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instruments
and the index is strings representing instrument names. If dict is
given keys should be root generic, e.g. 'CL', and values should be
pandas.DataFrames of loadings. The union of all indexes should be a
superset of the instruments.index
Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
"""
if not isinstance(weights, dict):
weights = {"": weights}
allocations = []
unmapped_instr = instruments.index
for key in weights:
w = weights[key]
# may not always have instrument holdings for a set of weights so allow
# weights to be a superset of instruments, drop values where no
# holdings
winstrs = instruments.reindex(w.index).dropna()
w = w.loc[winstrs.index]
# drop generics where all weights for instruments on the genric are 0.
# This avoids numerical rounding issues where solution has epsilon
# weight on a generic
w = w.loc[:, ~(w == 0).all(axis=0)]
unmapped_instr = unmapped_instr.difference(winstrs.index)
A = w.values
b = winstrs.values
x = cvxpy.Variable(A.shape[1])
constrs = [CVX_SUM(x) == np.sum(b)]
obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
prob = cvxpy.Problem(obj, constrs)
prob.solve()
vals = np.array(x.value).squeeze()
idx = w.columns.tolist()
allocations.append(pd.Series(vals, index=idx))
if len(unmapped_instr) > 0:
raise KeyError("Unmapped instruments %s. weights must be a superset of"
" instruments" % unmapped_instr.tolist())
allocations = pd.concat(allocations, axis=0)
allocations = allocations.sort_index()
return allocations
|
matthewgilbert/mapping
|
mapping/mappings.py
|
roller
|
python
|
def roller(timestamps, contract_dates, get_weights, **kwargs):
timestamps = sorted(timestamps)
contract_dates = contract_dates.sort_values()
_check_contract_dates(contract_dates)
weights = []
# for loop speedup only validate inputs the first function call to
# get_weights()
validate_inputs = True
ts = timestamps[0]
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
validate_inputs = False
for ts in timestamps[1:]:
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
weights = aggregate_weights(weights)
return weights
|
Calculate weight allocations to tradeable instruments for generic futures
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L81-L141
|
[
"def _check_contract_dates(contract_dates):\n if not contract_dates.index.is_unique:\n raise ValueError(\"'contract_dates.index' must be unique\")\n if not contract_dates.is_unique:\n raise ValueError(\"'contract_dates' must be unique\")\n # since from above we know this is unique if not monotonic means not\n # strictly monotonic if we know it is sorted\n if not contract_dates.is_monotonic_increasing:\n raise ValueError(\"'contract_dates' must be strictly monotonic \"\n \"increasing\")\n",
"def aggregate_weights(weights, drop_date=False):\n \"\"\"\n Transforms list of tuples of weights into pandas.DataFrame of weights.\n\n Parameters:\n -----------\n weights: list\n A list of tuples consisting of the generic instrument name,\n the tradeable contract as a string, the weight on this contract as a\n float and the date as a pandas.Timestamp.\n drop_date: boolean\n Whether to drop the date from the multiIndex\n\n Returns\n -------\n A pandas.DataFrame of loadings of generic contracts on tradeable\n instruments for a given date. The columns are generic instrument names and\n the index is strings representing instrument names.\n \"\"\"\n dwts = pd.DataFrame(weights,\n columns=[\"generic\", \"contract\", \"weight\", \"date\"])\n dwts = dwts.pivot_table(index=['date', 'contract'],\n columns=['generic'], values='weight', fill_value=0)\n dwts = dwts.astype(float)\n dwts = dwts.sort_index()\n if drop_date:\n dwts.index = dwts.index.levels[-1]\n return dwts\n",
"def static_transition(timestamp, contract_dates, transition, holidays=None,\n validate_inputs=True):\n \"\"\"\n An implementation of *get_weights* parameter in roller().\n Return weights to tradeable instruments for a given date based on a\n transition DataFrame which indicates how to roll through the roll period.\n\n Parameters\n ----------\n timestamp: pandas.Timestamp\n The timestamp to return instrument weights for\n contract_dates: pandas.Series\n Series with index of tradeable contract names and pandas.Timestamps\n representing the last date of the roll as values, sorted by values.\n Index must be unique and values must be strictly monotonic.\n transition: pandas.DataFrame\n A DataFrame with a index of integers representing business day offsets\n from the last roll date and a column which is a MultiIndex where the\n top level is generic instruments and the second level is\n ['front', 'back'] which refer to the front month contract and the back\n month contract of the roll. Note that for different generics, e.g. CL1,\n CL2, the front and back month contract during a roll would refer to\n different underlying instruments. The values represent the fraction of\n the roll on each day during the roll period. The first row of the\n transition period should be completely allocated to the front contract\n and the last row should be completely allocated to the back contract.\n holidays: array_like of datetime64[D]\n Holidays to exclude when calculating business day offsets from the last\n roll date. 
See numpy.busday_count.\n validate_inputs: Boolean\n Whether or not to validate ordering of contract_dates and transition.\n **Caution** this is provided for speed however if this is set to False\n and inputs are not defined properly algorithm may return incorrect\n data.\n\n Returns\n -------\n A list of tuples consisting of the generic instrument name, the tradeable\n contract as a string, the weight on this contract as a float and the date\n as a pandas.Timestamp.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import mapping.mappings as mappings\n >>> cols = pd.MultiIndex.from_product([[\"CL1\", \"CL2\"], ['front', 'back']])\n >>> idx = [-2, -1, 0]\n >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],\n ... [0.0, 1.0, 0.0, 1.0]],\n ... index=idx, columns=cols)\n >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),\n ... pd.Timestamp('2016-11-21'),\n ... pd.Timestamp('2016-12-20')],\n ... index=['CLX16', 'CLZ16', 'CLF17'])\n >>> ts = pd.Timestamp('2016-10-19')\n >>> wts = mappings.static_transition(ts, contract_dates, transition)\n \"\"\"\n\n if validate_inputs:\n # required for MultiIndex slicing\n _check_static(transition.sort_index(axis=1))\n # the algorithm below will return invalid results if contract_dates is\n # not as expected so better to fail explicitly\n _check_contract_dates(contract_dates)\n\n if not holidays:\n holidays = []\n\n # further speedup can be obtained using contract_dates.loc[timestamp:]\n # but this requires swapping contract_dates index and values\n after_contract_dates = contract_dates.loc[contract_dates >= timestamp]\n contracts = after_contract_dates.index\n front_expiry_dt = after_contract_dates.iloc[0]\n days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),\n holidays=holidays)\n\n name2num = dict(zip(transition.columns.levels[0],\n range(len(transition.columns.levels[0]))))\n if days_to_expiry in transition.index:\n weights_iter = 
transition.loc[days_to_expiry].iteritems()\n # roll hasn't started yet\n elif days_to_expiry < transition.index.min():\n # provides significant speedup over transition.iloc[0].iteritems()\n vals = transition.values[0]\n weights_iter = zip(transition.columns.tolist(), vals)\n # roll is finished\n else:\n vals = transition.values[-1]\n weights_iter = zip(transition.columns.tolist(), vals)\n\n cwts = []\n for idx_tuple, weighting in weights_iter:\n gen_name, position = idx_tuple\n if weighting != 0:\n if position == \"front\":\n cntrct_idx = name2num[gen_name]\n elif position == \"back\":\n cntrct_idx = name2num[gen_name] + 1\n try:\n cntrct_name = contracts[cntrct_idx]\n except IndexError as e:\n raise type(e)((\"index {0} is out of bounds in\\n{1}\\nas of {2} \"\n \"resulting from {3} mapping\")\n .format(cntrct_idx, after_contract_dates,\n timestamp, idx_tuple)\n ).with_traceback(sys.exc_info()[2])\n cwts.append((gen_name, cntrct_name, weighting, timestamp))\n\n return cwts\n"
] |
import pandas as pd
import numpy as np
import cvxpy
import sys
# deal with API change from cvxpy version 0.4 to 1.0
if hasattr(cvxpy, "sum_entries"):
CVX_SUM = getattr(cvxpy, "sum_entries")
else:
CVX_SUM = getattr(cvxpy, "sum")
TO_MONTH_CODE = dict(zip(range(1, 13), "FGHJKMNQUVXZ"))
FROM_MONTH_CODE = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def bdom_roll_date(sd, ed, bdom, months, holidays=[]):
"""
Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")])
"""
if not isinstance(bdom, int):
raise ValueError("'bdom' must be integer")
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
t1 = sd
if not t1.is_month_start:
t1 = t1 - pd.offsets.MonthBegin(1)
t2 = ed
if not t2.is_month_end:
t2 = t2 + pd.offsets.MonthEnd(1)
dates = pd.date_range(t1, t2, freq="b")
dates = dates.difference(holidays)
date_data = pd.DataFrame({"date": dates, "year": dates.year,
"month": dates.month, "bdom": 1})
date_data.loc[:, "bdom"] = (
date_data.groupby(by=["year", "month"])["bdom"].cumsum()
)
date_data = date_data.loc[date_data.bdom == bdom, :]
date_data = date_data.loc[date_data.month.isin(months), :]
date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
idx = (date_data.date >= sd) & (date_data.date <= ed)
order = ['date', 'year', 'month', 'bdom', 'month_code']
date_data = (date_data.loc[idx, order]
.reset_index(drop=True))
return date_data
def aggregate_weights(weights, drop_date=False):
"""
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
"""
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts
def static_transition(timestamp, contract_dates, transition, holidays=None,
validate_inputs=True):
"""
An implementation of *get_weights* parameter in roller().
Return weights to tradeable instruments for a given date based on a
transition DataFrame which indicates how to roll through the roll period.
Parameters
----------
timestamp: pandas.Timestamp
The timestamp to return instrument weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
transition: pandas.DataFrame
A DataFrame with a index of integers representing business day offsets
from the last roll date and a column which is a MultiIndex where the
top level is generic instruments and the second level is
['front', 'back'] which refer to the front month contract and the back
month contract of the roll. Note that for different generics, e.g. CL1,
CL2, the front and back month contract during a roll would refer to
different underlying instruments. The values represent the fraction of
the roll on each day during the roll period. The first row of the
transition period should be completely allocated to the front contract
and the last row should be completely allocated to the back contract.
holidays: array_like of datetime64[D]
Holidays to exclude when calculating business day offsets from the last
roll date. See numpy.busday_count.
validate_inputs: Boolean
Whether or not to validate ordering of contract_dates and transition.
**Caution** this is provided for speed however if this is set to False
and inputs are not defined properly algorithm may return incorrect
data.
Returns
-------
A list of tuples consisting of the generic instrument name, the tradeable
contract as a string, the weight on this contract as a float and the date
as a pandas.Timestamp.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]],
... index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.Timestamp('2016-10-19')
>>> wts = mappings.static_transition(ts, contract_dates, transition)
"""
if validate_inputs:
# required for MultiIndex slicing
_check_static(transition.sort_index(axis=1))
# the algorithm below will return invalid results if contract_dates is
# not as expected so better to fail explicitly
_check_contract_dates(contract_dates)
if not holidays:
holidays = []
# further speedup can be obtained using contract_dates.loc[timestamp:]
# but this requires swapping contract_dates index and values
after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
contracts = after_contract_dates.index
front_expiry_dt = after_contract_dates.iloc[0]
days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
holidays=holidays)
name2num = dict(zip(transition.columns.levels[0],
range(len(transition.columns.levels[0]))))
if days_to_expiry in transition.index:
weights_iter = transition.loc[days_to_expiry].iteritems()
# roll hasn't started yet
elif days_to_expiry < transition.index.min():
# provides significant speedup over transition.iloc[0].iteritems()
vals = transition.values[0]
weights_iter = zip(transition.columns.tolist(), vals)
# roll is finished
else:
vals = transition.values[-1]
weights_iter = zip(transition.columns.tolist(), vals)
cwts = []
for idx_tuple, weighting in weights_iter:
gen_name, position = idx_tuple
if weighting != 0:
if position == "front":
cntrct_idx = name2num[gen_name]
elif position == "back":
cntrct_idx = name2num[gen_name] + 1
try:
cntrct_name = contracts[cntrct_idx]
except IndexError as e:
raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} "
"resulting from {3} mapping")
.format(cntrct_idx, after_contract_dates,
timestamp, idx_tuple)
).with_traceback(sys.exc_info()[2])
cwts.append((gen_name, cntrct_name, weighting, timestamp))
return cwts
def _check_contract_dates(contract_dates):
if not contract_dates.index.is_unique:
raise ValueError("'contract_dates.index' must be unique")
if not contract_dates.is_unique:
raise ValueError("'contract_dates' must be unique")
# since from above we know this is unique if not monotonic means not
# strictly monotonic if we know it is sorted
if not contract_dates.is_monotonic_increasing:
raise ValueError("'contract_dates' must be strictly monotonic "
"increasing")
def _check_static(transition):
if set(transition.columns.levels[-1]) != {"front", "back"}:
raise ValueError("transition.columns.levels[-1] must consist of"
"'front' and 'back'")
generic_row_sums = transition.groupby(level=0, axis=1).sum()
if not (generic_row_sums == 1).all().all():
raise ValueError("transition rows for each generic must sum to"
" 1\n %s" % transition)
if not transition.loc[:, (slice(None), "front")].apply(lambda x: np.all(np.diff(x.values) <= 0)).all(): # NOQA
raise ValueError("'front' columns must be monotonically decreasing and"
" 'back' columns must be monotonically increasing,"
" invalid transtion:\n %s" % transition)
return
def to_generics(instruments, weights):
"""
Map tradeable instruments to generics given weights and tradeable
instrument holdings. This is solving the equation Ax = b where A is the
weights, and b is the instrument holdings. When Ax = b has no solution we
solve for x' such that Ax' is closest to b in the least squares sense with
the additional constraint that sum(x') = sum(instruments).
Scenarios with exact solutions and non exact solutions are depicted below
+------------+-----+-----+ Instruments
| contract | CL1 | CL2 | ------------------------------------
|------------+-----+-----| Scenario 1 | Scenario 2 | Scenario 3
| CLX16 | 0.5 | 0 | 10 | 10 | 10
| CLZ16 | 0.5 | 0.5 | 20 | 20 | 25
| CLF17 | 0 | 0.5 | 10 | 11 | 11
+------------+-----+-----+
In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].
NOTE: Integer solutions are not guruanteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.
Parameters
----------
instruments: pandas.Series
Series of tradeable instrument holdings where the index is the name of
the tradeable instrument and the value is the number of that instrument
held.
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instruments
and the index is strings representing instrument names. If dict is
given keys should be root generic, e.g. 'CL', and values should be
pandas.DataFrames of loadings. The union of all indexes should be a
superset of the instruments.index
Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
"""
if not isinstance(weights, dict):
weights = {"": weights}
allocations = []
unmapped_instr = instruments.index
for key in weights:
w = weights[key]
# may not always have instrument holdings for a set of weights so allow
# weights to be a superset of instruments, drop values where no
# holdings
winstrs = instruments.reindex(w.index).dropna()
w = w.loc[winstrs.index]
# drop generics where all weights for instruments on the genric are 0.
# This avoids numerical rounding issues where solution has epsilon
# weight on a generic
w = w.loc[:, ~(w == 0).all(axis=0)]
unmapped_instr = unmapped_instr.difference(winstrs.index)
A = w.values
b = winstrs.values
x = cvxpy.Variable(A.shape[1])
constrs = [CVX_SUM(x) == np.sum(b)]
obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
prob = cvxpy.Problem(obj, constrs)
prob.solve()
vals = np.array(x.value).squeeze()
idx = w.columns.tolist()
allocations.append(pd.Series(vals, index=idx))
if len(unmapped_instr) > 0:
raise KeyError("Unmapped instruments %s. weights must be a superset of"
" instruments" % unmapped_instr.tolist())
allocations = pd.concat(allocations, axis=0)
allocations = allocations.sort_index()
return allocations
|
matthewgilbert/mapping
|
mapping/mappings.py
|
aggregate_weights
|
python
|
def aggregate_weights(weights, drop_date=False):
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts
|
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L144-L171
| null |
import pandas as pd
import numpy as np
import cvxpy
import sys
# deal with API change from cvxpy version 0.4 to 1.0
if hasattr(cvxpy, "sum_entries"):
CVX_SUM = getattr(cvxpy, "sum_entries")
else:
CVX_SUM = getattr(cvxpy, "sum")
TO_MONTH_CODE = dict(zip(range(1, 13), "FGHJKMNQUVXZ"))
FROM_MONTH_CODE = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def bdom_roll_date(sd, ed, bdom, months, holidays=[]):
"""
Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")])
"""
if not isinstance(bdom, int):
raise ValueError("'bdom' must be integer")
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
t1 = sd
if not t1.is_month_start:
t1 = t1 - pd.offsets.MonthBegin(1)
t2 = ed
if not t2.is_month_end:
t2 = t2 + pd.offsets.MonthEnd(1)
dates = pd.date_range(t1, t2, freq="b")
dates = dates.difference(holidays)
date_data = pd.DataFrame({"date": dates, "year": dates.year,
"month": dates.month, "bdom": 1})
date_data.loc[:, "bdom"] = (
date_data.groupby(by=["year", "month"])["bdom"].cumsum()
)
date_data = date_data.loc[date_data.bdom == bdom, :]
date_data = date_data.loc[date_data.month.isin(months), :]
date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
idx = (date_data.date >= sd) & (date_data.date <= ed)
order = ['date', 'year', 'month', 'bdom', 'month_code']
date_data = (date_data.loc[idx, order]
.reset_index(drop=True))
return date_data
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """
    Compute instrument weight allocations for generic futures over a set of
    timestamps for one root generic.

    Parameters
    ----------
    timestamps: iterable
        Iterable of pandas.Timestamps to compute weights for; sorted
        internally before use.
    contract_dates: pandas.Series
        Series mapping tradeable contract names (index) to
        pandas.Timestamps of the last roll date (values). The index must be
        unique and the values strictly monotonic once sorted.
    get_weights: function
        Callable taking (timestamp, contract_dates, validate_inputs=...,
        **kwargs) and returning a list of
        (generic name, tradeable contract, weight, timestamp) tuples.
    kwargs: keyword arguments
        Extra arguments forwarded to get_weights.

    Returns
    -------
    A pandas.DataFrame with generic names as columns and a (date, contract)
    MultiIndex; values are weights on tradeables for each generic.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                       [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
    ...                        pd.Timestamp('2016-10-19'),
    ...                        pd.Timestamp('2016-10-19')])
    >>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
    ...                       transition=trans)
    """
    ordered_ts = sorted(timestamps)
    contract_dates = contract_dates.sort_values()
    _check_contract_dates(contract_dates)
    # only the first call to get_weights() validates its inputs; subsequent
    # calls skip validation as a loop speedup
    all_weights = list(get_weights(ordered_ts[0], contract_dates,
                                   validate_inputs=True, **kwargs))
    for ts in ordered_ts[1:]:
        all_weights.extend(get_weights(ts, contract_dates,
                                       validate_inputs=False, **kwargs))
    return aggregate_weights(all_weights)
def static_transition(timestamp, contract_dates, transition, holidays=None,
                      validate_inputs=True):
    """
    An implementation of *get_weights* parameter in roller().
    Return weights to tradeable instruments for a given date based on a
    transition DataFrame which indicates how to roll through the roll period.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        The timestamp to return instrument weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values, sorted by values.
        Index must be unique and values must be strictly monotonic.
    transition: pandas.DataFrame
        A DataFrame with a index of integers representing business day
        offsets from the last roll date and a column which is a MultiIndex
        where the top level is generic instruments and the second level is
        ['front', 'back'] which refer to the front month contract and the
        back month contract of the roll. The values represent the fraction
        of the roll on each day during the roll period. The first row of the
        transition period should be completely allocated to the front
        contract and the last row should be completely allocated to the back
        contract.
    holidays: array_like of datetime64[D]
        Holidays to exclude when calculating business day offsets from the
        last roll date. See numpy.busday_count.
    validate_inputs: Boolean
        Whether or not to validate ordering of contract_dates and
        transition. **Caution** this is provided for speed however if this
        is set to False and inputs are not defined properly algorithm may
        return incorrect data.

    Returns
    -------
    A list of tuples consisting of the generic instrument name, the
    tradeable contract as a string, the weight on this contract as a float
    and the date as a pandas.Timestamp.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                            [0.0, 1.0, 0.0, 1.0]],
    ...                           index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.Timestamp('2016-10-19')
    >>> wts = mappings.static_transition(ts, contract_dates, transition)
    """
    if validate_inputs:
        # required for MultiIndex slicing
        _check_static(transition.sort_index(axis=1))
        # the algorithm below will return invalid results if contract_dates
        # is not as expected so better to fail explicitly
        _check_contract_dates(contract_dates)
    if not holidays:
        holidays = []
    # further speedup can be obtained using contract_dates.loc[timestamp:]
    # but this requires swapping contract_dates index and values
    after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
    contracts = after_contract_dates.index
    front_expiry_dt = after_contract_dates.iloc[0]
    # negative before the front roll date, 0 on it, positive after
    days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
                                     holidays=holidays)
    name2num = dict(zip(transition.columns.levels[0],
                        range(len(transition.columns.levels[0]))))
    if days_to_expiry in transition.index:
        # mid-roll: use the weights for this business-day offset.
        # Series.items() replaces Series.iteritems(), which was removed in
        # pandas 2.0
        weights_iter = transition.loc[days_to_expiry].items()
    # roll hasn't started yet
    elif days_to_expiry < transition.index.min():
        # provides significant speedup over transition.iloc[0].items()
        vals = transition.values[0]
        weights_iter = zip(transition.columns.tolist(), vals)
    # roll is finished
    else:
        vals = transition.values[-1]
        weights_iter = zip(transition.columns.tolist(), vals)
    cwts = []
    for idx_tuple, weighting in weights_iter:
        gen_name, position = idx_tuple
        if weighting != 0:
            # 'front' maps to the nth nearest unexpired contract, 'back'
            # to the (n+1)th, where n is the generic's ordinal
            if position == "front":
                cntrct_idx = name2num[gen_name]
            elif position == "back":
                cntrct_idx = name2num[gen_name] + 1
            try:
                cntrct_name = contracts[cntrct_idx]
            except IndexError as e:
                # re-raise with context showing which mapping ran off the
                # end of the available contracts
                raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} "
                               "resulting from {3} mapping")
                              .format(cntrct_idx, after_contract_dates,
                                      timestamp, idx_tuple)
                              ).with_traceback(sys.exc_info()[2])
            cwts.append((gen_name, cntrct_name, weighting, timestamp))
    return cwts
def _check_contract_dates(contract_dates):
if not contract_dates.index.is_unique:
raise ValueError("'contract_dates.index' must be unique")
if not contract_dates.is_unique:
raise ValueError("'contract_dates' must be unique")
# since from above we know this is unique if not monotonic means not
# strictly monotonic if we know it is sorted
if not contract_dates.is_monotonic_increasing:
raise ValueError("'contract_dates' must be strictly monotonic "
"increasing")
def _check_static(transition):
if set(transition.columns.levels[-1]) != {"front", "back"}:
raise ValueError("transition.columns.levels[-1] must consist of"
"'front' and 'back'")
generic_row_sums = transition.groupby(level=0, axis=1).sum()
if not (generic_row_sums == 1).all().all():
raise ValueError("transition rows for each generic must sum to"
" 1\n %s" % transition)
if not transition.loc[:, (slice(None), "front")].apply(lambda x: np.all(np.diff(x.values) <= 0)).all(): # NOQA
raise ValueError("'front' columns must be monotonically decreasing and"
" 'back' columns must be monotonically increasing,"
" invalid transtion:\n %s" % transition)
return
def to_generics(instruments, weights):
    """
    Map tradeable instrument holdings back to generic contract counts.

    Solves Ax = b where A is the loading matrix (weights) and b is the
    instrument holdings. When no exact solution exists, finds the least
    squares solution x' subject to sum(x') = sum(instruments).

    Scenarios with exact solutions and non exact solutions are depicted below

    +------------+-----+-----+            Instruments
    | contract   | CL1 | CL2 | ------------------------------------
    |------------+-----+-----| Scenario 1 | Scenario 2 | Scenario 3
    |   CLX16    | 0.5 |  0  |     10     |     10     |     10
    |   CLZ16    | 0.5 | 0.5 |     20     |     20     |     25
    |   CLF17    |  0  | 0.5 |     10     |     11     |     11
    +------------+-----+-----+

    In scenario 1 the solution is x = [20, 20], in scenario 2 it is
    x = [19.5, 21.5], and in scenario 3 it is x = [22, 24].

    NOTE: solutions are not guaranteed to be integers, as shown above. This
    is intended for contract counts but also works with notional amounts.

    Parameters
    ----------
    instruments: pandas.Series
        Holdings of tradeable instruments; index is the instrument name,
        value is the number of that instrument held.
    weights: pandas.DataFrame or dict
        Loadings of generic contracts on tradeable instruments for a given
        date; columns are generics, index is instrument names. If a dict,
        keys are root generics (e.g. 'CL') and values are such DataFrames.
        The union of all indexes must be a superset of instruments.index.

    Returns
    -------
    A pandas.Series indexed by generic name with the number of contracts as
    values, sorted by index.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
    >>> generics = mappings.to_generics(instrs, wts)
    """
    weight_map = weights if isinstance(weights, dict) else {"": weights}
    pieces = []
    unmapped = instruments.index
    for root in weight_map:
        loadings = weight_map[root]
        # weights may cover more instruments than are actually held, so keep
        # only the instruments present in both
        held = instruments.reindex(loadings.index).dropna()
        loadings = loadings.loc[held.index]
        # discard generics whose loadings on the held instruments are all
        # zero; this avoids epsilon allocations from numerical rounding
        nonzero_cols = ~(loadings == 0).all(axis=0)
        loadings = loadings.loc[:, nonzero_cols]
        unmapped = unmapped.difference(held.index)
        A = loadings.values
        b = held.values
        x = cvxpy.Variable(A.shape[1])
        # least squares fit subject to conserving the total holding count
        problem = cvxpy.Problem(
            cvxpy.Minimize(cvxpy.sum_squares(A * x - b)),
            [CVX_SUM(x) == np.sum(b)]
        )
        problem.solve()
        solution = np.array(x.value).squeeze()
        pieces.append(pd.Series(solution, index=loadings.columns.tolist()))
    if len(unmapped) > 0:
        raise KeyError("Unmapped instruments %s. weights must be a superset of"
                       " instruments" % unmapped.tolist())
    return pd.concat(pieces, axis=0).sort_index()
|
matthewgilbert/mapping
|
mapping/mappings.py
|
static_transition
|
python
|
def static_transition(timestamp, contract_dates, transition, holidays=None,
validate_inputs=True):
if validate_inputs:
# required for MultiIndex slicing
_check_static(transition.sort_index(axis=1))
# the algorithm below will return invalid results if contract_dates is
# not as expected so better to fail explicitly
_check_contract_dates(contract_dates)
if not holidays:
holidays = []
# further speedup can be obtained using contract_dates.loc[timestamp:]
# but this requires swapping contract_dates index and values
after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
contracts = after_contract_dates.index
front_expiry_dt = after_contract_dates.iloc[0]
days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
holidays=holidays)
name2num = dict(zip(transition.columns.levels[0],
range(len(transition.columns.levels[0]))))
if days_to_expiry in transition.index:
weights_iter = transition.loc[days_to_expiry].iteritems()
# roll hasn't started yet
elif days_to_expiry < transition.index.min():
# provides significant speedup over transition.iloc[0].iteritems()
vals = transition.values[0]
weights_iter = zip(transition.columns.tolist(), vals)
# roll is finished
else:
vals = transition.values[-1]
weights_iter = zip(transition.columns.tolist(), vals)
cwts = []
for idx_tuple, weighting in weights_iter:
gen_name, position = idx_tuple
if weighting != 0:
if position == "front":
cntrct_idx = name2num[gen_name]
elif position == "back":
cntrct_idx = name2num[gen_name] + 1
try:
cntrct_name = contracts[cntrct_idx]
except IndexError as e:
raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} "
"resulting from {3} mapping")
.format(cntrct_idx, after_contract_dates,
timestamp, idx_tuple)
).with_traceback(sys.exc_info()[2])
cwts.append((gen_name, cntrct_name, weighting, timestamp))
return cwts
|
An implementation of *get_weights* parameter in roller().
Return weights to tradeable instruments for a given date based on a
transition DataFrame which indicates how to roll through the roll period.
Parameters
----------
timestamp: pandas.Timestamp
The timestamp to return instrument weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
transition: pandas.DataFrame
A DataFrame with a index of integers representing business day offsets
from the last roll date and a column which is a MultiIndex where the
top level is generic instruments and the second level is
['front', 'back'] which refer to the front month contract and the back
month contract of the roll. Note that for different generics, e.g. CL1,
CL2, the front and back month contract during a roll would refer to
different underlying instruments. The values represent the fraction of
the roll on each day during the roll period. The first row of the
transition period should be completely allocated to the front contract
and the last row should be completely allocated to the back contract.
holidays: array_like of datetime64[D]
Holidays to exclude when calculating business day offsets from the last
roll date. See numpy.busday_count.
validate_inputs: Boolean
Whether or not to validate ordering of contract_dates and transition.
**Caution** this is provided for speed however if this is set to False
and inputs are not defined properly algorithm may return incorrect
data.
Returns
-------
A list of tuples consisting of the generic instrument name, the tradeable
contract as a string, the weight on this contract as a float and the date
as a pandas.Timestamp.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]],
... index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.Timestamp('2016-10-19')
>>> wts = mappings.static_transition(ts, contract_dates, transition)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L174-L282
|
[
"def _check_contract_dates(contract_dates):\n if not contract_dates.index.is_unique:\n raise ValueError(\"'contract_dates.index' must be unique\")\n if not contract_dates.is_unique:\n raise ValueError(\"'contract_dates' must be unique\")\n # since from above we know this is unique if not monotonic means not\n # strictly monotonic if we know it is sorted\n if not contract_dates.is_monotonic_increasing:\n raise ValueError(\"'contract_dates' must be strictly monotonic \"\n \"increasing\")\n",
"def _check_static(transition):\n if set(transition.columns.levels[-1]) != {\"front\", \"back\"}:\n raise ValueError(\"transition.columns.levels[-1] must consist of\"\n \"'front' and 'back'\")\n\n generic_row_sums = transition.groupby(level=0, axis=1).sum()\n if not (generic_row_sums == 1).all().all():\n raise ValueError(\"transition rows for each generic must sum to\"\n \" 1\\n %s\" % transition)\n\n if not transition.loc[:, (slice(None), \"front\")].apply(lambda x: np.all(np.diff(x.values) <= 0)).all(): # NOQA\n raise ValueError(\"'front' columns must be monotonically decreasing and\"\n \" 'back' columns must be monotonically increasing,\"\n \" invalid transtion:\\n %s\" % transition)\n\n return\n"
] |
import pandas as pd
import numpy as np
import cvxpy
import sys
# deal with API change from cvxpy version 0.4 to 1.0
if hasattr(cvxpy, "sum_entries"):
CVX_SUM = getattr(cvxpy, "sum_entries")
else:
CVX_SUM = getattr(cvxpy, "sum")
TO_MONTH_CODE = dict(zip(range(1, 13), "FGHJKMNQUVXZ"))
FROM_MONTH_CODE = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def bdom_roll_date(sd, ed, bdom, months, holidays=[]):
"""
Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")])
"""
if not isinstance(bdom, int):
raise ValueError("'bdom' must be integer")
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
t1 = sd
if not t1.is_month_start:
t1 = t1 - pd.offsets.MonthBegin(1)
t2 = ed
if not t2.is_month_end:
t2 = t2 + pd.offsets.MonthEnd(1)
dates = pd.date_range(t1, t2, freq="b")
dates = dates.difference(holidays)
date_data = pd.DataFrame({"date": dates, "year": dates.year,
"month": dates.month, "bdom": 1})
date_data.loc[:, "bdom"] = (
date_data.groupby(by=["year", "month"])["bdom"].cumsum()
)
date_data = date_data.loc[date_data.bdom == bdom, :]
date_data = date_data.loc[date_data.month.isin(months), :]
date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
idx = (date_data.date >= sd) & (date_data.date <= ed)
order = ['date', 'year', 'month', 'bdom', 'month_code']
date_data = (date_data.loc[idx, order]
.reset_index(drop=True))
return date_data
def roller(timestamps, contract_dates, get_weights, **kwargs):
"""
Calculate weight allocations to tradeable instruments for generic futures
at a set of timestamps for a given root generic.
Paramters
---------
timestamps: iterable
Sorted iterable of of pandas.Timestamps to calculate weights for
contract_dates: pandas.Series
Series with index of tradeable contract names and pandas.Timestamps
representing the last date of the roll as values, sorted by values.
Index must be unique and values must be strictly monotonic.
get_weights: function
A function which takes in a timestamp, contract_dates, validate_inputs
and **kwargs. Returns a list of tuples consisting of the generic
instrument name, the tradeable contract as a string, the weight on this
contract as a float and the date as a pandas.Timestamp.
kwargs: keyword arguments
Arguements to pass to get_weights
Return
------
A pandas.DataFrame with columns representing generics and a MultiIndex of
date and contract. Values represent weights on tradeables for each generic.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']])
>>> idx = [-2, -1, 0]
>>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
... [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
>>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
... pd.Timestamp('2016-11-21'),
... pd.Timestamp('2016-12-20')],
... index=['CLX16', 'CLZ16', 'CLF17'])
>>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
... pd.Timestamp('2016-10-19'),
... pd.Timestamp('2016-10-19')])
>>> wts = mappings.roller(ts, contract_dates, mappings.static_transition,
... transition=trans)
"""
timestamps = sorted(timestamps)
contract_dates = contract_dates.sort_values()
_check_contract_dates(contract_dates)
weights = []
# for loop speedup only validate inputs the first function call to
# get_weights()
validate_inputs = True
ts = timestamps[0]
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
validate_inputs = False
for ts in timestamps[1:]:
weights.extend(get_weights(ts, contract_dates,
validate_inputs=validate_inputs, **kwargs))
weights = aggregate_weights(weights)
return weights
def aggregate_weights(weights, drop_date=False):
    """
    Transform a list of weight tuples into a pandas.DataFrame of weights.

    Parameters
    ----------
    weights: list
        Tuples of (generic name, tradeable contract, weight, date) where
        weight is a float and date is a pandas.Timestamp.
    drop_date: boolean
        If True, replace the (date, contract) MultiIndex with the contract
        level only.

    Returns
    -------
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments; columns are generic names, index is (date, contract) or
    just contract when drop_date is True, sorted.
    """
    frame = pd.DataFrame(weights,
                         columns=["generic", "contract", "weight", "date"])
    pivoted = (frame
               .pivot_table(index=['date', 'contract'], columns=['generic'],
                            values='weight', fill_value=0)
               .astype(float)
               .sort_index())
    if drop_date:
        # NOTE(review): uses the unique contract level, which presumes
        # contracts are not repeated across dates — confirm for multi-date
        # input
        pivoted.index = pivoted.index.levels[-1]
    return pivoted
def _check_contract_dates(contract_dates):
if not contract_dates.index.is_unique:
raise ValueError("'contract_dates.index' must be unique")
if not contract_dates.is_unique:
raise ValueError("'contract_dates' must be unique")
# since from above we know this is unique if not monotonic means not
# strictly monotonic if we know it is sorted
if not contract_dates.is_monotonic_increasing:
raise ValueError("'contract_dates' must be strictly monotonic "
"increasing")
def _check_static(transition):
if set(transition.columns.levels[-1]) != {"front", "back"}:
raise ValueError("transition.columns.levels[-1] must consist of"
"'front' and 'back'")
generic_row_sums = transition.groupby(level=0, axis=1).sum()
if not (generic_row_sums == 1).all().all():
raise ValueError("transition rows for each generic must sum to"
" 1\n %s" % transition)
if not transition.loc[:, (slice(None), "front")].apply(lambda x: np.all(np.diff(x.values) <= 0)).all(): # NOQA
raise ValueError("'front' columns must be monotonically decreasing and"
" 'back' columns must be monotonically increasing,"
" invalid transtion:\n %s" % transition)
return
def to_generics(instruments, weights):
"""
Map tradeable instruments to generics given weights and tradeable
instrument holdings. This is solving the equation Ax = b where A is the
weights, and b is the instrument holdings. When Ax = b has no solution we
solve for x' such that Ax' is closest to b in the least squares sense with
the additional constraint that sum(x') = sum(instruments).
Scenarios with exact solutions and non exact solutions are depicted below
+------------+-----+-----+ Instruments
| contract | CL1 | CL2 | ------------------------------------
|------------+-----+-----| Scenario 1 | Scenario 2 | Scenario 3
| CLX16 | 0.5 | 0 | 10 | 10 | 10
| CLZ16 | 0.5 | 0.5 | 20 | 20 | 25
| CLF17 | 0 | 0.5 | 10 | 11 | 11
+------------+-----+-----+
In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].
NOTE: Integer solutions are not guruanteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.
Parameters
----------
instruments: pandas.Series
Series of tradeable instrument holdings where the index is the name of
the tradeable instrument and the value is the number of that instrument
held.
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instruments
and the index is strings representing instrument names. If dict is
given keys should be root generic, e.g. 'CL', and values should be
pandas.DataFrames of loadings. The union of all indexes should be a
superset of the instruments.index
Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
"""
if not isinstance(weights, dict):
weights = {"": weights}
allocations = []
unmapped_instr = instruments.index
for key in weights:
w = weights[key]
# may not always have instrument holdings for a set of weights so allow
# weights to be a superset of instruments, drop values where no
# holdings
winstrs = instruments.reindex(w.index).dropna()
w = w.loc[winstrs.index]
# drop generics where all weights for instruments on the genric are 0.
# This avoids numerical rounding issues where solution has epsilon
# weight on a generic
w = w.loc[:, ~(w == 0).all(axis=0)]
unmapped_instr = unmapped_instr.difference(winstrs.index)
A = w.values
b = winstrs.values
x = cvxpy.Variable(A.shape[1])
constrs = [CVX_SUM(x) == np.sum(b)]
obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
prob = cvxpy.Problem(obj, constrs)
prob.solve()
vals = np.array(x.value).squeeze()
idx = w.columns.tolist()
allocations.append(pd.Series(vals, index=idx))
if len(unmapped_instr) > 0:
raise KeyError("Unmapped instruments %s. weights must be a superset of"
" instruments" % unmapped_instr.tolist())
allocations = pd.concat(allocations, axis=0)
allocations = allocations.sort_index()
return allocations
|
matthewgilbert/mapping
|
mapping/mappings.py
|
to_generics
|
python
|
def to_generics(instruments, weights):
if not isinstance(weights, dict):
weights = {"": weights}
allocations = []
unmapped_instr = instruments.index
for key in weights:
w = weights[key]
# may not always have instrument holdings for a set of weights so allow
# weights to be a superset of instruments, drop values where no
# holdings
winstrs = instruments.reindex(w.index).dropna()
w = w.loc[winstrs.index]
# drop generics where all weights for instruments on the genric are 0.
# This avoids numerical rounding issues where solution has epsilon
# weight on a generic
w = w.loc[:, ~(w == 0).all(axis=0)]
unmapped_instr = unmapped_instr.difference(winstrs.index)
A = w.values
b = winstrs.values
x = cvxpy.Variable(A.shape[1])
constrs = [CVX_SUM(x) == np.sum(b)]
obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
prob = cvxpy.Problem(obj, constrs)
prob.solve()
vals = np.array(x.value).squeeze()
idx = w.columns.tolist()
allocations.append(pd.Series(vals, index=idx))
if len(unmapped_instr) > 0:
raise KeyError("Unmapped instruments %s. weights must be a superset of"
" instruments" % unmapped_instr.tolist())
allocations = pd.concat(allocations, axis=0)
allocations = allocations.sort_index()
return allocations
|
Map tradeable instruments to generics given weights and tradeable
instrument holdings. This is solving the equation Ax = b where A is the
weights, and b is the instrument holdings. When Ax = b has no solution we
solve for x' such that Ax' is closest to b in the least squares sense with
the additional constraint that sum(x') = sum(instruments).
Scenarios with exact solutions and non exact solutions are depicted below
+------------+-----+-----+ Instruments
| contract | CL1 | CL2 | ------------------------------------
|------------+-----+-----| Scenario 1 | Scenario 2 | Scenario 3
| CLX16 | 0.5 | 0 | 10 | 10 | 10
| CLZ16 | 0.5 | 0.5 | 20 | 20 | 25
| CLF17 | 0 | 0.5 | 10 | 11 | 11
+------------+-----+-----+
In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].
NOTE: Integer solutions are not guruanteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.
Parameters
----------
instruments: pandas.Series
Series of tradeable instrument holdings where the index is the name of
the tradeable instrument and the value is the number of that instrument
held.
weights: pandas.DataFrame or dict
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instruments
and the index is strings representing instrument names. If dict is
given keys should be root generic, e.g. 'CL', and values should be
pandas.DataFrames of loadings. The union of all indexes should be a
superset of the instruments.index
Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.
Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
... index=["CLX16", "CLZ16", "CLF17"],
... columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
|
train
|
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L315-L407
| null |
import pandas as pd
import numpy as np
import cvxpy
import sys
# deal with API change from cvxpy version 0.4 to 1.0
if hasattr(cvxpy, "sum_entries"):
CVX_SUM = getattr(cvxpy, "sum_entries")
else:
CVX_SUM = getattr(cvxpy, "sum")
TO_MONTH_CODE = dict(zip(range(1, 13), "FGHJKMNQUVXZ"))
FROM_MONTH_CODE = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def bdom_roll_date(sd, ed, bdom, months, holidays=[]):
"""
Convenience function for getting business day data associated with
contracts. Usefully for generating business day derived 'contract_dates'
which can be used as input to roller(). Returns dates for a business day of
the month for months in months.keys() between the start date and end date.
Parameters
----------
sd: str
String representing start date, %Y%m%d
ed: str
String representing end date, %Y%m%d
bdom: int
Integer indicating business day of month
months: dict
Dictionnary where key is integer representation of month [1-12] and
value is the month code [FGHJKMNQUVXZ]
holidays: list
List of holidays to exclude from business days
Return
------
A DataFrame with columns ['date', 'year', 'month', 'bdom', 'month_code']
Examples
--------
>>> import pandas as pd
>>> from mapping.mappings import bdom_roll_date
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"})
>>> bdom_roll_date("20160101", "20180501", 7, {1:"G", 3:"J", 8:"U"},
... holidays=[pd.Timestamp("20160101")])
"""
if not isinstance(bdom, int):
raise ValueError("'bdom' must be integer")
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
t1 = sd
if not t1.is_month_start:
t1 = t1 - pd.offsets.MonthBegin(1)
t2 = ed
if not t2.is_month_end:
t2 = t2 + pd.offsets.MonthEnd(1)
dates = pd.date_range(t1, t2, freq="b")
dates = dates.difference(holidays)
date_data = pd.DataFrame({"date": dates, "year": dates.year,
"month": dates.month, "bdom": 1})
date_data.loc[:, "bdom"] = (
date_data.groupby(by=["year", "month"])["bdom"].cumsum()
)
date_data = date_data.loc[date_data.bdom == bdom, :]
date_data = date_data.loc[date_data.month.isin(months), :]
date_data.loc[:, "month_code"] = date_data.month.apply(lambda x: months[x])
idx = (date_data.date >= sd) & (date_data.date <= ed)
order = ['date', 'year', 'month', 'bdom', 'month_code']
date_data = (date_data.loc[idx, order]
.reset_index(drop=True))
return date_data
def roller(timestamps, contract_dates, get_weights, **kwargs):
    """
    Calculate weight allocations to tradeable instruments for generic futures
    at a set of timestamps for a given root generic.

    Parameters
    ----------
    timestamps: iterable
        Sorted iterable of pandas.Timestamps to calculate weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values, sorted by values.
        Index must be unique and values must be strictly monotonic.
    get_weights: function
        A function which takes in a timestamp, contract_dates,
        validate_inputs and **kwargs. Returns a list of tuples consisting of
        the generic instrument name, the tradeable contract as a string, the
        weight on this contract as a float and the date as a
        pandas.Timestamp.
    kwargs: keyword arguments
        Arguments to pass to get_weights

    Returns
    -------
    A pandas.DataFrame with columns representing generics and a MultiIndex
    of date and contract. Values represent weights on tradeables for each
    generic.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"],
    ...                                    ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> trans = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
    ...                       [0.0, 1.0, 0.0, 1.0]], index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.DatetimeIndex([pd.Timestamp('2016-10-18'),
    ...                        pd.Timestamp('2016-10-19'),
    ...                        pd.Timestamp('2016-10-19')])
    >>> wts = mappings.roller(ts, contract_dates,
    ...                       mappings.static_transition, transition=trans)
    """
    ordered = sorted(timestamps)
    contract_dates = contract_dates.sort_values()
    _check_contract_dates(contract_dates)
    # Validate get_weights() inputs only on the first call; every later call
    # skips validation purely as a speed optimization.
    all_weights = list(get_weights(ordered[0], contract_dates,
                                   validate_inputs=True, **kwargs))
    for stamp in ordered[1:]:
        all_weights.extend(get_weights(stamp, contract_dates,
                                       validate_inputs=False, **kwargs))
    return aggregate_weights(all_weights)
def aggregate_weights(weights, drop_date=False):
    """
    Transforms list of tuples of weights into pandas.DataFrame of weights.

    Parameters
    ----------
    weights: list
        A list of tuples consisting of the generic instrument name, the
        tradeable contract as a string, the weight on this contract as a
        float and the date as a pandas.Timestamp.
    drop_date: boolean
        Whether to drop the date from the multiIndex

    Returns
    -------
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instrument names
    and the index is strings representing instrument names.
    """
    frame = pd.DataFrame(weights,
                         columns=["generic", "contract", "weight", "date"])
    # pivot so each generic becomes a column; missing (date, contract)
    # combinations get a weight of 0
    pivoted = frame.pivot_table(index=['date', 'contract'],
                                columns=['generic'], values='weight',
                                fill_value=0)
    pivoted = pivoted.astype(float).sort_index()
    if drop_date:
        # collapse the (date, contract) MultiIndex down to contract only
        pivoted.index = pivoted.index.levels[-1]
    return pivoted
def static_transition(timestamp, contract_dates, transition, holidays=None,
                      validate_inputs=True):
    """
    An implementation of *get_weights* parameter in roller().

    Return weights to tradeable instruments for a given date based on a
    transition DataFrame which indicates how to roll through the roll period.

    Parameters
    ----------
    timestamp: pandas.Timestamp
        The timestamp to return instrument weights for
    contract_dates: pandas.Series
        Series with index of tradeable contract names and pandas.Timestamps
        representing the last date of the roll as values, sorted by values.
        Index must be unique and values must be strictly monotonic.
    transition: pandas.DataFrame
        A DataFrame with a index of integers representing business day
        offsets from the last roll date and a column which is a MultiIndex
        where the top level is generic instruments and the second level is
        ['front', 'back'] which refer to the front month contract and the
        back month contract of the roll. Note that for different generics,
        e.g. CL1, CL2, the front and back month contract during a roll would
        refer to different underlying instruments. The values represent the
        fraction of the roll on each day during the roll period. The first
        row of the transition period should be completely allocated to the
        front contract and the last row should be completely allocated to
        the back contract.
    holidays: array_like of datetime64[D]
        Holidays to exclude when calculating business day offsets from the
        last roll date. See numpy.busday_count.
    validate_inputs: Boolean
        Whether or not to validate ordering of contract_dates and
        transition. **Caution** this is provided for speed however if this
        is set to False and inputs are not defined properly algorithm may
        return incorrect data.

    Returns
    -------
    A list of tuples consisting of the generic instrument name, the
    tradeable contract as a string, the weight on this contract as a float
    and the date as a pandas.Timestamp.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"],
    ...                                    ['front', 'back']])
    >>> idx = [-2, -1, 0]
    >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0],
    ...                            [0.5, 0.5, 0.5, 0.5],
    ...                            [0.0, 1.0, 0.0, 1.0]],
    ...                           index=idx, columns=cols)
    >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'),
    ...                             pd.Timestamp('2016-11-21'),
    ...                             pd.Timestamp('2016-12-20')],
    ...                            index=['CLX16', 'CLZ16', 'CLF17'])
    >>> ts = pd.Timestamp('2016-10-19')
    >>> wts = mappings.static_transition(ts, contract_dates, transition)
    """
    if validate_inputs:
        # sorting is required for the MultiIndex slicing done in
        # _check_static
        _check_static(transition.sort_index(axis=1))
        # the algorithm below will return invalid results if contract_dates
        # is not as expected so better to fail explicitly
        _check_contract_dates(contract_dates)
    if not holidays:
        holidays = []
    # contracts whose roll has not yet completed as of `timestamp`
    # further speedup can be obtained using contract_dates.loc[timestamp:]
    # but this requires swapping contract_dates index and values
    after_contract_dates = contract_dates.loc[contract_dates >= timestamp]
    contracts = after_contract_dates.index
    front_expiry_dt = after_contract_dates.iloc[0]
    # negative before the front contract's roll date, 0 on the roll date
    days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(),
                                     holidays=holidays)
    # map each generic name to its ordinal so 'front'/'back' resolve to an
    # offset into `contracts`
    name2num = dict(zip(transition.columns.levels[0],
                        range(len(transition.columns.levels[0]))))
    if days_to_expiry in transition.index:
        # mid-roll: take the row for this business-day offset.
        # BUGFIX: Series.iteritems() was removed in pandas 2.0; items() is
        # the supported equivalent.
        weights_iter = transition.loc[days_to_expiry].items()
    # roll hasn't started yet
    elif days_to_expiry < transition.index.min():
        # provides significant speedup over iterating transition.iloc[0]
        vals = transition.values[0]
        weights_iter = zip(transition.columns.tolist(), vals)
    # roll is finished
    else:
        vals = transition.values[-1]
        weights_iter = zip(transition.columns.tolist(), vals)
    cwts = []
    for idx_tuple, weighting in weights_iter:
        gen_name, position = idx_tuple
        if weighting != 0:
            if position == "front":
                cntrct_idx = name2num[gen_name]
            elif position == "back":
                cntrct_idx = name2num[gen_name] + 1
            try:
                cntrct_name = contracts[cntrct_idx]
            except IndexError as e:
                raise type(e)(("index {0} is out of bounds in\n{1}\nas of "
                               "{2} resulting from {3} mapping")
                              .format(cntrct_idx, after_contract_dates,
                                      timestamp, idx_tuple)
                              ).with_traceback(sys.exc_info()[2])
            cwts.append((gen_name, cntrct_name, weighting, timestamp))
    return cwts
def _check_contract_dates(contract_dates):
    """
    Validate roll-date input for roller()/static_transition().

    Raises ValueError unless the index labels are unique and the values are
    unique and monotonically increasing (which together imply strictly
    monotonic for sorted input).
    """
    checks = (
        (contract_dates.index.is_unique,
         "'contract_dates.index' must be unique"),
        (contract_dates.is_unique,
         "'contract_dates' must be unique"),
        # uniqueness was verified above, so plain monotonic here implies
        # strictly monotonic
        (contract_dates.is_monotonic_increasing,
         "'contract_dates' must be strictly monotonic increasing"),
    )
    for passed, message in checks:
        if not passed:
            raise ValueError(message)
def _check_static(transition):
    """
    Validate a static roll transition DataFrame.

    Checks that the second column level is exactly {'front', 'back'}, that
    each generic's weights sum to 1 on every row, and that 'front' weights
    are monotonically decreasing through the roll (given unit row sums, the
    'back' weights are then monotonically increasing).

    Parameters
    ----------
    transition: pandas.DataFrame
        Transition table; columns must be lexicographically sorted since the
        MultiIndex slice below requires it.

    Raises
    ------
    ValueError
        If any structural check fails.
    """
    if set(transition.columns.levels[-1]) != {"front", "back"}:
        raise ValueError("transition.columns.levels[-1] must consist of"
                         "'front' and 'back'")
    # BUGFIX: groupby(..., axis=1) is deprecated in modern pandas; transpose,
    # group on the generic level, then transpose back for the same result.
    generic_row_sums = transition.T.groupby(level=0).sum().T
    if not (generic_row_sums == 1).all().all():
        raise ValueError("transition rows for each generic must sum to"
                         " 1\n %s" % transition)
    fronts = transition.loc[:, (slice(None), "front")]
    if not fronts.apply(lambda x: np.all(np.diff(x.values) <= 0)).all():
        # also fixes the "transtion" typo in the original message
        raise ValueError("'front' columns must be monotonically decreasing"
                         " and 'back' columns must be monotonically"
                         " increasing, invalid transition:\n %s" % transition)
    return
|
inveniosoftware/invenio-records-files
|
invenio_records_files/alembic/1ba76da94103_create_records_files_tables.py
|
upgrade
|
python
|
def upgrade():
    """Upgrade database.

    Creates the ``records_buckets`` association table linking a record
    (``records_metadata``) to a files bucket (``files_bucket``) through a
    composite primary key of both foreign keys.
    """
    # both columns store UUIDs of the linked rows
    uuid_column = sqlalchemy_utils.types.uuid.UUIDType
    op.create_table(
        'records_buckets',
        sa.Column('record_id', uuid_column(), nullable=False),
        sa.Column('bucket_id', uuid_column(), nullable=False),
        sa.ForeignKeyConstraint(['bucket_id'], ['files_bucket.id'], ),
        sa.ForeignKeyConstraint(['record_id'], ['records_metadata.id'], ),
        sa.PrimaryKeyConstraint('record_id', 'bucket_id')
    )
|
Upgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/alembic/1ba76da94103_create_records_files_tables.py#L25-L40
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create records files tables."""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
# revision identifiers, used by Alembic.
revision = '1ba76da94103'
down_revision = '2da9a03b0833'
branch_labels = ()
depends_on = (
'2e97565eba72', # invenio-files-rest
'862037093962', # invenio-records
)
def downgrade():
    """Downgrade database.

    Drops the ``records_buckets`` association table created by upgrade().
    """
    op.drop_table('records_buckets')
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
_writable
|
python
|
def _writable(method):
    """Check that record is in defined status.

    Decorator ensuring the record's bucket accepts writes before the
    wrapped method runs.

    :param method: Method to be decorated.
    :returns: Function decorated.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        """Run the wrapped method only when the bucket is writable.

        :returns: Execution result of the decorated method.
        :raises InvalidOperationError: It occurs when the bucket is locked or
            deleted.
        """
        if self.bucket.locked or self.bucket.deleted:
            raise InvalidOperationError()
        return method(self, *args, **kwargs)
    return wrapper
|
Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L80-L98
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""API for manipulating files associated to a record."""
from collections import OrderedDict
from functools import wraps
from invenio_db import db
from invenio_files_rest.errors import InvalidOperationError
from invenio_files_rest.models import ObjectVersion
from invenio_records.api import Record as _Record
from invenio_records.errors import MissingModelError
from .models import RecordsBuckets
from .utils import sorted_files_from_bucket
class FileObject(object):
    """Wrapper for files.

    Proxies attribute/item access to the wrapped ``ObjectVersion`` first,
    falling back to the user-supplied metadata ``data`` dict.
    """

    def __init__(self, obj, data):
        """Bind to current bucket."""
        self.obj = obj
        self.data = data

    def get_version(self, version_id=None):
        """Return specific version ``ObjectVersion`` instance or HEAD.

        :param version_id: Version ID of the object.
        :returns: :class:`~invenio_files_rest.models.ObjectVersion` instance
            or HEAD of the stored object.
        """
        return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
                                 version_id=version_id)

    def get(self, key, default=None):
        """Proxy to ``obj``, falling back to the metadata dict.

        :param key: Metadata key which holds the value.
        :returns: Metadata value of the specified key or default.
        """
        try:
            return getattr(self.obj, key)
        except AttributeError:
            return self.data.get(key, default)

    def __getattr__(self, key):
        """Proxy to ``obj``."""
        return getattr(self.obj, key)

    def __getitem__(self, key):
        """Proxy to ``obj`` and ``data``."""
        try:
            return getattr(self.obj, key)
        except AttributeError:
            return self.data[key]

    def __setitem__(self, key, value):
        """Proxy to ``data``; object attributes are read-only."""
        if hasattr(self.obj, key):
            raise KeyError(key)
        self.data[key] = value

    def dumps(self):
        """Create a dump of the metadata associated to the record."""
        obj = self.obj
        self.data.update(
            bucket=str(obj.bucket_id),
            checksum=obj.file.checksum,
            key=obj.key,  # IMPORTANT it must stay here!
            size=obj.file.size,
            version_id=str(obj.version_id),
        )
        return self.data
class FilesIterator(object):
    """Iterator for files.

    Wraps a record and its files bucket, yielding :class:`FileObject`
    instances and keeping the record's ``_files`` metadata in sync with the
    bucket contents.
    """

    def __init__(self, record, bucket=None, file_cls=None):
        """Initialize iterator."""
        self._it = None
        self.record = record
        self.model = record.model
        self.file_cls = file_cls or FileObject
        self.bucket = bucket
        # key -> metadata dict, preserving the order stored on the record
        self.filesmap = OrderedDict([
            (f['key'], f) for f in self.record.get('_files', [])
        ])

    @property
    def keys(self):
        """Return file keys."""
        return self.filesmap.keys()

    def __len__(self):
        """Get number of files."""
        return ObjectVersion.get_by_bucket(self.bucket).count()

    def __iter__(self):
        """Get iterator."""
        self._it = iter(sorted_files_from_bucket(self.bucket, self.keys))
        return self

    def next(self):
        """Python 2.7 compatibility."""
        return self.__next__()  # pragma: no cover

    def __next__(self):
        """Get next file item."""
        obj = next(self._it)
        return self.file_cls(obj, self.filesmap.get(obj.key, {}))

    def __contains__(self, key):
        """Test if file exists."""
        return ObjectVersion.get_by_bucket(
            self.bucket).filter_by(key=key).count()

    def __getitem__(self, key):
        """Get a specific file."""
        obj = ObjectVersion.get(self.bucket, key)
        if obj:
            return self.file_cls(obj, self.filesmap.get(obj.key, {}))
        raise KeyError(key)

    def flush(self):
        """Flush changes to record."""
        files = self.dumps()
        # Do not create `_files` when there has not been `_files` field
        # before and the record still has no files attached.
        if files or '_files' in self.record:
            self.record['_files'] = files

    @_writable
    def __setitem__(self, key, stream):
        """Add file inside a deposit."""
        with db.session.begin_nested():
            # save the file
            obj = ObjectVersion.create(
                bucket=self.bucket, key=key, stream=stream)
            self.filesmap[key] = self.file_cls(obj, {}).dumps()
            self.flush()

    @_writable
    def __delitem__(self, key):
        """Delete a file from the deposit."""
        obj = ObjectVersion.delete(bucket=self.bucket, key=key)
        if obj is None:
            raise KeyError(key)
        if key in self.filesmap:
            del self.filesmap[key]
            self.flush()

    def sort_by(self, *ids):
        """Update files order.

        :param ids: List of ids specifying the final status of the list.
        """
        # Support sorting by file_ids or keys.
        files = {str(f_.file_id): f_.key for f_ in self}
        # self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
        self.filesmap = OrderedDict([
            (files.get(id_, id_), self[files.get(id_, id_)].dumps())
            for id_ in ids
        ])
        self.flush()

    @_writable
    def rename(self, old_key, new_key):
        """Rename a file.

        :param old_key: Old key that holds the object.
        :param new_key: New key that will hold the object.
        :returns: The object that has been renamed.
        """
        assert new_key not in self
        assert old_key != new_key
        file_ = self[old_key]
        old_data = self.filesmap[old_key]
        # Create a new version with the new name
        obj = ObjectVersion.create(
            bucket=self.bucket, key=new_key,
            _file_id=file_.obj.file_id
        )
        # Delete old key
        self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
        del self[old_key]
        return obj

    def dumps(self, bucket=None):
        """Serialize files from a bucket.

        :param bucket: Instance of files
            :class:`invenio_files_rest.models.Bucket`. (Default:
            ``self.bucket``)
        :returns: List of serialized files.
        """
        return [
            self.file_cls(o, self.filesmap.get(o.key, {})).dumps()
            for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)
        ]
class FilesMixin(object):
    """Implement files attribute for Record models.

    .. note::

       Implement ``_create_bucket()`` in subclass to allow files property
       to automatically create a bucket in case no bucket is present.
    """

    file_cls = FileObject
    """File class used to generate the instance of files. Default to
    :class:`~invenio_records_files.api.FileObject`
    """

    files_iter_cls = FilesIterator
    """Files iterator class used to generate the files iterator. Default to
    :class:`~invenio_records_files.api.FilesIterator`
    """

    def _create_bucket(self):
        """Return an instance of ``Bucket`` class.

        .. note:: Reimplement in children class for custom behavior.

        :returns: Instance of :class:`invenio_files_rest.models.Bucket`.
        """
        return None

    @property
    def files(self):
        """Get files iterator.

        :returns: Files iterator, or ``None`` when no bucket exists and the
            subclass does not create one.
        """
        if self.model is None:
            raise MissingModelError()
        # look up an existing record <-> bucket link
        records_buckets = RecordsBuckets.query.filter_by(
            record_id=self.id).first()
        if not records_buckets:
            # no link yet: let the subclass create a bucket on demand
            bucket = self._create_bucket()
            if not bucket:
                return None
            RecordsBuckets.create(record=self.model, bucket=bucket)
        else:
            bucket = records_buckets.bucket
        return self.files_iter_cls(self, bucket=bucket, file_cls=self.file_cls)

    @files.setter
    def files(self, data):
        """Set files from data.

        Only allowed when the record has no files yet.
        """
        current_files = self.files
        if current_files:
            raise RuntimeError('Can not update existing files.')
        for key in data:
            current_files[key] = data[key]
class Record(_Record, FilesMixin):
    """Define API for files manipulation using ``FilesMixin``."""

    def delete(self, force=False):
        """Delete a record and also remove the RecordsBuckets if necessary.

        :param force: True to remove also the
            :class:`~invenio_records_files.models.RecordsBuckets` object.
        :returns: Deleted record.
        """
        if force:
            # remove the record <-> bucket link before deleting the record
            RecordsBuckets.query.filter_by(
                record=self.model,
                bucket=self.files.bucket
            ).delete()
        return super(Record, self).delete(force)
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
FileObject.get_version
|
python
|
def get_version(self, version_id=None):
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id)
|
Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L32-L40
| null |
class FileObject(object):
"""Wrapper for files."""
def __init__(self, obj, data):
"""Bind to current bucket."""
self.obj = obj
self.data = data
def get(self, key, default=None):
"""Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
"""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data.get(key, default)
def __getattr__(self, key):
"""Proxy to ``obj``."""
return getattr(self.obj, key)
def __getitem__(self, key):
"""Proxy to ``obj`` and ``data``."""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data[key]
def __setitem__(self, key, value):
"""Proxy to ``data``."""
if hasattr(self.obj, key):
raise KeyError(key)
self.data[key] = value
def dumps(self):
"""Create a dump of the metadata associated to the record."""
self.data.update({
'bucket': str(self.obj.bucket_id),
'checksum': self.obj.file.checksum,
'key': self.obj.key, # IMPORTANT it must stay here!
'size': self.obj.file.size,
'version_id': str(self.obj.version_id),
})
return self.data
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
FileObject.get
|
python
|
def get(self, key, default=None):
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data.get(key, default)
|
Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L42-L50
| null |
class FileObject(object):
"""Wrapper for files."""
def __init__(self, obj, data):
"""Bind to current bucket."""
self.obj = obj
self.data = data
def get_version(self, version_id=None):
"""Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
"""
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id)
def __getattr__(self, key):
"""Proxy to ``obj``."""
return getattr(self.obj, key)
def __getitem__(self, key):
"""Proxy to ``obj`` and ``data``."""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data[key]
def __setitem__(self, key, value):
"""Proxy to ``data``."""
if hasattr(self.obj, key):
raise KeyError(key)
self.data[key] = value
def dumps(self):
"""Create a dump of the metadata associated to the record."""
self.data.update({
'bucket': str(self.obj.bucket_id),
'checksum': self.obj.file.checksum,
'key': self.obj.key, # IMPORTANT it must stay here!
'size': self.obj.file.size,
'version_id': str(self.obj.version_id),
})
return self.data
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
FileObject.dumps
|
python
|
def dumps(self):
self.data.update({
'bucket': str(self.obj.bucket_id),
'checksum': self.obj.file.checksum,
'key': self.obj.key, # IMPORTANT it must stay here!
'size': self.obj.file.size,
'version_id': str(self.obj.version_id),
})
return self.data
|
Create a dump of the metadata associated to the record.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L68-L77
| null |
class FileObject(object):
"""Wrapper for files."""
def __init__(self, obj, data):
"""Bind to current bucket."""
self.obj = obj
self.data = data
def get_version(self, version_id=None):
"""Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
"""
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id)
def get(self, key, default=None):
"""Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
"""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data.get(key, default)
def __getattr__(self, key):
"""Proxy to ``obj``."""
return getattr(self.obj, key)
def __getitem__(self, key):
"""Proxy to ``obj`` and ``data``."""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data[key]
def __setitem__(self, key, value):
"""Proxy to ``data``."""
if hasattr(self.obj, key):
raise KeyError(key)
self.data[key] = value
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
FilesIterator.flush
|
python
|
def flush(self):
files = self.dumps()
# Do not create `_files` when there has not been `_files` field before
# and the record still has no files attached.
if files or '_files' in self.record:
self.record['_files'] = files
|
Flush changes to record.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L150-L156
|
[
"def dumps(self, bucket=None):\n \"\"\"Serialize files from a bucket.\n\n :param bucket: Instance of files\n :class:`invenio_files_rest.models.Bucket`. (Default:\n ``self.bucket``)\n :returns: List of serialized files.\n \"\"\"\n return [\n self.file_cls(o, self.filesmap.get(o.key, {})).dumps()\n for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)\n ]\n"
] |
class FilesIterator(object):
"""Iterator for files."""
def __init__(self, record, bucket=None, file_cls=None):
"""Initialize iterator."""
self._it = None
self.record = record
self.model = record.model
self.file_cls = file_cls or FileObject
self.bucket = bucket
self.filesmap = OrderedDict([
(f['key'], f) for f in self.record.get('_files', [])
])
@property
def keys(self):
"""Return file keys."""
return self.filesmap.keys()
def __len__(self):
"""Get number of files."""
return ObjectVersion.get_by_bucket(self.bucket).count()
def __iter__(self):
"""Get iterator."""
self._it = iter(sorted_files_from_bucket(self.bucket, self.keys))
return self
def next(self):
"""Python 2.7 compatibility."""
return self.__next__() # pragma: no cover
def __next__(self):
"""Get next file item."""
obj = next(self._it)
return self.file_cls(obj, self.filesmap.get(obj.key, {}))
def __contains__(self, key):
"""Test if file exists."""
return ObjectVersion.get_by_bucket(
self.bucket).filter_by(key=key).count()
def __getitem__(self, key):
"""Get a specific file."""
obj = ObjectVersion.get(self.bucket, key)
if obj:
return self.file_cls(obj, self.filesmap.get(obj.key, {}))
raise KeyError(key)
@_writable
def __setitem__(self, key, stream):
"""Add file inside a deposit."""
with db.session.begin_nested():
# save the file
obj = ObjectVersion.create(
bucket=self.bucket, key=key, stream=stream)
self.filesmap[key] = self.file_cls(obj, {}).dumps()
self.flush()
@_writable
def __delitem__(self, key):
"""Delete a file from the deposit."""
obj = ObjectVersion.delete(bucket=self.bucket, key=key)
if obj is None:
raise KeyError(key)
if key in self.filesmap:
del self.filesmap[key]
self.flush()
def sort_by(self, *ids):
"""Update files order.
:param ids: List of ids specifying the final status of the list.
"""
# Support sorting by file_ids or keys.
files = {str(f_.file_id): f_.key for f_ in self}
# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
self.filesmap = OrderedDict([
(files.get(id_, id_), self[files.get(id_, id_)].dumps())
for id_ in ids
])
self.flush()
@_writable
def rename(self, old_key, new_key):
"""Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
"""
assert new_key not in self
assert old_key != new_key
file_ = self[old_key]
old_data = self.filesmap[old_key]
# Create a new version with the new name
obj = ObjectVersion.create(
bucket=self.bucket, key=new_key,
_file_id=file_.obj.file_id
)
# Delete old key
self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
del self[old_key]
return obj
def dumps(self, bucket=None):
"""Serialize files from a bucket.
:param bucket: Instance of files
:class:`invenio_files_rest.models.Bucket`. (Default:
``self.bucket``)
:returns: List of serialized files.
"""
return [
self.file_cls(o, self.filesmap.get(o.key, {})).dumps()
for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)
]
|
inveniosoftware/invenio-records-files
|
invenio_records_files/api.py
|
FilesIterator.sort_by
|
python
|
def sort_by(self, *ids):
# Support sorting by file_ids or keys.
files = {str(f_.file_id): f_.key for f_ in self}
# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
self.filesmap = OrderedDict([
(files.get(id_, id_), self[files.get(id_, id_)].dumps())
for id_ in ids
])
self.flush()
|
Update files order.
:param ids: List of ids specifying the final status of the list.
|
train
|
https://github.com/inveniosoftware/invenio-records-files/blob/c410eba986ea43be7e97082d5dcbbdc19ccec39c/invenio_records_files/api.py#L180-L192
|
[
"def flush(self):\n \"\"\"Flush changes to record.\"\"\"\n files = self.dumps()\n # Do not create `_files` when there has not been `_files` field before\n # and the record still has no files attached.\n if files or '_files' in self.record:\n self.record['_files'] = files\n"
] |
class FilesIterator(object):
"""Iterator for files."""
def __init__(self, record, bucket=None, file_cls=None):
"""Initialize iterator."""
self._it = None
self.record = record
self.model = record.model
self.file_cls = file_cls or FileObject
self.bucket = bucket
self.filesmap = OrderedDict([
(f['key'], f) for f in self.record.get('_files', [])
])
@property
def keys(self):
"""Return file keys."""
return self.filesmap.keys()
def __len__(self):
"""Get number of files."""
return ObjectVersion.get_by_bucket(self.bucket).count()
def __iter__(self):
"""Get iterator."""
self._it = iter(sorted_files_from_bucket(self.bucket, self.keys))
return self
def next(self):
"""Python 2.7 compatibility."""
return self.__next__() # pragma: no cover
def __next__(self):
"""Get next file item."""
obj = next(self._it)
return self.file_cls(obj, self.filesmap.get(obj.key, {}))
def __contains__(self, key):
"""Test if file exists."""
return ObjectVersion.get_by_bucket(
self.bucket).filter_by(key=key).count()
def __getitem__(self, key):
"""Get a specific file."""
obj = ObjectVersion.get(self.bucket, key)
if obj:
return self.file_cls(obj, self.filesmap.get(obj.key, {}))
raise KeyError(key)
def flush(self):
"""Flush changes to record."""
files = self.dumps()
# Do not create `_files` when there has not been `_files` field before
# and the record still has no files attached.
if files or '_files' in self.record:
self.record['_files'] = files
@_writable
def __setitem__(self, key, stream):
"""Add file inside a deposit."""
with db.session.begin_nested():
# save the file
obj = ObjectVersion.create(
bucket=self.bucket, key=key, stream=stream)
self.filesmap[key] = self.file_cls(obj, {}).dumps()
self.flush()
@_writable
def __delitem__(self, key):
"""Delete a file from the deposit."""
obj = ObjectVersion.delete(bucket=self.bucket, key=key)
if obj is None:
raise KeyError(key)
if key in self.filesmap:
del self.filesmap[key]
self.flush()
@_writable
def rename(self, old_key, new_key):
"""Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
"""
assert new_key not in self
assert old_key != new_key
file_ = self[old_key]
old_data = self.filesmap[old_key]
# Create a new version with the new name
obj = ObjectVersion.create(
bucket=self.bucket, key=new_key,
_file_id=file_.obj.file_id
)
# Delete old key
self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
del self[old_key]
return obj
def dumps(self, bucket=None):
"""Serialize files from a bucket.
:param bucket: Instance of files
:class:`invenio_files_rest.models.Bucket`. (Default:
``self.bucket``)
:returns: List of serialized files.
"""
return [
self.file_cls(o, self.filesmap.get(o.key, {})).dumps()
for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.