| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
"""Processed version of the documentation.
The documentation from the objects of raw_doc is further processed into
objects from the module proc_doc. These objects can then be processed
into structured documents such as HTML more easily.
"""
# TODO(holtgrew): Location traceability for entries and text.
import os.path
import HTMLParser
import logging
import re
import sys
import xml.sax.saxutils
import inc_mgr
import sig_parser
import dox_tokens
import raw_doc
def escapeForXml(s):
"""Return escaped XML of s."""
return xml.sax.saxutils.escape(s)
class DocumentationBuildException(Exception):
"""Thrown when there is a logical error on building the documentation."""
class LinkResolver(object):
"""Member of ProcDoc for resolving links."""
def __init__(self, proc_doc):
self.proc_doc = proc_doc
def splitSecondLevelEntry(name):
"""Split second-level entry and return (first, second) pair.
If name is not a second level entry then (None, name) is returned.
"""
xs = None
if name.count('::') > 1 and ' ' in name:
xs = name.split(' ', 1)
elif '#' in name:
xs = name.split('#', 1)
elif '::' in name:
xs = name.rsplit('::', 1)
if xs:
return xs
return (None, name)
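# Illustrative splits (an editor's sketch, not part of the original module;
# the entry names are made up). Note that a successful split returns a list,
# while the fallback is a tuple:
#
#   splitSecondLevelEntry('String#length')   == ['String', 'length']
#   splitSecondLevelEntry('String::String')  == ['String', 'String']
#   splitSecondLevelEntry('String')          == (None, 'String')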
class ProcDoc(object):
"""Collection of the top-level documentation entries."""
def __init__(self):
self.top_level_entries = {}
self.second_level_entries = {}
self.entries = {}
def addTopLevelEntry(self, x):
"""Add a top-level-entry."""
self.registerEntry(x)
self.top_level_entries[x.name] = x
def addSecondLevelEntry(self, x):
"""Add a second-level entry."""
self.registerEntry(x)
self.second_level_entries[x.name] = x
first, second = splitSecondLevelEntry(x.name)
if first:
# print '%s => %s as %s' % (x.name, second, x.kind)
self.top_level_entries[first].registerSubentry(x)
def addVariable(self, x):
"""Add a second-level entry."""
self.registerEntry(x)
#print 'x ==', x
#print 'x.type ==', x.type
if self.top_level_entries.get(x.type):
self.second_level_entries[x.name] = x
self.top_level_entries[x.type].registerSubentry(x)
elif '::' in x.name:
self.second_level_entries[x.name] = x
first, second = splitSecondLevelEntry(x.name)
self.top_level_entries[first].registerSubentry(x)
else:
self.top_level_entries[x.name] = x
def registerEntry(self, x):
"""Register an entry."""
if x.name in self.entries:
old = self.entries[x.name]
tpl = ('Trying to define %(new_kind)s %(new_name)s in %(new_file)s:'
'%(new_line)d but it is previously defined as %(old_kind)s '
'%(old_name)s in %(old_file)s:%(old_line)d.')
vals = {
'new_kind': x.kind,
'new_name': x.name,
'new_file': x.location[0],
'new_line': x.location[1],
'old_kind': old.kind,
'old_name': old.name,
'old_file': old.location[0],
'old_line': old.location[1]}
raise DocumentationBuildException(tpl % vals)
self.entries[x.name] = x
x.doc = self
def runTextVisitor(self, v):
"""Run visitor v on all Text members of all entries and sub entries.
"""
for e in self.entries.itervalues():
e.visitTextNodes(v)
class TextNode(object):
"""A node represents a part of a processed text.
Processed text is text generated from tokens lexed from the input file.
For example, text in a paragraph of an entry's body can be represented by
TextNode objects.
TextNode objects are similar to DOM nodes, i.e. they can contain children
and have attributes. This means that we can have a link node that has a
href/target attribute with a target URL and one or more child nodes that
contain the link's label.
Additionally, we store the source location (begin and end line/column) of
the node in its source file.
We represent plain links, i.e. links where the label is the same as the
target, using the representation '<a href="seqan:$target">$target</a>'.
We represent included code snippets as "<code type='.cpp'>$code</code>".
@ivar type: The type of the node, as a string. Reserved values are
'<text>' for plain text nodes.
@ivar attrs: A dict object mapping attribute names to string values.
@ivar children: A list of TextNode objects.
@ivar text: The text value of a node, a string.
"""
def __init__(self, type='<text>', verbatim=False, text='', attrs={}):
self.type = type
self.attrs = dict(attrs)
self.children = []
if verbatim:
self.text = text
else:
self.text = escapeForXml(text)
def __str__(self):
attrs = (repr(self.type), repr(self.text), repr(self.attrs), len(self.children))
return 'TextNode(type=%s, text=%s, attrs=%s, len(children)=%d)' % attrs
def __repr__(self):
return str(self)
def setAttr(self, key, value):
self.attrs[escapeForXml(key)] = escapeForXml(value)
def addChild(self, n):
self.children.append(n)
return self.children[-1]
@property
def X(self):
"""Returns first child, used to retrieve member of top-level <div>."""
if self.type == '<text>':
return self
else:
return self.children[0]
def toHtmlLike(self, skip_top_tag=False, **kwargs):
"""Returns a string with a HTML-like representation for debuggin.
@param skip_top_tag: Do ont output top-level tag.
@param kwargs: Additional attributes to add.
"""
if self.type == '<text>':
if self.attrs:
print >>sys.stderr, 'WARNING: Attributes on text node!'
return self.text
else:
dash = {True: '', False: ' /'}.get(bool(self.children)) # Whether empty
res = []
if not skip_top_tag:
res += ['<', self.type]
for key, value in self.attrs.iteritems():
res += [' ', key, '=', '"', repr(value)[1:-1], '"']
for key, value in kwargs.iteritems():
res += [' ', key, '=', '"', value, '"']
res.append(dash + '>')
if self.children:
res += [x.toHtmlLike() for x in self.children]
if not skip_top_tag:
res += ['</', self.type, '>']
return ''.join(res)
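# Minimal usage sketch (an editor's addition, not part of the original
# module): build the TextNode tree for a plain link and render it with
# toHtmlLike(). The entry name 'String' is a made-up example.
def _demoTextNode():
    link = TextNode(type='a', attrs={'href': 'seqan:String'})
    link.addChild(TextNode(text='String'))
    return link.toHtmlLike()  # '<a href="seqan:String">String</a>'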
class ProcEntry(object):
"""A processed representation of a documentation entry.
A documentation entry has a kind (string), a name (string), a brief
description (TextNode(type='<text>')), and a list of references/sees to
other elements (list of TextNode(type='<link>')). Also, it has a body
which is a TextNode with children.
@ivar kind: The kind of the entry, string.
@ivar name: The name of the entry, string.
@ivar brief: A brief description, a text-typed TextNode or None.
@ivar body: A TextNode object with children for the documentation body.
@ivar sees: A list of link-typed TextNode objects, can be empty.
@ivar doc: The owning ProcDoc, set in ProcDoc.registerEntry().
@ivar subentries: Sub entries, a dict, grouped by kind.
@ivar raw_entry: The RawEntry object that this ProcEntry was generated from.
"""
def __init__(self, name, title=None, brief=None, body=None, sees=[]):
self.name = name
self.title = title
self.brief = brief
self.body = body
self.sees = list(sees)
self.doc = None
self.subentries = {}
self.raw_entry = None
self._location = None
def registerSubentry(self, proc_entry):
self.subentries.setdefault(proc_entry.kind, []).append(proc_entry)
def hasSubEntry(self, kind, proc_doc):
"""Returns has a subentry of the given kind."""
if self.subentries.get(kind):
return True
if hasattr(self, 'all_extended'):
for cl in self.all_extended:
extended = proc_doc.top_level_entries[cl]
if extended.subentries.get(kind):
return True
if hasattr(self, 'all_implemented'):
for co in self.all_implemented:
extended = proc_doc.top_level_entries[co]
if extended.subentries.get(kind):
return True
return False
def visitTextNodes(self, visitor):
"""Visit all text nodes using the given visitor."""
visitor.visit(self.brief)
visitor.visit(self.body)
for see in self.sees:
visitor.visit(see)
@property
def location(self):
"""Returns pair (path, line)."""
if not self._location:
path = '<none>'
line = -1
if self.raw_entry.name:
line = self.raw_entry.name.tokens[0].lineno
path = self.raw_entry.name.tokens[0].file_name
self._location = (path, line)
return self._location
@property
def kind(self):
return self.__class__.__name__.replace('Proc', '').lower()
class ProcCodeEntry(ProcEntry):
"""A processed code entry.
@ivar signatures: A TextNode with the signatures of the entry. They are
properly formatted to be displayed as verbatim text.
@ivar signature_entries: A list of sig_parser.SigEntry objects.
@ivar headerfiles: A list of str objects with the arguments to #include.
@ivar deprecation_msgs: List of TextNode objects with deprecation messages.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcEntry.__init__(self, name, brief=brief, body=body, sees=sees)
self.signatures = []
self.signature_entries = []
self.headerfiles = []
self.deprecation_msgs = []
def addSignature(self, s):
self.signatures.append(s)
def addSignatureEntry(self, e):
self.signature_entries.append(e)
def addHeaderfile(self, h):
self.headerfiles.append(h)
def addDeprecationMsg(self, m):
self.deprecation_msgs.append(m)
def subEntries(self, kind):
return []
class ProcEnum(ProcCodeEntry):
"""A processed enum documentation.
@ivar values: A list of ProcVariable entries that represent values
of this enum.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.values = []
class ProcAdaption(ProcCodeEntry):
"""A processed adaption documentation.
@ivar values: A list of ProcVariable entries that represent values
of this adaption.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.values = []
class ProcTypedef(ProcCodeEntry):
"""A processed typedef documentation.
@ivar values: A list of ProcVariable entries that represent values
of this typedef.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.values = []
@property
def kind(self):
if '#' in self.name:
return 'grouped_typedef'
elif '::' in self.name:
return 'member_typedef'
else:
return 'global_typedef'
class ProcConcept(ProcCodeEntry):
"""A processed concept documentation.
@ivar extends: A list of str values with the names of the extended
concepts.
@ivar all_extended: A set of str values with the names of all extended
concepts, also transitively.
@ivar all_extending: A set of str values with the names of all extending
concepts.
@ivar all_implementing: A set of str values with the names of all
implementing classes.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.extends = []
self.all_extended = set()
self.all_extending = set()
self.all_implementing = set()
def addExtends(self, s):
self.extends.append(s)
def __str__(self):
return 'ProcConcept(%s, brief=%s, body=%s, sees=%s)' % (
repr(self.name), repr(self.brief), repr(self.body), repr(self.sees))
def __repr__(self):
return 'ProcConcept(%s)' % repr(self.name)
class ProcClass(ProcCodeEntry):
"""A processed class documentation.
@ivar extends: A list of str values with the names of the extended
classes.
@ivar implements: A list of str values with the names of the implemented
concepts.
@ivar all_implemented: Set of str values with the names of all implemented
concepts.
@ivar all_extending: Set of str values with the names of all extending
classes.
@ivar all_extended: Set of str values with the names of all extended classes.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.extends = []
self.implements = []
self.all_implemented = set()
self.all_extending = set()
self.all_extended = set()
self.tparams = []
self.typedefs = []
def addExtends(self, s):
self.extends.append(s)
def addImplements(self, s):
self.implements.append(s)
def addTParam(self, t):
self.tparams.append(t)
def addTypedef(self, t):
self.typedefs.append(t)
class ProcTag(ProcCodeEntry):
"""A processed tag documentation.
"""
@property
def kind(self):
if '#' in self.name:
return 'grouped_tag'
else:
return 'tag'
class ProcParam(object):
"""Representation of a parameter.
@ivar name: The name of the parameter. str.
@ivar in_out: One of IN, OUT, IN_OUT, None.
@ivar desc: Documentation of the parameter. TextNode.
"""
def __init__(self):
self.name = None
self.in_out = None
self.desc = TextNode()
def visitTextNodes(self, visitor):
"""Visit all text nodes using the given visitor."""
visitor.visit(self.desc)
ProcParam.IN = 'IN'
ProcParam.OUT = 'OUT'
ProcParam.IN_OUT = 'IN_OUT'
class ProcTParam(object):
"""Documentation of a processed template parameter.
@ivar type: The type of the parameter. str
@ivar desc: Documentation of the parameter. TextNode.
"""
def __init__(self):
self.type = None
self.desc = TextNode()
def visitTextNode(self, visitor):
"""Visit all text nodes using the given visitor."""
visitor.visit(self.desc)
class ProcReturn(object):
"""Documentation of a @return entry.
@ivar type: The return type. str.
@ivar desc: The documentation of the return value. TextNode.
"""
def __init__(self):
self.type = None
self.desc = TextNode()
def visitTextNode(self, visitor):
"""Visit all text nodes using the given visitor."""
visitor.visit(self.desc)
class ProcFunction(ProcCodeEntry):
"""A processed function documentation.
@ivar params: A list of str values with the names of the extended
concepts.
@ivar tparams:
@ivar returns:
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.params = []
self.tparams = []
self.returns = []
@property
def kind(self):
if '#' in self.name:
return 'interface_function'
elif '::' in self.name:
return 'member_function'
else:
return 'global_function'
def visitTextNodes(self, visitor):
"""Visit all text nodes using the given visitor."""
ProcCodeEntry.visitTextNodes(self, visitor)
for p in self.params:
p.visitTextNodes(visitor)
for t in self.tparams:
t.visitTextNode(visitor)
for r in self.returns:
r.visitTextNode(visitor)
def addParam(self, p):
self.params.append(p)
def addTParam(self, t):
self.tparams.append(t)
def addReturn(self, r):
self.returns.append(r)
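# Illustrative kind resolution (an editor's sketch; the entry names are
# made up, not from the original module):
#
#   ProcFunction('String#length').kind   == 'interface_function'
#   ProcFunction('String::String').kind  == 'member_function'
#   ProcFunction('globalAlignment').kind == 'global_function'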
class ProcMacro(ProcCodeEntry):
"""A processed macro documentation.
@ivar params: A list of ProcParam objects.
@ivar returns: A list of ProcReturn objects.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.params = []
self.returns = []
@property
def kind(self):
if '#' in self.name:
return 'grouped_macro'
else:
return 'macro'
def visitTextNodes(self, visitor):
"""Visit all text nodes using the given visitor."""
ProcCodeEntry.visitTextNodes(self, visitor)
for p in self.params:
p.visitTextNodes(visitor)
for r in self.returns:
r.visitTextNode(visitor)
def addParam(self, p):
self.params.append(p)
def addReturn(self, r):
self.returns.append(r)
class ProcMetafunction(ProcCodeEntry):
"""A processed function documentation.
@ivar tparams: A list of str values with the names of the extended
concepts.
@ivar returns: A list of ProcReturn values.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.tparams = []
self.returns = []
@property
def kind(self):
if '#' in self.name:
return 'interface_metafunction'
else:
return 'global_metafunction'
def visitTextNodes(self, visitor):
"""Visit all text nodes using the given visitor."""
ProcCodeEntry.visitTextNodes(self, visitor)
for t in self.tparams:
t.visitTextNode(visitor)
for r in self.returns:
r.visitTextNode(visitor)
def addTParam(self, t):
self.tparams.append(t)
def addReturn(self, r):
self.returns.append(r)
class ProcVariable(ProcCodeEntry):
"""A processed function documentation.
@ivar type: A string with the name of a type.
"""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcCodeEntry.__init__(self, name, brief, body, sees)
self.type = None
@property
def kind(self):
if '::' in self.name:
return 'member_variable'
else:
return 'variable'
class ProcPage(ProcEntry):
"""A processed page."""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcEntry.__init__(self, name, brief=brief, body=body, sees=sees)
def __str__(self):
return 'Page(name=%s)' % repr(self.name)
class ProcGroup(ProcEntry):
"""A processed group."""
def __init__(self, name, brief=None, body=None, sees=[]):
ProcEntry.__init__(self, name, brief=brief, body=body, sees=sees)
self.tags = []
self.typedefs = []
def __str__(self):
return 'Group(name=%s)' % repr(self.name)
def addTypedef(self, t):
self.typedefs.append(t)
class HtmlTagParser(HTMLParser.HTMLParser):
"""Used for parsing HTML and storing the first tag and its attributes."""
def __init__(self):
self.reset()
def reset(self):
HTMLParser.HTMLParser.reset(self)
self.tag = None
self.attrs = None
self.is_open = None
self.is_close = None
def handle_starttag(self, tag, attrs):
self.tag = tag
self.attrs = dict(attrs)
self.is_open = True
def handle_endtag(self, tag):
self.tag = tag
self.is_close = True
def parse(self, txt):
self.reset()
self.feed(txt)
class RawTextToTextNodeConverter(object):
"""Convert raw text including HTML tags to text node."""
def __init__(self, strip_lt_line_space=False):
self.tag_stack = []
self.node_stack = []
self.current = None
self.strip_lt_line_space = strip_lt_line_space
# Processing text between inline @begintag @endtag is done by first
# scanning over the tokens and then processing the tokens in between
# recursively with a new RawTextToTextNodeConverter.
self.current_cmd = None # current command, e.g. '@link'
self.tokens_cmd = [] # currently scanned tokens
self.commands = ['COMMAND_LINK', 'COMMAND_ENDLINK']
self.command_pairs = {'COMMAND_LINK': 'COMMAND_ENDLINK'}
self.html_parser = HtmlTagParser()
def handleTag(self, token):
"""Handle a HTML tag.
The HTML tag is translated into a TextNode and appended to self.current.
Note that this is meant for parsing one tag only.
"""
self.html_parser.parse(token.val)
tag_name = self.html_parser.tag
if self.html_parser.is_open: # Opening tag.
self.tag_stack.append(self.html_parser.tag)
self.node_stack.append(self.current)
tag = TextNode(type=self.html_parser.tag)
for key, value in self.html_parser.attrs.items():
tag.setAttr(key, value)
self.current = self.current.addChild(tag)
if self.html_parser.is_close: # No else, also handle standalone tags.
if self.tag_stack and self.tag_stack[-1] == tag_name:
self.tag_stack.pop() # correct closing tag
elif self.tag_stack and self.tag_stack[-1] != tag_name:
# incorrect closing, pop and return
args = (tag_name, self.tag_stack[-1])
print >>sys.stderr, 'WARNING: Closing wrong tag %s instead of %s' % args
self.tag_stack.pop()
return
else: # not self.tag_stack
print >>sys.stderr, 'WARNING: Closing tag without opening %s!' % tag_name
# Pop from node stack.
if self.node_stack:
self.current = self.node_stack[-1]
self.node_stack.pop()
else:
print >>sys.stderr, 'WARNING: Having closed too many tags!'
def handleCommand(self, token):
"""Handle command for the given token."""
if self.current_cmd: # There is a command active
if token.type == self.command_pairs[self.current_cmd]: # closing current
self.handleCommandClosing() # handle closing of command
else: # not closing current
self.tokens_cmd.append(token)
else: # no command active, open
assert token.type in self.command_pairs.keys(), \
'Must be open command.'
self.current_cmd = token.type
def handleCommandClosing(self):
"""Handle closing of current command."""
assert self.current_cmd == 'COMMAND_LINK', 'COMMAND_LINK is the only known command.'
if self.current_cmd == 'COMMAND_LINK':
# Trim leading/trailing whitespace tokens
def isWhitespace(t):
return t.type in dox_tokens.WHITESPACE
while self.tokens_cmd and isWhitespace(self.tokens_cmd[0]):
self.tokens_cmd.pop(0)
while self.tokens_cmd and isWhitespace(self.tokens_cmd[-1]):
self.tokens_cmd.pop(-1)
if not self.tokens_cmd:
print >>sys.stderr, 'WARNING: Empty @link @endlink.'
return
# Get link target.
target_token = self.tokens_cmd.pop(0)
# Trim leading whitespace again.
while self.tokens_cmd and isWhitespace(self.tokens_cmd[0]):
self.tokens_cmd.pop(0)
# Translate any remaining non-whitespace tokens.
title_tokens = self.tokens_cmd
link_text = raw_doc.RawText(title_tokens)
conv = RawTextToTextNodeConverter()
link_text_node = conv.run(link_text)
link_text_node.type = 'a'
link_text_node.attrs = {'href': 'seqan:' + target_token.val}
self.current.addChild(link_text_node)
self.tokens_cmd = []
self.current_cmd = None
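# Illustrative effect of the command handling above (an editor's sketch with
# a made-up target): the token stream for
# '@link String the String class @endlink' is converted into the tree
# <a href="seqan:String">the String class</a>.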
def run(self, raw_text, verbatim=False):
"""Convert the tokens in raw_text into a tree of TextNode objects."""
#print >>sys.stderr, '================== %s' % raw_text.text
#print >>sys.stderr, [(t.type, t.val) for t in raw_text.tokens]
self.current = TextNode(type='div')
root = self.current
at_line_start = True
for i, t in enumerate(raw_text.tokens):
if self.current_cmd: # collect token in self.tokens_cmd
self.handleCommand(t)
continue
if t.type in dox_tokens.WHITESPACE:
if i == 0 or (i + 1) == len(raw_text.tokens):
continue # Ignore leading and trailing whitespace.
if t.type == 'SPACE' and at_line_start:
continue # Ignore space at the beginning of a line.
if t.type == 'BREAK':
self.current.addChild(TextNode(text='\n'))
else:
self.current.addChild(TextNode(text=' '))
elif not verbatim and t.type == 'HTML_TAG':
at_line_start = False
self.handleTag(t)
elif not verbatim and t.type in self.commands:
#print >>sys.stderr, 'command %s' % t
at_line_start = False
self.handleCommand(t)
else:
at_line_start = False
# TODO(holtgrew): Escape values.
self.current.addChild(TextNode(text=t.val))
at_line_start = t.type in ['EMPTY_LINE', 'BREAK']
if self.current_cmd:
print >>sys.stderr, 'WARNING: Open command %s!' % self.current_cmd
return root
def process(self, raw_entry):
raise Exception('Not implemented!')
class EntryConverter(object):
"""Base class for the conversion of raw entries processed entries.
@ivar doc_proc: DocProcessor object.
@ivar entry_class: The class of the ProcEntry type to create.
"""
def __init__(self, doc_proc):
self.doc_proc = doc_proc
self.entry_class = None
def rawTextToTextNode(self, raw_text, strip_lt_line_space=False, verbatim=False):
"""Convert RawText object into a TextNode object.
The text node will have the type 'div'.
@param strip_lt_line_space: Whether or not to remove leading
space for lines.
@param verbatim: Whether or not to convert HTML tags.
"""
converter = RawTextToTextNodeConverter(strip_lt_line_space)
return converter.run(raw_text, verbatim)
def bodyToTextNode(self, raw_body):
"""Convert a RawBody to a TextNode."""
res = TextNode(type='div')
for p in raw_body.paragraphs:
if p.getType() == 'paragraph':
if not p.text.text.strip():
continue # Skip whitespace
p = self.rawTextToTextNode(p.text)
p.type = 'p'
res.addChild(p)
elif p.getType() == 'section':
h = self.rawTextToTextNode(p.heading)
h.type = 'h%d' % p.level
res.addChild(h)
elif p.getType() == 'include':
ftype = os.path.splitext(p.path.text)[1]
code_text = self.doc_proc.include_mgr.loadFile(p.path.text)
proc_include = TextNode(type='code', attrs={'type': ftype})
proc_include.addChild(TextNode(text=code_text, verbatim=True))
res.addChild(proc_include)
elif p.getType() == 'snippet':
ftype = os.path.splitext(p.path.text)[1]
code_text = self.doc_proc.include_mgr.loadSnippet(p.path.text, p.name.text)
proc_snippet = TextNode(type='code', attrs={'type': ftype})
proc_snippet.addChild(TextNode(text=code_text, verbatim=True))
res.addChild(proc_snippet)
elif p.getType() == 'code':
code_text = p.text.text
type = '.txt'
m = re.match(r'^{[^}]+}', code_text)
if m:
type = m.group(0)[1:-1]
code_text = code_text[len(type) + 2:].strip()
#print [repr(t.val) for t in p.text.tokens]
x = TextNode(type='code', attrs={'type':type})
x.addChild(TextNode(text=code_text, verbatim=True))
res.addChild(x)
return res
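# Illustrative handling of '@code' paragraphs above (an editor's sketch, not
# part of the original module): a leading '{.cpp}' marker selects the code
# type and is then stripped from the text.
#
#   m = re.match(r'^{[^}]+}', '{.cpp}\nint x = 0;')
#   m.group(0)[1:-1]  == '.cpp'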
def process(self, raw_entry):
entry = self.entry_class(name=raw_entry.name.text)
# Convert the title
if raw_entry.title.text:
entry.title = self.rawTextToTextNode(raw_entry.title)
# Convert first brief member. We already warned about duplicate ones
# elsewhere.
if raw_entry.briefs:
entry.brief = self.rawTextToTextNode(raw_entry.briefs[0].text)
# Convert the body
if raw_entry.body:
entry.body = self.bodyToTextNode(raw_entry.body)
# Convert the sees entries.
for see in raw_entry.sees:
link = self.rawTextToTextNode(see.text)
link.type = 'a'
link.attrs['href'] = 'seqan:%s' % see.text.text
entry.sees.append(link)
# Store the raw entry in the processed ones.
entry.raw_entry = raw_entry
return entry
class CodeEntryConverter(EntryConverter):
"""Base for the processing RawCodeEntry objects into processed entries."""
def __init__(self, doc_proc):
EntryConverter.__init__(self, doc_proc)
self.parse_signature = True
def process(self, raw_entry):
entry = EntryConverter.process(self, raw_entry)
# Add headerfile paths as list of strings.
for s in raw_entry.headerfiles:
entry.addHeaderfile(s.text.text.strip())
# Add deprecation messages as list of TextNodes.
for s in raw_entry.deprecation_msgs:
entry.addDeprecationMsg(self.rawTextToTextNode(s.text, strip_lt_line_space=True,
verbatim=True))
# Add signatures as a text node with code.
for s in raw_entry.signatures:
entry.addSignature(self.rawTextToTextNode(s.text, strip_lt_line_space=True,
verbatim=True))
# Use sig_parser to convert the signature texts to SigEntry objects.
# They are used for the list of functions/metafunctions for a type.
if self.parse_signature:
for s in raw_entry.signatures:
try:
sig_entry = sig_parser.SigParser(s.text.text).parse()
entry.addSignatureEntry(sig_entry)
except sig_parser.SigParseException, e:
print >>sys.stderr, '\nWARNING: Could not parse signature: %s' % e
print >>sys.stderr, 'Signature is: %s' % s.text.text.strip()
return entry
class EnumConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcEnum
def process(self, raw_entry):
return CodeEntryConverter.process(self, raw_entry)
class AdaptionConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcAdaption
self.parse_signature = False
def process(self, raw_entry):
return CodeEntryConverter.process(self, raw_entry)
class TypedefConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcTypedef
self.parse_signature = False
def process(self, raw_entry):
return CodeEntryConverter.process(self, raw_entry)
class ConceptConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcConcept
def process(self, raw_entry):
concept = CodeEntryConverter.process(self, raw_entry)
for e in raw_entry.extends:
concept.addExtends(e.text.text.strip())
return concept
class ClassConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcClass
def process(self, raw_entry):
klass = CodeEntryConverter.process(self, raw_entry)
for e in raw_entry.extends:
klass.addExtends(e.text.text.strip())
for e in raw_entry.implements:
klass.addImplements(e.text.text.strip())
for t in raw_entry.tparams:
proc_tparam = ProcTParam()
proc_tparam.type = t.name.text
proc_tparam.desc = self.rawTextToTextNode(t.text)
klass.addTParam(proc_tparam)
return klass
class TagConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcTag
self.parse_signature = False
class FunctionConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcFunction
self.in_out_map = {
'in': ProcParam.IN,
'out': ProcParam.OUT,
'in,out': ProcParam.IN_OUT,
}
def process(self, raw_entry):
function = CodeEntryConverter.process(self, raw_entry)
for p in raw_entry.params:
proc_param = ProcParam()
proc_param.name = p.name.text
if p.inout:
proc_param.in_out = self.in_out_map.get(p.inout.val[1:-1])
proc_param.desc = self.rawTextToTextNode(p.text)
function.addParam(proc_param)
for t in raw_entry.tparams:
proc_tparam = ProcTParam()
proc_tparam.type = t.name.text
proc_tparam.desc = self.rawTextToTextNode(t.text)
function.addTParam(proc_tparam)
for r in raw_entry.returns:
proc_return = ProcReturn()
proc_return.type = r.name.text
proc_return.desc = self.rawTextToTextNode(r.text)
function.addReturn(proc_return)
return function
class MacroConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcMacro
self.in_out_map = {
'in': ProcParam.IN,
'out': ProcParam.OUT,
'in,out': ProcParam.IN_OUT,
}
self.parse_signature = False
def process(self, raw_entry):
macro = CodeEntryConverter.process(self, raw_entry)
for p in raw_entry.params:
proc_param = ProcParam()
proc_param.name = p.name.text
if p.inout:
proc_param.in_out = self.in_out_map.get(p.inout.val[1:-1])
proc_param.desc = self.rawTextToTextNode(p.text)
macro.addParam(proc_param)
for r in raw_entry.returns:
proc_return = ProcReturn()
proc_return.type = r.name.text
proc_return.desc = self.rawTextToTextNode(r.text)
macro.addReturn(proc_return)
return macro
class MetafunctionConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcMetafunction
def process(self, raw_entry):
metafunction = CodeEntryConverter.process(self, raw_entry)
for t in raw_entry.tparams:
proc_tparam = ProcTParam()
proc_tparam.type = t.name.text
proc_tparam.desc = self.rawTextToTextNode(t.text)
metafunction.addTParam(proc_tparam)
for r in raw_entry.returns:
proc_return = ProcReturn()
proc_return.type = r.name.text
proc_return.desc = self.rawTextToTextNode(r.text)
metafunction.addReturn(proc_return)
return metafunction
class VariableConverter(CodeEntryConverter):
def __init__(self, doc_proc):
CodeEntryConverter.__init__(self, doc_proc)
self.entry_class = ProcVariable
def process(self, raw_entry):
variable = CodeEntryConverter.process(self, raw_entry)
if raw_entry.type:
variable.type = raw_entry.type.text
return variable
class TagStack(object):
"""Helper class for processing nested HTML tags."""
def __init__(self):
self.stack = []
def push(self, token):
pass
def pop(self, token):
pass
class PageConverter(EntryConverter):
"""Process a RawPage into a Page object."""
def __init__(self, doc_proc):
EntryConverter.__init__(self, doc_proc)
self.entry_class = ProcPage
class GroupConverter(EntryConverter):
"""Process a RawGroup into a Group object."""
def __init__(self, doc_proc):
EntryConverter.__init__(self, doc_proc)
self.entry_class = ProcGroup
class TextNodeVisitor(object):
"""Interface/abstract base class for visiting text nodes of entries or such."""
def visit(self, text_node):
"""Visit TextNode, possibly translating its content.
@param text_node: TextNode object or None.
"""
pass
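# A concrete visitor sketch (an editor's addition, not part of the original
# module): upper-cases the text of plain text nodes, recursing into children.
class _DemoUpperCaseVisitor(TextNodeVisitor):
    def visit(self, text_node):
        if text_node is None:
            return
        if text_node.type == '<text>':
            text_node.text = text_node.text.upper()
        for child in text_node.children:
            self.visit(child)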
class DocProcessor(object):
"""Convert a RawDoc object into a ProcDoc object.
@ivar converters: Dict that maps RawEntry kinds to Converter objects.
@ivar include_dir: The base path for including files that can be used in
the @include and @snippet commands.
@ivar include_mgr: inc_mgr.IncludeManager object for file/snippet
inclusion.
"""
def __init__(self, logger=None, include_dir='.'):
self.logger = logger
self.include_dir = include_dir
self.include_mgr = inc_mgr.IncludeManager(self.include_dir)
self.converters = {
'class': ClassConverter(self),
'concept': ConceptConverter(self),
'enum': EnumConverter(self),
'adaption': AdaptionConverter(self),
'global_typedef': TypedefConverter(self),
'member_typedef': TypedefConverter(self),
'grouped_typedef': TypedefConverter(self),
'global_function': FunctionConverter(self),
'global_metafunction': MetafunctionConverter(self),
'defgroup': GroupConverter(self),
'grouped_macro' : MacroConverter(self),
'interface_function': FunctionConverter(self),
'interface_metafunction': MetafunctionConverter(self),
'macro' : MacroConverter(self),
'member_function': FunctionConverter(self),
'member_variable': VariableConverter(self),
'page': PageConverter(self),
'tag': TagConverter(self),
'grouped_tag': TagConverter(self),
'variable': VariableConverter(self),
}
def run(self, doc):
res = ProcDoc()
self.log('Processing Documentation...')
self.convertTopLevelEntries(doc, res)
self.convertSecondLevelEntries(doc, res)
self.convertVariables(doc, res)
self.checkLinks(doc, res)
self.buildInheritanceLists(res)
return res
def convertTopLevelEntries(self, doc, res):
"""Convert top level entries.
Variables are not converted yet. They are converted in a separate
step since they might encode enum values.
"""
self.log(' 1) Converting Top-Level Entries.')
#print 'doc.entries', [e.name.text for e in doc.entries]
for raw_entry in doc.entries:
# Get fitting converter or warn if there is none.
kind = raw_entry.getType()
if not kind in ['concept', 'class', 'global_function',
'global_metafunction', 'page', 'tag',
'defgroup', 'macro', 'adaption', 'global_typedef', 'enum']:
continue # Not a top-level entry.
converter = self.converters.get(kind)
if not converter:
self.logWarning('Could not find converter for kind "%s".', kind)
continue # Skip if no converter could be found.
# Perform conversion.
proc_entry = converter.process(raw_entry)
# Store object in ProcDoc.
self.log(' * %s (%s)' % (proc_entry.name, proc_entry))
res.addTopLevelEntry(proc_entry)
def convertSecondLevelEntries(self, doc, res):
self.log(' 2) Converting Second-Level Entries.')
for raw_entry in doc.entries:
# Get fitting converter or warn if there is none.
kind = raw_entry.getType()
if not kind in ['member_function', 'interface_function',
'interface_metafunction',
'grouped_tag', 'grouped_macro', 'member_typedef',
'grouped_typedef']:
continue # Not a second-level entry.
converter = self.converters.get(kind)
if not converter:
self.logWarning('Could not find converter for kind "%s".', kind)
continue # Skip if no converter could be found.
# Perform conversion.
proc_entry = converter.process(raw_entry)
# Store object in ProcDoc.
self.log(' * %s' % proc_entry.name)
res.addSecondLevelEntry(proc_entry)
def convertVariables(self, doc, res):
self.log(' 3) Converting Variable entries.')
var_types = ['member_variable', 'grouped_variable', 'variable']
for raw_entry in [e for e in doc.entries if e.getType() in var_types]:
kind = raw_entry.getType()
converter = self.converters.get(kind)
if not converter:
self.logWarning('Could not find converter for kind "%s".', kind)
continue # Skip if no converter could be found.
# Perform conversion.
proc_entry = converter.process(raw_entry)
# Store object in ProcDoc.
self.log(' * %s %s' % (proc_entry.type, proc_entry.name))
res.addVariable(proc_entry)
def checkLinks(self, doc, res):
"""Check <link> items of text nodes and references.
References are given explicitly in items such as @extends and
@implements.
"""
self.log(' 4) Checking References.')
self.logWarning(' WARNING: Not implemented yet!')
def buildInheritanceLists(self, doc):
"""Build lists regarding the inheritance in the classes and concepts in doc.
We will build the equivalent to what Javadoc builds.
For concepts, this is the list of (a) all extended concepts, (b) all
known extending concepts, (c) all known implementing classes.
For classes, this is the list of (a) all implemented concepts, (b) all
direct known subclasses, (c) all extended classes.
@param doc: The ProcDoc object with the classes and concepts.
"""
self.log(' 5) Building Inheritance Lists.')
# Process concepts: All extended and all extending.
concepts = [x for x in doc.top_level_entries.values()
if x.kind == 'concept']
# Get all concepts that c extends into c.all_extended.
for c in concepts:
q = list(c.extends) # Queue for recursion
while q:
name = q[0]
q.pop(0)
if name in c.all_extended:
continue # Skip to break loops.
c.all_extended.add(name)
q += doc.top_level_entries[name].extends
# Now, build list of all extending concepts into c.all_extending.
for c in concepts:
for name in c.all_extended:
doc.top_level_entries[name].all_extending.add(c.name)
# Process classes: All extended and all extending classes.
classes = [x for x in doc.top_level_entries.values()
if x.kind == 'class']
# Get all classes that c extends into c.all_extended.
for c in classes:
q = list(c.extends) # Queue for recursion
while q:
name = q[0]
q.pop(0)
if name in c.all_extended:
continue # Skip to break loops.
c.all_extended.add(name)
q += doc.top_level_entries[name].extends
# Now, build list of all extending classes into c.all_extending.
for c in classes:
for name in c.all_extended:
doc.top_level_entries[name].all_extending.add(c.name)
# Build list of all implementing classes for all concepts.
for cl in classes:
for name in cl.implements:
if '\u0001' in name:
continue # Skip transitive inheritance.
co = doc.top_level_entries[name]
co.all_implementing.add(cl.name)
co.all_implementing.update(cl.all_extending)
# Build list of all implemented concepts for all classes.
for co in concepts:
for name in co.all_implementing:
cl = doc.top_level_entries[name]
cl.all_implemented.add(co.name)
def log(self, msg, *args, **kwargs):
"""Print the given message to the configured logger if any.
"""
if not self.logger:
return
self.logger.info(msg, *args, **kwargs)
def logWarning(self, msg, *args, **kwargs):
"""Print the given message to the configured logger if any.
"""
if not self.logger:
return
self.logger.warning('WARNING: ' + msg, *args, **kwargs)
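# Illustrative sketch (an editor's addition, not part of the original module)
# of the queue-based transitive closure used in buildInheritanceLists above,
# run on a toy 'extends' relation:
def _demoTransitiveExtends():
    extends = {'A': ['B'], 'B': ['C'], 'C': []}
    all_extended = set()
    q = list(extends['A'])  # queue for the recursion
    while q:
        name = q.pop(0)
        if name in all_extended:
            continue  # skip to break cycles
        all_extended.add(name)
        q += extends[name]
    return all_extended  # set(['B', 'C'])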
| gkno/seqan | util/py_lib/seqan/dox/proc_doc.py | Python | bsd-3-clause | 46,842 | ["VisIt"] | 593dfdf4717b39a1628b9881eafb8f33a4f6528897cde7cd25eb52a5ebddfcc2 |
from ddapp import robotstate
import ddapp.vtkAll as vtk
from ddapp.transformUtils import poseFromTransform
from ddapp.fieldcontainer import FieldContainer
import numpy as np
import math
class ConstraintBase(FieldContainer):
__isfrozen = False
robotArg = 'r'
def __init__(self, **kwargs):
self._add_fields(
enabled = True,
tspan = [-np.inf, np.inf],
)
self._set_fields(**kwargs)
def getCommands(self, commands=None, constraintNames=None, suffix=''):
commands = [] if commands is None else commands
constraintNames = [] if constraintNames is None else constraintNames
self._getCommands(commands, constraintNames, suffix)
return commands, constraintNames
def printCommands(self):
for command in self.getCommands()[0]:
print command
@staticmethod
def toRowVectorString(vec):
''' Returns elements separated by "," '''
return '[%s]' % ', '.join([repr(x) for x in vec])
@staticmethod
def toColumnVectorString(vec):
''' Returns elements separated by ";" '''
return '[%s]' % '; '.join([repr(x) for x in vec])
@staticmethod
def toMatrixString(mat):
if isinstance(mat, vtk.vtkTransform):
mat = np.array([[mat.GetMatrix().GetElement(r, c) for c in xrange(4)] for r in xrange(4)])
assert len(mat.shape) == 2
return '[%s]' % '; '.join([', '.join([repr(x) for x in row]) for row in mat])
@staticmethod
def toPositionQuaternionString(pose):
if isinstance(pose, vtk.vtkTransform):
pos, quat = poseFromTransform(pose)
pose = np.hstack((pos, quat))
assert pose.shape == (7,)
return ConstraintBase.toColumnVectorString(pose)
@staticmethod
def toPosition(pos):
if isinstance(pos, vtk.vtkTransform):
pos = np.array(pos.GetPosition())
assert pos.shape == (3,)
return pos
@staticmethod
def toQuaternion(quat):
if isinstance(quat, vtk.vtkTransform):
_, quat = poseFromTransform(quat)
assert quat.shape == (4,)
return quat
def getTSpanString(self):
return self.toRowVectorString([self.tspan[0], self.tspan[-1]])
def getJointsString(self, joints):
return '[%s]' % '; '.join(['joints.%s' % jointName for jointName in joints])
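# Illustrative conversions (an editor's sketch, not part of the original
# module):
#
#   ConstraintBase.toRowVectorString([1, 2, 3])     == '[1, 2, 3]'
#   ConstraintBase.toColumnVectorString([1, 2, 3])  == '[1; 2; 3]'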
class PostureConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
postureName = 'q_zero',
joints = [],
jointsLowerBound = [],
jointsUpperBound = [],
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert len(self.jointsLowerBound) == len(self.jointsUpperBound) == len(self.joints)
varName = 'posture_constraint%s' % suffix
constraintNames.append(varName)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
postureName=self.postureName,
jointsVar='joint_inds',
lowerLimit='joints_lower_limit',
upperLimit='joints_upper_limit',
lowerBound=self.toColumnVectorString(self.jointsLowerBound),
upperBound=self.toColumnVectorString(self.jointsUpperBound),
jointInds=self.getJointsString(self.joints))
commands.append(
'{varName} = PostureConstraint({robotArg}, {tspan});\n'
'{jointsVar} = {jointInds};\n'
'{lowerLimit} = {postureName}({jointsVar}) + {lowerBound};\n'
'{upperLimit} = {postureName}({jointsVar}) + {upperBound};\n'
'{varName} = {varName}.setJointLimits({jointsVar}, {lowerLimit}, {upperLimit});'
''.format(**formatArgs))
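# Illustrative usage (an editor's sketch; the joint name is hypothetical):
#
#   pc = PostureConstraint(joints=['l_arm_elx'],
#                          jointsLowerBound=[-0.1], jointsUpperBound=[0.1])
#   commands, names = pc.getCommands(suffix='_1')
#   # names == ['posture_constraint_1']; commands[0] is the MATLAB snippet
#   # built from the template above.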
class FixedLinkFromRobotPoseConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
poseName = '',
linkName = '',
lowerBound = np.zeros(3),
upperBound = np.zeros(3),
angleToleranceInDegrees = 0.0,
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
assert self.poseName
positionVarName = 'position_constraint%s' % suffix
quaternionVarName = 'quaternion_constraint%s' % suffix
constraintNames.append(positionVarName)
constraintNames.append(quaternionVarName)
formatArgs = dict(positionVarName=positionVarName,
quaternionVarName=quaternionVarName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
poseName=self.poseName,
lowerBound=self.toColumnVectorString(self.lowerBound),
upperBound=self.toColumnVectorString(self.upperBound),
tolerance=repr(math.radians(self.angleToleranceInDegrees)))
commands.append(
'point_in_link_frame = [0; 0; 0];\n'
'kinsol = {robotArg}.doKinematics({poseName});\n'
'xyz_quat = {robotArg}.forwardKin(kinsol, links.{linkName}, point_in_link_frame, 2);\n'
'lower_bounds = xyz_quat(1:3) + {lowerBound};\n'
'upper_bounds = xyz_quat(1:3) + {upperBound};\n'
'{positionVarName} = WorldPositionConstraint({robotArg}, links.{linkName}, '
'point_in_link_frame, lower_bounds, upper_bounds, {tspan});\n'
'{quaternionVarName} = WorldQuatConstraint({robotArg}, links.{linkName}, '
'xyz_quat(4:7), {tolerance}, {tspan});'
''.format(**formatArgs))
class PositionConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
referenceFrame = vtk.vtkTransform(),
pointInLink = np.zeros(3),
positionTarget = np.zeros(3),
positionOffset = np.zeros(3),
lowerBound = np.zeros(3),
upperBound = np.zeros(3),
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'position_constraint%s' % suffix
constraintNames.append(varName)
positionTarget = self.positionTarget
if isinstance(positionTarget, vtk.vtkTransform):
positionTarget = positionTarget.GetPosition()
positionOffset = self.positionOffset
if isinstance(positionOffset, vtk.vtkTransform):
positionOffset = positionOffset.GetPosition()
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
pointInLink=self.toColumnVectorString(self.pointInLink),
refFrame=self.toMatrixString(self.referenceFrame),
positionTarget=self.toColumnVectorString(positionTarget + positionOffset),
lowerBound=self.toColumnVectorString(self.lowerBound),
upperBound=self.toColumnVectorString(self.upperBound))
commands.append(
'point_in_link_frame = {pointInLink};\n'
'ref_frame = {refFrame};\n'
'lower_bounds = {positionTarget} + {lowerBound};\n'
'upper_bounds = {positionTarget} + {upperBound};\n'
'{varName} = WorldPositionInFrameConstraint({robotArg}, links.{linkName}, '
'point_in_link_frame, ref_frame, lower_bounds, upper_bounds, {tspan});'
''.format(**formatArgs))
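# Illustrative usage (an editor's sketch; 'l_hand' is a hypothetical link
# name, not from the original module):
#
#   c = PositionConstraint(linkName='l_hand',
#                          positionTarget=np.array([0.5, 0.0, 1.0]))
#   commands, names = c.getCommands(suffix='_1')
#   # names == ['position_constraint_1']; commands[0] builds a
#   # WorldPositionInFrameConstraint in MATLAB.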
class RelativePositionConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
bodyNameA = '',
bodyNameB = '',
pointInBodyA = np.zeros(3),
frameInBodyB = vtk.vtkTransform(),
positionTarget = np.zeros(3),
positionOffset = np.zeros(3),
lowerBound = np.zeros(3),
upperBound = np.zeros(3),
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.bodyNameA
assert self.bodyNameB
varName = 'relative_position_constraint%s' % suffix
constraintNames.append(varName)
pointInBodyA = self.toPosition(self.pointInBodyA)
positionTarget = self.toPosition(self.positionTarget)
positionOffset = self.toPosition(self.positionOffset)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
bodyNameA=self.bodyNameA,
bodyNameB=self.bodyNameB,
pointInBodyA=self.toColumnVectorString(pointInBodyA),
frameInBodyB=self.toPositionQuaternionString(self.frameInBodyB),
positionTarget=self.toColumnVectorString(positionTarget + positionOffset),
lowerBound=self.toColumnVectorString(self.lowerBound),
upperBound=self.toColumnVectorString(self.upperBound))
commands.append(
'point_in_body_a = {pointInBodyA};\n'
'frame_in_body_b = {frameInBodyB};\n'
'lower_bounds = {positionTarget} + {lowerBound};\n'
'upper_bounds = {positionTarget} + {upperBound};\n'
'{varName} = RelativePositionConstraint({robotArg}, point_in_body_a, lower_bounds, '
'upper_bounds, links.{bodyNameA}, links.{bodyNameB}, frame_in_body_b, {tspan});'
''.format(**formatArgs))
class PointToPointDistanceConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
bodyNameA = '',
bodyNameB = '',
pointInBodyA = np.zeros(3),
pointInBodyB = np.zeros(3),
lowerBound = np.zeros(1),
upperBound = np.zeros(1),
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.bodyNameA
assert self.bodyNameB
varName = 'point_to_point_distance_constraint%s' % suffix
constraintNames.append(varName)
pointInBodyA = self.toPosition(self.pointInBodyA)
pointInBodyB = self.toPosition(self.pointInBodyB)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
bodyNameA=self.bodyNameA,
bodyNameB=self.bodyNameB,
pointInBodyA=self.toColumnVectorString(pointInBodyA),
pointInBodyB=self.toColumnVectorString(pointInBodyB),
lowerBound=self.toColumnVectorString(self.lowerBound),
upperBound=self.toColumnVectorString(self.upperBound))
commands.append(
'{varName} = Point2PointDistanceConstraint({robotArg}, links.{bodyNameA}, links.{bodyNameB}, {pointInBodyA}, {pointInBodyB}, '
'{lowerBound}, {upperBound}, {tspan});'
''.format(**formatArgs))
class QuatConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
angleToleranceInDegrees = 0.0,
quaternion = np.array([1.0, 0.0, 0.0, 0.0]),
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'quat_constraint%s' % suffix
constraintNames.append(varName)
quat = self.quaternion
if isinstance(quat, vtk.vtkTransform):
_, quat = poseFromTransform(quat)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
quat=self.toColumnVectorString(quat),
#tolerance=repr(math.sin(math.radians(self.angleToleranceInDegrees))**2))
tolerance=repr(math.radians(self.angleToleranceInDegrees)))
commands.append(
'{varName} = WorldQuatConstraint({robotArg}, links.{linkName}, '
'{quat}, {tolerance}, {tspan});'
''.format(**formatArgs))
class EulerConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
orientation = np.array([0.0, 0.0, 0.0]),
lowerBound = np.array([0.0, 0.0, 0.0]),
upperBound = np.array([0.0, 0.0, 0.0]),
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'euler_constraint%s' % suffix
constraintNames.append(varName)
orientation = self.orientation
if isinstance(orientation, vtk.vtkTransform):
orientation = np.radians(np.array(orientation.GetOrientation()))
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
orientation=self.toColumnVectorString(orientation),
lowerBound=self.toColumnVectorString(self.lowerBound),
upperBound=self.toColumnVectorString(self.upperBound))
commands.append(
'{varName} = WorldEulerConstraint({robotArg}, links.{linkName}, '
'{orientation} + {lowerBound}, {orientation} + {upperBound}, {tspan});'
''.format(**formatArgs))
class WorldGazeOrientConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
quaternion = np.array([1.0, 0.0, 0.0, 0.0]),
axis = np.array([1.0, 0.0, 0.0]),
coneThreshold = 0.0,
threshold = 0.0,
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'gaze_orient_constraint%s' % suffix
constraintNames.append(varName)
quat = self.quaternion
if isinstance(quat, vtk.vtkTransform):
_, quat = poseFromTransform(quat)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
quat=self.toColumnVectorString(quat),
axis=self.toColumnVectorString(self.axis),
coneThreshold=repr(self.coneThreshold),
threshold=repr(self.threshold))
commands.append(
'{varName} = WorldGazeOrientConstraint({robotArg}, links.{linkName}, {axis}, '
'{quat}, {coneThreshold}, {threshold}, {tspan});'
''.format(**formatArgs))
class WorldGazeDirConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
bodyAxis = np.array([1.0, 0.0, 0.0]),
targetFrame = vtk.vtkTransform(),
targetAxis = np.array([1.0, 0.0, 0.0]),
coneThreshold = 0.0,
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'gaze_dir_constraint%s' % suffix
constraintNames.append(varName)
worldAxis = list(self.targetAxis)
#print 'in:', worldAxis
self.targetFrame.TransformVector(worldAxis, worldAxis)
#print 'out:', worldAxis
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
bodyAxis=self.toColumnVectorString(self.bodyAxis),
worldAxis=self.toColumnVectorString(worldAxis),
coneThreshold=repr(self.coneThreshold))
commands.append(
'{varName} = WorldGazeDirConstraint({robotArg}, links.{linkName}, {bodyAxis}, '
'{worldAxis}, {coneThreshold}, {tspan});'
''.format(**formatArgs))
class WorldGazeTargetConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
axis = np.array([1.0, 0.0, 0.0]),
worldPoint = np.array([1.0, 0.0, 0.0]),
bodyPoint = np.array([1.0, 0.0, 0.0]),
coneThreshold = 0.0,
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
assert self.linkName
varName = 'gaze_target_constraint%s' % suffix
constraintNames.append(varName)
worldPoint = self.worldPoint
if isinstance(worldPoint, vtk.vtkTransform):
worldPoint = worldPoint.GetPosition()
bodyPoint = self.bodyPoint
if isinstance(bodyPoint, vtk.vtkTransform):
bodyPoint = bodyPoint.GetPosition()
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName,
axis=self.toColumnVectorString(self.axis),
worldPoint=self.toColumnVectorString(worldPoint),
bodyPoint=self.toColumnVectorString(bodyPoint),
coneThreshold=repr(self.coneThreshold))
commands.append(
'{varName} = WorldGazeTargetConstraint({robotArg}, links.{linkName}, {axis}, '
'{worldPoint}, {bodyPoint}, {coneThreshold}, {tspan});'
''.format(**formatArgs))
class QuasiStaticConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
leftFootEnabled = True,
rightFootEnabled = True,
pelvisEnabled = False,
shrinkFactor = None,
leftFootLinkName = "",
rightFootLinkName = "",
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
if not (self.leftFootEnabled or self.rightFootEnabled or self.pelvisEnabled):
return
varName = 'qsc_constraint%s' % suffix
constraintNames.append(varName)
if self.shrinkFactor is None:
shrinkFactor = 'default_shrink_factor'
else:
shrinkFactor = repr(self.shrinkFactor)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
shrinkFactor=shrinkFactor,
leftFootLinkName = self.leftFootLinkName,
rightFootLinkName = self.rightFootLinkName)
commands.append(
'{varName} = QuasiStaticConstraint({robotArg}, {tspan}, 1);\n'
'{varName} = {varName}.setShrinkFactor({shrinkFactor});\n'
'{varName} = {varName}.setActive(true);'
''.format(**formatArgs))
if self.leftFootEnabled:
commands.append('{varName} = {varName}.addContact(links.{leftFootLinkName}, l_foot_pts);'.format(**formatArgs))
if self.rightFootEnabled:
commands.append('{varName} = {varName}.addContact(links.{rightFootLinkName}, r_foot_pts);'.format(**formatArgs))
if self.pelvisEnabled:
commands.append('{varName} = {varName}.addContact(links.pelvis, pelvis_pts);'.format(**formatArgs))
class WorldFixedBodyPoseConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
varName = 'fixed_body_constraint%s' % suffix
constraintNames.append(varName)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName)
commands.append(
'{varName} = WorldFixedBodyPoseConstraint({robotArg}, links.{linkName}, {tspan});\n'
''.format(**formatArgs))
class WorldFixedOrientConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
linkName = '',
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
varName = 'fixed_orientation_constraint%s' % suffix
constraintNames.append(varName)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
linkName=self.linkName)
commands.append(
'{varName} = WorldFixedOrientConstraint({robotArg}, links.{linkName}, {tspan});\n'
''.format(**formatArgs))
class MinDistanceConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
minDistance = 0.05
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
varName = 'contact_constraint%s' % suffix
constraintNames.append(varName)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
tspan=self.getTSpanString(),
minDistance=repr(self.minDistance))
commands.append(
'{varName} = MinDistanceConstraint({robotArg}, {minDistance}, {tspan});\n'
''.format(**formatArgs))
class ExcludeCollisionGroupConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
excludedGroupName = ''
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
formatArgs = dict(name=self.excludedGroupName,
tspan=self.getTSpanString()
)
commands.append(
'excluded_collision_groups = struct(\'name\',\'{name}\',\'tspan\',{tspan});\n'
''.format(**formatArgs))
class ActiveEndEffectorConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
endEffectorName = '',
endEffectorPoint = np.zeros(3)
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
commands.append("end_effector_name = '%s';" % self.endEffectorName)
commands.append("end_effector_pt = %s;" % self.toColumnVectorString(self.endEffectorPoint))
class GravityCompensationTorqueConstraint(ConstraintBase):
def __init__(self, **kwargs):
self._add_fields(
joints = [],
torquesLowerBound = [],
torquesUpperBound = [],
)
ConstraintBase.__init__(self, **kwargs)
def _getCommands(self, commands, constraintNames, suffix):
varName = 'gravity_compensation_torque_constraint%s' % suffix
constraintNames.append(varName)
formatArgs = dict(varName=varName,
robotArg=self.robotArg,
jointInds=self.getJointsString(self.joints),
lowerBound=self.toColumnVectorString(self.torquesLowerBound),
upperBound=self.toColumnVectorString(self.torquesUpperBound),
tspan=self.getTSpanString())
commands.append(
'{varName} = GravityCompensationTorqueConstraint({robotArg}, {jointInds}, {lowerBound}, {upperBound}, {tspan});\n'
''.format(**formatArgs))
| gizatt/director | src/python/ddapp/ikconstraints.py | Python | bsd-3-clause | 24,993 | ["VTK"] | 5cfa3f82ca84e1eca7a329374bb14059b479d8d8dd4b6c3ea35aab5cf187704b |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Code to deal with various programs for sequencing and assembly.
This code deals with programs such as Phred, Phrap and Consed -- which provide
utilities for calling bases from sequencing reads, and assembling sequences
into contigs.
"""
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Sequencing/__init__.py | Python | gpl-2.0 | 412 | ["Biopython"] | d4f9a9e9ec9670d52993db0a9b7244d5b21b8fbfd279b620b14b74eefaef9512 |
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from .coding import strings, times, variables
from .coding.variables import SerializationWarning
from .core import duck_array_ops, indexing
from .core.common import contains_cftime_datetimes
from .core.pycompat import dask_array_type
from .core.variable import IndexVariable, Variable, as_variable
class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype='>i2')
>>> x.dtype
dtype('>i2')
    >>> NativeEndiannessArray(x).dtype
    dtype('int16')
    >>> NativeEndiannessArray(x)[:].dtype
dtype('int16')
"""
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from integer to boolean datatype
This is useful for decoding boolean arrays from integer typed netCDF
variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype='i1')
>>> x.dtype
    dtype('int8')
>>> BoolTypeArray(x).dtype
dtype('bool')
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype('bool')
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_nonstring_dtype(var, name=None):
if ('dtype' in var.encoding and
var.encoding['dtype'] not in ('S1', str)):
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop('dtype'))
if dtype != var.dtype:
if np.issubdtype(dtype, np.integer):
if (np.issubdtype(var.dtype, np.floating) and
'_FillValue' not in var.attrs and
'missing_value' not in var.attrs):
warnings.warn('saving variable %s with floating '
'point data as an integer dtype without '
'any _FillValue to use for NaNs' % name,
SerializationWarning, stacklevel=10)
data = duck_array_ops.around(data)[...]
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_default_fill_value(var):
# make NaN the fill value for float types:
if ('_FillValue' not in var.attrs and
'_FillValue' not in var.encoding and
np.issubdtype(var.dtype, np.floating)):
var.attrs['_FillValue'] = var.dtype.type(np.nan)
return var
def maybe_encode_bools(var):
if ((var.dtype == np.bool) and
('dtype' not in var.encoding) and ('dtype' not in var.attrs)):
dims, data, attrs, encoding = _var_as_tuple(var)
attrs['dtype'] = 'bool'
data = data.astype(dtype='i1', copy=True)
var = Variable(dims, data, attrs, encoding)
return var
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != 'O':
        raise TypeError('_infer_dtype must be called on a dtype=object array')
if array.size == 0:
return np.dtype(float)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes, str)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != 'O':
return dtype
raise ValueError('unable to infer dtype on variable {!r}; xarray '
'cannot serialize arbitrary Python objects'
.format(name))
def ensure_not_multiindex(var, name=None):
if (isinstance(var, IndexVariable) and
isinstance(var.to_index(), pd.MultiIndex)):
raise NotImplementedError(
'variable {!r} is a MultiIndex, which cannot yet be '
'serialized to netCDF files '
'(https://github.com/pydata/xarray/issues/1077). Use '
'reset_index() to convert MultiIndex levels into coordinate '
'variables instead.'.format(name))
def _copy_with_dtype(data, dtype):
"""Create a copy of an array with the given dtype.
We use this instead of np.array() to ensure that custom object dtypes end
up on the resulting array.
"""
result = np.empty(data.shape, dtype)
result[...] = data
return result
def ensure_dtype_not_object(var, name=None):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == 'O':
dims, data, attrs, encoding = _var_as_tuple(var)
if isinstance(data, dask_array_type):
warnings.warn(
'variable {} has data in the form of a dask array with '
'dtype=object, which means it is being loaded into memory '
'to determine a data type that can be safely stored on disk. '
'To avoid this, coerce this variable to a fixed-size dtype '
'with astype() before saving it.'.format(name),
SerializationWarning)
data = data.compute()
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values, name)
# There is no safe bit-pattern for NA in typical binary string
            # formats, so we can't set a fill_value. Unfortunately, this means
# we can't distinguish between missing values and empty strings.
if strings.is_bytes_dtype(inferred_dtype):
fill_value = b''
elif strings.is_unicode_dtype(inferred_dtype):
fill_value = u''
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, np.floating):
inferred_dtype = np.dtype(float)
fill_value = inferred_dtype.type(np.nan)
data = _copy_with_dtype(data, dtype=inferred_dtype)
data[missing] = fill_value
else:
data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))
assert data.dtype.kind != 'O' or data.dtype.metadata
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True, name=None):
"""
    Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.UnsignedIntegerCoder()]:
var = coder.encode(var, name=name)
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
return var
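# Usage sketch (added; assumes xarray is importable, names illustrative):
#
#   import xarray as xr
#   v = xr.Variable(('x',), [0.5, 1.5], attrs={'units': 'm'})
#   enc = encode_cf_variable(v, name='height')
#   # enc now carries a default _FillValue for its float dtype, plus any
#   # dtype/scale_factor/add_offset encodings applied by the coders above.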
def decode_cf_variable(name, var, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_endianness=True,
stack_char_dim=True, use_cftime=None):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name: str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
var = as_variable(var)
original_dtype = var.dtype
if concat_characters:
if stack_char_dim:
var = strings.CharacterArrayCoder().decode(var, name=name)
var = strings.EncodedStringCoder().decode(var)
if mask_and_scale:
for coder in [variables.UnsignedIntegerCoder(),
variables.CFMaskCoder(),
variables.CFScaleOffsetCoder()]:
var = coder.decode(var, name=name)
if decode_times:
for coder in [times.CFTimedeltaCoder(),
times.CFDatetimeCoder(use_cftime=use_cftime)]:
var = coder.decode(var, name=name)
dimensions, data, attributes, encoding = (
variables.unpack_for_decoding(var))
# TODO(shoyer): convert everything below to use coders
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
encoding.setdefault('dtype', original_dtype)
if 'dtype' in attributes and attributes['dtype'] == 'bool':
del attributes['dtype']
data = BoolTypeArray(data)
if not isinstance(data, dask_array_type):
data = indexing.LazilyOuterIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding)
def _update_bounds_attributes(variables):
"""Adds time attributes to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the attributes from the time variable to the
associated boundaries.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
has_date_units = 'units' in attrs and 'since' in attrs['units']
if has_date_units and 'bounds' in attrs:
if attrs['bounds'] in variables:
bounds_attrs = variables[attrs['bounds']].attrs
bounds_attrs.setdefault('units', attrs['units'])
if 'calendar' in attrs:
bounds_attrs.setdefault('calendar', attrs['calendar'])
def _update_bounds_encoding(variables):
"""Adds time encoding to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the encoding from the time variable to the
associated bounds variable so that we write CF-compliant files.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
encoding = v.encoding
has_date_units = 'units' in encoding and 'since' in encoding['units']
is_datetime_type = (np.issubdtype(v.dtype, np.datetime64) or
contains_cftime_datetimes(v))
if (is_datetime_type and not has_date_units and
'bounds' in attrs and attrs['bounds'] in variables):
warnings.warn("Variable '{0}' has datetime type and a "
"bounds variable but {0}.encoding does not have "
"units specified. The units encodings for '{0}' "
"and '{1}' will be determined independently "
"and may not be equal, counter to CF-conventions. "
"If this is a concern, specify a units encoding for "
"'{0}' before writing to a file."
.format(v.name, attrs['bounds']),
UserWarning)
if has_date_units and 'bounds' in attrs:
if attrs['bounds'] in variables:
bounds_encoding = variables[attrs['bounds']].encoding
bounds_encoding.setdefault('units', encoding['units'])
if 'calendar' in encoding:
bounds_encoding.setdefault('calendar',
encoding['calendar'])
def decode_cf_variables(variables, attributes, concat_characters=True,
mask_and_scale=True, decode_times=True,
decode_coords=True, drop_variables=None,
use_cftime=None):
"""
Decode several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != 'S' or dim != v.dims[-1]:
return False
return True
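    # Note (added): 'stackable' returns True only for a dimension that is not
    # itself a variable and that appears solely as the *last* dimension of
    # 'S'-kind (bytes) variables -- e.g. the trailing 'string4' dimension of a
    # ('name', 'string4') char array, which can then be stacked into bytes.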
coord_names = set()
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
elif drop_variables is None:
drop_variables = []
drop_variables = set(drop_variables)
# Time bounds coordinates might miss the decoding attributes
if decode_times:
_update_bounds_attributes(variables)
new_vars = OrderedDict()
for k, v in variables.items():
if k in drop_variables:
continue
stack_char_dim = (concat_characters and v.dtype == 'S1' and
v.ndim > 0 and stackable(v.dims[-1]))
new_vars[k] = decode_cf_variable(
k, v, concat_characters=concat_characters,
mask_and_scale=mask_and_scale, decode_times=decode_times,
stack_char_dim=stack_char_dim, use_cftime=use_cftime)
if decode_coords:
var_attrs = new_vars[k].attrs
if 'coordinates' in var_attrs:
coord_str = var_attrs['coordinates']
var_coord_names = coord_str.split()
if all(k in variables for k in var_coord_names):
new_vars[k].encoding['coordinates'] = coord_str
del var_attrs['coordinates']
coord_names.update(var_coord_names)
if decode_coords and 'coordinates' in attributes:
attributes = OrderedDict(attributes)
coord_names.update(attributes.pop('coordinates').split())
return new_vars, attributes, coord_names
def decode_cf(obj, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_coords=True, drop_variables=None,
use_cftime=None):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since 'hours since 2000-01-01') to
np.datetime64.
decode_coords : bool, optional
Use the 'coordinates' attribute on variable (or the dataset itself) to
identify coordinates.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
decoded : Dataset
"""
from .core.dataset import Dataset
from .backends.common import AbstractDataStore
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
file_obj = obj._file_obj
encoding = obj.encoding
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
file_obj = obj
encoding = obj.get_encoding()
else:
raise TypeError('can only decode Dataset or DataStore objects')
vars, attrs, coord_names = decode_cf_variables(
vars, attrs, concat_characters, mask_and_scale, decode_times,
decode_coords, drop_variables=drop_variables, use_cftime=use_cftime)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
ds._file_obj = file_obj
ds.encoding = encoding
return ds
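# Usage sketch (added; the file name is hypothetical):
#
#   import xarray as xr
#   raw = xr.open_dataset('data.nc', decode_cf=False)
#   ds = decode_cf(raw)  # mask/scale, decode times, promote coordinates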
def cf_decoder(variables, attributes,
concat_characters=True, mask_and_scale=True,
decode_times=True):
"""
Decode a set of CF encoded variables and attributes.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
See also
--------
decode_cf_variable
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, str) and ' ' in name:
warnings.warn(
'coordinate {!r} has a space in its name, which means it '
'cannot be marked as a coordinate on disk and will be '
'saved as a data variable instead'.format(name),
SerializationWarning, stacklevel=6)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (k not in non_dim_coord_names and k not in v.dims and
set(target_dims) <= set(v.dims)):
variable_coordinates[k].add(coord_name)
global_coordinates.discard(coord_name)
variables = OrderedDict((k, v.copy(deep=False))
for k, v in variables.items())
# These coordinates are saved according to CF conventions
for var_name, coord_names in variable_coordinates.items():
attrs = variables[var_name].attrs
if 'coordinates' in attrs:
raise ValueError('cannot serialize coordinates because variable '
"%s already has an attribute 'coordinates'"
% var_name)
attrs['coordinates'] = ' '.join(map(str, coord_names))
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html
if global_coordinates:
attributes = OrderedDict(attributes)
if 'coordinates' in attributes:
raise ValueError('cannot serialize coordinates because the global '
"attribute 'coordinates' already exists")
attributes['coordinates'] = ' '.join(map(str, global_coordinates))
return variables, attributes
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(dataset._variables, dataset.attrs,
non_dim_coord_names=non_dim_coord_names)
def cf_encoder(variables, attributes):
"""
Encode a set of CF encoded variables and attributes.
    Takes dicts of variables and attributes and encodes them
to conform to CF conventions as much as possible.
This includes masking, scaling, character array handling,
and CF-time encoding.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also
--------
decode_cf_variable, encode_cf_variable
"""
# add encoding for time bounds variables if present.
_update_bounds_encoding(variables)
new_vars = OrderedDict((k, encode_cf_variable(v, name=k))
for k, v in variables.items())
# Remove attrs from bounds variables (issue #2921)
for var in new_vars.values():
bounds = var.attrs['bounds'] if 'bounds' in var.attrs else None
if bounds and bounds in new_vars:
# see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries # noqa
for attr in ['units', 'standard_name', 'axis', 'positive',
'calendar', 'long_name', 'leap_month', 'leap_year',
'month_lengths']:
if attr in new_vars[bounds].attrs and attr in var.attrs:
if new_vars[bounds].attrs[attr] == var.attrs[attr]:
new_vars[bounds].attrs.pop(attr)
return new_vars, attributes
|
shoyer/xray
|
xarray/conventions.py
|
Python
|
apache-2.0
| 25,750
|
[
"NetCDF"
] |
33a6f7930849dcb93026b7687c357ad3de782f234e2e3497be008d3efee2efc4
|
#!/usr/bin/env python3
"""
Author: Kartamyshev A.I. (Darth Feiwante)
"""
def min_distance(database = None, calculation = ()):
"""
    This function is used to find the minimal interatomic distance in the lattice
    presented as a part of the 'Calculation' object
    INPUT:
        - database (.gbdm3) - database dictionary; if not provided, it is taken from the header
- calculation (tuple) - tuple describing the Calculation object in form ('structure', 'set', version)
RETURN:
None
SOURCE:
None
TODO:
Some improvements
"""
c = database[calculation]
min_dist = 1000000
for i in range(c.natom):
for j in range(c.natom):
if j == i: continue
at1 = c.xcart[i]
at2 = c.xcart[j]
dist = ((at1[0]-at2[0])**2 + (at1[1]-at2[1])**2 + (at1[2]-at2[2])**2)**0.5
if min_dist>dist:
min_dist = dist
atom1 = at1
atom2 = at2
print('Minimal distance = ', min_dist)
print('Atom 1 = ', atom1)
print('Atom 2 = ', atom2)
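# Aside (added, hedged): the double loop above runs in pure Python and scales
# as O(natom**2); an equivalent vectorized formulation, assuming c.xcart
# behaves as an (natom, 3) array, would be:
#
#   import numpy as np
#   x = np.asarray(c.xcart)
#   d = np.linalg.norm(x[:, None, :] - x[None, :, :], axis=-1)
#   np.fill_diagonal(d, np.inf)
#   i, j = np.unravel_index(np.argmin(d), d.shape)
#   # d[i, j] is the minimal interatomic distance, between atoms i and j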
def formation_energy(database = None, calc_def = (), calc_id = ()):
"""
    This function is used to calculate the formation energy of a point defect
    from a 'Calculation' object with the defect and an ideal reference one
    INPUT:
        - database (.gbdm3) - database dictionary; if not provided, it is taken from the header
- calc_def (tuple) - tuple describing the Calculation object for the
lattice containing a defect in form ('structure', 'set', version)
- calc_id (tuple) - tuple describing the Calculation object for the
lattice without a defect in form ('structure', 'set', version)
RETURN:
None
SOURCE:
None
TODO:
- Add different type of defects
"""
defect = database[calc_def]
ideal = database[calc_id]
n_at_def = defect.natom
e_def = defect.energy_free
e_id_at = ideal.energy_free/ideal.natom
E_f = e_def - n_at_def*e_id_at
print('Formation energy for defect '+calc_def[0]+' = '+str(E_f)+' eV')
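# Worked example (added; numbers are illustrative only): for a vacancy cell
# with n_at_def = 107 atoms and e_def = -855.2 eV, and an ideal reference
# with e_id_at = -8.0 eV/atom:
#   E_f = -855.2 - 107*(-8.0) = 0.8 eV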
# Fitting of the E(a,c) dependence for the equilibrium c/a searching
import xalglib
class ALGLIB:
def build_2d_bicubic_spline(self, x, m, y, n, z, d): self.bicubicv2d = xalglib.spline2dbuildbicubicv(x, m, y, n, z, d)
def calc(self, x, y, ind):
l = xalglib.spline2ddiff(self.bicubicv2d,x,y)
if ind==0: return l[0] # z
elif ind==1: return l[1] # dz/dx
elif ind==2: return l[2] # dz/dy
elif ind==3: return l[3] # d2z/dxdy
else: raise RuntimeError ('Unknown ind = '+str(ind))
class Approximation:
def aprx_lsq(self, fun_aprx, num_coef, xx, yy):
from scipy.optimize import leastsq
        def f_aprx(par, yy1, xx1):
            # Evaluate fun_aprx with a0..a(num_coef-1) bound to the current
            # parameter vector in an explicit namespace; relying on exec()
            # and eval() sharing function locals is fragile across Python
            # versions.
            ns = {'a%d' % i: par[i] for i in range(num_coef)}
            x1 = []
            for x in xx1:
                ns['x'] = x
                x1.append(eval(fun_aprx, ns))
            return [yy1[i] - x1[i] for i in range(len(x1))]
plsq = leastsq(f_aprx, [1 for i in range(num_coef)], args=(yy, xx))
self.coefs = list(plsq[0])
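# Usage sketch (added; assumes scipy is available): fit y = a0*x + a1
# through three collinear points.
#
#   ap = Approximation()
#   ap.aprx_lsq('a0*x+a1', 2, [0.0, 1.0, 2.0], [1.0, 3.0, 5.0])
#   # ap.coefs is approximately [2.0, 1.0]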
def carrier_mobility(calc_init=(), calc_deform_x=(), calc_deform_y=(),
vbm=1, vbm_point=0, cbm=2, cbm_point=0,
deform_x=(), n_points_x=2, deform_y=(), n_points_y=2,
temperature_range = (300,900), temperature_ref=300,
effective_mass_el={}, effective_mass_xy_el={},
effective_mass_hole={}, effective_mass_xy_hole={},
lab_size=15, tick_size=15, leg_size=15, fig_size=(9,17), fig_title='', xlim=(), ylim_elastic=(), ylim_deform=(),
expression='Guo2021_JMCC', database=None, folder=''):
"""
This function is used to calculate carrier mobility for 2D structure
INPUT:
- calc_init (tuple) - tuple describing the Calculation object for the
undeformed lattice in form ('structure', 'set', version)
- calc_deform_x (tuple) - tuple describing the Calculation object for the
lattice deformed along the 'x' axis in form ('structure', 'set', range(version1, version2))
- calc_deform_y (tuple) - tuple describing the Calculation object for the
lattice deformed along the 'y' axis in form ('structure', 'set', range(version1, version2))
- vbm (int) - number of the last occupied band (valence band) (count starts from '1')
- vbm_point (int) - number of the k-point in the IBZKPT file, at which the valence band maximum (VBM) is located (count starts from '0')
- cbm (int) - number of the first unoccupied band (conduction band) (count starts from '1')
- cbm_point (int) - number of the k-point in the IBZKPT file, at which the conduction band minimum (CBM) is located (count starts from '0')
- deform_x (tuple of floats) - range of deformations along the 'x' axis in relative units
- n_points_x (int) - number of different deformations along the 'x' axis
- deform_y (tuple of floats) - range of deformations along the 'y' axis in relative units
- n_points_y (int) - number of different deformations along the 'y' axis
- temperature_range (tuple of floats) - temperature range for the calculations of the temperature-dependent carrier mobility
        - temperature_ref (float) - temperature for which the detailed information is calculated
- effective_mass_el (dict) - the dictionary of the effective masses of electrons in different directions of the reciprocal space
has form {'M':0.250,...}, where 'M' - name of the point and 0.250 is the effective mass in
electron mass units. The point determines the direction 'CBM' -> 'M'.
- effective_mass_xy_el (dict) - the same as 'effective_mass_el' but for directions 'CBM' -> 'X' and 'CBM' -> 'Y' only
- effective_mass_hole (dict) - the same as 'effective_mass_el' but for holes
        - effective_mass_xy_hole (dict) - the same as 'effective_mass_xy_el' but for holes
- lab_size (int) - font size of labels
- tick_size (int) - font size of ticks
- leg_size (int) - font size of legend
- fig_size (tuple of floats) - has form (height, width) set size of the figure in units of cm
- fig_title (str) - optional, the title placed above the plot
- xlim (tuple of floats) - has form (min_x, max_x) and represents limits of deformation for
both plots of the elastic moduli and deformation potential
- ylim_elastic (tuple of floats) - has form (min_y, max_y) and represents limits for
the elastic moduli plot, units are N/m
- ylim_deform (tuple of floats) - has form (min_y, max_y) and represents limits for
the deformation plot, units are eV
- expression (str) - key representing the formula to calculate carrier mobility
Possible options:
'Guo2021_JMCC' - Eq. (4) from J. Mater. Chem. C. 9 (2021) 2464–2473. doi:10.1039/D0TC05649A
        - database (.gbdm3) - database dictionary; if not provided, it is taken from the header
- folder (str) - directory where all the results will be built
RETURN:
None
SOURCE:
Guo2021_JMCC equation - S.-D. Guo, W.-Q. Mu, Y.-T. Zhu, R.-Y. Han, W.-C. Ren, J. Mater. Chem. C. 9 (2021) 2464–2473. doi:10.1039/D0TC05649A. (Eq. 4)
TODO:
- Add calculations for 3D structures
"""
from math import pi, sqrt, acos, sin
from pymatgen.io.vasp import Vasprun, Outcar
from pymatgen.electronic_structure.core import Spin, OrbitalType
from analysis_functions import Approximation as A
import matplotlib.pyplot as plt
import matplotlib.gridspec as gspec
# Transformation coefficients
ang = 1e-10 # Angstrom, m
ev_in_j = 1.60217662e-19 # Electron-volt, J
# Functions for calculation of the carrier mobility
def carrier_Guo2021_JMCC(t, c2d, el, m, md):
# Constants
e = 1.60217662e-19 # Electron charge, Coulomb
m_e = 9.10938356e-31 # Electron mass, kg
k_B = 1.38064852e-23 # Boltzmann constant, J/K
h = 6.62607004e-34 # Plank constant, J*s
mu = (e * (h/(2*pi))**3 * c2d * 10000)/(k_B*t*m*md*m_e**2*el**2)
return mu
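    # Note (added): the expression above is Eq. (4) of Guo et al. (2021),
    #   mu_2D = e * hbar**3 * C_2D / (k_B * T * m * m_d * E_l**2),
    # with hbar = h/(2*pi), m and m_d supplied in electron masses (hence the
    # m_e**2 factor), and the factor 10000 converting m^2/(V*s) to cm^2/(V*s).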
# Lists of deformation along x and y axes
step_x = (deform_x[1] - deform_x[0])/(n_points_x - 1)
eps_x = [deform_x[0] + step_x*i for i in range(n_points_x)]
step_y = (deform_y[1] - deform_y[0])/(n_points_y - 1)
eps_y = [deform_y[0] + step_y*i for i in range(n_points_y)]
# Reading energies
energy_x = []
energy_vbm_x = []
energy_cbm_x = []
for i in calc_deform_x[2]:
calc_cur = database[(calc_deform_x[0], calc_deform_x[1], i)]
energy_x.append(calc_cur.energy_sigma0)
path_vasprun = calc_cur.path['output'].replace('OUTCAR','vasprun.xml')
path_kpoints = calc_cur.path['output'].replace(str(i)+'.OUTCAR','IBZKPT')
run = Vasprun(path_vasprun, parse_projected_eigen=True)
dosrun = Vasprun(path_vasprun)
try:
bands = run.get_band_structure(path_kpoints,
line_mode=True,
efermi=dosrun.efermi)
except Exception:
path_kpoints = calc_cur.path['output'].replace(str(i)+'.OUTCAR','KPOINTS')
bands = run.get_band_structure(path_kpoints,
line_mode=True,
efermi=dosrun.efermi)
energy_vbm_x.append(bands.bands[Spin.up][vbm-1][vbm_point])
energy_cbm_x.append(bands.bands[Spin.up][cbm-1][cbm_point])
energy_y = []
energy_vbm_y = []
energy_cbm_y = []
for i in calc_deform_y[2]:
calc_cur = database[(calc_deform_y[0], calc_deform_y[1], i)]
energy_y.append(calc_cur.energy_sigma0)
path_vasprun = calc_cur.path['output'].replace('OUTCAR','vasprun.xml')
path_kpoints = calc_cur.path['output'].replace(str(i)+'.OUTCAR','IBZKPT')
run = Vasprun(path_vasprun, parse_projected_eigen=True)
dosrun = Vasprun(path_vasprun)
try:
bands = run.get_band_structure(path_kpoints,
line_mode=True,
efermi=dosrun.efermi)
except Exception:
path_kpoints = calc_cur.path['output'].replace(str(i)+'.OUTCAR','KPOINTS')
bands = run.get_band_structure(path_kpoints,
line_mode=True,
efermi=dosrun.efermi)
energy_vbm_y.append(bands.bands[Spin.up][vbm-1][vbm_point])
energy_cbm_y.append(bands.bands[Spin.up][cbm-1][cbm_point])
# Calculation of the initial surface area s0
a = list(database[calc_init].end.rprimd[0])
b = list(database[calc_init].end.rprimd[1])
mod_a = sqrt(a[0]**2 + a[1]**2 + a[2]**2)
mod_b = sqrt(b[0]**2 + b[1]**2 + b[2]**2)
sc_ab = a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
angle = acos(sc_ab/(mod_a * mod_b))
s0 = mod_a * mod_b * sin(angle)
# ********************************************* 2D elastic moduli ********************************************
# 2D Elastic modulus
# Deformation along x direction
ek1 = A()
ek1.aprx_lsq('a0*x**2+a1*x+a2', 3, eps_x, energy_x)
C_2D_x = ek1.coefs[0]*ev_in_j/(s0*ang**2)
a0_x = ek1.coefs[0]
a1_x = ek1.coefs[1]
a2_x = ek1.coefs[2]
eps_x_fit = [deform_x[0] + (deform_x[1] - deform_x[0])/999*i for i in range(1000)]
energy_x_fit = [a0_x*i**2 + a1_x*i + a2_x for i in eps_x_fit]
# Deformation along y direction
ek2 = A()
ek2.aprx_lsq('a0*x**2+a1*x+a2', 3, eps_y, energy_y)
C_2D_y = ek2.coefs[0]*ev_in_j/(s0*ang**2)
a0_y = ek2.coefs[0]
a1_y = ek2.coefs[1]
a2_y = ek2.coefs[2]
eps_y_fit = [deform_y[0] + (deform_y[1] - deform_y[0])/999*i for i in range(1000)]
energy_y_fit = [a0_y*i**2 + a1_y*i + a2_y for i in eps_y_fit]
# ************************************************************************************************************
# ***************************************** Deformation potential ********************************************
# Deformation potential
# X direction
ek3 = A()
ek3.aprx_lsq('a0*x+a1', 2, eps_x, energy_vbm_x)
E_l_hole_x = ek3.coefs[0]*ev_in_j
vbm_x_0 = ek3.coefs[0]
vbm_x_1 = ek3.coefs[1]
ek4 = A()
ek4.aprx_lsq('a0*x+a1', 2, eps_x, energy_cbm_x)
E_l_el_x = ek4.coefs[0]*ev_in_j
cbm_x_0 = ek4.coefs[0]
cbm_x_1 = ek4.coefs[1]
energy_vbm_x_fit = [vbm_x_0*i + vbm_x_1 for i in eps_x_fit]
energy_cbm_x_fit = [cbm_x_0*i + cbm_x_1 for i in eps_x_fit]
# Y direction
ek5 = A()
ek5.aprx_lsq('a0*x+a1', 2, eps_y, energy_vbm_y)
E_l_hole_y = ek5.coefs[0]*ev_in_j
vbm_y_0 = ek5.coefs[0]
vbm_y_1 = ek5.coefs[1]
ek6 = A()
ek6.aprx_lsq('a0*x+a1', 2, eps_y, energy_cbm_y)
E_l_el_y = ek6.coefs[0]*ev_in_j
cbm_y_0 = ek6.coefs[0]
cbm_y_1 = ek6.coefs[1]
energy_vbm_y_fit = [vbm_y_0*i + vbm_y_1 for i in eps_y_fit]
energy_cbm_y_fit = [cbm_y_0*i + cbm_y_1 for i in eps_y_fit]
# Writing files with the energy dependencies on deformation value
f = open(folder+'/elastic_x.out', 'w')
for i in range(len(eps_x)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x[i], energy_x[i])+'\n')
f.close()
f = open(folder+'/elastic_y.out', 'w')
for i in range(len(eps_y)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y[i], energy_y[i])+'\n')
f.close()
f = open(folder+'/elastic_x_fit.out', 'w')
for i in range(len(eps_x_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x_fit[i], energy_x_fit[i])+'\n')
f.close()
f = open(folder+'/elastic_y_fit.out', 'w')
for i in range(len(eps_y_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y_fit[i], energy_y_fit[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_x_hole.out', 'w')
for i in range(len(eps_x)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x[i], energy_vbm_x[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_x_electron.out', 'w')
for i in range(len(eps_x)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x[i], energy_cbm_x[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_x_hole_fit.out', 'w')
for i in range(len(eps_x_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x_fit[i], energy_vbm_x_fit[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_x_electron_fit.out', 'w')
for i in range(len(eps_x_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_x_fit[i], energy_cbm_x_fit[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_y_hole.out', 'w')
for i in range(len(eps_y)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y[i], energy_vbm_y[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_y_electron.out', 'w')
for i in range(len(eps_y)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y[i], energy_cbm_y[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_y_hole_fit.out', 'w')
for i in range(len(eps_y_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y_fit[i], energy_vbm_y_fit[i])+'\n')
f.close()
f = open(folder+'/deformation_potential_y_electron_fit.out', 'w')
for i in range(len(eps_y_fit)):
f.write('{0:15.5f} {1:15.10f}'.format(eps_y_fit[i], energy_cbm_y_fit[i])+'\n')
f.close()
# ************************************************************************************************************
# Figure with Elastic moduli and Deformation potential
interval = 0.05
npoint = 50
precision = 0.000001
shift_left = 100000.15
shift_right = 100000.20
# Use GridSpec to set subplots
gs = gspec.GridSpec(1, 2, width_ratios=[1.0, 1.0], height_ratios = [1.0]) # height_ratios = [5, 5, 5]
gs.update(bottom=0.07, hspace=0.17, wspace=0.25, top=0.93, right=0.97, left=0.1)
    # ====================== 2D elastic moduli subplot ======================
plt1 = plt.subplot(gs[0,0])
plt1.plot(eps_x, energy_x, linewidth=2, linestyle='' , marker='o', markersize = 7, color='red' )
plt1.plot(eps_x_fit, energy_x_fit, linewidth=2, linestyle='-', color='red' , label='x: $C_{2D} = $'+'{0:7.2f}'.format(C_2D_x)+' N/m')
plt1.plot(eps_y, energy_y, linewidth=2, linestyle='' , marker='o', markersize = 7, color='blue')
plt1.plot(eps_y_fit, energy_y_fit, linewidth=2, linestyle='-', color='blue', label='y: $C_{2D} = $'+'{0:7.2f}'.format(C_2D_y)+' N/m')
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
    plt.xlabel(r'$\epsilon$', fontsize = lab_size)
plt.ylabel('Energy, eV', fontsize = lab_size)
if xlim: plt1.set_xlim(xlim[0], xlim[1])
if ylim_elastic: plt1.set_ylim(ylim_elastic[0], ylim_elastic[1])
plt1.legend(bbox_to_anchor=(0.5, 0.5), borderaxespad=0., labelspacing=0.3, numpoints=1, frameon=True, fancybox=True, markerscale=1., handletextpad=0.3, fontsize=leg_size)
    # ==================== Deformation potential subplot ====================
plt2 = plt.subplot(gs[0,1])
plt2.plot(eps_x, energy_vbm_x, linewidth=2, linestyle='' , marker='o', markersize = 7, color='red' )
plt2.plot(eps_x_fit, energy_vbm_x_fit, linewidth=2, linestyle='-', color='red', label='x: VBM $E_{l} = $'+'{0:7.2f}'.format(E_l_hole_x/ev_in_j)+' eV')
plt2.plot(eps_x, energy_cbm_x, linewidth=2, linestyle='' , marker='o', markersize = 7, color='blue')
plt2.plot(eps_x_fit, energy_cbm_x_fit, linewidth=2, linestyle='-', color='blue', label='x: CBM $E_{l} = $'+'{0:7.2f}'.format(E_l_el_x/ev_in_j)+' eV')
plt2.plot(eps_y, energy_vbm_y, linewidth=2, linestyle='' , marker='s', markersize = 7, color='orange' )
plt2.plot(eps_y_fit, energy_vbm_y_fit, linewidth=2, linestyle='-', color='orange', label='y: VBM $E_{l} = $'+'{0:7.2f}'.format(E_l_hole_y/ev_in_j)+' eV')
plt2.plot(eps_y, energy_cbm_y, linewidth=2, linestyle='' , marker='s', markersize = 7, color='green')
plt2.plot(eps_y_fit, energy_cbm_y_fit, linewidth=2, linestyle='-', color='green', label='y: CBM $E_{l} = $'+'{0:7.2f}'.format(E_l_el_y/ev_in_j)+' eV')
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
    plt.xlabel(r'$\epsilon$', fontsize = lab_size)
plt.ylabel('Energy, eV', fontsize = lab_size)
if xlim: plt2.set_xlim(xlim[0], xlim[1])
if ylim_deform: plt2.set_ylim(ylim_deform[0], ylim_deform[1])
plt2.legend(bbox_to_anchor=(0.5, 0.5), borderaxespad=0., labelspacing=0.3, numpoints=1, frameon=True, fancybox=True, markerscale=1., handletextpad=0.3, fontsize=leg_size)
# Make figure with the chosen size
fig = plt.figure(1, dpi=600)
fig.set_figheight(fig_size[0])
fig.set_figwidth(fig_size[1])
fig.savefig(folder+'/Elastic_moduli_Deformation_potential.pdf', format="pdf", dpi=600)
plt.clf()
plt.cla()
# Calculation of the carrier mobility
# Check if the temperature range option is used
if temperature_range:
step_t = (temperature_range[1] - temperature_range[0])/(1000 - 1)
t_list = [temperature_range[0] + i*step_t for i in range(1000)]
# Make calculations
f = open(folder+'/carrier_mobility_t'+str(temperature_ref)+'.out', 'w')
if expression == 'Guo2021_JMCC':
cite = 'The expression for the carrier mobility was taken from \n S.-D. Guo, W.-Q. Mu, Y.-T. Zhu, R.-Y. Han, W.-C. Ren, J. Mater. Chem. C. 9 (2021) 2464–2473. doi:10.1039/D0TC05649A. (Eq. 4)\n'
f.write(cite)
f.write('\n')
f.write('Temperature '+str(temperature_ref)+' K\n')
f.write('{0:^15s}{1:^10s}{2:^15s}{3:^15s}{4:^15s}{5:^17s}'.format('Carrier type', 'Direction', 'C_2D, N/m', 'm*, m_e', 'E_l, eV', 'mu_2D, cm^2V^(-1)s^(-1)')+'\n')
if expression == 'Guo2021_JMCC':
function_cm = carrier_Guo2021_JMCC
else:
raise RuntimeError('Choose the appropriate expression for carrier mobility!!!')
# Electrons
effective_mass_xy_el_md = (effective_mass_xy_el['X']*effective_mass_xy_el['Y'])**0.5
for j in effective_mass_el.keys():
f.write('CBM -> '+j+'\n')
if expression == 'Guo2021_JMCC':
mu_el_x = function_cm(t=temperature_ref, c2d=C_2D_x, el=E_l_el_x, m=effective_mass_el[j], md=effective_mass_xy_el_md)
mu_el_y = function_cm(t=temperature_ref, c2d=C_2D_y, el=E_l_el_y, m=effective_mass_el[j], md=effective_mass_xy_el_md)
f.write('{0:^15s}{1:^10s}{2:^15.2f}{3:^15.2f}{4:^15.2f}{5:^17.2f}'.format('Electron', 'x', C_2D_x, effective_mass_el[j], E_l_el_x/ev_in_j, mu_el_x)+'\n')
f.write('{0:^15s}{1:^10s}{2:^15.2f}{3:^15.2f}{4:^15.2f}{5:^17.2f}'.format('Electron', 'y', C_2D_y, effective_mass_el[j], E_l_el_y/ev_in_j, mu_el_y)+'\n')
# If the temperature range is set
if temperature_range:
mu_el_x_list = [function_cm(t=i, c2d=C_2D_x, el=E_l_el_x, m=effective_mass_el[j], md=effective_mass_xy_el_md) for i in t_list]
mu_el_y_list = [function_cm(t=i, c2d=C_2D_y, el=E_l_el_y, m=effective_mass_el[j], md=effective_mass_xy_el_md) for i in t_list]
f1 = open(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_el_CBM_'+j+'_X.out', 'w')
for i in range(len(t_list)):
f1.write('{0:^15.2f} {1:^15.2f}'.format(t_list[i], mu_el_x_list[i])+'\n')
f1.close()
f1 = open(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_el_CBM_'+j+'_Y.out', 'w')
for i in range(len(t_list)):
f1.write('{0:^15.2f} {1:^15.2f}'.format(t_list[i], mu_el_y_list[i])+'\n')
f1.close()
# Make figure for the temperature dependence of the carrier mobility of electrons
plt.plot(t_list, mu_el_x_list, linewidth=2, linestyle='-', color='red', label = 'X')
plt.plot(t_list, mu_el_y_list, linewidth=2, linestyle='-', color='blue', label = 'Y')
plt.xlabel('Temperature, K', fontsize = lab_size)
            plt.ylabel(r'$\mu_{2D}, cm^2V^{-1}s^{-1}$', fontsize = lab_size)
plt.legend(bbox_to_anchor=(0.5, 0.5), borderaxespad=0., labelspacing=0.3, numpoints=1, frameon=True, fancybox=True, markerscale=1., handletextpad=0.3, fontsize=leg_size)
            plt.title('Electron CBM -> '+j)
fig = plt.figure(1)
fig.set_figheight(9)
fig.set_figwidth(9)
fig.savefig(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_el_CBM_'+j+'_XY.pdf', format="pdf", dpi=600)
plt.clf()
plt.cla()
# Holes
effective_mass_xy_hole_md = (effective_mass_xy_hole['X']*effective_mass_xy_hole['Y'])**0.5
for j in effective_mass_hole.keys():
f.write('VBM -> '+j+'\n')
if expression == 'Guo2021_JMCC':
mu_hole_x = function_cm(t=temperature_ref, c2d=C_2D_x, el=E_l_hole_x, m=effective_mass_hole[j], md=effective_mass_xy_hole_md)
mu_hole_y = function_cm(t=temperature_ref, c2d=C_2D_y, el=E_l_hole_y, m=effective_mass_hole[j], md=effective_mass_xy_hole_md)
f.write('{0:^15s}{1:^10s}{2:^15.2f}{3:^15.2f}{4:^15.2f}{5:^17.2f}'.format('Hole', 'x', C_2D_x, effective_mass_hole[j], E_l_hole_x/ev_in_j, mu_hole_x)+'\n')
f.write('{0:^15s}{1:^10s}{2:^15.2f}{3:^15.2f}{4:^15.2f}{5:^17.2f}'.format('Hole', 'y', C_2D_y, effective_mass_hole[j], E_l_hole_y/ev_in_j, mu_hole_y)+'\n')
# If the temperature range is set
if temperature_range:
mu_hole_x_list = [function_cm(t=i, c2d=C_2D_x, el=E_l_hole_x, m=effective_mass_hole[j], md=effective_mass_xy_hole_md) for i in t_list]
mu_hole_y_list = [function_cm(t=i, c2d=C_2D_y, el=E_l_hole_y, m=effective_mass_hole[j], md=effective_mass_xy_hole_md) for i in t_list]
f1 = open(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_hole_VBM_'+j+'_X.out', 'w')
for i in range(len(t_list)):
f1.write('{0:^15.2f} {1:^15.2f}'.format(t_list[i], mu_hole_x_list[i])+'\n')
f1.close()
f1 = open(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_hole_VBM_'+j+'_Y.out', 'w')
for i in range(len(t_list)):
f1.write('{0:^15.2f} {1:^15.2f}'.format(t_list[i], mu_hole_y_list[i])+'\n')
f1.close()
# Make figure for the temperature dependence of the carrier mobility of holes
plt.plot(t_list, mu_hole_x_list, linewidth=2, linestyle='-', color='red', label = 'X')
plt.plot(t_list, mu_hole_y_list, linewidth=2, linestyle='-', color='blue', label = 'Y')
plt.xlabel('Temperature, K', fontsize = lab_size)
            plt.ylabel(r'$\mu_{2D}, cm^2V^{-1}s^{-1}$', fontsize = lab_size)
plt.legend(bbox_to_anchor=(0.5, 0.5), borderaxespad=0., labelspacing=0.3, numpoints=1, frameon=True, fancybox=True, markerscale=1., handletextpad=0.3, fontsize=leg_size)
            plt.title('Hole VBM -> '+j)
fig = plt.figure(1)
fig.set_figheight(9)
fig.set_figwidth(9)
fig.savefig(folder+'/carrier_mobility_t'+str(temperature_range[0])+'_'+str(temperature_range[1])+'_hole_VBM_'+j+'_XY.pdf', format="pdf", dpi=600)
plt.clf()
plt.cla()
f.close()
|
dimonaks/siman
|
siman/analysis_functions.py
|
Python
|
gpl-2.0
| 26,024
|
[
"VASP",
"pymatgen"
] |
9810a46ef7660b5527f1d3b2f7ea99a99351467dce4c9eee053db2f2339e389d
|
'''
Utility module
This module contains a variety of convenience functions, including:
- get_dataset
- interp
- p_corrected_bonf
- p_critical_bonf
- smooth
'''
# Copyright (C) 2022 Todd Pataky
# updated (2022/02/06) todd
from math import sqrt,log
import numpy as np
from . stats._spm import plist2string as p2s
# from scipy.ndimage.filters import gaussian_filter1d
from scipy.ndimage import gaussian_filter1d
def plist2stringlist(pList):
s = p2s(pList).split(', ')
for i,ss in enumerate(s):
if ss.startswith('<'):
s[i] = 'p' + ss
else:
s[i] = 'p=' + ss
return s
def get_dataset(*args):
'''
.. warning:: Deprecated
**get_dataset** is deprecated and will be removed from future versions of **spm1d**. Please access datasets using the "spm1d.data" interface.
'''
raise( IOError('"get_dataset" is deprecated. Please access datasets using "spm1d.data".') )
def interp(y, Q=101):
'''
    Simple linear interpolation to *Q* values.
:Parameters:
- *y* --- a 1D array or list of J separate 1D arrays
- *Q* --- number of nodes in the interpolated continuum
:Returns:
- Q-component 1D array or a (J x Q) array
:Example:
>>> y0 = np.random.rand(51)
>>> y1 = np.random.rand(87)
>>> y2 = np.random.rand(68)
>>> Y = [y0, y1, y2]
>>> Y = spm1d.util.interp(Y, Q=101)
'''
y = np.asarray(y)
if (y.ndim==2) or (not np.isscalar(y[0])):
return np.asarray( [interp(yy, Q) for yy in y] )
else:
x0 = range(y.size)
x1 = np.linspace(0, y.size, Q)
return np.interp(x1, x0, y, left=None, right=None)
def p_corrected_bonf(p, n):
'''
Bonferroni-corrected *p* value.
.. warning:: This correction assumes independence amongst multiple tests.
:Parameters:
- *p* --- probability value computed from one of multiple tests
- *n* --- number of tests
:Returns:
- Bonferroni-corrected *p* value.
:Example:
>>> p = spm1d.util.p_corrected_bonf(0.03, 8) # yields p = 0.216
'''
if p<=0:
return 0
elif p>=1:
return 1
else:
pBonf = 1 - (1.0-p)**n
pBonf = max(0, min(1, pBonf))
return pBonf
def p_critical_bonf(alpha, n):
'''
Bonferroni-corrected critical Type I error rate.
    .. warning:: This critical threshold assumes independence amongst multiple tests.
:Parameters:
- *alpha* --- original Type I error rate (usually 0.05)
- *n* --- number of tests
:Returns:
- Bonferroni-corrected critical *p* value; retains *alpha* across all tests.
:Example:
>>> p = spm1d.util.p_critical_bonf(0.05, 20) # yields p = 0.00256
'''
if alpha<=0:
return 0
elif alpha>=1:
return 1
else:
return 1 - (1.0-alpha)**(1.0/n)
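# Derivation note (added): p_critical_bonf inverts p_corrected_bonf; solving
# 1 - (1 - a_c)**n = alpha for the per-test threshold a_c gives
# a_c = 1 - (1 - alpha)**(1.0/n), so composing the two recovers alpha:
#
#   p_corrected_bonf(p_critical_bonf(0.05, 20), 20)  # ~0.05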
def smooth(Y, fwhm=5.0):
'''
Smooth a set of 1D continua.
    This method uses **scipy.ndimage.gaussian_filter1d** but uses the *fwhm*
instead of the standard deviation.
:Parameters:
- *Y* --- a (J x Q) numpy array
- *fwhm* --- Full-width at half-maximum of a Gaussian kernel used for smoothing.
:Returns:
- (J x Q) numpy array
:Example:
>>> Y0 = np.random.rand(5, 101)
>>> Y = spm1d.util.smooth(Y0, fwhm=10.0)
.. note:: A Gaussian kernel's *fwhm* is related to its standard deviation (*sd*) as follows:
>>> fwhm = sd * sqrt(8*log(2))
'''
sd = fwhm / sqrt(8*log(2))
return gaussian_filter1d(Y, sd, mode='wrap')
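# Usage sketch (added; shapes as documented above):
#
#   import numpy as np
#   Y0 = np.random.rand(5, 101)
#   Y = smooth(Y0, fwhm=10.0)  # Gaussian sd = 10/sqrt(8*log(2)) ~= 4.2466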
|
0todd0000/spm1d
|
spm1d/util.py
|
Python
|
gpl-3.0
| 3,308
|
[
"Gaussian"
] |
644e68d26d7280de95e22e7c408250e306963591be679af1d8d2dce43a96d1e7
|
#!/usr/bin/env python
# flake8: noqa
# pylint: disable=C0103,C0301,R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015 Leandro Toledo de Souza <leandrotoeldodesouza@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents an Emoji"""
class Emoji(object):
"""This object represents an Emoji."""
GRINNING_FACE_WITH_SMILING_EYES = b'\xF0\x9F\x98\x81'
FACE_WITH_TEARS_OF_JOY = b'\xF0\x9F\x98\x82'
SMILING_FACE_WITH_OPEN_MOUTH = b'\xF0\x9F\x98\x83'
SMILING_FACE_WITH_OPEN_MOUTH_AND_SMILING_EYES = b'\xF0\x9F\x98\x84'
SMILING_FACE_WITH_OPEN_MOUTH_AND_COLD_SWEAT = b'\xF0\x9F\x98\x85'
SMILING_FACE_WITH_OPEN_MOUTH_AND_TIGHTLY_CLOSED_EYES = b'\xF0\x9F\x98\x86'
WINKING_FACE = b'\xF0\x9F\x98\x89'
SMILING_FACE_WITH_SMILING_EYES = b'\xF0\x9F\x98\x8A'
FACE_SAVOURING_DELICIOUS_FOOD = b'\xF0\x9F\x98\x8B'
RELIEVED_FACE = b'\xF0\x9F\x98\x8C'
SMILING_FACE_WITH_HEART_SHAPED_EYES = b'\xF0\x9F\x98\x8D'
SMIRKING_FACE = b'\xF0\x9F\x98\x8F'
UNAMUSED_FACE = b'\xF0\x9F\x98\x92'
FACE_WITH_COLD_SWEAT = b'\xF0\x9F\x98\x93'
PENSIVE_FACE = b'\xF0\x9F\x98\x94'
CONFOUNDED_FACE = b'\xF0\x9F\x98\x96'
FACE_THROWING_A_KISS = b'\xF0\x9F\x98\x98'
KISSING_FACE_WITH_CLOSED_EYES = b'\xF0\x9F\x98\x9A'
FACE_WITH_STUCK_OUT_TONGUE_AND_WINKING_EYE = b'\xF0\x9F\x98\x9C'
FACE_WITH_STUCK_OUT_TONGUE_AND_TIGHTLY_CLOSED_EYES = b'\xF0\x9F\x98\x9D'
DISAPPOINTED_FACE = b'\xF0\x9F\x98\x9E'
ANGRY_FACE = b'\xF0\x9F\x98\xA0'
POUTING_FACE = b'\xF0\x9F\x98\xA1'
CRYING_FACE = b'\xF0\x9F\x98\xA2'
PERSEVERING_FACE = b'\xF0\x9F\x98\xA3'
FACE_WITH_LOOK_OF_TRIUMPH = b'\xF0\x9F\x98\xA4'
DISAPPOINTED_BUT_RELIEVED_FACE = b'\xF0\x9F\x98\xA5'
FEARFUL_FACE = b'\xF0\x9F\x98\xA8'
WEARY_FACE = b'\xF0\x9F\x98\xA9'
SLEEPY_FACE = b'\xF0\x9F\x98\xAA'
TIRED_FACE = b'\xF0\x9F\x98\xAB'
LOUDLY_CRYING_FACE = b'\xF0\x9F\x98\xAD'
FACE_WITH_OPEN_MOUTH_AND_COLD_SWEAT = b'\xF0\x9F\x98\xB0'
FACE_SCREAMING_IN_FEAR = b'\xF0\x9F\x98\xB1'
ASTONISHED_FACE = b'\xF0\x9F\x98\xB2'
FLUSHED_FACE = b'\xF0\x9F\x98\xB3'
DIZZY_FACE = b'\xF0\x9F\x98\xB5'
FACE_WITH_MEDICAL_MASK = b'\xF0\x9F\x98\xB7'
GRINNING_CAT_FACE_WITH_SMILING_EYES = b'\xF0\x9F\x98\xB8'
CAT_FACE_WITH_TEARS_OF_JOY = b'\xF0\x9F\x98\xB9'
SMILING_CAT_FACE_WITH_OPEN_MOUTH = b'\xF0\x9F\x98\xBA'
SMILING_CAT_FACE_WITH_HEART_SHAPED_EYES = b'\xF0\x9F\x98\xBB'
CAT_FACE_WITH_WRY_SMILE = b'\xF0\x9F\x98\xBC'
KISSING_CAT_FACE_WITH_CLOSED_EYES = b'\xF0\x9F\x98\xBD'
POUTING_CAT_FACE = b'\xF0\x9F\x98\xBE'
CRYING_CAT_FACE = b'\xF0\x9F\x98\xBF'
WEARY_CAT_FACE = b'\xF0\x9F\x99\x80'
FACE_WITH_NO_GOOD_GESTURE = b'\xF0\x9F\x99\x85'
FACE_WITH_OK_GESTURE = b'\xF0\x9F\x99\x86'
PERSON_BOWING_DEEPLY = b'\xF0\x9F\x99\x87'
SEE_NO_EVIL_MONKEY = b'\xF0\x9F\x99\x88'
HEAR_NO_EVIL_MONKEY = b'\xF0\x9F\x99\x89'
SPEAK_NO_EVIL_MONKEY = b'\xF0\x9F\x99\x8A'
HAPPY_PERSON_RAISING_ONE_HAND = b'\xF0\x9F\x99\x8B'
PERSON_RAISING_BOTH_HANDS_IN_CELEBRATION = b'\xF0\x9F\x99\x8C'
PERSON_FROWNING = b'\xF0\x9F\x99\x8D'
PERSON_WITH_POUTING_FACE = b'\xF0\x9F\x99\x8E'
PERSON_WITH_FOLDED_HANDS = b'\xF0\x9F\x99\x8F'
BLACK_SCISSORS = b'\xE2\x9C\x82'
WHITE_HEAVY_CHECK_MARK = b'\xE2\x9C\x85'
AIRPLANE = b'\xE2\x9C\x88'
ENVELOPE = b'\xE2\x9C\x89'
RAISED_FIST = b'\xE2\x9C\x8A'
RAISED_HAND = b'\xE2\x9C\x8B'
VICTORY_HAND = b'\xE2\x9C\x8C'
PENCIL = b'\xE2\x9C\x8F'
BLACK_NIB = b'\xE2\x9C\x92'
HEAVY_CHECK_MARK = b'\xE2\x9C\x94'
HEAVY_MULTIPLICATION_X = b'\xE2\x9C\x96'
SPARKLES = b'\xE2\x9C\xA8'
EIGHT_SPOKED_ASTERISK = b'\xE2\x9C\xB3'
EIGHT_POINTED_BLACK_STAR = b'\xE2\x9C\xB4'
SNOWFLAKE = b'\xE2\x9D\x84'
SPARKLE = b'\xE2\x9D\x87'
CROSS_MARK = b'\xE2\x9D\x8C'
NEGATIVE_SQUARED_CROSS_MARK = b'\xE2\x9D\x8E'
BLACK_QUESTION_MARK_ORNAMENT = b'\xE2\x9D\x93'
WHITE_QUESTION_MARK_ORNAMENT = b'\xE2\x9D\x94'
WHITE_EXCLAMATION_MARK_ORNAMENT = b'\xE2\x9D\x95'
HEAVY_EXCLAMATION_MARK_SYMBOL = b'\xE2\x9D\x97'
HEAVY_BLACK_HEART = b'\xE2\x9D\xA4'
HEAVY_PLUS_SIGN = b'\xE2\x9E\x95'
HEAVY_MINUS_SIGN = b'\xE2\x9E\x96'
HEAVY_DIVISION_SIGN = b'\xE2\x9E\x97'
BLACK_RIGHTWARDS_ARROW = b'\xE2\x9E\xA1'
CURLY_LOOP = b'\xE2\x9E\xB0'
ROCKET = b'\xF0\x9F\x9A\x80'
RAILWAY_CAR = b'\xF0\x9F\x9A\x83'
HIGH_SPEED_TRAIN = b'\xF0\x9F\x9A\x84'
HIGH_SPEED_TRAIN_WITH_BULLET_NOSE = b'\xF0\x9F\x9A\x85'
METRO = b'\xF0\x9F\x9A\x87'
STATION = b'\xF0\x9F\x9A\x89'
BUS = b'\xF0\x9F\x9A\x8C'
BUS_STOP = b'\xF0\x9F\x9A\x8F'
AMBULANCE = b'\xF0\x9F\x9A\x91'
FIRE_ENGINE = b'\xF0\x9F\x9A\x92'
POLICE_CAR = b'\xF0\x9F\x9A\x93'
TAXI = b'\xF0\x9F\x9A\x95'
AUTOMOBILE = b'\xF0\x9F\x9A\x97'
RECREATIONAL_VEHICLE = b'\xF0\x9F\x9A\x99'
DELIVERY_TRUCK = b'\xF0\x9F\x9A\x9A'
SHIP = b'\xF0\x9F\x9A\xA2'
SPEEDBOAT = b'\xF0\x9F\x9A\xA4'
HORIZONTAL_TRAFFIC_LIGHT = b'\xF0\x9F\x9A\xA5'
CONSTRUCTION_SIGN = b'\xF0\x9F\x9A\xA7'
POLICE_CARS_REVOLVING_LIGHT = b'\xF0\x9F\x9A\xA8'
TRIANGULAR_FLAG_ON_POST = b'\xF0\x9F\x9A\xA9'
DOOR = b'\xF0\x9F\x9A\xAA'
NO_ENTRY_SIGN = b'\xF0\x9F\x9A\xAB'
SMOKING_SYMBOL = b'\xF0\x9F\x9A\xAC'
NO_SMOKING_SYMBOL = b'\xF0\x9F\x9A\xAD'
BICYCLE = b'\xF0\x9F\x9A\xB2'
PEDESTRIAN = b'\xF0\x9F\x9A\xB6'
MENS_SYMBOL = b'\xF0\x9F\x9A\xB9'
WOMENS_SYMBOL = b'\xF0\x9F\x9A\xBA'
RESTROOM = b'\xF0\x9F\x9A\xBB'
BABY_SYMBOL = b'\xF0\x9F\x9A\xBC'
TOILET = b'\xF0\x9F\x9A\xBD'
WATER_CLOSET = b'\xF0\x9F\x9A\xBE'
BATH = b'\xF0\x9F\x9B\x80'
CIRCLED_LATIN_CAPITAL_LETTER_M = b'\xE2\x93\x82'
NEGATIVE_SQUARED_LATIN_CAPITAL_LETTER_A = b'\xF0\x9F\x85\xB0'
NEGATIVE_SQUARED_LATIN_CAPITAL_LETTER_B = b'\xF0\x9F\x85\xB1'
NEGATIVE_SQUARED_LATIN_CAPITAL_LETTER_O = b'\xF0\x9F\x85\xBE'
NEGATIVE_SQUARED_LATIN_CAPITAL_LETTER_P = b'\xF0\x9F\x85\xBF'
NEGATIVE_SQUARED_AB = b'\xF0\x9F\x86\x8E'
SQUARED_CL = b'\xF0\x9F\x86\x91'
SQUARED_COOL = b'\xF0\x9F\x86\x92'
SQUARED_FREE = b'\xF0\x9F\x86\x93'
SQUARED_ID = b'\xF0\x9F\x86\x94'
SQUARED_NEW = b'\xF0\x9F\x86\x95'
SQUARED_NG = b'\xF0\x9F\x86\x96'
SQUARED_OK = b'\xF0\x9F\x86\x97'
SQUARED_SOS = b'\xF0\x9F\x86\x98'
SQUARED_UP_WITH_EXCLAMATION_MARK = b'\xF0\x9F\x86\x99'
SQUARED_VS = b'\xF0\x9F\x86\x9A'
REGIONAL_INDICATOR_SYMBOL_LETTER_D_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_E\
= b'\xF0\x9F\x87\xA9\xF0\x9F\x87\xAA'
REGIONAL_INDICATOR_SYMBOL_LETTER_G_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_B\
= b'\xF0\x9F\x87\xAC\xF0\x9F\x87\xA7'
REGIONAL_INDICATOR_SYMBOL_LETTER_C_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_N\
= b'\xF0\x9F\x87\xA8\xF0\x9F\x87\xB3'
REGIONAL_INDICATOR_SYMBOL_LETTER_J_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_P\
= b'\xF0\x9F\x87\xAF\xF0\x9F\x87\xB5'
REGIONAL_INDICATOR_SYMBOL_LETTER_K_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_R\
= b'\xF0\x9F\x87\xB0\xF0\x9F\x87\xB7'
REGIONAL_INDICATOR_SYMBOL_LETTER_F_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_R\
= b'\xF0\x9F\x87\xAB\xF0\x9F\x87\xB7'
REGIONAL_INDICATOR_SYMBOL_LETTER_E_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_S\
= b'\xF0\x9F\x87\xAA\xF0\x9F\x87\xB8'
REGIONAL_INDICATOR_SYMBOL_LETTER_I_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_T\
= b'\xF0\x9F\x87\xAE\xF0\x9F\x87\xB9'
REGIONAL_INDICATOR_SYMBOL_LETTER_U_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_S\
= b'\xF0\x9F\x87\xBA\xF0\x9F\x87\xB8'
REGIONAL_INDICATOR_SYMBOL_LETTER_R_PLUS_REGIONAL_INDICATOR_SYMBOL_LETTER_U\
= b'\xF0\x9F\x87\xB7\xF0\x9F\x87\xBA'
SQUARED_KATAKANA_KOKO = b'\xF0\x9F\x88\x81'
SQUARED_KATAKANA_SA = b'\xF0\x9F\x88\x82'
SQUARED_CJK_UNIFIED_IDEOGRAPH_7121 = b'\xF0\x9F\x88\x9A'
SQUARED_CJK_UNIFIED_IDEOGRAPH_6307 = b'\xF0\x9F\x88\xAF'
SQUARED_CJK_UNIFIED_IDEOGRAPH_7981 = b'\xF0\x9F\x88\xB2'
SQUARED_CJK_UNIFIED_IDEOGRAPH_7A7A = b'\xF0\x9F\x88\xB3'
SQUARED_CJK_UNIFIED_IDEOGRAPH_5408 = b'\xF0\x9F\x88\xB4'
SQUARED_CJK_UNIFIED_IDEOGRAPH_6E80 = b'\xF0\x9F\x88\xB5'
SQUARED_CJK_UNIFIED_IDEOGRAPH_6709 = b'\xF0\x9F\x88\xB6'
SQUARED_CJK_UNIFIED_IDEOGRAPH_6708 = b'\xF0\x9F\x88\xB7'
SQUARED_CJK_UNIFIED_IDEOGRAPH_7533 = b'\xF0\x9F\x88\xB8'
SQUARED_CJK_UNIFIED_IDEOGRAPH_5272 = b'\xF0\x9F\x88\xB9'
SQUARED_CJK_UNIFIED_IDEOGRAPH_55B6 = b'\xF0\x9F\x88\xBA'
CIRCLED_IDEOGRAPH_ADVANTAGE = b'\xF0\x9F\x89\x90'
CIRCLED_IDEOGRAPH_ACCEPT = b'\xF0\x9F\x89\x91'
COPYRIGHT_SIGN = b'\xC2\xA9'
REGISTERED_SIGN = b'\xC2\xAE'
DOUBLE_EXCLAMATION_MARK = b'\xE2\x80\xBC'
EXCLAMATION_QUESTION_MARK = b'\xE2\x81\x89'
DIGIT_EIGHT_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x38\xE2\x83\xA3'
DIGIT_NINE_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x39\xE2\x83\xA3'
DIGIT_SEVEN_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x37\xE2\x83\xA3'
DIGIT_SIX_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x36\xE2\x83\xA3'
DIGIT_ONE_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x31\xE2\x83\xA3'
DIGIT_ZERO_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x30\xE2\x83\xA3'
DIGIT_TWO_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x32\xE2\x83\xA3'
DIGIT_THREE_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x33\xE2\x83\xA3'
DIGIT_FIVE_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x35\xE2\x83\xA3'
DIGIT_FOUR_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x34\xE2\x83\xA3'
NUMBER_SIGN_PLUS_COMBINING_ENCLOSING_KEYCAP = b'\x23\xE2\x83\xA3'
TRADE_MARK_SIGN = b'\xE2\x84\xA2'
INFORMATION_SOURCE = b'\xE2\x84\xB9'
LEFT_RIGHT_ARROW = b'\xE2\x86\x94'
UP_DOWN_ARROW = b'\xE2\x86\x95'
NORTH_WEST_ARROW = b'\xE2\x86\x96'
NORTH_EAST_ARROW = b'\xE2\x86\x97'
SOUTH_EAST_ARROW = b'\xE2\x86\x98'
SOUTH_WEST_ARROW = b'\xE2\x86\x99'
LEFTWARDS_ARROW_WITH_HOOK = b'\xE2\x86\xA9'
RIGHTWARDS_ARROW_WITH_HOOK = b'\xE2\x86\xAA'
WATCH = b'\xE2\x8C\x9A'
HOURGLASS = b'\xE2\x8C\x9B'
BLACK_RIGHT_POINTING_DOUBLE_TRIANGLE = b'\xE2\x8F\xA9'
BLACK_LEFT_POINTING_DOUBLE_TRIANGLE = b'\xE2\x8F\xAA'
BLACK_UP_POINTING_DOUBLE_TRIANGLE = b'\xE2\x8F\xAB'
BLACK_DOWN_POINTING_DOUBLE_TRIANGLE = b'\xE2\x8F\xAC'
ALARM_CLOCK = b'\xE2\x8F\xB0'
HOURGLASS_WITH_FLOWING_SAND = b'\xE2\x8F\xB3'
BLACK_SMALL_SQUARE = b'\xE2\x96\xAA'
WHITE_SMALL_SQUARE = b'\xE2\x96\xAB'
BLACK_RIGHT_POINTING_TRIANGLE = b'\xE2\x96\xB6'
BLACK_LEFT_POINTING_TRIANGLE = b'\xE2\x97\x80'
WHITE_MEDIUM_SQUARE = b'\xE2\x97\xBB'
BLACK_MEDIUM_SQUARE = b'\xE2\x97\xBC'
WHITE_MEDIUM_SMALL_SQUARE = b'\xE2\x97\xBD'
BLACK_MEDIUM_SMALL_SQUARE = b'\xE2\x97\xBE'
BLACK_SUN_WITH_RAYS = b'\xE2\x98\x80'
CLOUD = b'\xE2\x98\x81'
BLACK_TELEPHONE = b'\xE2\x98\x8E'
BALLOT_BOX_WITH_CHECK = b'\xE2\x98\x91'
UMBRELLA_WITH_RAIN_DROPS = b'\xE2\x98\x94'
HOT_BEVERAGE = b'\xE2\x98\x95'
WHITE_UP_POINTING_INDEX = b'\xE2\x98\x9D'
WHITE_SMILING_FACE = b'\xE2\x98\xBA'
ARIES = b'\xE2\x99\x88'
TAURUS = b'\xE2\x99\x89'
GEMINI = b'\xE2\x99\x8A'
CANCER = b'\xE2\x99\x8B'
LEO = b'\xE2\x99\x8C'
VIRGO = b'\xE2\x99\x8D'
LIBRA = b'\xE2\x99\x8E'
SCORPIUS = b'\xE2\x99\x8F'
SAGITTARIUS = b'\xE2\x99\x90'
CAPRICORN = b'\xE2\x99\x91'
AQUARIUS = b'\xE2\x99\x92'
PISCES = b'\xE2\x99\x93'
BLACK_SPADE_SUIT = b'\xE2\x99\xA0'
BLACK_CLUB_SUIT = b'\xE2\x99\xA3'
BLACK_HEART_SUIT = b'\xE2\x99\xA5'
BLACK_DIAMOND_SUIT = b'\xE2\x99\xA6'
HOT_SPRINGS = b'\xE2\x99\xA8'
BLACK_UNIVERSAL_RECYCLING_SYMBOL = b'\xE2\x99\xBB'
WHEELCHAIR_SYMBOL = b'\xE2\x99\xBF'
ANCHOR = b'\xE2\x9A\x93'
WARNING_SIGN = b'\xE2\x9A\xA0'
HIGH_VOLTAGE_SIGN = b'\xE2\x9A\xA1'
MEDIUM_WHITE_CIRCLE = b'\xE2\x9A\xAA'
MEDIUM_BLACK_CIRCLE = b'\xE2\x9A\xAB'
SOCCER_BALL = b'\xE2\x9A\xBD'
BASEBALL = b'\xE2\x9A\xBE'
SNOWMAN_WITHOUT_SNOW = b'\xE2\x9B\x84'
SUN_BEHIND_CLOUD = b'\xE2\x9B\x85'
OPHIUCHUS = b'\xE2\x9B\x8E'
NO_ENTRY = b'\xE2\x9B\x94'
CHURCH = b'\xE2\x9B\xAA'
FOUNTAIN = b'\xE2\x9B\xB2'
FLAG_IN_HOLE = b'\xE2\x9B\xB3'
SAILBOAT = b'\xE2\x9B\xB5'
TENT = b'\xE2\x9B\xBA'
FUEL_PUMP = b'\xE2\x9B\xBD'
ARROW_POINTING_RIGHTWARDS_THEN_CURVING_UPWARDS = b'\xE2\xA4\xB4'
ARROW_POINTING_RIGHTWARDS_THEN_CURVING_DOWNWARDS = b'\xE2\xA4\xB5'
LEFTWARDS_BLACK_ARROW = b'\xE2\xAC\x85'
UPWARDS_BLACK_ARROW = b'\xE2\xAC\x86'
DOWNWARDS_BLACK_ARROW = b'\xE2\xAC\x87'
BLACK_LARGE_SQUARE = b'\xE2\xAC\x9B'
WHITE_LARGE_SQUARE = b'\xE2\xAC\x9C'
WHITE_MEDIUM_STAR = b'\xE2\xAD\x90'
HEAVY_LARGE_CIRCLE = b'\xE2\xAD\x95'
WAVY_DASH = b'\xE3\x80\xB0'
PART_ALTERNATION_MARK = b'\xE3\x80\xBD'
CIRCLED_IDEOGRAPH_CONGRATULATION = b'\xE3\x8A\x97'
CIRCLED_IDEOGRAPH_SECRET = b'\xE3\x8A\x99'
MAHJONG_TILE_RED_DRAGON = b'\xF0\x9F\x80\x84'
PLAYING_CARD_BLACK_JOKER = b'\xF0\x9F\x83\x8F'
CYCLONE = b'\xF0\x9F\x8C\x80'
FOGGY = b'\xF0\x9F\x8C\x81'
CLOSED_UMBRELLA = b'\xF0\x9F\x8C\x82'
NIGHT_WITH_STARS = b'\xF0\x9F\x8C\x83'
SUNRISE_OVER_MOUNTAINS = b'\xF0\x9F\x8C\x84'
SUNRISE = b'\xF0\x9F\x8C\x85'
CITYSCAPE_AT_DUSK = b'\xF0\x9F\x8C\x86'
SUNSET_OVER_BUILDINGS = b'\xF0\x9F\x8C\x87'
RAINBOW = b'\xF0\x9F\x8C\x88'
BRIDGE_AT_NIGHT = b'\xF0\x9F\x8C\x89'
WATER_WAVE = b'\xF0\x9F\x8C\x8A'
VOLCANO = b'\xF0\x9F\x8C\x8B'
MILKY_WAY = b'\xF0\x9F\x8C\x8C'
EARTH_GLOBE_ASIA_AUSTRALIA = b'\xF0\x9F\x8C\x8F'
NEW_MOON_SYMBOL = b'\xF0\x9F\x8C\x91'
FIRST_QUARTER_MOON_SYMBOL = b'\xF0\x9F\x8C\x93'
WAXING_GIBBOUS_MOON_SYMBOL = b'\xF0\x9F\x8C\x94'
FULL_MOON_SYMBOL = b'\xF0\x9F\x8C\x95'
CRESCENT_MOON = b'\xF0\x9F\x8C\x99'
FIRST_QUARTER_MOON_WITH_FACE = b'\xF0\x9F\x8C\x9B'
GLOWING_STAR = b'\xF0\x9F\x8C\x9F'
SHOOTING_STAR = b'\xF0\x9F\x8C\xA0'
CHESTNUT = b'\xF0\x9F\x8C\xB0'
SEEDLING = b'\xF0\x9F\x8C\xB1'
PALM_TREE = b'\xF0\x9F\x8C\xB4'
CACTUS = b'\xF0\x9F\x8C\xB5'
TULIP = b'\xF0\x9F\x8C\xB7'
CHERRY_BLOSSOM = b'\xF0\x9F\x8C\xB8'
ROSE = b'\xF0\x9F\x8C\xB9'
HIBISCUS = b'\xF0\x9F\x8C\xBA'
SUNFLOWER = b'\xF0\x9F\x8C\xBB'
BLOSSOM = b'\xF0\x9F\x8C\xBC'
EAR_OF_MAIZE = b'\xF0\x9F\x8C\xBD'
EAR_OF_RICE = b'\xF0\x9F\x8C\xBE'
HERB = b'\xF0\x9F\x8C\xBF'
FOUR_LEAF_CLOVER = b'\xF0\x9F\x8D\x80'
MAPLE_LEAF = b'\xF0\x9F\x8D\x81'
FALLEN_LEAF = b'\xF0\x9F\x8D\x82'
LEAF_FLUTTERING_IN_WIND = b'\xF0\x9F\x8D\x83'
MUSHROOM = b'\xF0\x9F\x8D\x84'
TOMATO = b'\xF0\x9F\x8D\x85'
AUBERGINE = b'\xF0\x9F\x8D\x86'
GRAPES = b'\xF0\x9F\x8D\x87'
MELON = b'\xF0\x9F\x8D\x88'
WATERMELON = b'\xF0\x9F\x8D\x89'
TANGERINE = b'\xF0\x9F\x8D\x8A'
BANANA = b'\xF0\x9F\x8D\x8C'
PINEAPPLE = b'\xF0\x9F\x8D\x8D'
RED_APPLE = b'\xF0\x9F\x8D\x8E'
GREEN_APPLE = b'\xF0\x9F\x8D\x8F'
PEACH = b'\xF0\x9F\x8D\x91'
CHERRIES = b'\xF0\x9F\x8D\x92'
STRAWBERRY = b'\xF0\x9F\x8D\x93'
HAMBURGER = b'\xF0\x9F\x8D\x94'
SLICE_OF_PIZZA = b'\xF0\x9F\x8D\x95'
MEAT_ON_BONE = b'\xF0\x9F\x8D\x96'
POULTRY_LEG = b'\xF0\x9F\x8D\x97'
RICE_CRACKER = b'\xF0\x9F\x8D\x98'
RICE_BALL = b'\xF0\x9F\x8D\x99'
COOKED_RICE = b'\xF0\x9F\x8D\x9A'
CURRY_AND_RICE = b'\xF0\x9F\x8D\x9B'
STEAMING_BOWL = b'\xF0\x9F\x8D\x9C'
SPAGHETTI = b'\xF0\x9F\x8D\x9D'
BREAD = b'\xF0\x9F\x8D\x9E'
FRENCH_FRIES = b'\xF0\x9F\x8D\x9F'
ROASTED_SWEET_POTATO = b'\xF0\x9F\x8D\xA0'
DANGO = b'\xF0\x9F\x8D\xA1'
ODEN = b'\xF0\x9F\x8D\xA2'
SUSHI = b'\xF0\x9F\x8D\xA3'
FRIED_SHRIMP = b'\xF0\x9F\x8D\xA4'
FISH_CAKE_WITH_SWIRL_DESIGN = b'\xF0\x9F\x8D\xA5'
SOFT_ICE_CREAM = b'\xF0\x9F\x8D\xA6'
SHAVED_ICE = b'\xF0\x9F\x8D\xA7'
ICE_CREAM = b'\xF0\x9F\x8D\xA8'
DOUGHNUT = b'\xF0\x9F\x8D\xA9'
COOKIE = b'\xF0\x9F\x8D\xAA'
CHOCOLATE_BAR = b'\xF0\x9F\x8D\xAB'
CANDY = b'\xF0\x9F\x8D\xAC'
LOLLIPOP = b'\xF0\x9F\x8D\xAD'
CUSTARD = b'\xF0\x9F\x8D\xAE'
HONEY_POT = b'\xF0\x9F\x8D\xAF'
SHORTCAKE = b'\xF0\x9F\x8D\xB0'
BENTO_BOX = b'\xF0\x9F\x8D\xB1'
POT_OF_FOOD = b'\xF0\x9F\x8D\xB2'
COOKING = b'\xF0\x9F\x8D\xB3'
FORK_AND_KNIFE = b'\xF0\x9F\x8D\xB4'
TEACUP_WITHOUT_HANDLE = b'\xF0\x9F\x8D\xB5'
SAKE_BOTTLE_AND_CUP = b'\xF0\x9F\x8D\xB6'
WINE_GLASS = b'\xF0\x9F\x8D\xB7'
COCKTAIL_GLASS = b'\xF0\x9F\x8D\xB8'
TROPICAL_DRINK = b'\xF0\x9F\x8D\xB9'
BEER_MUG = b'\xF0\x9F\x8D\xBA'
CLINKING_BEER_MUGS = b'\xF0\x9F\x8D\xBB'
RIBBON = b'\xF0\x9F\x8E\x80'
WRAPPED_PRESENT = b'\xF0\x9F\x8E\x81'
BIRTHDAY_CAKE = b'\xF0\x9F\x8E\x82'
JACK_O_LANTERN = b'\xF0\x9F\x8E\x83'
CHRISTMAS_TREE = b'\xF0\x9F\x8E\x84'
FATHER_CHRISTMAS = b'\xF0\x9F\x8E\x85'
FIREWORKS = b'\xF0\x9F\x8E\x86'
FIREWORK_SPARKLER = b'\xF0\x9F\x8E\x87'
BALLOON = b'\xF0\x9F\x8E\x88'
PARTY_POPPER = b'\xF0\x9F\x8E\x89'
CONFETTI_BALL = b'\xF0\x9F\x8E\x8A'
TANABATA_TREE = b'\xF0\x9F\x8E\x8B'
CROSSED_FLAGS = b'\xF0\x9F\x8E\x8C'
PINE_DECORATION = b'\xF0\x9F\x8E\x8D'
JAPANESE_DOLLS = b'\xF0\x9F\x8E\x8E'
CARP_STREAMER = b'\xF0\x9F\x8E\x8F'
WIND_CHIME = b'\xF0\x9F\x8E\x90'
MOON_VIEWING_CEREMONY = b'\xF0\x9F\x8E\x91'
SCHOOL_SATCHEL = b'\xF0\x9F\x8E\x92'
GRADUATION_CAP = b'\xF0\x9F\x8E\x93'
CAROUSEL_HORSE = b'\xF0\x9F\x8E\xA0'
FERRIS_WHEEL = b'\xF0\x9F\x8E\xA1'
ROLLER_COASTER = b'\xF0\x9F\x8E\xA2'
FISHING_POLE_AND_FISH = b'\xF0\x9F\x8E\xA3'
MICROPHONE = b'\xF0\x9F\x8E\xA4'
MOVIE_CAMERA = b'\xF0\x9F\x8E\xA5'
CINEMA = b'\xF0\x9F\x8E\xA6'
HEADPHONE = b'\xF0\x9F\x8E\xA7'
ARTIST_PALETTE = b'\xF0\x9F\x8E\xA8'
TOP_HAT = b'\xF0\x9F\x8E\xA9'
CIRCUS_TENT = b'\xF0\x9F\x8E\xAA'
TICKET = b'\xF0\x9F\x8E\xAB'
CLAPPER_BOARD = b'\xF0\x9F\x8E\xAC'
PERFORMING_ARTS = b'\xF0\x9F\x8E\xAD'
VIDEO_GAME = b'\xF0\x9F\x8E\xAE'
DIRECT_HIT = b'\xF0\x9F\x8E\xAF'
SLOT_MACHINE = b'\xF0\x9F\x8E\xB0'
BILLIARDS = b'\xF0\x9F\x8E\xB1'
GAME_DIE = b'\xF0\x9F\x8E\xB2'
BOWLING = b'\xF0\x9F\x8E\xB3'
FLOWER_PLAYING_CARDS = b'\xF0\x9F\x8E\xB4'
MUSICAL_NOTE = b'\xF0\x9F\x8E\xB5'
MULTIPLE_MUSICAL_NOTES = b'\xF0\x9F\x8E\xB6'
SAXOPHONE = b'\xF0\x9F\x8E\xB7'
GUITAR = b'\xF0\x9F\x8E\xB8'
MUSICAL_KEYBOARD = b'\xF0\x9F\x8E\xB9'
TRUMPET = b'\xF0\x9F\x8E\xBA'
VIOLIN = b'\xF0\x9F\x8E\xBB'
MUSICAL_SCORE = b'\xF0\x9F\x8E\xBC'
RUNNING_SHIRT_WITH_SASH = b'\xF0\x9F\x8E\xBD'
TENNIS_RACQUET_AND_BALL = b'\xF0\x9F\x8E\xBE'
SKI_AND_SKI_BOOT = b'\xF0\x9F\x8E\xBF'
BASKETBALL_AND_HOOP = b'\xF0\x9F\x8F\x80'
CHEQUERED_FLAG = b'\xF0\x9F\x8F\x81'
SNOWBOARDER = b'\xF0\x9F\x8F\x82'
RUNNER = b'\xF0\x9F\x8F\x83'
SURFER = b'\xF0\x9F\x8F\x84'
TROPHY = b'\xF0\x9F\x8F\x86'
AMERICAN_FOOTBALL = b'\xF0\x9F\x8F\x88'
SWIMMER = b'\xF0\x9F\x8F\x8A'
HOUSE_BUILDING = b'\xF0\x9F\x8F\xA0'
HOUSE_WITH_GARDEN = b'\xF0\x9F\x8F\xA1'
OFFICE_BUILDING = b'\xF0\x9F\x8F\xA2'
JAPANESE_POST_OFFICE = b'\xF0\x9F\x8F\xA3'
HOSPITAL = b'\xF0\x9F\x8F\xA5'
BANK = b'\xF0\x9F\x8F\xA6'
AUTOMATED_TELLER_MACHINE = b'\xF0\x9F\x8F\xA7'
HOTEL = b'\xF0\x9F\x8F\xA8'
LOVE_HOTEL = b'\xF0\x9F\x8F\xA9'
CONVENIENCE_STORE = b'\xF0\x9F\x8F\xAA'
SCHOOL = b'\xF0\x9F\x8F\xAB'
DEPARTMENT_STORE = b'\xF0\x9F\x8F\xAC'
FACTORY = b'\xF0\x9F\x8F\xAD'
IZAKAYA_LANTERN = b'\xF0\x9F\x8F\xAE'
JAPANESE_CASTLE = b'\xF0\x9F\x8F\xAF'
EUROPEAN_CASTLE = b'\xF0\x9F\x8F\xB0'
SNAIL = b'\xF0\x9F\x90\x8C'
SNAKE = b'\xF0\x9F\x90\x8D'
HORSE = b'\xF0\x9F\x90\x8E'
SHEEP = b'\xF0\x9F\x90\x91'
MONKEY = b'\xF0\x9F\x90\x92'
CHICKEN = b'\xF0\x9F\x90\x94'
BOAR = b'\xF0\x9F\x90\x97'
ELEPHANT = b'\xF0\x9F\x90\x98'
OCTOPUS = b'\xF0\x9F\x90\x99'
SPIRAL_SHELL = b'\xF0\x9F\x90\x9A'
BUG = b'\xF0\x9F\x90\x9B'
ANT = b'\xF0\x9F\x90\x9C'
HONEYBEE = b'\xF0\x9F\x90\x9D'
LADY_BEETLE = b'\xF0\x9F\x90\x9E'
FISH = b'\xF0\x9F\x90\x9F'
TROPICAL_FISH = b'\xF0\x9F\x90\xA0'
BLOWFISH = b'\xF0\x9F\x90\xA1'
TURTLE = b'\xF0\x9F\x90\xA2'
HATCHING_CHICK = b'\xF0\x9F\x90\xA3'
BABY_CHICK = b'\xF0\x9F\x90\xA4'
FRONT_FACING_BABY_CHICK = b'\xF0\x9F\x90\xA5'
BIRD = b'\xF0\x9F\x90\xA6'
PENGUIN = b'\xF0\x9F\x90\xA7'
KOALA = b'\xF0\x9F\x90\xA8'
POODLE = b'\xF0\x9F\x90\xA9'
BACTRIAN_CAMEL = b'\xF0\x9F\x90\xAB'
DOLPHIN = b'\xF0\x9F\x90\xAC'
MOUSE_FACE = b'\xF0\x9F\x90\xAD'
COW_FACE = b'\xF0\x9F\x90\xAE'
TIGER_FACE = b'\xF0\x9F\x90\xAF'
RABBIT_FACE = b'\xF0\x9F\x90\xB0'
CAT_FACE = b'\xF0\x9F\x90\xB1'
DRAGON_FACE = b'\xF0\x9F\x90\xB2'
SPOUTING_WHALE = b'\xF0\x9F\x90\xB3'
HORSE_FACE = b'\xF0\x9F\x90\xB4'
MONKEY_FACE = b'\xF0\x9F\x90\xB5'
DOG_FACE = b'\xF0\x9F\x90\xB6'
PIG_FACE = b'\xF0\x9F\x90\xB7'
FROG_FACE = b'\xF0\x9F\x90\xB8'
HAMSTER_FACE = b'\xF0\x9F\x90\xB9'
WOLF_FACE = b'\xF0\x9F\x90\xBA'
BEAR_FACE = b'\xF0\x9F\x90\xBB'
PANDA_FACE = b'\xF0\x9F\x90\xBC'
PIG_NOSE = b'\xF0\x9F\x90\xBD'
PAW_PRINTS = b'\xF0\x9F\x90\xBE'
EYES = b'\xF0\x9F\x91\x80'
EAR = b'\xF0\x9F\x91\x82'
NOSE = b'\xF0\x9F\x91\x83'
MOUTH = b'\xF0\x9F\x91\x84'
TONGUE = b'\xF0\x9F\x91\x85'
WHITE_UP_POINTING_BACKHAND_INDEX = b'\xF0\x9F\x91\x86'
WHITE_DOWN_POINTING_BACKHAND_INDEX = b'\xF0\x9F\x91\x87'
WHITE_LEFT_POINTING_BACKHAND_INDEX = b'\xF0\x9F\x91\x88'
WHITE_RIGHT_POINTING_BACKHAND_INDEX = b'\xF0\x9F\x91\x89'
FISTED_HAND_SIGN = b'\xF0\x9F\x91\x8A'
WAVING_HAND_SIGN = b'\xF0\x9F\x91\x8B'
OK_HAND_SIGN = b'\xF0\x9F\x91\x8C'
THUMBS_UP_SIGN = b'\xF0\x9F\x91\x8D'
THUMBS_DOWN_SIGN = b'\xF0\x9F\x91\x8E'
CLAPPING_HANDS_SIGN = b'\xF0\x9F\x91\x8F'
OPEN_HANDS_SIGN = b'\xF0\x9F\x91\x90'
CROWN = b'\xF0\x9F\x91\x91'
WOMANS_HAT = b'\xF0\x9F\x91\x92'
EYEGLASSES = b'\xF0\x9F\x91\x93'
NECKTIE = b'\xF0\x9F\x91\x94'
T_SHIRT = b'\xF0\x9F\x91\x95'
JEANS = b'\xF0\x9F\x91\x96'
DRESS = b'\xF0\x9F\x91\x97'
KIMONO = b'\xF0\x9F\x91\x98'
BIKINI = b'\xF0\x9F\x91\x99'
WOMANS_CLOTHES = b'\xF0\x9F\x91\x9A'
PURSE = b'\xF0\x9F\x91\x9B'
HANDBAG = b'\xF0\x9F\x91\x9C'
POUCH = b'\xF0\x9F\x91\x9D'
MANS_SHOE = b'\xF0\x9F\x91\x9E'
ATHLETIC_SHOE = b'\xF0\x9F\x91\x9F'
HIGH_HEELED_SHOE = b'\xF0\x9F\x91\xA0'
WOMANS_SANDAL = b'\xF0\x9F\x91\xA1'
WOMANS_BOOTS = b'\xF0\x9F\x91\xA2'
FOOTPRINTS = b'\xF0\x9F\x91\xA3'
BUST_IN_SILHOUETTE = b'\xF0\x9F\x91\xA4'
BOY = b'\xF0\x9F\x91\xA6'
GIRL = b'\xF0\x9F\x91\xA7'
MAN = b'\xF0\x9F\x91\xA8'
WOMAN = b'\xF0\x9F\x91\xA9'
FAMILY = b'\xF0\x9F\x91\xAA'
MAN_AND_WOMAN_HOLDING_HANDS = b'\xF0\x9F\x91\xAB'
POLICE_OFFICER = b'\xF0\x9F\x91\xAE'
WOMAN_WITH_BUNNY_EARS = b'\xF0\x9F\x91\xAF'
BRIDE_WITH_VEIL = b'\xF0\x9F\x91\xB0'
PERSON_WITH_BLOND_HAIR = b'\xF0\x9F\x91\xB1'
MAN_WITH_GUA_PI_MAO = b'\xF0\x9F\x91\xB2'
MAN_WITH_TURBAN = b'\xF0\x9F\x91\xB3'
OLDER_MAN = b'\xF0\x9F\x91\xB4'
OLDER_WOMAN = b'\xF0\x9F\x91\xB5'
BABY = b'\xF0\x9F\x91\xB6'
CONSTRUCTION_WORKER = b'\xF0\x9F\x91\xB7'
PRINCESS = b'\xF0\x9F\x91\xB8'
JAPANESE_OGRE = b'\xF0\x9F\x91\xB9'
JAPANESE_GOBLIN = b'\xF0\x9F\x91\xBA'
GHOST = b'\xF0\x9F\x91\xBB'
BABY_ANGEL = b'\xF0\x9F\x91\xBC'
EXTRATERRESTRIAL_ALIEN = b'\xF0\x9F\x91\xBD'
ALIEN_MONSTER = b'\xF0\x9F\x91\xBE'
IMP = b'\xF0\x9F\x91\xBF'
SKULL = b'\xF0\x9F\x92\x80'
INFORMATION_DESK_PERSON = b'\xF0\x9F\x92\x81'
GUARDSMAN = b'\xF0\x9F\x92\x82'
DANCER = b'\xF0\x9F\x92\x83'
LIPSTICK = b'\xF0\x9F\x92\x84'
NAIL_POLISH = b'\xF0\x9F\x92\x85'
FACE_MASSAGE = b'\xF0\x9F\x92\x86'
HAIRCUT = b'\xF0\x9F\x92\x87'
BARBER_POLE = b'\xF0\x9F\x92\x88'
SYRINGE = b'\xF0\x9F\x92\x89'
PILL = b'\xF0\x9F\x92\x8A'
KISS_MARK = b'\xF0\x9F\x92\x8B'
LOVE_LETTER = b'\xF0\x9F\x92\x8C'
RING = b'\xF0\x9F\x92\x8D'
GEM_STONE = b'\xF0\x9F\x92\x8E'
KISS = b'\xF0\x9F\x92\x8F'
BOUQUET = b'\xF0\x9F\x92\x90'
COUPLE_WITH_HEART = b'\xF0\x9F\x92\x91'
WEDDING = b'\xF0\x9F\x92\x92'
BEATING_HEART = b'\xF0\x9F\x92\x93'
BROKEN_HEART = b'\xF0\x9F\x92\x94'
TWO_HEARTS = b'\xF0\x9F\x92\x95'
SPARKLING_HEART = b'\xF0\x9F\x92\x96'
GROWING_HEART = b'\xF0\x9F\x92\x97'
HEART_WITH_ARROW = b'\xF0\x9F\x92\x98'
BLUE_HEART = b'\xF0\x9F\x92\x99'
GREEN_HEART = b'\xF0\x9F\x92\x9A'
YELLOW_HEART = b'\xF0\x9F\x92\x9B'
PURPLE_HEART = b'\xF0\x9F\x92\x9C'
HEART_WITH_RIBBON = b'\xF0\x9F\x92\x9D'
REVOLVING_HEARTS = b'\xF0\x9F\x92\x9E'
HEART_DECORATION = b'\xF0\x9F\x92\x9F'
DIAMOND_SHAPE_WITH_A_DOT_INSIDE = b'\xF0\x9F\x92\xA0'
ELECTRIC_LIGHT_BULB = b'\xF0\x9F\x92\xA1'
ANGER_SYMBOL = b'\xF0\x9F\x92\xA2'
BOMB = b'\xF0\x9F\x92\xA3'
SLEEPING_SYMBOL = b'\xF0\x9F\x92\xA4'
COLLISION_SYMBOL = b'\xF0\x9F\x92\xA5'
SPLASHING_SWEAT_SYMBOL = b'\xF0\x9F\x92\xA6'
DROPLET = b'\xF0\x9F\x92\xA7'
DASH_SYMBOL = b'\xF0\x9F\x92\xA8'
PILE_OF_POO = b'\xF0\x9F\x92\xA9'
FLEXED_BICEPS = b'\xF0\x9F\x92\xAA'
DIZZY_SYMBOL = b'\xF0\x9F\x92\xAB'
SPEECH_BALLOON = b'\xF0\x9F\x92\xAC'
WHITE_FLOWER = b'\xF0\x9F\x92\xAE'
HUNDRED_POINTS_SYMBOL = b'\xF0\x9F\x92\xAF'
MONEY_BAG = b'\xF0\x9F\x92\xB0'
CURRENCY_EXCHANGE = b'\xF0\x9F\x92\xB1'
HEAVY_DOLLAR_SIGN = b'\xF0\x9F\x92\xB2'
CREDIT_CARD = b'\xF0\x9F\x92\xB3'
BANKNOTE_WITH_YEN_SIGN = b'\xF0\x9F\x92\xB4'
BANKNOTE_WITH_DOLLAR_SIGN = b'\xF0\x9F\x92\xB5'
MONEY_WITH_WINGS = b'\xF0\x9F\x92\xB8'
CHART_WITH_UPWARDS_TREND_AND_YEN_SIGN = b'\xF0\x9F\x92\xB9'
SEAT = b'\xF0\x9F\x92\xBA'
PERSONAL_COMPUTER = b'\xF0\x9F\x92\xBB'
BRIEFCASE = b'\xF0\x9F\x92\xBC'
MINIDISC = b'\xF0\x9F\x92\xBD'
FLOPPY_DISK = b'\xF0\x9F\x92\xBE'
OPTICAL_DISC = b'\xF0\x9F\x92\xBF'
DVD = b'\xF0\x9F\x93\x80'
FILE_FOLDER = b'\xF0\x9F\x93\x81'
OPEN_FILE_FOLDER = b'\xF0\x9F\x93\x82'
PAGE_WITH_CURL = b'\xF0\x9F\x93\x83'
PAGE_FACING_UP = b'\xF0\x9F\x93\x84'
CALENDAR = b'\xF0\x9F\x93\x85'
TEAR_OFF_CALENDAR = b'\xF0\x9F\x93\x86'
CARD_INDEX = b'\xF0\x9F\x93\x87'
CHART_WITH_UPWARDS_TREND = b'\xF0\x9F\x93\x88'
CHART_WITH_DOWNWARDS_TREND = b'\xF0\x9F\x93\x89'
BAR_CHART = b'\xF0\x9F\x93\x8A'
CLIPBOARD = b'\xF0\x9F\x93\x8B'
PUSHPIN = b'\xF0\x9F\x93\x8C'
ROUND_PUSHPIN = b'\xF0\x9F\x93\x8D'
PAPERCLIP = b'\xF0\x9F\x93\x8E'
STRAIGHT_RULER = b'\xF0\x9F\x93\x8F'
TRIANGULAR_RULER = b'\xF0\x9F\x93\x90'
BOOKMARK_TABS = b'\xF0\x9F\x93\x91'
LEDGER = b'\xF0\x9F\x93\x92'
NOTEBOOK = b'\xF0\x9F\x93\x93'
NOTEBOOK_WITH_DECORATIVE_COVER = b'\xF0\x9F\x93\x94'
CLOSED_BOOK = b'\xF0\x9F\x93\x95'
OPEN_BOOK = b'\xF0\x9F\x93\x96'
GREEN_BOOK = b'\xF0\x9F\x93\x97'
BLUE_BOOK = b'\xF0\x9F\x93\x98'
ORANGE_BOOK = b'\xF0\x9F\x93\x99'
BOOKS = b'\xF0\x9F\x93\x9A'
NAME_BADGE = b'\xF0\x9F\x93\x9B'
SCROLL = b'\xF0\x9F\x93\x9C'
MEMO = b'\xF0\x9F\x93\x9D'
TELEPHONE_RECEIVER = b'\xF0\x9F\x93\x9E'
PAGER = b'\xF0\x9F\x93\x9F'
FAX_MACHINE = b'\xF0\x9F\x93\xA0'
SATELLITE_ANTENNA = b'\xF0\x9F\x93\xA1'
PUBLIC_ADDRESS_LOUDSPEAKER = b'\xF0\x9F\x93\xA2'
CHEERING_MEGAPHONE = b'\xF0\x9F\x93\xA3'
OUTBOX_TRAY = b'\xF0\x9F\x93\xA4'
INBOX_TRAY = b'\xF0\x9F\x93\xA5'
PACKAGE = b'\xF0\x9F\x93\xA6'
E_MAIL_SYMBOL = b'\xF0\x9F\x93\xA7'
INCOMING_ENVELOPE = b'\xF0\x9F\x93\xA8'
ENVELOPE_WITH_DOWNWARDS_ARROW_ABOVE = b'\xF0\x9F\x93\xA9'
CLOSED_MAILBOX_WITH_LOWERED_FLAG = b'\xF0\x9F\x93\xAA'
CLOSED_MAILBOX_WITH_RAISED_FLAG = b'\xF0\x9F\x93\xAB'
POSTBOX = b'\xF0\x9F\x93\xAE'
NEWSPAPER = b'\xF0\x9F\x93\xB0'
MOBILE_PHONE = b'\xF0\x9F\x93\xB1'
MOBILE_PHONE_WITH_RIGHTWARDS_ARROW_AT_LEFT = b'\xF0\x9F\x93\xB2'
VIBRATION_MODE = b'\xF0\x9F\x93\xB3'
MOBILE_PHONE_OFF = b'\xF0\x9F\x93\xB4'
ANTENNA_WITH_BARS = b'\xF0\x9F\x93\xB6'
CAMERA = b'\xF0\x9F\x93\xB7'
VIDEO_CAMERA = b'\xF0\x9F\x93\xB9'
TELEVISION = b'\xF0\x9F\x93\xBA'
RADIO = b'\xF0\x9F\x93\xBB'
VIDEOCASSETTE = b'\xF0\x9F\x93\xBC'
CLOCKWISE_DOWNWARDS_AND_UPWARDS_OPEN_CIRCLE_ARROWS = b'\xF0\x9F\x94\x83'
SPEAKER_WITH_THREE_SOUND_WAVES = b'\xF0\x9F\x94\x8A'
BATTERY = b'\xF0\x9F\x94\x8B'
ELECTRIC_PLUG = b'\xF0\x9F\x94\x8C'
LEFT_POINTING_MAGNIFYING_GLASS = b'\xF0\x9F\x94\x8D'
RIGHT_POINTING_MAGNIFYING_GLASS = b'\xF0\x9F\x94\x8E'
LOCK_WITH_INK_PEN = b'\xF0\x9F\x94\x8F'
CLOSED_LOCK_WITH_KEY = b'\xF0\x9F\x94\x90'
KEY = b'\xF0\x9F\x94\x91'
LOCK = b'\xF0\x9F\x94\x92'
OPEN_LOCK = b'\xF0\x9F\x94\x93'
BELL = b'\xF0\x9F\x94\x94'
BOOKMARK = b'\xF0\x9F\x94\x96'
LINK_SYMBOL = b'\xF0\x9F\x94\x97'
RADIO_BUTTON = b'\xF0\x9F\x94\x98'
BACK_WITH_LEFTWARDS_ARROW_ABOVE = b'\xF0\x9F\x94\x99'
END_WITH_LEFTWARDS_ARROW_ABOVE = b'\xF0\x9F\x94\x9A'
ON_WITH_EXCLAMATION_MARK_WITH_LEFT_RIGHT_ARROW_ABOVE = b'\xF0\x9F\x94\x9B'
SOON_WITH_RIGHTWARDS_ARROW_ABOVE = b'\xF0\x9F\x94\x9C'
TOP_WITH_UPWARDS_ARROW_ABOVE = b'\xF0\x9F\x94\x9D'
NO_ONE_UNDER_EIGHTEEN_SYMBOL = b'\xF0\x9F\x94\x9E'
KEYCAP_TEN = b'\xF0\x9F\x94\x9F'
INPUT_SYMBOL_FOR_LATIN_CAPITAL_LETTERS = b'\xF0\x9F\x94\xA0'
INPUT_SYMBOL_FOR_LATIN_SMALL_LETTERS = b'\xF0\x9F\x94\xA1'
INPUT_SYMBOL_FOR_NUMBERS = b'\xF0\x9F\x94\xA2'
INPUT_SYMBOL_FOR_SYMBOLS = b'\xF0\x9F\x94\xA3'
INPUT_SYMBOL_FOR_LATIN_LETTERS = b'\xF0\x9F\x94\xA4'
FIRE = b'\xF0\x9F\x94\xA5'
ELECTRIC_TORCH = b'\xF0\x9F\x94\xA6'
WRENCH = b'\xF0\x9F\x94\xA7'
HAMMER = b'\xF0\x9F\x94\xA8'
NUT_AND_BOLT = b'\xF0\x9F\x94\xA9'
HOCHO = b'\xF0\x9F\x94\xAA'
PISTOL = b'\xF0\x9F\x94\xAB'
CRYSTAL_BALL = b'\xF0\x9F\x94\xAE'
SIX_POINTED_STAR_WITH_MIDDLE_DOT = b'\xF0\x9F\x94\xAF'
JAPANESE_SYMBOL_FOR_BEGINNER = b'\xF0\x9F\x94\xB0'
TRIDENT_EMBLEM = b'\xF0\x9F\x94\xB1'
BLACK_SQUARE_BUTTON = b'\xF0\x9F\x94\xB2'
WHITE_SQUARE_BUTTON = b'\xF0\x9F\x94\xB3'
LARGE_RED_CIRCLE = b'\xF0\x9F\x94\xB4'
LARGE_BLUE_CIRCLE = b'\xF0\x9F\x94\xB5'
LARGE_ORANGE_DIAMOND = b'\xF0\x9F\x94\xB6'
LARGE_BLUE_DIAMOND = b'\xF0\x9F\x94\xB7'
SMALL_ORANGE_DIAMOND = b'\xF0\x9F\x94\xB8'
SMALL_BLUE_DIAMOND = b'\xF0\x9F\x94\xB9'
UP_POINTING_RED_TRIANGLE = b'\xF0\x9F\x94\xBA'
DOWN_POINTING_RED_TRIANGLE = b'\xF0\x9F\x94\xBB'
UP_POINTING_SMALL_RED_TRIANGLE = b'\xF0\x9F\x94\xBC'
DOWN_POINTING_SMALL_RED_TRIANGLE = b'\xF0\x9F\x94\xBD'
CLOCK_FACE_ONE_OCLOCK = b'\xF0\x9F\x95\x90'
CLOCK_FACE_TWO_OCLOCK = b'\xF0\x9F\x95\x91'
CLOCK_FACE_THREE_OCLOCK = b'\xF0\x9F\x95\x92'
CLOCK_FACE_FOUR_OCLOCK = b'\xF0\x9F\x95\x93'
CLOCK_FACE_FIVE_OCLOCK = b'\xF0\x9F\x95\x94'
CLOCK_FACE_SIX_OCLOCK = b'\xF0\x9F\x95\x95'
CLOCK_FACE_SEVEN_OCLOCK = b'\xF0\x9F\x95\x96'
CLOCK_FACE_EIGHT_OCLOCK = b'\xF0\x9F\x95\x97'
CLOCK_FACE_NINE_OCLOCK = b'\xF0\x9F\x95\x98'
CLOCK_FACE_TEN_OCLOCK = b'\xF0\x9F\x95\x99'
CLOCK_FACE_ELEVEN_OCLOCK = b'\xF0\x9F\x95\x9A'
CLOCK_FACE_TWELVE_OCLOCK = b'\xF0\x9F\x95\x9B'
MOUNT_FUJI = b'\xF0\x9F\x97\xBB'
TOKYO_TOWER = b'\xF0\x9F\x97\xBC'
STATUE_OF_LIBERTY = b'\xF0\x9F\x97\xBD'
SILHOUETTE_OF_JAPAN = b'\xF0\x9F\x97\xBE'
MOYAI = b'\xF0\x9F\x97\xBF'
GRINNING_FACE = b'\xF0\x9F\x98\x80'
SMILING_FACE_WITH_HALO = b'\xF0\x9F\x98\x87'
SMILING_FACE_WITH_HORNS = b'\xF0\x9F\x98\x88'
SMILING_FACE_WITH_SUNGLASSES = b'\xF0\x9F\x98\x8E'
NEUTRAL_FACE = b'\xF0\x9F\x98\x90'
EXPRESSIONLESS_FACE = b'\xF0\x9F\x98\x91'
CONFUSED_FACE = b'\xF0\x9F\x98\x95'
KISSING_FACE = b'\xF0\x9F\x98\x97'
KISSING_FACE_WITH_SMILING_EYES = b'\xF0\x9F\x98\x99'
FACE_WITH_STUCK_OUT_TONGUE = b'\xF0\x9F\x98\x9B'
WORRIED_FACE = b'\xF0\x9F\x98\x9F'
FROWNING_FACE_WITH_OPEN_MOUTH = b'\xF0\x9F\x98\xA6'
ANGUISHED_FACE = b'\xF0\x9F\x98\xA7'
GRIMACING_FACE = b'\xF0\x9F\x98\xAC'
FACE_WITH_OPEN_MOUTH = b'\xF0\x9F\x98\xAE'
HUSHED_FACE = b'\xF0\x9F\x98\xAF'
SLEEPING_FACE = b'\xF0\x9F\x98\xB4'
FACE_WITHOUT_MOUTH = b'\xF0\x9F\x98\xB6'
HELICOPTER = b'\xF0\x9F\x9A\x81'
STEAM_LOCOMOTIVE = b'\xF0\x9F\x9A\x82'
TRAIN = b'\xF0\x9F\x9A\x86'
LIGHT_RAIL = b'\xF0\x9F\x9A\x88'
TRAM = b'\xF0\x9F\x9A\x8A'
ONCOMING_BUS = b'\xF0\x9F\x9A\x8D'
TROLLEYBUS = b'\xF0\x9F\x9A\x8E'
MINIBUS = b'\xF0\x9F\x9A\x90'
ONCOMING_POLICE_CAR = b'\xF0\x9F\x9A\x94'
ONCOMING_TAXI = b'\xF0\x9F\x9A\x96'
ONCOMING_AUTOMOBILE = b'\xF0\x9F\x9A\x98'
ARTICULATED_LORRY = b'\xF0\x9F\x9A\x9B'
TRACTOR = b'\xF0\x9F\x9A\x9C'
MONORAIL = b'\xF0\x9F\x9A\x9D'
MOUNTAIN_RAILWAY = b'\xF0\x9F\x9A\x9E'
SUSPENSION_RAILWAY = b'\xF0\x9F\x9A\x9F'
MOUNTAIN_CABLEWAY = b'\xF0\x9F\x9A\xA0'
AERIAL_TRAMWAY = b'\xF0\x9F\x9A\xA1'
ROWBOAT = b'\xF0\x9F\x9A\xA3'
VERTICAL_TRAFFIC_LIGHT = b'\xF0\x9F\x9A\xA6'
PUT_LITTER_IN_ITS_PLACE_SYMBOL = b'\xF0\x9F\x9A\xAE'
DO_NOT_LITTER_SYMBOL = b'\xF0\x9F\x9A\xAF'
POTABLE_WATER_SYMBOL = b'\xF0\x9F\x9A\xB0'
NON_POTABLE_WATER_SYMBOL = b'\xF0\x9F\x9A\xB1'
NO_BICYCLES = b'\xF0\x9F\x9A\xB3'
BICYCLIST = b'\xF0\x9F\x9A\xB4'
MOUNTAIN_BICYCLIST = b'\xF0\x9F\x9A\xB5'
NO_PEDESTRIANS = b'\xF0\x9F\x9A\xB7'
CHILDREN_CROSSING = b'\xF0\x9F\x9A\xB8'
SHOWER = b'\xF0\x9F\x9A\xBF'
BATHTUB = b'\xF0\x9F\x9B\x81'
PASSPORT_CONTROL = b'\xF0\x9F\x9B\x82'
CUSTOMS = b'\xF0\x9F\x9B\x83'
BAGGAGE_CLAIM = b'\xF0\x9F\x9B\x84'
LEFT_LUGGAGE = b'\xF0\x9F\x9B\x85'
EARTH_GLOBE_EUROPE_AFRICA = b'\xF0\x9F\x8C\x8D'
EARTH_GLOBE_AMERICAS = b'\xF0\x9F\x8C\x8E'
GLOBE_WITH_MERIDIANS = b'\xF0\x9F\x8C\x90'
WAXING_CRESCENT_MOON_SYMBOL = b'\xF0\x9F\x8C\x92'
WANING_GIBBOUS_MOON_SYMBOL = b'\xF0\x9F\x8C\x96'
LAST_QUARTER_MOON_SYMBOL = b'\xF0\x9F\x8C\x97'
WANING_CRESCENT_MOON_SYMBOL = b'\xF0\x9F\x8C\x98'
NEW_MOON_WITH_FACE = b'\xF0\x9F\x8C\x9A'
LAST_QUARTER_MOON_WITH_FACE = b'\xF0\x9F\x8C\x9C'
FULL_MOON_WITH_FACE = b'\xF0\x9F\x8C\x9D'
SUN_WITH_FACE = b'\xF0\x9F\x8C\x9E'
EVERGREEN_TREE = b'\xF0\x9F\x8C\xB2'
DECIDUOUS_TREE = b'\xF0\x9F\x8C\xB3'
LEMON = b'\xF0\x9F\x8D\x8B'
PEAR = b'\xF0\x9F\x8D\x90'
BABY_BOTTLE = b'\xF0\x9F\x8D\xBC'
HORSE_RACING = b'\xF0\x9F\x8F\x87'
RUGBY_FOOTBALL = b'\xF0\x9F\x8F\x89'
EUROPEAN_POST_OFFICE = b'\xF0\x9F\x8F\xA4'
RAT = b'\xF0\x9F\x90\x80'
MOUSE = b'\xF0\x9F\x90\x81'
OX = b'\xF0\x9F\x90\x82'
WATER_BUFFALO = b'\xF0\x9F\x90\x83'
COW = b'\xF0\x9F\x90\x84'
TIGER = b'\xF0\x9F\x90\x85'
LEOPARD = b'\xF0\x9F\x90\x86'
RABBIT = b'\xF0\x9F\x90\x87'
CAT = b'\xF0\x9F\x90\x88'
DRAGON = b'\xF0\x9F\x90\x89'
CROCODILE = b'\xF0\x9F\x90\x8A'
WHALE = b'\xF0\x9F\x90\x8B'
RAM = b'\xF0\x9F\x90\x8F'
GOAT = b'\xF0\x9F\x90\x90'
ROOSTER = b'\xF0\x9F\x90\x93'
DOG = b'\xF0\x9F\x90\x95'
PIG = b'\xF0\x9F\x90\x96'
DROMEDARY_CAMEL = b'\xF0\x9F\x90\xAA'
BUSTS_IN_SILHOUETTE = b'\xF0\x9F\x91\xA5'
TWO_MEN_HOLDING_HANDS = b'\xF0\x9F\x91\xAC'
TWO_WOMEN_HOLDING_HANDS = b'\xF0\x9F\x91\xAD'
THOUGHT_BALLOON = b'\xF0\x9F\x92\xAD'
BANKNOTE_WITH_EURO_SIGN = b'\xF0\x9F\x92\xB6'
BANKNOTE_WITH_POUND_SIGN = b'\xF0\x9F\x92\xB7'
OPEN_MAILBOX_WITH_RAISED_FLAG = b'\xF0\x9F\x93\xAC'
OPEN_MAILBOX_WITH_LOWERED_FLAG = b'\xF0\x9F\x93\xAD'
POSTAL_HORN = b'\xF0\x9F\x93\xAF'
NO_MOBILE_PHONES = b'\xF0\x9F\x93\xB5'
TWISTED_RIGHTWARDS_ARROWS = b'\xF0\x9F\x94\x80'
CLOCKWISE_RIGHTWARDS_AND_LEFTWARDS_OPEN_CIRCLE_ARROWS = b'\xF0\x9F\x94\x81'
CLOCKWISE_RIGHTWARDS_AND_LEFTWARDS_OPEN_CIRCLE_ARROWS_WITH_CIRCLED_ONE_OVERLAY = b'\xF0\x9F\x94\x82'
ANTICLOCKWISE_DOWNWARDS_AND_UPWARDS_OPEN_CIRCLE_ARROWS = b'\xF0\x9F\x94\x84'
LOW_BRIGHTNESS_SYMBOL = b'\xF0\x9F\x94\x85'
HIGH_BRIGHTNESS_SYMBOL = b'\xF0\x9F\x94\x86'
SPEAKER_WITH_CANCELLATION_STROKE = b'\xF0\x9F\x94\x87'
SPEAKER_WITH_ONE_SOUND_WAVE = b'\xF0\x9F\x94\x89'
BELL_WITH_CANCELLATION_STROKE = b'\xF0\x9F\x94\x95'
MICROSCOPE = b'\xF0\x9F\x94\xAC'
TELESCOPE = b'\xF0\x9F\x94\xAD'
CLOCK_FACE_ONE_THIRTY = b'\xF0\x9F\x95\x9C'
CLOCK_FACE_TWO_THIRTY = b'\xF0\x9F\x95\x9D'
CLOCK_FACE_THREE_THIRTY = b'\xF0\x9F\x95\x9E'
CLOCK_FACE_FOUR_THIRTY = b'\xF0\x9F\x95\x9F'
CLOCK_FACE_FIVE_THIRTY = b'\xF0\x9F\x95\xA0'
CLOCK_FACE_SIX_THIRTY = b'\xF0\x9F\x95\xA1'
CLOCK_FACE_SEVEN_THIRTY = b'\xF0\x9F\x95\xA2'
CLOCK_FACE_EIGHT_THIRTY = b'\xF0\x9F\x95\xA3'
CLOCK_FACE_NINE_THIRTY = b'\xF0\x9F\x95\xA4'
CLOCK_FACE_TEN_THIRTY = b'\xF0\x9F\x95\xA5'
CLOCK_FACE_ELEVEN_THIRTY = b'\xF0\x9F\x95\xA6'
CLOCK_FACE_TWELVE_THIRTY = b'\xF0\x9F\x95\xA7'
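# Usage sketch (not part of the original module; `Emoji` is assumed to be the
# class these constants belong to in telegram/emoji.py): each constant is a
# UTF-8 encoded byte string, so decoding it yields the character itself, e.g.
#     Emoji.OCTOPUS.decode('utf-8')  # -> the octopus emoji as a unicode string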
|
AndrewSamokhvalov/python-telegram-bot
|
telegram/emoji.py
|
Python
|
gpl-3.0
| 37,307
|
[
"Octopus"
] |
a9357b5e59ef864bec81a2d48b20ee338bb236208028ec64a708fe0983d15350
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# IPv4 dictionaries.
animal_adjectives = [
'agile',
'bashful',
'clever',
'clumsy',
'drowsy',
'fearful',
'graceful',
'hungry',
'lonely',
'morose',
'placid',
'ruthless',
'silent',
'thoughtful',
'vapid',
'weary']
animal_colors = [
'beige',
'black',
'blue',
'bright',
'bronze',
'brown',
'dark',
'drab',
'green',
'gold',
'grey',
'jade',
'pale',
'pink',
'red',
'white']
animal_nouns = [
'ape',
'bear',
'crow',
'dove',
'frog',
'goat',
'hawk',
'lamb',
'mouse',
'newt',
'owl',
'pig',
'rat',
'snake',
'toad',
'wolf']
animal_verbs = [
'aches',
'basks',
'cries',
'dives',
'eats',
'fights',
'groans',
'hunts',
'jumps',
'lies',
'prowls',
'runs',
'sleeps',
'thrives',
'wakes',
'yawns']
nature_adjectives = [
'ancient',
'barren',
'blazing',
'crowded',
'distant',
'empty',
'foggy',
'fragrant',
'frozen',
'moonlit',
'peaceful',
'quiet',
'rugged',
'serene',
'sunlit',
'wind-swept']
nature_nouns = [
'canyon',
'clearing',
'desert',
'foothills',
'forest',
'grasslands',
'jungle',
'meadow',
'mountains',
'prairie',
'river',
'rockpool',
'sand-dune',
'tundra',
'valley',
'wetlands']
plant_nouns = [
'autumn colors',
'cherry blossoms',
'chrysanthemums',
'crabapple blooms',
'dry palm fronds',
'fat horse chestnuts',
'forget-me-nots',
'jasmine petals',
'lotus flowers',
'ripe blackberries',
'the maple seeds',
'the pine needles',
'tiger lilies',
'water lilies',
'willow branches',
'yellowwood leaves']
plant_verbs = [
'blow',
'crunch',
'dance',
'drift',
'drop',
'fall',
'grow',
'pile',
'rest',
'roll',
'show',
'spin',
'stir',
'sway',
'turn',
'twist']
# IPv6 dictionaries.
adjectives = [
'ace',
'apt',
'arched',
'ash',
'bad',
'bare',
'beige',
'big',
'black',
'bland',
'bleak',
'blond',
'blue',
'blunt',
'blush',
'bold',
'bone',
'both',
'bound',
'brash',
'brass',
'brave',
'brief',
'brisk',
'broad',
'bronze',
'brushed',
'burned',
'calm',
'ceil',
'chaste',
'cheap',
'chilled',
'clean',
'coarse',
'cold',
'cool',
'corn',
'crass',
'crazed',
'cream',
'crisp',
'crude',
'cruel',
'cursed',
'cute',
'daft',
'damp',
'dark',
'dead',
'deaf',
'dear',
'deep',
'dense',
'dim',
'drab',
'dry',
'dull',
'faint',
'fair',
'fake',
'false',
'famed',
'far',
'fast',
'fat',
'fierce',
'fine',
'firm',
'flat',
'flawed',
'fond',
'foul',
'frail',
'free',
'fresh',
'full',
'fun',
'glum',
'good',
'grave',
'gray',
'great',
'green',
'grey',
'grim',
'gruff',
'hard',
'harsh',
'high',
'hoarse',
'hot',
'huge',
'hurt',
'ill',
'jade',
'jet',
'jinxed',
'keen',
'kind',
'lame',
'lank',
'large',
'last',
'late',
'lean',
'lewd',
'light',
'limp',
'live',
'loath',
'lone',
'long',
'loose',
'lost',
'louche',
'loud',
'low',
'lush',
'mad',
'male',
'masked',
'mean',
'meek',
'mild',
'mint',
'moist',
'mute',
'near',
'neat',
'new',
'nice',
'nude',
'numb',
'odd',
'old',
'pained',
'pale',
'peach',
'pear',
'peeved',
'pink',
'piqued',
'plain',
'plum',
'plump',
'plush',
'poor',
'posed',
'posh',
'prim',
'prime',
'prompt',
'prone',
'proud',
'prune',
'puce',
'pure',
'quaint',
'quartz',
'quick',
'rare',
'raw',
'real',
'red',
'rich',
'ripe',
'rough',
'rude',
'rushed',
'rust',
'sad',
'safe',
'sage',
'sane',
'scorched',
'shaped',
'sharp',
'sheared',
'short',
'shrewd',
'shrill',
'shrunk',
'shy',
'sick',
'skilled',
'slain',
'slick',
'slight',
'slim',
'slow',
'small',
'smart',
'smooth',
'smug',
'snide',
'snug',
'soft',
'sore',
'sought',
'sour',
'spare',
'sparse',
'spent',
'spoilt',
'spry',
'squat',
'staid',
'stale',
'starry',
'staunch',
'steep',
'stiff',
'strange',
'straw',
'stretched',
'strict',
'striped',
'strong',
'suave',
'sure',
'svelte',
'swank',
'sweet',
'swift',
'tall',
'tame',
'tan',
'tart',
'taut',
'teal',
'terse',
'thick',
'thin',
'tight',
'tiny',
'tired',
'toothed',
'torn',
'tough',
'trim',
'trussed',
'twin',
'used',
'vague',
'vain',
'vast',
'veiled',
'vexed',
'vile',
'warm',
'weak',
'webbed',
'wrong',
'wry',
'young']
nouns = [
'ants',
'apes',
'asps',
'balls',
'barb',
'barbs',
'bass',
'bats',
'beads',
'beaks',
'bears',
'bees',
'bells',
'belts',
'birds',
'blades',
'blobs',
'blooms',
'boars',
'boats',
'bolts',
'books',
'bowls',
'boys',
'bream',
'brides',
'broods',
'brooms',
'brutes',
'bucks',
'bulbs',
'bulls',
'busks',
'cakes',
'calfs',
'calves',
'cats',
'char',
'chests',
'choirs',
'clams',
'clans',
'clouds',
'clowns',
'cod',
'coins',
'colts',
'cones',
'cords',
'cows',
'crabs',
'cranes',
'crows',
'cults',
'czars',
'darts',
'dates',
'deer',
'dholes',
'dice',
'discs',
'does',
'dogs',
'doors',
'dopes',
'doves',
'dreams',
'drones',
'ducks',
'dunes',
'dwarves',
'eels',
'eggs',
'elk',
'elks',
'elms',
'elves',
'ewes',
'eyes',
'faces',
'facts',
'fawns',
'feet',
'ferns',
'fish',
'fists',
'flames',
'fleas',
'flocks',
'flutes',
'foals',
'foes',
'fools',
'fowl',
'frogs',
'fruits',
'gangs',
'gar',
'geese',
'gems',
'germs',
'ghosts',
'gnomes',
'goats',
'grapes',
'grooms',
'grouse',
'grubs',
'guards',
'gulls',
'hands',
'hares',
'hawks',
'heads',
'hearts',
'hens',
'herbs',
'hills',
'hogs',
'holes',
'hordes',
'ide',
'jars',
'jays',
'kids',
'kings',
'kites',
'lads',
'lakes',
'lambs',
'larks',
'lice',
'lights',
'limbs',
'looms',
'loons',
'mares',
'masks',
'mice',
'mimes',
'minks',
'mists',
'mites',
'mobs',
'molds',
'moles',
'moons',
'moths',
'newts',
'nymphs',
'orbs',
'orcs',
'owls',
'pearls',
'pears',
'peas',
'perch',
'pigs',
'pikes',
'pines',
'plains',
'plants',
'plums',
'pools',
'prawns',
'prunes',
'pugs',
'punks',
'quail',
'quails',
'queens',
'quills',
'rafts',
'rains',
'rams',
'rats',
'rays',
'ribs',
'rocks',
'rooks',
'ruffs',
'runes',
'sands',
'seals',
'seas',
'seeds',
'serfs',
'shards',
'sharks',
'sheep',
'shells',
'ships',
'shoals',
'shrews',
'shrimp',
'skate',
'skies',
'skunks',
'sloths',
'slugs',
'smew',
'smiles',
'snails',
'snakes',
'snipes',
'sole',
'songs',
'spades',
'sprats',
'sprouts',
'squabs',
'squads',
'squares',
'squid',
'stars',
'stoats',
'stones',
'storks',
'strays',
'suns',
'swans',
'swarms',
'swells',
'swifts',
'tars',
'teams',
'teeth',
'terns',
'thorns',
'threads',
'thrones',
'ticks',
'toads',
'tools',
'trees',
'tribes',
'trolls',
'trout',
'tunes',
'tusks',
'veins',
'verbs',
'vines',
'voles',
'wasps',
'waves',
'wells',
'whales',
'whelks',
'whiffs',
'winds',
'wolves',
'worms',
'wraiths',
'wrens',
'yaks']
verbs = [
'aid',
'arm',
'awe',
'axe',
'bag',
'bait',
'bare',
'bash',
'bathe',
'beat',
'bid',
'bilk',
'blame',
'bleach',
'bleed',
'bless',
'bluff',
'blur',
'boast',
'boost',
'boot',
'bore',
'botch',
'breed',
'brew',
'bribe',
'brief',
'brine',
'broil',
'browse',
'bruise',
'build',
'burn',
'burst',
'call',
'calm',
'carve',
'chafe',
'chant',
'charge',
'chart',
'cheat',
'check',
'cheer',
'chill',
'choke',
'chomp',
'choose',
'churn',
'cite',
'clamp',
'clap',
'clasp',
'claw',
'clean',
'cleanse',
'clip',
'cloak',
'clone',
'clutch',
'coax',
'crack',
'crave',
'crunch',
'cry',
'cull',
'cure',
'curse',
'cuss',
'dare',
'daze',
'dent',
'dig',
'ding',
'doubt',
'dowse',
'drag',
'drain',
'drape',
'draw',
'dread',
'dredge',
'drill',
'drink',
'drip',
'drive',
'drop',
'drown',
'dry',
'dump',
'eat',
'etch',
'face',
'fail',
'fault',
'fear',
'feed',
'feel',
'fetch',
'fight',
'find',
'fix',
'flap',
'flay',
'flee',
'fling',
'flip',
'float',
'foil',
'forge',
'free',
'freeze',
'frisk',
'gain',
'glimpse',
'gnaw',
'goad',
'gouge',
'grab',
'grasp',
'graze',
'grieve',
'grip',
'groom',
'guard',
'guards',
'guide',
'gulp',
'gush',
'halt',
'harm',
'hate',
'haul',
'haunt',
'have',
'heal',
'hear',
'help',
'herd',
'hex',
'hire',
'hit',
'hoist',
'hound',
'hug',
'hurl',
'irk',
'jab',
'jeer',
'join',
'jolt',
'keep',
'kick',
'kill',
'kiss',
'lash',
'leash',
'leave',
'lift',
'like',
'love',
'lug',
'lure',
'maim',
'make',
'mask',
'meet',
'melt',
'mend',
'miss',
'mould',
'move',
'nab',
'name',
'need',
'oust',
'paint',
'paw',
'pay',
'peck',
'peeve',
'pelt',
'please',
'pluck',
'poach',
'poll',
'praise',
'prick',
'print',
'probe',
'prod',
'prompt',
'punch',
'quash',
'quell',
'quote',
'raid',
'raise',
'raze',
'ride',
'roast',
'rouse',
'rule',
'scald',
'scalp',
'scar',
'scathe',
'score',
'scorn',
'scour',
'scuff',
'sear',
'see',
'seek',
'seize',
'send',
'sense',
'serve',
'shake',
'shear',
'shift',
'shoot',
'shun',
'slap',
'slay',
'slice',
'smack',
'smash',
'smell',
'smite',
'snare',
'snatch',
'sniff',
'snub',
'soak',
'spare',
'splash',
'split',
'spook',
'spray',
'squash',
'squeeze',
'stab',
'stain',
'starve',
'steal',
'steer',
'sting',
'strike',
'stun',
'tag',
'tame',
'taste',
'taunt',
'teach',
'tend']
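# A minimal sketch of how fixed-size word lists can encode binary data (the
# real pyhipku grammar is more elaborate; `_sketch_encode_octet` is purely
# illustrative and not part of the library). Each IPv4 list above holds
# exactly 16 entries, so one word carries four bits and a single IPv4 octet
# (8 bits) maps onto a pair of words; the 256-entry IPv6 lists carry a full
# byte per word.
def _sketch_encode_octet(octet):
    high, low = octet >> 4, octet & 0x0F
    return animal_adjectives[high], animal_nouns[low]
# Example: _sketch_encode_octet(127) -> ('hungry', 'wolf')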
|
lord63/pyhipku
|
pyhipku/dictionary.py
|
Python
|
mit
| 11,940
|
[
"Elk",
"GULP"
] |
59ed045515064841228e1d43ef796693ea968c94c8887a7b8fdb8c0ca27fa978
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import lib
from pyscf import scf
from pyscf.lo import iao
mol = gto.Mole()
mol.atom = '''
O 0. 0. 0
h 0. -0.757 0.587
h 0. 0.757 0.587'''
mol.basis = 'unc-sto3g'
mol.verbose = 5
mol.output = '/dev/null'
mol.build()
class KnownValues(unittest.TestCase):
def test_fast_iao_mulliken_pop(self):
mf = scf.RHF(mol).run()
a = iao.iao(mol, mf.mo_coeff[:,mf.mo_occ>0])
p,chg = iao.fast_iao_mullikan_pop(mol, mf.make_rdm1(), a)
self.assertAlmostEqual(lib.finger(p), 0.56812564587009806, 5)
mf = scf.UHF(mol).run()
p,chg = iao.fast_iao_mullikan_pop(mol, mf.make_rdm1(), a)
self.assertAlmostEqual(lib.finger(p[0]+p[1]), 0.56812564587009806, 5)
if __name__ == "__main__":
print("TODO: Test iao")
unittest.main()
|
gkc1000/pyscf
|
pyscf/lo/test/test_iao.py
|
Python
|
apache-2.0
| 1,541
|
[
"PySCF"
] |
297867ea07ec6873b27b587c317aba031e39a9a7e2841528af651a821154c991
|
"""
Find an example demonstrating that floating point addition is not associative
Example output:
t1 = 0.11945064104636571
t2 = t1 / t1
t3 = 0.16278913131835504
t4 = 0.6323432862008465
assert associative_add(t4, t3, t2)
"""
from testmachine import TestMachine
from random import Random
# This is the object that we use to define the kind of test case we want to
# generate.
machine = TestMachine()
# testmachine.common defines a number of standard operations on different types
# of variables. We're going to use some of those rather than implementing our
# own.
from testmachine.common import basic_operations, arithmetic_operations
# We only have one type of variable. We'll call that floats, but this is just
# an arbitrary name. We could call it steve if we wanted to.
# We generate our basic floats as random numbers between 0 and 1.
machine.generate(Random.random, "floats")
# These are basic stack manipulation operations. They aren't very exciting, but
# they increase the likelihood of producing interesting programs. Most machines
# will use these.
basic_operations(machine, "floats")
# floats can be combined with the normal arithmetic operations
arithmetic_operations(machine, "floats")
# We want to demonstrate that floating point addition is not associative. This
# check will read three variables off our stack of floats and see if adding
# them up in different orders produces the same value.
def associative_add(x, y, z):
return x + (y + z) == (x + y) + z
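# A concrete failing triple as a sanity check (plain IEEE 754 double rounding,
# independent of testmachine): (0.1 + 0.2) + 0.3 evaluates to
# 0.6000000000000001 while 0.1 + (0.2 + 0.3) evaluates to 0.6.
assert not associative_add(0.1, 0.2, 0.3)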
# If the function we pass to a check returns a falsy value then the program
# will fail.
machine.check(associative_add, ("floats", "floats", "floats"))
if __name__ == '__main__':
# Attempt to find a falsifying example for the problem we've defined and
# print it to stdout. If it cannot find any examples (here it will), say so.
machine.run()
|
rboulton/testmachine
|
testmachine/examples/floats.py
|
Python
|
bsd-2-clause
| 1,860
|
[
"exciting"
] |
73159f06dc3a249751d19036dad098ec0bd848ace1d79aa42243bb887ece3ccc
|
import unittest
from hamcrest import *
from nose.tools import nottest
from test.features import BrowserTest
from test.features.support.splinter_matchers import has_text, has_class
class test_search(BrowserTest):
def test_search_page(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
assert_that(self.browser.find_by_css('#wrapper h1').text,
is_('Search results'))
assert_that(self.browser.find_by_css('tbody tr'), has_length(7))
def test_default_sorting(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
assert_that(self.browser.find_by_css('.sorted'), has_text('Transactions per year'))
assert_that(self.browser.find_by_css('.sorted'), has_class('descending'))
def test_sorting_search_results_by_transactional_service(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Transactional service')
assert_that(self.browser.find_by_css('.sorted'), has_text('Transactional service'))
assert_that(self.browser.find_by_css('.sorted'), has_class('ascending'))
def test_sorting_search_results_by_category(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Category')
assert_that(self.browser.find_by_css('.sorted'), has_text('Category'))
assert_that(self.browser.find_by_css('.sorted'), has_class('ascending'))
def test_sorting_search_results_by_agency(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Agency / body')
assert_that(self.browser.find_by_css('.sorted'), has_text('Agency / body'))
assert_that(self.browser.find_by_css('.sorted'), has_class('ascending'))
def test_sorting_twice_the_same_column_reverses_the_order(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Transactional service')
self.browser.click_link_by_text('Transactional service')
assert_that(self.browser.find_by_css('.sorted'), has_text('Transactional service'))
assert_that(self.browser.find_by_css('.sorted'), has_class('descending'))
def test_default_order_is_ascending_after_changing_column(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Transactional service')
self.browser.click_link_by_text('Category')
assert_that(self.browser.find_by_css('.sorted'), has_text('Category'))
assert_that(self.browser.find_by_css('.sorted'), has_class('ascending'))
def test_default_volume_order_is_descending(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
self.browser.click_link_by_text('Transactional service')
self.browser.click_link_by_text('Transactions per year')
assert_that(self.browser.find_by_css('.sorted'), has_text('Transactions per year'))
assert_that(self.browser.find_by_css('.sorted'), has_class('descending'))
def test_link_column_is_not_sortable(self):
self.browser.visit("http://0.0.0.0:8000/search?keyword=gds")
assert_that(self.browser.find_by_xpath('//th[text()="Web link"]'), has_length(greater_than(0)))
assert_that(self.browser.find_link_by_text("Web link"), has_length(0))
|
imclab/transactions-explorer
|
test/features/test_search.py
|
Python
|
mit
| 3,443
|
[
"VisIt"
] |
f1b6bb931f4448fce4d7becce299ccf81afe946a93ec4e98b9f28c5c3b7e10a3
|
# This Python file uses the following encoding: utf-8
retired_shows = [
{'art': 'http://twit.cachefly.net/coverart/abby/abby600.jpg',
'desc': 'Abby Laporte and her guests offer fresh perspectives on '
'difficult teen subjects.',
'feeds': {'MP3': 'http://feeds.twit.tv/abby.xml'},
'title': "Abby's Road",
'url': '/shows/abbys-road'},
{'art': 'http://twit.cachefly.net/coverart/arena/arena1400.jpg',
'desc': "This week's top Android apps",
'feeds': {'MP3': 'http://feeds.twit.tv/arena.xml',
'Video-HD': 'http://feeds.twit.tv/arena_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/arena_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/arena_video_small.xml'},
'title': 'Android App Arena',
'url': '/shows/android-app-arena'},
{'art': 'http://twit.cachefly.net/coverart/byb/byb1400.jpg',
'desc': "You've got to watch... before you buy! The latest tech put to the "
'test.',
'feeds': {'MP3': 'http://feeds.twit.tv/byb.xml',
'Video-HD': 'http://feeds.twit.tv/byb_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/byb_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/byb_video_small.xml'},
'title': 'Before You Buy',
'url': '/shows/before-you-buy'},
{'art': 'http://twit.cachefly.net/coverart/code/code1400.jpg',
'desc': 'An introduction to computer programming in many languages.',
'feeds': {'MP3': 'http://feeds.twit.tv/code.xml',
'Video-HD': 'http://feeds.twit.tv/code_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/code_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/code_video_small.xml'},
'title': 'Coding 101',
'url': '/shows/coding-101'},
{'art': 'http://twit.cachefly.net/coverart/snw/snw600.jpg',
'desc': "Dr. Kiki's Science Hour is an in-depth exploration of scientific "
'topics ranging from climate change to nanotech.',
'feeds': {'MP3': 'http://feeds.twit.tv/kiki.xml',
'Video-HI': 'http://feeds.twit.tv/dksh_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/dksh_video_small.xml'},
'title': "Dr. Kiki's Science Hour",
'url': '/shows/dr-kikis-science-hour'},
{'art': 'https://twit.cachefly.net/coverart/fop/fop2048.jpg',
'desc': 'Candid chats about photography with Ant Pruitt and some of the '
'industry’s hottest photographers.',
'feeds': {'MP3': 'https://feeds.twit.tv/fop.xml'},
'title': 'Focus On Photography',
'url': '/shows/focus-on-photography'},
{'art': 'http://twit.cachefly.net/coverart/fc/fc600.jpg',
'desc': 'Join Scott Johnson and Tom Merritt to get short term, long term, '
'and crazy predictions.',
'feeds': {'MP3': 'http://feeds.twit.tv/fc.xml',
'Video-HI': 'http://feeds.twit.tv/fc_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/fc_video_small.xml'},
'title': 'FourCast',
'url': '/shows/fourcast'},
{'art': 'http://twit.cachefly.net/coverart/fib/fib600.jpg',
'desc': 'Explore the world of cloning, protein folding, genome mapping, and '
'more with the most important researchers in the field.',
'feeds': {'MP3': 'http://feeds.twit.tv/fib.xml',
'Video-HI': 'http://feeds.twit.tv/fib_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/fib_video_small.xml'},
'title': 'Futures in Biotech',
'url': '/shows/futures-in-biotech'},
{'art': 'http://twit.cachefly.net/coverart/go/go600.jpg',
'desc': 'Veronica and Brian bring you news, reviews, and commentary about '
"console and PC games that you'll get nowhere else.",
'feeds': {'MP3': 'http://feeds.twit.tv/go.xml',
'Video-HI': 'http://feeds.twit.tv/go_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/go_video_small.xml'},
'title': 'Game On!',
'url': '/shows/game-on'},
{'art': 'http://twit.cachefly.net/coverart/gtt/gtt600.jpg',
'desc': 'Green Tech Today is the essential show for the eco-minded geek.',
'feeds': {'MP3': 'http://feeds.twit.tv/gtt.xml',
'Video-HI': 'http://feeds.twit.tv/gtt_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/gtt_video_small.xml'},
'title': 'Green Tech Today',
'url': '/shows/green-tech-today'},
{'art': 'http://twit.cachefly.net/coverart/htg/htg1400.jpg',
'desc': 'This is the show for big screen TVs to surround sound to the '
'latest video tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/htg.xml',
'Video-HD': 'http://feeds.twit.tv/htg_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/htg_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/htg_video_small.xml'},
'title': 'Home Theater Geeks',
'url': '/shows/home-theater-geeks'},
{'art': 'http://twit.cachefly.net/coverart/ifive/ifive1400.jpg',
'desc': 'Five apps, five tips, five minutes all about the iPhone.',
'feeds': {'MP3': 'http://feeds.twit.tv/ifive.xml',
'Video-LO': 'http://feeds.twit.tv/ifive_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/ifive_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/ifive_video_hd.xml'},
'title': 'iFive for the iPhone',
'url': '/shows/ifive-iphone'},
{'art': 'http://twit.cachefly.net/coverart/jm/jm300.jpg',
'desc': 'Megan and Leo talk about parenting in the digital age.',
'feeds': {'MP3': 'http://feeds.twit.tv/jm.xml'},
'title': 'Jumping Monkeys',
'url': '/shows/jumping-monkeys'},
{'art': 'http://twit.cachefly.net/coverart/kh/kh1400.jpg',
'desc': 'Learn how to make technology work for you.',
'feeds': {'MP3': 'http://feeds.twit.tv/kh.xml',
'Video-HD': 'http://feeds.twit.tv/kh_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/kh_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/kh_video_small.xml'},
'title': 'Know How...',
'url': '/shows/know-how'},
{'art': 'https://twit.cachefly.net/coverart/macbreak/macbreak2048.jpg',
'desc': 'Mac experts talk about everything Mac.',
'feeds': {'MP3': 'http://feeds.twit.tv/mbw.xml',
'Video-LO': 'http://feeds.twit.tv/mbw_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/mbw_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/mbw_video_hd.xml'},
'title': 'MacBreak',
'url': '/shows/macbreak'},
{'art': 'http://twit.cachefly.net/coverart/mm/mm1400.jpg',
'desc': 'Marketing Mavericks covers the intersection of marketing and tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/mm.xml',
'Video-HD': 'http://feeds.twit.tv/mm_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/mm_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/mm_video_small.xml'},
'title': 'Marketing Mavericks',
'url': '/shows/marketing-mavericks'},
{'art': 'http://twit.cachefly.net/coverart/mh/mh600.jpg',
'desc': "Stop by the house of polymath Ray Maxwell and see what he's "
'investigating today.',
'feeds': {'MP3': 'http://feeds.twit.tv/mh.xml',
'Video-HI': 'http://feeds.twit.tv/mh_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/mh_video_small.xml'},
'title': "Maxwell's House",
'url': '/shows/maxwells-house'},
{'art': 'http://twit.cachefly.net/coverart/mc/mc300.jpg',
'desc': 'Munchcast covers geek food in all its guises.',
'feeds': {'MP3': 'http://feeds.twit.tv/mc.xml'},
'title': 'Munchcast',
'url': '/shows/munchcast'},
{'art': 'https://twit.cachefly.net/coverart/natn/natn1400.jpg',
'desc': "What's happening on the 'net right now?",
'feeds': {'MP3': 'http://feeds.twit.tv/itn.xml',
'Video-LO': 'http://feeds.twit.tv/natn_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/natn_video_large.xml'},
'title': 'net@night',
'url': '/shows/netnight'},
{'art': 'http://twit.cachefly.net/coverart/omgcraft/omgcraft1400.jpg',
'desc': 'Your one-stop-shop for everything Minecraft.',
'feeds': {'MP3': 'http://feeds.twit.tv/omgcraft.xml',
'Video-HD': 'http://feeds.twit.tv/omgcraft_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/omgcraft_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/omgcraft_video_small.xml'},
'title': 'OMGcraft',
'url': '/shows/omgcraft'},
{'art': 'http://twit.cachefly.net/coverart/padre/padre1400.jpg',
'desc': 'Join Padre each week as he picks up the stories that fall through '
'the cracks.',
'feeds': {'MP3': 'http://feeds.twit.tv/padre.xml',
'Video-HD': 'http://feeds.twit.tv/padre_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/padre_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/padre_video_small.xml'},
'title': "Padre's Corner",
'url': '/shows/padres-corner'},
{'art': 'http://twit.cachefly.net/coverart/ru/ru1400.jpg',
'desc': 'The best that "the front page of the internet" has to offer.',
'feeds': {'MP3': 'http://feeds.twit.tv/ru.xml',
'Video-LO': 'http://feeds.twit.tv/ru_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/ru_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/ru_video_hd.xml'},
'title': 'redditUP',
'url': '/shows/redditup'},
{'art': 'https://twit.cachefly.net/coverart/roz/roz300.jpg',
'desc': 'Join Roz Savage as she becomes the first woman to row solo across '
'the Pacific.',
'feeds': {'MP3': 'http://feeds.twit.tv/roz.xml'},
'title': 'Roz Rows the Pacific',
'url': '/shows/roz-rows-the-pacific'},
{'art': 'https://twit.cachefly.net/coverart/snw/snw600.jpg',
'desc': 'Keep up with the most interesting science to make headlines each '
'week.',
'feeds': {'MP3': 'http://feeds.twit.tv/snw.xml',
'Video-HI': 'http://feeds.twit.tv/snw_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/snw_video_small.xml'},
'title': 'Science News Weekly',
'url': '/shows/science-news-weekly'},
{'art': 'http://twit.cachefly.net/coverart/tn2n/tn2n1400.jpg',
'desc': "Wrap up the day's tech news with Megan Morrone and the best "
'journalists in tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/tn2n.xml',
'Video-HD': 'http://feeds.twit.tv/tn2n_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tn2n_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tn2n_video_small.xml'},
'title': 'Tech News 2Night',
'url': '/shows/tech-news-2night'},
{'art': 'http://twit.cachefly.net/coverart/tnt/tnt1400.jpg',
'desc': 'Your daily dose of tech news featuring the top tech journalists.',
'feeds': {'MP3': 'http://feeds.twit.tv/tnt.xml',
'Video-HD': 'http://feeds.twit.tv/tnt_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tnt_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tnt_video_small.xml'},
'title': 'Tech News Today',
'url': '/shows/tech-news-today'},
{'art': 'http://twit.cachefly.net/coverart/hn/hn1400.jpg',
'desc': 'Dick DeBartolo and OMGchad showcase useful, unique, and silly '
'gadgets and gizmos.',
'feeds': {'MP3': 'http://gizwiz.tv/feed/audio',
'Video-LO': 'http://gizwiz.tv/feed/video_low',
'Video-HI': 'http://gizwiz.tv/feed/video'},
'title': 'The Giz Wiz',
'url': '/shows/weekly-daily-giz-wiz'},
{'art': 'http://twit.cachefly.net/coverart/tlr/tlr300.jpg',
'desc': 'Miscellaneous audio from TV and radio technology guru Leo Laporte.',
'feeds': {'MP3': 'http://feeds.twit.tv/tlr.xml'},
'title': 'The Laporte Report',
'url': '/shows/the-laporte-report'},
{'art': 'http://twit.cachefly.net/coverart/tnss/tnss2048.jpg',
'desc': 'The New Screen Savers is a variety show for tech on the TWiT '
'network.',
'feeds': {'MP3': 'http://feeds.twit.tv/tnss.xml',
'Video-HD': 'http://feeds.twit.tv/tnss_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tnss_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tnss_video_small.xml'},
'title': 'The New Screen Savers',
'url': '/shows/new-screen-savers'},
{'art': 'http://twit.cachefly.net/coverart/tsh/tsh1400.jpg',
'desc': 'Your source for the best social tools, news, and fascinating folks '
'building the next generation of the Internet.',
'feeds': {'MP3': 'http://feeds.twit.tv/tsh.xml',
'Video-HD': 'http://feeds.twit.tv/tsh_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tsh_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tsh_video_small.xml'},
'title': 'The Social Hour',
'url': '/shows/social-hour'},
{'art': 'http://twit.cachefly.net/coverart/twif/twif600.jpg',
'desc': "This week's fun facts in an irreverent look at the news.",
'feeds': {'MP3': 'http://feeds.twit.tv/tsh.xml',
'Video-LO': 'http://feeds.twit.tv/tsh_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/tsh_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/tsh_video_hd.xml'},
'title': 'this WEEK in FUN',
'url': '/shows/this-week-in-fun'},
{'art': 'http://twit.cachefly.net/coverart/twil/twil1400.jpg',
'desc': 'Join Denise as she discusses issues in technology law.',
'feeds': {'MP3': 'http://feeds.twit.tv/twil.xml',
'Video-HD': 'http://feeds.twit.tv/twil_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/twil_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twil_video_small.xml'},
'title': 'This Week in Law',
'url': '/shows/this-week-in-law'},
{'art': 'http://twit.cachefly.net/coverart/twirt/twirt600.jpg',
'desc': 'Advice, stories, and instruction from some of the sharpest minds '
'in audio media tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/twirt.xml',
'Video-HI': 'http://feeds.twit.tv/twirt_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twirt_video_small.xml'},
'title': 'This Week in Radio Tech',
'url': '/shows/this-week-in-radio-tech'},
{'art': 'http://twit.cachefly.net/coverart/yt/yt1400.jpg',
'desc': 'Lamarr and Chad explore the world of online viral video.',
'feeds': {'MP3': 'http://feeds.twit.tv/yt.xml',
'Video-HD': 'http://feeds.twit.tv/yt_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/yt_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/yt_video_small.xml'},
'title': 'This Week in YouTube',
'url': '/shows/this-week-in-youtube'},
{'art': 'http://twit.cachefly.net/coverart/tvh/tvh600.jpg',
'desc': 'Photography, art, the future, and the nature of the new culture we '
'are all building together.',
'feeds': {'MP3': 'http://feeds.twit.tv/tvh.xml',
'Video-HI': 'http://feeds.twit.tv/tvh_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tvh_video_small.xml'},
'title': "Trey's Variety Hour",
'url': '/shows/treys-variety-hour'},
{'art': 'http://twit.cachefly.net/coverart/tri/tri1400.jpg',
'desc': 'The smartest people in the world talking about the most important '
'topics in technology.',
'feeds': {'MP3': 'http://feeds.twit.tv/tri.xml',
'Video-HD': 'http://feeds.twit.tv/tri_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tri_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tri_video_small.xml'},
'title': 'Triangulation',
'url': '/shows/triangulation'},
{'art': 'http://twit.cachefly.net/coverart/photo/photo600.jpg',
'desc': 'Learn tips from pros as they impart their knowledge through '
'anecdotes, experience, and discussion.',
'feeds': {'MP3': 'http://feeds.twit.tv/photo.xml',
'Video-HD': 'http://feeds.twit.tv/photo_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/photo_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/photo_video_small.xml'},
'title': 'TWiT Photo',
'url': '/shows/twit-photo'},
{'art': 'https://twit.cachefly.net/coverart/vog/vog1400.jpg',
'desc': 'Valley of Genius features the hackers, founders, and freaks who '
'made Silicon Valley boom, in their own words.',
'feeds': {'MP3': 'http://feeds.twit.tv/vog.xml'},
'title': 'Valley of Genius',
'url': '/shows/valley-of-genius'}]
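# A minimal lookup sketch over these records (illustrative only; `mp3_feeds`
# is not part of the original module): index each retired show's MP3 feed URL
# by its title.
mp3_feeds = {show['title']: show['feeds'].get('MP3') for show in retired_shows}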
active_shows = [
{'art': 'https://twit.cachefly.net/coverart/all/all2048.jpg',
'desc': 'A feed of all the shows produced by TWiT.tv.',
'feeds': {'MP3': 'http://feeds.twit.tv/leo.xml',
'Video-LO': 'http://feeds.twit.tv/leo_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/leo_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/leo_video_hd.xml'},
'title': 'All TWiT.tv Shows',
'url': '/shows/all-twittv-shows'},
{'art': 'http://twit.cachefly.net/coverart/twit/twit1400.jpg',
'desc': 'Your first podcast of the week is the last word in tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/twit.xml',
'Video-HD': 'http://feeds.twit.tv/twit_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/twit_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twit_video_small.xml'},
'title': 'This Week in Tech',
'url': '/shows/this-week-in-tech'},
{'art': 'http://twit.cachefly.net/coverart/sn/sn1400.jpg',
'desc': 'Steve Gibson discusses the hot topics in security today with Leo '
'Laporte.',
'feeds': {'MP3': 'http://feeds.twit.tv/sn.xml',
'Video-HD': 'http://feeds.twit.tv/sn_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/sn_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/sn_video_small.xml'},
'title': 'Security Now',
'url': '/shows/security-now'},
{'art': 'http://twit.cachefly.net/coverart/ttg/ttg1400.jpg',
'desc': 'Leo Laporte takes to the radio every weekend to talk tech and '
'answer your questions.',
'feeds': {'MP3': 'http://feeds.twit.tv/kfi.xml',
'Video-HD': 'http://feeds.twit.tv/ttg_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/ttg_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/ttg_video_small.xml'},
'title': 'The Tech Guy',
'url': '/shows/the-tech-guy'},
{'art': 'https://twit.cachefly.net/coverart/atg/atg2048.jpg',
'desc': "Need tech help? Don't fear, don't fret, don't freak out - just Ask "
'The Tech Guy!',
'feeds': {'MP3': 'https://feeds.twit.tv/atg.xml',
'Video-HD': 'https://feeds.twit.tv/atg_video_hd.xml',
'Video-HI': 'https://feeds.twit.tv/atg_video_large.xml',
'Video-LO': 'https://feeds.twit.tv/atg_video_small.xml'},
'title': 'Ask The Tech Guy',
'url': '/shows/ask-the-tech-guy'},
{'art': 'http://twit.cachefly.net/coverart/ww/ww1400.jpg',
'desc': 'Paul Thurrott and Mary Jo Foley talk about Windows and all things '
'Microsoft.',
'feeds': {'MP3': 'http://feeds.twit.tv/ww.xml',
'Video-HD': 'http://feeds.twit.tv/ww_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/ww_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/ww_video_small.xml'},
'title': 'Windows Weekly',
'url': '/shows/windows-weekly'},
{'art': 'http://twit.cachefly.net/coverart/mbw/mbw1400.jpg',
'desc': 'Get the latest Apple news and views from the top names in tech '
'journalism.',
'feeds': {'MP3': 'http://feeds.twit.tv/mbw.xml',
'Video-HD': 'http://feeds.twit.tv/mbw_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/mbw_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/mbw_video_small.xml'},
'title': 'MacBreak Weekly',
'url': '/shows/macbreak-weekly'},
{'art': 'https://twit.cachefly.net/coverart/stt/stt2048.jpg',
'desc': 'Be it smart bulbs or smart assistants -- and everything in between '
'-- Smart Tech Today helps you make the most of...',
'feeds': {'MP3': 'https://feeds.twit.tv/stt.xml',
'Video-HD': 'https://feeds.twit.tv/stt_video_hd.xml',
'Video-HI': 'https://feeds.twit.tv/stt_video_large.xml',
'Video-LO': 'https://feeds.twit.tv/stt_video_small.xml'},
'title': 'Smart Tech Today',
'url': '/shows/smart-tech-today'},
{'art': 'http://twit.cachefly.net/coverart/twig/twig1400.jpg',
'desc': 'The latest Google and cloud computing news.',
'feeds': {'MP3': 'http://feeds.twit.tv/twig.xml',
'Video-HD': 'http://feeds.twit.tv/twig_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/twig_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twig_video_small.xml'},
'title': 'This Week in Google',
'url': '/shows/this-week-in-google'},
{'art': 'https://twit.cachefly.net/coverart/hop/hop2048.jpg',
'desc': 'Create awesome images with your smartphone or camera.',
'feeds': {'MP3': 'https://feeds.twit.tv/hop.xml',
'Video-HD': 'https://feeds.twit.tv/hop_video_hd.xml',
'Video-HI': 'https://feeds.twit.tv/hop_video_large.xml',
'Video-LO': 'https://feeds.twit.tv/hop_video_small.xml'},
'title': 'Hands-On Photography',
'url': '/shows/hands-on-photography'},
{'art': 'https://twit.cachefly.net/coverart/tnw/tnw2048.jpg',
'desc': 'Your weekly dose of tech news featuring the top tech journalists.',
'feeds': {'MP3': 'http://feeds.twit.tv/tnw.xml',
'Video-HD': 'http://feeds.twit.tv/tnw_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/tnw_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/tnw_video_small.xml'},
'title': 'Tech News Weekly',
'url': '/shows/tech-news-weekly'},
{'art': 'https://twit.cachefly.net/coverart/ipad/ipad2048.jpg',
'desc': "iPhone, iPad, Apple Watch, or Apple TV... If it's iOS, Leo and "
"Mikah show you what's new and what's cool.",
'feeds': {'MP3': 'http://feeds.twit.tv/ipad.xml',
'Video-LO': 'http://feeds.twit.tv/ipad_video_small.xml',
'Video-HI': 'http://feeds.twit.tv/ipad_video_large.xml',
'Video-HD': 'http://feeds.twit.tv/ipad_video_hd.xml'},
'title': 'iOS Today',
'url': '/shows/ios-today'},
{'art': 'http://twit.cachefly.net/coverart/aaa/aaa1400.jpg',
'desc': 'The Android experts keep you posted on the latest news, hardware '
'and apps.',
'feeds': {'MP3': 'http://feeds.twit.tv/aaa.xml',
'Video-HD': 'http://feeds.twit.tv/aaa_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/aaa_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/aaa_video_small.xml'},
'title': 'All About Android',
'url': '/shows/all-about-android'},
{'art': 'https://twit.cachefly.net/coverart/hot/hot2048.jpg',
'desc': "Hands-On Tech is where you'll find hands-on reviews, previews, and "
'unboxings of the hottest tech gadgets.',
'feeds': {'MP3': 'http://feeds.twit.tv/hot.xml',
'Video-HD': 'http://feeds.twit.tv/hot_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/hot_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/hot_video_small.xml'},
'title': 'Hands-On Tech',
'url': '/shows/hands-on-tech'},
{'art': 'http://twit.cachefly.net/coverart/twiet/twiet1400.jpg',
'desc': 'Keep up on the latest in cutting edge enterprise tech.',
'feeds': {'MP3': 'http://feeds.twit.tv/twiet.xml',
'Video-HD': 'http://feeds.twit.tv/twiet_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/twiet_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twiet_video_small.xml'},
'title': 'This Week in Enterprise Tech',
'url': '/shows/this-week-in-enterprise-tech'},
{'art': 'http://twit.cachefly.net/coverart/twich/twich1400.jpg',
'desc': 'Do you obsess about computer hardware? This is the show for you!',
'feeds': {'MP3': 'http://feeds.twit.tv/twich.xml',
'Video-HD': 'http://feeds.twit.tv/twich_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/twich_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/twich_video_small.xml'},
'title': 'This Week in Computer Hardware',
'url': '/shows/this-week-in-computer-hardware'},
{'art': 'http://twit.cachefly.net/coverart/floss/floss1400.jpg',
'desc': 'We show off free, libre, and open source software here each week.',
'feeds': {'MP3': 'http://feeds.twit.tv/floss.xml',
'Video-HD': 'http://feeds.twit.tv/floss_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/floss_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/floss_video_small.xml'},
'title': 'FLOSS Weekly',
'url': '/shows/floss-weekly'},
{'art': 'http://twit.cachefly.net/coverart/hn/hn1400.jpg',
'desc': 'Calling all hams! The ultimate amateur radio talk show meets here '
'weekly.',
'feeds': {'MP3': 'http://feeds.twit.tv/hn.xml',
'Video-HD': 'http://feeds.twit.tv/hn_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/hn_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/hn_video_small.xml'},
'title': 'Ham Nation',
'url': '/shows/ham-nation'},
{'art': 'https://twit.cachefly.net/coverart/events/events2048.jpg',
'desc': 'TWiT Events is your window to the best tech events in the world.',
'feeds': {'MP3': 'http://feeds.twit.tv/events.xml',
'Video-HD': 'http://feeds.twit.tv/events_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/events_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/events_video_small.xml'},
'title': 'TWiT Events',
'url': '/shows/twit-events'},
{'art': 'https://twit.cachefly.net/coverart/news/news2048.jpg',
'desc': 'Get the latest product announcements from all the tech giants, '
'plus breaking tech news as it happens.',
'feeds': {'MP3': 'http://feeds.twit.tv/ces.xml',
'Video-HD': 'http://feeds.twit.tv/specials_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/specials_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/specials_video_small.xml'},
'title': 'TWiT News',
'url': '/shows/twit-news'},
{'art': 'http://twit.cachefly.net/coverart/bits/bits1400.jpg',
'desc': 'Highlights from various shows on TWiT.tv.',
'feeds': {'MP3': 'http://feeds.twit.tv/bits.xml',
'Video-HD': 'http://feeds.twit.tv/bits_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/bits_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/bits_video_small.xml'},
'title': 'TWiT Bits',
'url': '/shows/twit-bits'},
{'art': 'http://twit.cachefly.net/coverart/radioleo/radioleo1400.jpg',
'desc': 'A compendium of podcasts from the Chief TWiT, Leo Laporte.',
'feeds': {'MP3': 'http://feeds.twit.tv/leo.xml',
'Video-HD': 'http://feeds.twit.tv/leo_video_hd.xml',
'Video-HI': 'http://feeds.twit.tv/leo_video_large.xml',
'Video-LO': 'http://feeds.twit.tv/leo_video_small.xml'},
'title': 'Radio Leo',
'url': '/shows/radio-leo'}]
|
divingmule/plugin.video.twit
|
resources/shows.py
|
Python
|
gpl-2.0
| 27,057
|
[
"Brian"
] |
85479b2a93386c01dcc576abacc2f9884f0c119e608556affc7d5c4d5307e506
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to truncate a data file to a QM system
Part of ComQum-ELBA
"""
import argparse
from collections import namedtuple
import numpy as np
from sgenlib import lammps
def _trunc_connectivity(conlist, atom_ids) :
newlist = []
for con in conlist:
keep = False
for atom in con.atoms :
if atom in atom_ids :
keep = True
break
if keep :
con.atoms = [atom_ids.index(atom)+1 for atom in con.atoms]
con.idx = len(newlist)+1
newlist.append(con)
return newlist
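# Example (hypothetical data): with atom_ids == [5, 6], a bond whose
# con.atoms is [5, 6] is kept and renumbered to [1, 2]; a bond touching
# only atoms outside atom_ids is dropped from the truncated system.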
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Truncate a LAMMPS datafile")
parser.add_argument('file', help="the data file.")
parser.add_argument('-m', '--mol', type=int, help="the molecule id of the QM system")
parser.add_argument('-o', '--out',help="the output file",default="data.sys1")
args = parser.parse_args()
if args.file is None:
print "No input file specified. Exiting!"
quit()
datafile = lammps.Datafile(args.file)
new_atoms = [atom for atom in datafile.atoms if atom.molecule == args.mol]
atom_ids = [atom.idx for atom in new_atoms]
datafile.bonds = _trunc_connectivity(datafile.bonds, atom_ids)
datafile.angles = _trunc_connectivity(datafile.angles, atom_ids)
datafile.dihedrals = _trunc_connectivity(datafile.dihedrals, atom_ids)
for i, atom in enumerate(new_atoms, 1):
atom.idx = i
datafile.atoms = new_atoms
datafile.write(args.out)
|
SGenheden/Scripts
|
Lammps/lmp_truncate.py
|
Python
|
mit
| 1,597
|
[
"LAMMPS"
] |
2a1a9143d8093f1c2f18cf307813972cb9daff997bd8106be0673a7322b45c2e
|
# -*- coding: utf-8 -*-
"""
Demonstrates use of PlotWidget class. This is little more than a
GraphicsView with a PlotItem placed in its center.
"""
import PyQt4.QtCore as q
import PyQt4.QtGui as qt
import numpy as np
import pyqtgraph as pg
import pyqtgraph.dockarea as dock
import zmq
import msgpack as msg
import msgpack_numpy
msgpack_numpy.patch()
from lvclient import EmittingLVODMClient,ProcessingLVODMClient
import odmanalysis as odm
from odmanalysis import fitfunctions
import time
from scipy.optimize import curve_fit
from CurveFitService import CurveFitServiceController, CurveFitServiceCollector
import argparse
class MeanRecorder(q.QObject):
def __init__(self):
q.QObject.__init__(self)
self.profile = None
self.n = 0.0
def reset(self):
self.profile = None
self.n = 0.0
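# record() maintains an incremental mean, mean_n = x_n/n + mean_{n-1}*(n-1)/n,
# so the full history of profiles never has to be stored.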
def record(self,profile):
if self.n > 0:
self.n+=1
self.profile = profile/self.n + self.profile * (self.n-1)/self.n
elif self.n == 0:
self.n += 1
self.profile = profile
else:
pass
class SplineCreatorState(q.QObject):
pass
class SplineCreatorWidget(qt.QWidget):
movingPeakIntervalChanged = q.Signal(tuple)
referencePeakIntervalChanged = q.Signal(tuple)
movingPeakFitFunctionChanged = q.Signal(fitfunctions.ScaledSpline)
referencePeakFitFunctionChanged = q.Signal(fitfunctions.ScaledSpline)
def __init__(self,parent=None):
qt.QWidget.__init__(self,parent)
self.isRecording = False
layout = qt.QVBoxLayout()
self.setLayout(layout)
buttonStrip = qt.QHBoxLayout()
self.startRecordingButton = qt.QPushButton("start recording",self)
self.startRecordingButton.setIcon(self.style().standardIcon(qt.QStyle.SP_MediaPlay))
self.startRecordingButton.setIconSize(q.QSize(24,24))
buttonStrip.addWidget(self.startRecordingButton)
self.stopRecordingButton = qt.QPushButton("stop recording",self)
self.stopRecordingButton.setIcon(self.style().standardIcon(qt.QStyle.SP_MediaStop))
self.stopRecordingButton.setIconSize(q.QSize(24,24))
buttonStrip.addWidget(self.stopRecordingButton)
layout.addLayout(buttonStrip)
self.plotWidget = pg.PlotWidget(name='Intensity Profile',parent=self)
layout.addWidget(self.plotWidget)
self.meanRecorder = MeanRecorder()
self.refPeakSplineControl = InteractiveSplineCreatorControlsWidget(self.meanRecorder,parent=self)
self.movingPeakSplineControl = InteractiveSplineCreatorControlsWidget(self.meanRecorder,parent=self)
hLayout = qt.QHBoxLayout()
hLayout.addWidget(self.refPeakSplineControl)
hLayout.addWidget(self.movingPeakSplineControl)
hLayout.addStretch()
layout.addLayout(hLayout)
pw = self.plotWidget
self.livePlot = pw.plot()
self.livePlot.setPen((200,200,100))
self.meanPlot = pw.plot()
self.meanPlot.setPen((100,200,200))
pw.setLabel('left', 'Intensity', units='a.u.')
pw.setLabel('bottom', 'Position', units='px')
pw.setXRange(0, 200)
pw.setYRange(0, 10000)
self.movingPeakRegion = pg.LinearRegionItem(brush=pg.intColor(1,alpha=100))
self.movingPeakRegion.setZValue(10)
self.movingPeakRegionLabel = pg.TextItem("moving peak",color=pg.intColor(1),
anchor=(0,1))
self.movingPeakRegionLabel.setX(self.movingPeakRegion.getRegion()[0])
pw.addItem(self.movingPeakRegionLabel)
pw.addItem(self.movingPeakRegion, ignoreBounds=True)
self.referencePeakRegion = pg.LinearRegionItem(brush=pg.intColor(2,alpha=100))
self.referencePeakRegion.setZValue(10)
self.referencePeakRegionLabel = pg.TextItem("reference peak",color=pg.intColor(2),
anchor=(0,2))
self.referencePeakRegionLabel.setX(self.referencePeakRegion.getRegion()[0])
pw.addItem(self.referencePeakRegionLabel)
pw.addItem(self.referencePeakRegion, ignoreBounds=True)
pw.setAutoVisible(y=True)
# connect signals and slots
self.startRecordingButton.clicked.connect(self._startRecording)
self.stopRecordingButton.clicked.connect(self._stopRecording)
self.referencePeakRegion.sigRegionChangeFinished.connect(self._emitReferencePeakIntervalChanged)
self.movingPeakRegion.sigRegionChangeFinished.connect(self._emitMovingPeakIntervalChanged)
self.referencePeakRegion.sigRegionChanged.connect(lambda r: self.referencePeakRegionLabel.setX(r.getRegion()[0]))
self.movingPeakRegion.sigRegionChanged.connect(lambda r: self.movingPeakRegionLabel.setX(r.getRegion()[0]))
self.refPeakSplineControl.splineCreator.fitFunctionCreated.connect(self._emitReferencePeakFitFunctionChanged)
self.movingPeakSplineControl.splineCreator.fitFunctionCreated.connect(self._emitMovingPeakFitFunctionChanged)
def _startRecording(self):
self.isRecording = True
self.meanRecorder.reset()
def _stopRecording(self):
self.isRecording = False
def _emitReferencePeakIntervalChanged(self):
interval = self.referencePeakRegion.getRegion()
self.referencePeakIntervalChanged.emit(interval)
def _emitMovingPeakIntervalChanged(self):
interval = self.movingPeakRegion.getRegion()
self.movingPeakIntervalChanged.emit(interval)
def _emitMovingPeakFitFunctionChanged(self,spline):
self.movingPeakFitFunctionChanged.emit(spline)
def _emitReferencePeakFitFunctionChanged(self,spline):
self.referencePeakFitFunctionChanged.emit(spline)
def updateData(self, intensityProfile):
length = len(intensityProfile)
xValues = np.arange(0,length)
if self.isRecording:
if (self.meanRecorder.profile is not None and len(self.meanRecorder.profile) != length):
self.meanRecorder.reset()
self.meanRecorder.record(intensityProfile)
self.livePlot.setData(y=intensityProfile, x=xValues)
self.meanPlot.setData(y=self.meanRecorder.profile, x=xValues)
class FitGraphWidget(qt.QWidget):
def __init__(self,parent=None):
qt.QWidget.__init__(self,parent)
layout = qt.QVBoxLayout()
self.setLayout(layout)
self.plotWidget = pg.PlotWidget(name='Fit')
layout.addWidget(self.plotWidget)
self.__initializePlots()
def __initializePlots(self):
pw = self.plotWidget
self.livePlot = pw.plot()
self.livePlot.setPen((200,200,100))
self.movingPeakFitPlot = pw.plot()
self.movingPeakFitPlot.setPen((200,0,0))
self.referencePeakFitPlot = pw.plot()
self.referencePeakFitPlot.setPen((0,200,0))
self.xValues = None
pw.setLabel('left', 'Intensity', units='a.u.')
pw.setLabel('bottom', 'Position', units='px')
pw.setXRange(0, 200)
pw.setYRange(0, 10000)
pw.setAutoVisible(y=True)
def updateIntensityProfile(self, intensityProfile):
xValues = np.arange(0,len(intensityProfile))
self.xValues = xValues
self.livePlot.setData(y=intensityProfile, x=xValues)
def updateGraphData(self, fitFunction_mp, popt_mp, fitFunction_ref,popt_ref):
self.movingPeakFitPlot.setData(x=self.xValues,y=fitFunction_mp(self.xValues,*popt_mp))
self.referencePeakFitPlot.setData(x=self.xValues,y=fitFunction_ref(self.xValues,*popt_ref))
class RollingChartWidget(qt.QWidget):
def __init__(self,parent=None):
qt.QWidget.__init__(self,parent)
self.initializeBuffer(5000)
layout = qt.QVBoxLayout()
self.setLayout(layout)
self.plotWidget = pg.PlotWidget()
self.livePlot = self.plotWidget.plot()
self.livePlot.setPen((200,200,100))
layout.addWidget(self.plotWidget)
def initializeBuffer(self,size):
self._xBuffer = np.empty(size)
self._yBuffer = np.empty(size)
self._xBuffer.fill(np.NAN)
self._yBuffer.fill(np.NAN)
self._bufferSize = size
self._bufferPointer = 0
@property
def bufferSize(self):
return self._bufferSize
@bufferSize.setter
def bufferSize(self, size):
# the setter must reuse the property name, otherwise `self.bufferSize = n`
# would bypass it; it simply rebuilds the buffer at the new size
self.initializeBuffer(size)
def addData(self,y):
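# Ring buffer: the write index wraps modulo the buffer size, so the plot
# always shows at most the last `bufferSize` samples.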
i = self._bufferPointer % self._bufferSize
self._yBuffer[i] = y
self.livePlot.setData(y=self._yBuffer)
self._bufferPointer += 1
class LVStatusDisplayWidget(qt.QWidget):
def __init__(self,parent=None):
qt.QWidget.__init__(self,parent)
layout = qt.QGridLayout()
self.setLayout(layout)
layout.addWidget(qt.QLabel("Labview Status:"),0,0)
self.lvStatusLabel = qt.QLabel("")
layout.addWidget(self.lvStatusLabel,0,1)
def updateStatus(self,status):
if status != self.lvStatusLabel.text():
self.lvStatusLabel.setText(status)
class InteractiveSplineCreator(q.QObject):
fitFunctionCreated = q.Signal(fitfunctions.ScaledSpline)
def __init__(self):
q.QObject.__init__(self)
self._fitFunction = None
self._sigma = 0
self._intensityProfile = None
@property
def intensityProfile(self):
return self._intensityProfile
def setIntensityProfile(self,intensityProfile):
self._intensityProfile = intensityProfile
@property
def sigma(self):
return self._sigma
def setSigma(self,sigma):
self._sigma = sigma
def hasFitFunction(self):
return self._fitFunction is not None
@property
def fitFunction(self):
return self._fitFunction
def createSpline(self):
spline = fitfunctions.ScaledSpline()
spline.estimateInitialParameters(self.intensityProfile,
filter_sigma=self.sigma)
self._fitFunction = spline
self.fitFunctionCreated.emit(spline)
class InteractiveSplineCreatorControlsWidget(qt.QWidget):
def __init__(self,meanRecorder, parent=None):
qt.QWidget.__init__(self,parent)
layout=qt.QGridLayout()
self.setLayout(layout)
layout.addWidget(qt.QLabel("Gaussian filter sigma:"),0,0)
self.sigmaSpinBox = qt.QSpinBox()
self.sigmaSpinBox.setMinimum(0)
self.sigmaSpinBox.setMaximum(10)
layout.addWidget(self.sigmaSpinBox,0,1)
self.makeFitFunctionButton = qt.QPushButton("Create")
layout.addWidget(self.makeFitFunctionButton,1,0)
self.splineCreator = InteractiveSplineCreator()
self.meanRecorder = meanRecorder
# connect signals and slots
self.sigmaSpinBox.valueChanged.connect(self.splineCreator.setSigma)
self.makeFitFunctionButton.clicked.connect(self.createSpline)
def createSpline(self):
self.splineCreator.setIntensityProfile(self.meanRecorder.profile)
self.splineCreator.createSpline()
class RealTimeFitter(q.QObject):
def __init__(self,parent=None):
q.QObject.__init__(self,parent)
self._refFitFunction = None
self._mpFitFunction = None
self._xminRef = None
self._xmaxRef = None
self._xminMp = None
self._xmaxMp = None
self._refEstimates = [0.0,1.0,0.0]
self._mpEstimates = [0.0,1.0,0.0]
@property
def canFit(self):
return self._mpFitFunction is not None and self._xminRef is not None and self._xmaxRef is not None and self._xminMp is not None and self._xmaxMp is not None
def fit(self,intensityProfile):
if self.canFit:
try:
displacement_mp, popt_mp = self._getMovingPeakDisplacement(intensityProfile)
displacement_ref, popt_ref = self._getReferencePeakDisplacement(intensityProfile)
return dict(displacement_mp=displacement_mp,
displacement_ref=displacement_ref,
popt_mp=popt_mp,
popt_ref=popt_ref,
fitFunction_mp=self._mpFitFunction,
fitFunction_ref=self._refFitFunction)
except Exception as e:
print e
else:
return dict()
def _getMovingPeakDisplacement(self,intensityProfile):
xdata = np.arange(len(intensityProfile))[self._xminMp:self._xmaxMp]
ydata = intensityProfile[self._xminMp:self._xmaxMp]
popt,pcov = curve_fit(self._mpFitFunction,xdata,ydata,p0=self._mpEstimates)
self._mpEstimates = popt
return self._mpFitFunction.getDisplacement(*popt), popt
def _getReferencePeakDisplacement(self,intensityProfile):
xdata = np.arange(len(intensityProfile))[self._xminRef:self._xmaxRef]
ydata = intensityProfile[self._xminRef:self._xmaxRef]
popt,pcov = curve_fit(self._refFitFunction,xdata,ydata,p0=self._refEstimates)
self._refEstimates = popt
return self._refFitFunction.getDisplacement(*popt), popt
def setReferencePeakFitFunction(self,fitFunction):
print "ref. fitfunction: %s" % fitFunction
self._refFitFunction = fitFunction
def setReferencePeakInterval(self,interval):
print interval
self._xmaxRef = int(max(interval))
self._xminRef = int(min(interval))
def setMovingPeakInterval(self,interval):
print interval
self._xmaxMp = int(max(interval))
self._xminMp = int(min(interval))
def setMovingPeakFitFunction(self,fitFunction):
print "mp. fitfunction: %s" % fitFunction
self._mpFitFunction = fitFunction
def reset(self):
"""
Resets the stored curve-fit estimates to the default values.
"""
pass
class FitControlWidget(qt.QWidget):
def __init__(self,parent=None):
qt.QWidget.__init__(self,parent)
layout = qt.QHBoxLayout()
self.setLayout(layout)
self.startButton = qt.QPushButton("start fit")
layout.addWidget(self.startButton)
self.stopButton = qt.QPushButton("stop fit")
layout.addWidget(self.stopButton)
class MainWindow(qt.QMainWindow):
def __init__(self, parent=None, lvport=4562):
qt.QMainWindow.__init__(self,parent)
self._lvAddress = r"tcp://localhost:%i" % lvport
self.setWindowTitle("Live ODM Analysis")
self.resize(1000,800)
area=dock.DockArea()
self.dockArea = area
self.setCentralWidget(area)
d1 = dock.Dock("Spline Creator", size=(500, 500)) ## size arguments are only a suggestion to the layout
d2 = dock.Dock("LabView Status", size=(1,100))
d3 = dock.Dock("Fit result", size=(500,400))
d4 = dock.Dock("Displacement", size=(500,200))
d5 = dock.Dock("Fit Controls", size=(1,100))
area.addDock(d1, 'left') ## place d1 at left edge of dock area (it will fill the whole space since there are no other docks yet)
area.addDock(d2, 'bottom', d1) ## place d2 at bottom edge of d1
area.addDock(d3, 'bottom', d2) ## place d3 at bottom edge of d2
area.addDock(d4, 'right') ## place d4 at right edge of dock area
area.addDock(d5, 'bottom', d2) ## place d5 at bottom edge of d2, alongside d3
self.splineCreatorWidget = SplineCreatorWidget(self)
d1.addWidget(self.splineCreatorWidget)
self.lvStatusDisplay = LVStatusDisplayWidget(self)
hLayout = qt.QHBoxLayout()
hLayout.addWidget(self.lvStatusDisplay)
hLayout.addStretch()
w2 = qt.QWidget()
w2.setLayout(hLayout)
d2.addWidget(w2)
self.fitGraph = FitGraphWidget(self)
d3.addWidget(self.fitGraph)
self.displacementChart = RollingChartWidget()
d4.addWidget(self.displacementChart)
self.fitControls = FitControlWidget(self)
d5.addWidget(self.fitControls)
self.lvClient = EmittingLVODMClient()
self.lvClient.connect(self._lvAddress)
#self.fitClient = ProcessingLVODMClient(lambda d: self.fitter.fit(d['Intensity Profile']))
#self.fitClient.connect("tcp://localhost:4562")
self.fitCollectorThread = CurveFitServiceCollector()
self.fitServiceController = CurveFitServiceController(producerAddress=self._lvAddress,
collectorAddress=self.fitCollectorThread.address)
# connect signals and slots
self.lvClient.messageReceived.connect(self.handleLVData)
self.fitCollectorThread.resultReceived.connect(self.handleFitResult)
self.splineCreatorWidget.referencePeakIntervalChanged.connect(self.fitServiceController.setReferencePeakInterval)
self.splineCreatorWidget.movingPeakIntervalChanged.connect(self.fitServiceController.setMovingPeakInterval)
self.splineCreatorWidget.movingPeakFitFunctionChanged.connect(self.fitServiceController.setMovingPeakFitFunction)
self.splineCreatorWidget.referencePeakFitFunctionChanged.connect(self.fitServiceController.setReferencePeakFitFunction)
self.fitControls.startButton.clicked.connect(self.fitServiceController.startFitting)
self.fitControls.stopButton.clicked.connect(self.fitServiceController.stopFitting)
def handleLVData(self,lvData):
status = lvData['Measurement Process State']
intensityProfile = np.array(lvData['Intensity Profile'])
self.splineCreatorWidget.updateData(intensityProfile)
self.fitGraph.updateIntensityProfile(intensityProfile)
self.lvStatusDisplay.updateStatus(status)
def handleFitResult(self,fitResult):
if fitResult is not None and len(fitResult) != 0:
self.fitGraph.updateGraphData(fitFunction_mp=fitResult['fitFunction_mp'],
fitFunction_ref=fitResult['fitFunction_ref'],
popt_mp=fitResult['popt_mp'],
popt_ref=fitResult['popt_ref'])
self.displacementChart.addData(y=fitResult['displacement_mp']-fitResult['displacement_ref'])
def show(self):
super(MainWindow,self).show()
self.lvClient.startAsync()
self.fitCollectorThread.start()
def closeEvent(self,event):
self._abortClients()
qt.QWidget.closeEvent(self,event)
def _abortClients(self):
print "aborting"
self.lvClient.abort()
self.fitServiceController.abort()
self.fitCollectorThread.abort()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Interactive odm gui")
parser.add_argument('--port','-p',
type=int,
help="Localhost port of the labview odm measurement process.",
default=4562)
args = parser.parse_args()
#QtGui.QApplication.setGraphicsSystem('raster')
app = qt.QApplication([])
print args
mw = MainWindow(lvport=args.port)
mw.show()
import sys
if (sys.flags.interactive != 1) or not hasattr(q, 'PYQT_VERSION'):
qt.QApplication.instance().exec_()
|
jkokorian/odm-live
|
lvclient_gui.py
|
Python
|
gpl-3.0
| 20,530
|
[
"Gaussian"
] |
3305899b1dafb1839c497bb06e2ac083200f49aa455bba8f4d19d1d907d1fdd6
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from catmaid.views import *
import catmaid
import vncbrowser
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from adminplus.sites import AdminSitePlus
admin.site = AdminSitePlus()
admin.autodiscover()
# CATMAID
urlpatterns = patterns('',
url(r'^', include('catmaid.urls')),
)
# Neuron Catalog
urlpatterns += patterns('',
url(r'^vncbrowser/', include('vncbrowser.urls')),
)
# Admin site
urlpatterns += patterns('',
url(r'^admin/', include(admin.site.urls))
)
if settings.DEBUG:
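# Serve static and media files through Django in development only;
# production deployments should leave these paths to the web server.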
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
# Access to static extensions in debug mode; strip the leading slash.
(r'^%s(?P<path>.*)$' % settings.STATIC_EXTENSION_URL[1:],
'django.views.static.serve', {'document_root': settings.STATIC_EXTENSION_ROOT}),
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL.replace(settings.CATMAID_URL, ''),
'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
AdaEne/CATMAID
|
django/projects/mysite/urls.py
|
Python
|
gpl-3.0
| 1,163
|
[
"NEURON"
] |
d91e3cef0335b999ebc94cfc49fb7bd46a23ba34a783824ec365baeb5af29273
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from tests import pyunit_utils as pu
from tests.pyunit_utils import assert_warn
seed = 1
def prepare_data(blending=False):
col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
dat = h2o.upload_file(path=pu.locate("smalldata/extdata/prostate.csv"),
destination_frame="prostate_hex",
col_types=col_types)
train, test = dat.split_frame(ratios=[.8], seed=1)
x = ["CAPSULE", "GLEASON", "RACE", "DPROS", "DCAPS", "PSA", "VOL"]
y = "AGE"
ds = pu.ns(x=x, y=y, train=train, test=test)
if blending:
train, blend = train.split_frame(ratios=[.7], seed=seed)
return ds.extend(train=train, blend=blend)
else:
return ds
def train_base_models(dataset, **kwargs):
model_args = kwargs if hasattr(dataset, 'blend') else dict(nfolds=3, fold_assignment="Modulo", keep_cross_validation_predictions=True, **kwargs)
gbm = H2OGradientBoostingEstimator(distribution="gaussian",
ntrees=10,
max_depth=3,
min_rows=2,
learn_rate=0.2,
seed=seed,
**model_args)
gbm.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
rf = H2ORandomForestEstimator(ntrees=10,
seed=seed,
**model_args)
rf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
xrf = H2ORandomForestEstimator(ntrees=20,
histogram_type="Random",
seed=seed,
**model_args)
xrf.train(x=dataset.x, y=dataset.y, training_frame=dataset.train)
return [gbm, rf, xrf]
def train_stacked_ensemble(dataset, base_models, **kwargs):
se = H2OStackedEnsembleEstimator(base_models=base_models, seed=seed)
se.train(x=dataset.x, y=dataset.y,
training_frame=dataset.train,
blending_frame=dataset.blend if hasattr(dataset, 'blend') else None,
**kwargs)
return se
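# A minimal end-to-end sketch using the helpers above (blending mode):
#
# ds = prepare_data(blending=True)
# se = train_stacked_ensemble(ds, train_base_models(ds))
# print(se.model_performance(test_data=ds.test).rmse())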
def test_suite_stackedensemble_gaussian(blending=False):
def test_predict_on_se_model():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models)
for i in range(2): # repeat predict to verify consistency
pred = se.predict(test_data=ds.test)
assert pred.nrow == ds.test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(ds.test.nrow)
assert pred.ncol == 1, "expected " + str(pred.ncol) + " to be equal to 1 but it was equal to " + str(pred.ncol)
def test_se_performance_is_better_than_individual_models():
ds = prepare_data(blending)
base_models = train_base_models(ds)
def compute_perf(model):
perf = pu.ns(
train=model.model_performance(train=True),
test=model.model_performance(test_data=ds.test)
)
print("{} training performance: ".format(model.model_id))
print(perf.train)
print("{} test performance: ".format(model.model_id))
print(perf.test)
return perf
base_perfs = {}
for model in base_models:
base_perfs[model.model_id] = compute_perf(model)
se = train_stacked_ensemble(ds, base_models)
perf_se = compute_perf(se)
# Check that stack perf is better (smaller) than the best (smaller) base learner perf:
# Training RMSE for each base learner
baselearner_best_rmse_train = min([perf.train.rmse() for perf in base_perfs.values()])
stack_rmse_train = perf_se.train.rmse()
print("Best Base-learner Training RMSE: {}".format(baselearner_best_rmse_train))
print("Ensemble Training RMSE: {}".format(stack_rmse_train))
assert_warn(stack_rmse_train < baselearner_best_rmse_train,
"expected SE training RMSE would be smaller than the best of base learner training RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_train, baselearner_best_rmse_train))
# Test RMSE for each base learner
baselearner_best_rmse_test = min([perf.test.rmse() for perf in base_perfs.values()])
stack_rmse_test = perf_se.test.rmse()
print("Best Base-learner Test RMSE: {}".format(baselearner_best_rmse_test))
print("Ensemble Test RMSE: {}".format(stack_rmse_test))
assert_warn(stack_rmse_test < baselearner_best_rmse_test,
"expected SE test RMSE would be smaller than the best of base learner test RMSE, but obtained: " \
"RMSE (SE) = {}, RMSE (best base learner) = {}".format(stack_rmse_test, baselearner_best_rmse_test))
def test_validation_frame_produces_same_metric_as_perf_test():
ds = prepare_data(blending)
models = train_base_models(ds)
se = train_stacked_ensemble(ds, models, validation_frame=ds.test)
se_perf = se.model_performance(test_data=ds.test)
se_perf_validation_frame = se.model_performance(valid=True)
# since the metrics objects are not exactly the same, we just test that the RMSE is identical
assert se_perf.rmse() == se_perf_validation_frame.rmse(), \
"expected SE test RMSE to be the same as SE validation frame RMSE, but obtained: " \
"RMSE (perf on test) = {}, RMSE (test passed as validation frame) = {}".format(se_perf.rmse(), se_perf_validation_frame.rmse())
return [pu.tag_test(test, 'blending' if blending else None) for test in [
test_predict_on_se_model,
test_se_performance_is_better_than_individual_models,
test_validation_frame_produces_same_metric_as_perf_test
]]
pu.run_tests([
test_suite_stackedensemble_gaussian(),
test_suite_stackedensemble_gaussian(blending=True)
])
|
h2oai/h2o-dev
|
h2o-py/tests/testdir_algos/stackedensemble/pyunit_stackedensemble_gaussian.py
|
Python
|
apache-2.0
| 6,514
|
[
"Gaussian"
] |
d0b45169dc074455da9438f0d7d623e059ef6f071a7db4b2970ea9c21d76f119
|
#!/usr/bin/env python
import sys
import os
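# Pipeline for each SGE array task: copy one read chunk into TMPDIR, build a
# BLAST database from it, self-align the query reads, filter the hits,
# error-correct the reads, and (optionally) re-align the corrected reads
# against a reference.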
def runfail(cmd):
print "Running : %s" % cmd
if not 0 == os.system(cmd):
sys.exit("Failed : %s " % cmd)
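# Example: runfail("makeblastdb -h") echoes the command and aborts the
# script with an error message if the shell returns a non-zero status.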
if len(sys.argv) < 2:
sys.exit("usage: blast.py query.fa [reference.fa]")
if len(sys.argv) == 3:
query_file, ref_file = sys.argv[1:3]
no_ref = False
else:
query_file = sys.argv[1]
no_ref = True
start_path = os.getcwd()
if not os.path.exists(query_file):
sys.exit("Missing Query File")
if not os.path.isabs(query_file):
query_file = os.path.join(start_path, query_file)
if not no_ref and not os.path.isabs(ref_file):
ref_file = os.path.join(start_path, ref_file)
tmp_dir = os.environ["TMPDIR"]
task_id = int(os.environ["SGE_TASK_ID"])
start_file = "p%04d" % task_id
os.chdir(tmp_dir)
runfail("cp {} . ".format(os.path.join(start_path, start_file)))
runfail("makeblastdb -dbtype nucl -in {}".format(start_file))
blast6_out = start_file + ".blast6"
runfail("blastn -db {db} -query {query} -outfmt \"6 std qlen slen qseq sseq\" -dust no -task blastn -reward 5 -penalty -4 -gapopen 8 -gapextend 6 -evalue 1e-15 -num_threads 3 | sort -k 2,2 -k 9,9n > {outfile}".format(db=start_file, query=query_file, outfile=blast6_out))
runfail("cp {} {} ".format(blast6_out, start_path))
blast6_filter_out = start_file +".blast6.r"
runfail("blast6Filter r_experimental {} > {}".format(blast6_out, blast6_filter_out))
runfail("cp {} {} ".format(blast6_filter_out, start_path))
correct_fa = start_file + ".blast6.r.fa"
correct_log = start_file + ".blast6.r.log"
cor_params = {"raw_reads": start_file,
"filter_out": blast6_filter_out,
"cor_fa" : correct_fa,
"cor_log" : correct_log}
runfail("correctOxford {raw_reads} {filter_out} > {cor_fa} 2> {cor_log}".format(**cor_params))
runfail("cp {} {} {}".format(correct_fa,correct_log,start_path))
if no_ref:
sys.exit(0)
refblast_out = start_file + ".blast6.r.refblast6"
ref_blast_params = {"reference": ref_file,
"cor_query": correct_fa,
"ref_blast_out": refblast_out}
runfail("blastn -db {reference} -query {cor_query} -outfmt \"6 std qlen slen\" -evalue 1e-10 -reward 5 -penalty -4 -gapopen 8 -gapextend 6 -dust no -task blastn -out {ref_blast_out}".format(**ref_blast_params))
refblast_filter_out = start_file + ".blast6.r.refblast6.q"
runfail("blast6Filter q {} > {}".format(refblast_out, refblast_filter_out))
runfail("cp {} {} ".format(refblast_filter_out, start_path))
|
RamsinghLab/nanocorr
|
nanocorr.py
|
Python
|
lgpl-3.0
| 2,495
|
[
"BLAST"
] |
59ae6d2c7be025a8686cffb53ca88697f4c3eb6dafc22d037948dbe6785ba0d9
|
#!/usr/bin/python
#
# Copyright (c) 2011 Rime Project.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""A framework for parallel processing in single-threaded environment."""
import functools
import os
import signal
import subprocess
import sys
import threading
import time
# State of tasks.
NUM_STATES = 6
RUNNING, WAITING, BLOCKED, READY, FINISHED, ABORTED = range(NUM_STATES)
class TaskBranch(object):
def __init__(self, tasks, unsafe_interrupt=False):
self.tasks = tasks
self.interrupt = unsafe_interrupt
class TaskReturn(object):
def __init__(self, value):
self.value = value
class TaskBlock(object):
pass
class _TaskRaise(object):
"""Internal only; do not return an instance of this class from generators."""
def __init__(self, type, value=None, traceback=None):
self.exc_info = (type, value, traceback)
class Bailout(Exception):
def __init__(self, value=None):
self.value = value
class TaskInterrupted(Exception):
pass
class Task(object):
def __hash__(self):
"""Hash function of Task.
Usually users should override CacheKey() only.
"""
if self.CacheKey() is None:
return id(self)
return hash(self.CacheKey())
def __eq__(self, other):
"""Equality function of Task.
Usually users should override CacheKey() only.
"""
if not isinstance(other, Task):
return False
if self.CacheKey() is None and other.CacheKey() is None:
return id(self) == id(other)
return self.CacheKey() == other.CacheKey()
def IsCacheable(self):
"""Checks if this task is cachable.
Usually users should override CacheKey() only.
"""
return self.CacheKey() is not None
def IsExclusive(self):
"""Checks if this task is exclusive.
If a task is exclusive, it runs only when no other task is blocked.
"""
return False
def CacheKey(self):
"""Returns the cache key of this task.
Needs to be overridden in subclasses. If this returns None, the task value is
never cached.
"""
raise NotImplementedError()
def Continue(self, value=None):
"""Continues the task.
Implementations can return these type of values:
- TaskBranch: a list of tasks to be invoked next.
- TaskReturn: a value to be returned to the caller.
- TaskBlock: indicates this operation will block.
- Task: treated as TaskBranch(task).
- any other value: treated as TaskReturn(value).
In addition to these, it can raise an exception, including Bailout.
First invocation of this function will be with no parameter or None. If it
returns TaskBranch, the next parameter will be a list of the results of the
specified tasks.
"""
raise NotImplementedError()
def Throw(self, type, value=None, traceback=None):
"""Throws in an exception.
After Continue() or Throw() returned TaskBranch, if some of the branches
raised an exception, this function is called. Return value of this
function is treated in the same way as Continue().
"""
raise NotImplementedError()
def Poll(self):
"""Polls the blocked task.
If the operation is ready, return True. This function should return
immediately, and should not raise an exception.
"""
return True
def Wait(self):
"""Polls the blocked task.
This function should wait until the operation gets ready. This function
should not raise an exception.
"""
pass
def Close(self):
"""Closes the task.
This is called once after Continue() or Throw() returned TaskReturn,
they raised an exception, or the task was interrupted.
The task should release all resources associated with it, such as
running generators or opened processes.
If this function raises an exception, the value returned by Continue()
or Throw() is discarded.
"""
pass
class GeneratorTask(Task):
def __init__(self, it, key):
self.it = it
self.key = key
def __repr__(self):
return repr(self.key)
def CacheKey(self):
return self.key
def Continue(self, value=None):
try:
return self.it.send(value)
except StopIteration:
return TaskReturn(None)
def Throw(self, type, value=None, traceback=None):
try:
return self.it.throw(type, value, traceback)
except StopIteration:
return TaskReturn(None)
def Close(self):
try:
self.it.close()
except RuntimeError:
# Python2.5 raises RuntimeError when GeneratorExit is ignored. This often
# happens when yielding a return value from inside of try block, or even
# Ctrl+C was pressed when in try block.
pass
@staticmethod
def FromFunction(func):
@functools.wraps(func)
def MakeTask(*args, **kwargs):
key = GeneratorTask._MakeCacheKey(func, args, kwargs)
try:
hash(key)
except TypeError:
raise ValueError(
'Unhashable argument was passed to GeneratorTask function')
it = func(*args, **kwargs)
return GeneratorTask(it, key)
return MakeTask
@staticmethod
def _MakeCacheKey(func, args, kwargs):
return ('GeneratorTask', func, tuple(args), tuple(kwargs.items()))
# Shortcut for daily use.
task_method = GeneratorTask.FromFunction
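# A minimal usage sketch (hypothetical tasks; assumes only the classes above):
#
# @task_method
# def Add(a, b):
#     yield TaskReturn(a + b)
#
# @task_method
# def AddAll():
#     results = yield TaskBranch([Add(1, 2), Add(3, 4)])
#     yield TaskReturn(sum(results))
#
# SerialTaskGraph().Run(AddAll())  # evaluates to 10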
class ExternalProcessTask(Task):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.proc = None
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
del kwargs['timeout']
else:
self.timeout = None
if 'exclusive' in kwargs:
self.exclusive = kwargs['exclusive']
del kwargs['exclusive']
else:
self.exclusive = False
self.timer = None
def CacheKey(self):
# Never cache.
return None
def IsExclusive(self):
return self.exclusive
def Continue(self, value=None):
if self.exclusive:
return self._ContinueExclusive()
else:
return self._ContinueNonExclusive()
def _ContinueExclusive(self):
assert self.proc is None
self._StartProcess()
self.proc.wait()
return TaskReturn(self._EndProcess())
def _ContinueNonExclusive(self):
if self.proc is None:
self._StartProcess()
return TaskBlock()
elif not self.Poll():
return TaskBlock()
else:
return TaskReturn(self._EndProcess())
def Poll(self):
assert self.proc is not None
return self.proc.poll() is not None
def Wait(self):
assert self.proc is not None
self.proc.wait()
def Close(self):
if self.timer is not None:
self.timer.cancel()
self.timer = None
if self.proc is not None:
try:
os.kill(self.proc.pid, signal.SIGKILL)
except:
pass
self.proc.wait()
self.proc = None
def _StartProcess(self):
self.start_time = time.time()
self.proc = subprocess.Popen(*self.args, **self.kwargs)
if self.timeout is not None:
def TimeoutKiller():
try:
os.kill(self.proc.pid, signal.SIGXCPU)
except:
pass
self.timer = threading.Timer(self.timeout, TimeoutKiller)
self.timer.start()
else:
self.timer = None
def _EndProcess(self):
self.end_time = time.time()
self.time = self.end_time - self.start_time
if self.timer is not None:
self.timer.cancel()
self.timer = None
# Don't keep proc in cache.
proc = self.proc
self.proc = None
return proc
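# Sketch (hypothetical command): yielding
# ExternalProcessTask(['sleep', '1'], timeout=10)
# from a task_method generator blocks that fiber until the subprocess exits;
# if the timeout elapses first, the process is sent SIGXCPU.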
class SerialTaskGraph(object):
"""TaskGraph which emulates normal serialized execution."""
def __init__(self):
self.cache = dict()
self.blocked_task = None
self.running = False
def IsRunning(self):
return self.running
def Run(self, task):
assert not self.running
self.running = True
try:
return self._Run(task)
finally:
self.running = False
def _Run(self, task):
if task not in self.cache:
self.cache[task] = None
value = (True, None)
while True:
try:
if value[0]:
result = task.Continue(value[1])
elif isinstance(value[1][1], Bailout):
result = task.Continue(value[1][1].value)
else:
result = task.Throw(*value[1])
except StopIteration:
result = TaskReturn(None)
except:
result = _TaskRaise(*sys.exc_info())
if isinstance(result, TaskBranch):
try:
value = (True, [self._Run(subtask) for subtask in result.tasks])
except:
value = (False, sys.exc_info())
elif isinstance(result, Task):
try:
value = (True, self._Run(result))
except:
value = (False, sys.exc_info())
elif isinstance(result, TaskBlock):
value = (True, None)
try:
self.blocked_task = task
task.Wait()
finally:
self.blocked_task = None
elif isinstance(result, _TaskRaise):
self.cache[task] = (False, result.exc_info)
break
elif isinstance(result, TaskReturn):
self.cache[task] = (True, result.value)
break
else:
self.cache[task] = (True, result)
break
try:
task.Close()
except:
self.cache[task] = (False, sys.exc_info())
if self.cache[task] is None:
raise RuntimeError('Cyclic task dependency found')
success, value = self.cache[task]
if success:
return value
else:
raise value[0], value[1], value[2]
def GetBlockedTasks(self):
if self.blocked_task is not None:
return [self.blocked_task]
return []
class FiberTaskGraph(object):
"""TaskGraph which executes tasks with fibers (microthreads).
FiberTaskGraph allows some tasks to be in blocked state in the same time.
Branched tasks are executed in arbitrary order.
"""
def __init__(self, parallelism, debug=0):
self.parallelism = parallelism
self.debug = debug
self.cache = dict()
self.task_graph = dict()
self.task_interrupt = dict()
self.task_counters = dict()
self.task_waits = dict()
self.task_state = dict()
self.state_stats = [0] * NUM_STATES
self.ready_tasks = []
self.blocked_tasks = []
self.pending_stack = []
self.running = False
def IsRunning(self):
return self.running
def Run(self, init_task):
assert not self.running
self.running = True
self.first_tick = time.clock()
self.last_tick = self.first_tick
self.cumulative_parallelism = 0.0
self._BranchTask(None, [init_task])
while self._RunNextTask():
pass
for task in self.task_state:
if self.task_state[task] not in (FINISHED, ABORTED):
self._InterruptTask(task)
self._UpdateCumulativeParallelism()
if self.last_tick > self.first_tick:
parallelism_efficiency = (
self.cumulative_parallelism /
(self.parallelism * (self.last_tick - self.first_tick)))
else:
parallelism_efficiency = 1.0
self._Log('Parallelism efficiency: %.2f%%' %
(100.0 * parallelism_efficiency),
level=1)
assert self.task_state[None] == READY
del self.task_state[None]
del self.task_graph[None]
self.running = False
success, value = self.cache[init_task]
if success:
return value
elif isinstance(value, Bailout):
return value.value
else:
raise value[0], value[1], value[2]
def _RunNextTask(self):
while len(self.ready_tasks) == 0:
if not self._VisitBranch():
self._WaitBlockedTasks()
next_task = self.ready_tasks.pop(0)
self._LogTaskStats()
if next_task is None:
return False
if self.task_state[next_task] != READY:
# Interrupted.
return True
exc_info = None
if next_task in self.task_graph:
if isinstance(self.task_graph[next_task], list):
value = []
for task in self.task_graph[next_task]:
if task in self.cache:
success, cached = self.cache[task]
if success:
value.append(cached)
elif exc_info is None or isinstance(exc_info[1], TaskInterrupted):
exc_info = cached
else:
success, cached = self.cache[self.task_graph[next_task]]
if success:
value = cached
else:
exc_info = cached
del self.task_graph[next_task]
else:
value = None
self._SetTaskState(next_task, RUNNING)
if exc_info is not None:
if isinstance(exc_info[1], Bailout):
self._ContinueTask(next_task, exc_info[1].value)
else:
self._ThrowTask(next_task, exc_info)
else:
self._ContinueTask(next_task, value)
return True
def _VisitBranch(self):
if not self.pending_stack:
return False
# Visit branches by depth first.
task, subtask = self.pending_stack.pop()
self._BeginTask(subtask, task)
return True
def _ContinueTask(self, task, value):
assert self.task_state[task] == RUNNING
assert not task.IsExclusive() or len(self.blocked_tasks) == 0
self._LogDebug('_ContinueTask: %s: entering' % task)
try:
result = task.Continue(value)
except:
self._LogDebug('_ContinueTask: %s: exception raised' % task)
self._ProcessTaskException(task, sys.exc_info())
else:
self._LogDebug('_ContinueTask: %s: exited' % task)
self._ProcessTaskResult(task, result)
def _ThrowTask(self, task, exc_info):
assert self.task_state[task] == RUNNING
assert not task.IsExclusive() or len(self.blocked_tasks) == 0
self._LogDebug('_ThrowTask: %s: entering' % task)
try:
result = task.Throw(*exc_info)
except:
self._LogDebug('_ThrowTask: %s: exception raised' % task)
self._ProcessTaskException(task, sys.exc_info())
else:
self._LogDebug('_ThrowTask: %s: exited' % task)
self._ProcessTaskResult(task, result)
def _ProcessTaskResult(self, task, result):
assert self.task_state[task] == RUNNING
if isinstance(result, Task):
self._LogDebug('_ProcessTaskResult: %s: received Task' % task)
self._BranchTask(task, result)
elif isinstance(result, TaskBranch):
self._LogDebug('_ProcessTaskResult: %s: received TaskBranch '
'with %d tasks' % (task, len(result.tasks)))
self._BranchTask(task, list(result.tasks), result.interrupt)
elif isinstance(result, TaskReturn):
self._LogDebug('_ProcessTaskResult: %s: received TaskReturn' % task)
self._FinishTask(task, result.value)
elif isinstance(result, TaskBlock):
self._LogDebug('_ProcessTaskResult: %s: received TaskBlock' % task)
self._BlockTask(task)
else:
self._LogDebug('_ProcessTaskResult: %s: received unknown type,'
'implying TaskReturn' % task)
self._FinishTask(task, result)
def _ProcessTaskException(self, task, exc_info):
assert self.task_state[task] == RUNNING
try:
task.Close()
except:
# Ignore the exception.
pass
self._ExceptTask(task, exc_info)
def _BranchTask(self, task, subtasks, interrupt=False):
assert task is None or self.task_state[task] == RUNNING
self.task_graph[task] = subtasks
if not isinstance(subtasks, list):
assert isinstance(subtasks, Task)
subtasks = [subtasks]
if len(subtasks) == 0:
self._LogDebug('_BranchTask: %s: zero branch, fast return' % task)
self.ready_tasks.insert(0, task)
self._SetTaskState(task, READY)
self._LogTaskStats()
return
self.task_interrupt[task] = interrupt
self.task_counters[task] = len(subtasks)
# The branches are only half-expanded here; full expansion is deferred
# so that not too many branches are opened at once.
for subtask in reversed(subtasks):
self.pending_stack.append((task, subtask))
self._SetTaskState(task, WAITING)
def _BeginTask(self, task, parent_task):
if task in self.cache:
assert self.task_state[task] in (FINISHED, ABORTED)
self._LogDebug('_BeginTask: %s: cache hit' % task)
success = self.cache[task][0]
if success:
self._ResolveTask(parent_task)
else:
self._BailoutTask(parent_task)
elif parent_task not in self.task_counters:
# Some sibling task already bailed out. Skip this task.
self._LogDebug('_BeginTask: %s: sibling task bailed out' % task)
return
else:
if task in self.task_waits:
assert self.task_state[task] in (WAITING, BLOCKED)
self._LogDebug('_BeginTask: %s: running' % task)
self.task_waits[task].append(parent_task)
else:
assert task not in self.task_state
self._LogDebug('_BeginTask: %s: starting' % task)
self.task_waits[task] = [parent_task]
self._SetTaskState(task, RUNNING)
if task.IsExclusive():
self._WaitBlockedTasksUntilEmpty()
self._ContinueTask(task, None)
def _FinishTask(self, task, value):
assert self.task_state[task] == RUNNING
try:
task.Close()
except:
self._ExceptTask(task, sys.exc_info())
return
self.cache[task] = (True, value)
self._LogDebug('_FinishTask: %s: finished, returned: %s' % (task, value))
for wait_task in self.task_waits[task]:
self._ResolveTask(wait_task)
del self.task_waits[task]
self._SetTaskState(task, FINISHED)
def _ExceptTask(self, task, exc_info):
assert self.task_state[task] in (RUNNING, BLOCKED)
assert task not in self.cache
self.cache[task] = (False, exc_info)
self._LogDebug('_ExceptTask: %s: exception raised: %s' %
(task, exc_info[0].__name__))
bailouts = self.task_waits[task]
del self.task_waits[task]
if self.task_state[task] == BLOCKED:
del self.task_counters[task]
self._SetTaskState(task, ABORTED)
for bailout in bailouts:
self._BailoutTask(bailout)
def _BlockTask(self, task):
assert self.task_state[task] == RUNNING
assert len(self.blocked_tasks) < self.parallelism
self.task_counters[task] = 1
self._UpdateCumulativeParallelism()
self.blocked_tasks.insert(0, task)
self._SetTaskState(task, BLOCKED)
self._LogTaskStats()
self._LogDebug('_BlockTask: %s: pushed to blocked_tasks' % task)
self._WaitBlockedTasksUntilNotFull()
assert len(self.blocked_tasks) < self.parallelism
def _WaitBlockedTasksUntilEmpty(self):
self._LogDebug('_WaitBlockedTasksUntilEmpty: %d blocked tasks' %
len(self.blocked_tasks))
while len(self.blocked_tasks) > 0:
self._WaitBlockedTasks()
def _WaitBlockedTasksUntilNotFull(self):
self._LogDebug('_WaitBlockedTasksUntilNotFull: %d blocked tasks' %
len(self.blocked_tasks))
if len(self.blocked_tasks) == self.parallelism:
self._Log('Maximum parallelism reached, waiting for blocked tasks',
level=2)
self._WaitBlockedTasks()
self._Log('Blocked task ready (%d -> %d)' %
(self.parallelism, len(self.blocked_tasks)),
level=2)
def _WaitBlockedTasks(self):
assert len(self.blocked_tasks) > 0
self._LogTaskStats()
self._LogDebug('_WaitBlockedTasks: waiting')
while True:
resolved = self._PollBlockedTasks()
if resolved > 0:
break
self._Sleep()
self._LogDebug('_WaitBlockedTasks: resolved %d blocked tasks' % resolved)
def _PollBlockedTasks(self):
i = 0
resolved = 0
while i < len(self.blocked_tasks):
task = self.blocked_tasks[i]
assert self.task_state[task] == BLOCKED
success = task.Poll()
if success:
self._ResolveTask(task)
resolved += 1
self._UpdateCumulativeParallelism()
self.blocked_tasks.pop(i)
self._LogTaskStats()
else:
i += 1
return resolved
def _ResolveTask(self, task):
if task not in self.task_counters:
self._LogDebug('_ResolveTask: %s: resolved, but already bailed out' % task)
return
assert self.task_state[task] in (WAITING, BLOCKED)
self._LogDebug('_ResolveTask: %s: resolved, counter: %d -> %d' %
(task, self.task_counters[task], self.task_counters[task]-1))
self.task_counters[task] -= 1
if self.task_counters[task] == 0:
if task in self.task_graph and isinstance(self.task_graph[task], list):
# Multiple branches.
self.ready_tasks.append(task)
else:
# Serial execution or blocked task.
self.ready_tasks.insert(0, task)
if task in self.task_interrupt:
del self.task_interrupt[task]
del self.task_counters[task]
self._SetTaskState(task, READY)
self._LogDebug('_ResolveTask: %s: pushed to ready_task' % task)
self._LogTaskStats()
def _BailoutTask(self, task):
if task not in self.task_counters:
self._LogDebug('_BailoutTask: %s: multiple bail out' % task)
return
assert self.task_state[task] in (WAITING, BLOCKED)
self._LogDebug('_BailoutTask: %s: bailing out' % task)
if task in self.task_graph and isinstance(self.task_graph[task], list):
# Multiple branches.
self.ready_tasks.append(task)
else:
# Serial execution or blocked task.
self.ready_tasks.insert(0, task)
interrupt = False
if task in self.task_interrupt:
interrupt = self.task_interrupt[task]
del self.task_interrupt[task]
del self.task_counters[task]
self._SetTaskState(task, READY)
self._LogDebug('_BailoutTask: %s: pushed to ready_task' % task)
if interrupt and task in self.task_graph:
for subtask in self.task_graph[task]:
self._InterruptTask(subtask)
def _InterruptTask(self, task):
if (task is None or task not in self.task_state or
self.task_state[task] not in (WAITING, BLOCKED, READY)):
return
self._LogDebug('_InterruptTask: %s: interrupted' % task)
try:
task.Close()
except:
pass
# Simulate as if the task raised an exception.
subtasks = []
if task in self.task_graph:
subtasks = self.task_graph[task]
del self.task_graph[task]
if not isinstance(subtasks, list):
subtasks = [subtasks]
if task in self.task_interrupt:
del self.task_interrupt[task]
if task in self.task_counters:
del self.task_counters[task]
if self.task_state[task] == BLOCKED:
self._UpdateCumulativeParallelism()
self.blocked_tasks.remove(task)
self._SetTaskState(task, RUNNING)
self._ExceptTask(task, (TaskInterrupted, TaskInterrupted(), None))
for subtask in subtasks:
self._InterruptTask(subtask)
def _UpdateCumulativeParallelism(self):
cur_tick = time.clock()
self.cumulative_parallelism += (
(cur_tick - self.last_tick) * len(self.blocked_tasks))
self.last_tick = cur_tick
def _Sleep(self):
time.sleep(0.01)
def _SetTaskState(self, task, state):
if self.debug >= 1:
if state == RUNNING:
assert task not in self.cache
assert task not in self.task_graph
assert task not in self.task_interrupt
assert task not in self.task_counters
assert task is None or task in self.task_waits
elif state == WAITING:
assert task not in self.cache
assert task in self.task_graph
assert task in self.task_interrupt
assert task in self.task_counters
assert task is None or task in self.task_waits
elif state == BLOCKED:
assert task not in self.cache
assert task not in self.task_graph
assert task not in self.task_interrupt
assert self.task_counters.get(task) == 1
assert task in self.task_waits
elif state == READY:
assert task not in self.cache
assert task not in self.task_interrupt
assert task not in self.task_counters
assert task is None or task in self.task_waits
elif state == FINISHED:
assert task in self.cache and self.cache[task][0]
assert task not in self.task_graph
assert task not in self.task_interrupt
assert task not in self.task_counters
assert task not in self.task_waits
elif state == ABORTED:
assert task in self.cache and not self.cache[task][0]
assert task not in self.task_graph
assert task not in self.task_interrupt
assert task not in self.task_counters
assert task not in self.task_waits
else:
raise AssertionError('Unknown state: ' + str(state))
if task in self.task_state:
self.state_stats[self.task_state[task]] -= 1
self.state_stats[state] += 1
self.task_state[task] = state
def _LogTaskStats(self):
if self.debug == 0:
return
self._LogDebug(('RUNNING %d, WAITING %d, BLOCKED %d, '
'READY %d, FINISHED %d, ABORTED %d') %
tuple(self.state_stats))
def _Log(self, msg, level):
if self.debug >= level:
# TODO(nya): Do real logging.
pass
def _LogDebug(self, msg):
self._Log(msg, level=3)
def GetBlockedTasks(self):
return self.blocked_tasks[:]
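# FiberTaskGraph is a drop-in replacement for SerialTaskGraph (same Run()
# interface); with parallelism=N it keeps up to N blocked tasks, such as
# ExternalProcessTasks, in flight at once.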
|
AI-comp/Orientation2015Problems
|
rime/core/taskgraph.py
|
Python
|
mit
| 26,075
|
[
"VisIt"
] |
ae11c34a4288dd79b1a65b38c08ac5d4d7fe0689518b96f0346e4cc757ae6939
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Advanced analysis examples
These examples highlight more advanced neurom
morphometrics functionality using iterators.
'''
from __future__ import print_function
from neurom.core.dataformat import COLS
import neurom as nm
from neurom import geom
from neurom.fst import sectionfunc
from neurom.core import Tree
from neurom.core.types import tree_type_checker, NEURITES
from neurom import morphmath as mm
import numpy as np
if __name__ == '__main__':
filename = 'test_data/swc/Neuron.swc'
# load a neuron from an SWC file
nrn = nm.load_neuron(filename)
# Some examples of what can be done using iteration
# instead of pre-packaged functions that return lists.
# The iterations give us a lot of flexibility: we can map
# any function that takes a segment or section.
    # Get the total length of all neurites in the cell by iterating over
    # sections and summing the section lengths.
def sec_len(sec):
'''Return the length of a section'''
return mm.section_length(sec.points)
print('Total neurite length (sections):',
sum(sec_len(s) for s in nm.iter_sections(nrn)))
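    # As a further illustration of the iterator pattern described above,
    # the same machinery can count sections per neurite type. This is a
    # hypothetical extra example, not part of the original analysis; it
    # only reuses iterators and filters already imported in this script.
    for ttype in NEURITES:
        n_sections = sum(1 for _ in nm.iter_sections(
            nrn, neurite_filter=tree_type_checker(ttype)))
        print('Number of sections (', ttype, '):', n_sections, sep='')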
# Get length of all neurites in cell by iterating over segments,
# and summing the segment lengths.
# This should yield the same result as iterating over sections.
print('Total neurite length (segments):',
sum(mm.segment_length(s) for s in nm.iter_segments(nrn)))
# get volume of all neurites in cell by summing over segment
# volumes
print('Total neurite volume:',
sum(mm.segment_volume(s) for s in nm.iter_segments(nrn)))
# get area of all neurites in cell by summing over segment
# areas
print('Total neurite surface area:',
sum(mm.segment_area(s) for s in nm.iter_segments(nrn)))
# get total number of neurite points in cell.
def n_points(sec):
'''number of points in a section'''
n = len(sec.points)
# Non-root sections have duplicate first point
return n if sec.parent is None else n - 1
print('Total number of points:',
sum(n_points(s) for s in nm.iter_sections(nrn)))
# get mean radius of neurite points in cell.
# p[COLS.R] yields the radius for point p.
# Note: this includes duplicated points at beginning of
# non-trunk sections
    print('Mean radius of points:',
          np.mean(np.concatenate([s.points[:, COLS.R]
                                  for s in nm.iter_sections(nrn)])))
    # The same mean-radius computation, written as a flat list
    # comprehension over the points of all but the first section.
    # Note: this still includes duplicated points at the beginning of
    # non-trunk sections.
pts = [p[COLS.R] for s in nrn.sections[1:] for p in s.points]
print('Mean radius of points:',
np.mean(pts))
# get mean radius of segments
print('Mean radius of segments:',
np.mean(list(mm.segment_radius(s) for s in nm.iter_segments(nrn))))
# get stats for the segment taper rate, for different types of neurite
    for ttype in NEURITES:
        seg_taper_rate = [mm.segment_taper_rate(s)
                          for s in nm.iter_segments(nrn, neurite_filter=tree_type_checker(ttype))]
print('Segment taper rate (', ttype,
'):\n mean=', np.mean(seg_taper_rate),
', std=', np.std(seg_taper_rate),
', min=', np.min(seg_taper_rate),
', max=', np.max(seg_taper_rate),
sep='')
# Number of bifurcation points.
print('Number of bifurcation points:',
sum(1 for _ in nm.iter_sections(nrn,
iterator_type=Tree.ibifurcation_point)))
# Number of bifurcation points for apical dendrites
print('Number of bifurcation points (apical dendrites):',
sum(1 for _ in nm.iter_sections(nrn,
iterator_type=Tree.ibifurcation_point,
neurite_filter=tree_type_checker(nm.APICAL_DENDRITE))))
# Maximum branch order
print('Maximum branch order:',
max(sectionfunc.branch_order(s) for s in nm.iter_sections(nrn)))
# Neuron's bounding box
# Note: does not account for soma radius
print('Bounding box ((min x, y, z), (max x, y, z))', geom.bounding_box(nrn))
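    # Hypothetical follow-up (not in the original example): the bounding
    # box printed above is a (min_xyz, max_xyz) pair, so per-axis extents
    # follow directly with plain numpy.
    bbox = geom.bounding_box(nrn)
    print('Extent (x, y, z):', np.subtract(bbox[1], bbox[0]))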
|
eleftherioszisis/NeuroM
|
examples/neuron_iteration_analysis.py
|
Python
|
bsd-3-clause
| 5,976
|
[
"NEURON"
] |
14c625ff4076095da8772a7b327b2981a45ac9d695c97dcbd5b6c857097c2c82
|
#!/usr/local/bin/python -i
# Script: logplot.py
# Purpose: use GnuPlot to plot two columns from a LAMMPS log file
# Syntax: logplot.py log.lammps X Y
# log.lammps = LAMMPS log file
# X,Y = plot Y versus X where X,Y are thermo keywords
# once plot appears, you are in Python interpreter, type C-D to exit
# Author: Steve Plimpton (Sandia), sjplimp at sandia.gov
import sys,os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from log import log
from gnu import gnu
if len(sys.argv) != 4:
  raise StandardError("Syntax: logplot.py log.lammps X Y")
logfile = sys.argv[1]
xlabel = sys.argv[2]
ylabel = sys.argv[3]
lg = log(logfile)
x,y = lg.get(xlabel,ylabel)
g = gnu()
g.plot(x,y)
print "Type Ctrl-D to exit Python"
|
val-github/lammps-dev
|
tools/python/logplot.py
|
Python
|
gpl-2.0
| 762
|
[
"LAMMPS"
] |
228caf9fee6a74e0690b4e7d00181bffb675b8b4e34d1c33339c052c65b215dc
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract each row's mean (computed across columns)
Mcov = cov(M)
    ###### Sanity Check: count NaN entries ######
    # NaN is the only value that compares unequal to itself, so
    # X[i,j] != X[i,j] is True exactly for NaN entries.
    n = 0
    for i in range(num_data):
        for j in range(dim):
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n = n+1
    print n
    ##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
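# Hypothetical helper (not in the original script), sketching how the
# eigenvalues returned by pca() map to a choice of num_PC: pick the
# smallest k whose cumulative eigenvalue share reaches a target fraction.
def explained_variance_k(eigvals, target=0.95):
    '''Smallest number of PCs explaining `target` fraction of variance.'''
    vals = np.sort(np.real(eigvals))[::-1] # covariance eigenvalues are real
    ratio = np.cumsum(vals) / np.sum(vals)
    return int(np.searchsorted(ratio, target)) + 1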
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    # Reduced eigenvector matrix: keep the num_PC eigenvectors with the
    # highest eigenvalues (the main loop below considers up to 20).
    W = eigvec_total[:,0:num_PC]
    m_W, n_W = np.shape(W)
    # Normalize the data set by its per-feature standard deviation
    # (not an integral part of PCA, but useful).
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    for i in range(length):
        s[i] = sqrt(C[i,i])
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
    #Projected data: project onto the retained eigenvectors; use 'Z'
    #instead of 'B' for the variance-normalized version.
    Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[82:123,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # The cumulative sum of the eigenvalues gives the fraction of variance
    # accounted for by the corresponding eigenvectors; the x axis is the
    # number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
    for num_PC in range(1, 21):
        Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
        # PYMVPA: cross-validated k-NN accuracy for k = 0..20
        cv_acc = np.zeros(21)
        for num in range(21):
            cv_acc[num] = my_mvpa(Proj,num)
        plot(np.arange(21),cv_acc,'-s')
        grid(True)
        hold(True)
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/best_kNN_PCA/test11_cross_validate_categories_1200ms_scaled_method_v_force_motion.py
|
Python
|
mit
| 5,088
|
[
"Mayavi"
] |
474c5c5782508fe6a4a49408370409c5d6a172644982ea40a9d44f6e7a83e96c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
A plugin to verify the data against user-adjusted tests.
This is the research tool, not the low-level data integrity check.
"""
from __future__ import division, print_function
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import os
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
try:
from hashlib import md5
except ImportError:
from md5 import md5
from gramps.gen.errors import WindowActiveError
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE, VERSION_DIR
from gramps.gen.lib import (ChildRefType, EventRoleType, EventType,
FamilyRelType, NameType, Person)
from gramps.gen.lib.date import Today
from gramps.gui.editors import EditPerson, EditFamily
from gramps.gen.utils.db import family_name
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().sgettext
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Verify_the_Data...')
#-------------------------------------------------------------------------
#
# temp storage and related functions
#
#-------------------------------------------------------------------------
_person_cache = {}
_family_cache = {}
_event_cache = {}
_today = Today().get_sort_value()
def find_event(db, handle):
if handle in _event_cache:
obj = _event_cache[handle]
else:
obj = db.get_event_from_handle(handle)
_event_cache[handle] = obj
return obj
def find_person(db, handle):
if handle in _person_cache:
obj = _person_cache[handle]
else:
obj = db.get_person_from_handle(handle)
_person_cache[handle] = obj
return obj
def find_family(db, handle):
if handle in _family_cache:
obj = _family_cache[handle]
else:
obj = db.get_family_from_handle(handle)
_family_cache[handle] = obj
return obj
def clear_cache():
_person_cache.clear()
_family_cache.clear()
_event_cache.clear()
#-------------------------------------------------------------------------
#
# helper functions
#
#-------------------------------------------------------------------------
def get_date_from_event_handle(db, event_handle, estimate=False):
if not event_handle:
return 0
event = find_event(db,event_handle)
if event:
date_obj = event.get_date_object()
if not estimate and \
(date_obj.get_day() == 0 or date_obj.get_month() == 0):
return 0
return date_obj.get_sort_value()
else:
return 0
def get_date_from_event_type(db, person, event_type, estimate=False):
if not person:
return 0
for event_ref in person.get_event_ref_list():
event = find_event(db,event_ref.ref)
if event:
if event_ref.get_role() != EventRoleType.PRIMARY and \
event.get_type() == EventType.BURIAL:
continue
if event.get_type() == event_type:
date_obj = event.get_date_object()
if not estimate and \
(date_obj.get_day() == 0 or date_obj.get_month() == 0):
return 0
return date_obj.get_sort_value()
return 0
def get_bapt_date(db, person, estimate=False):
return get_date_from_event_type(db, person,
EventType.BAPTISM, estimate)
def get_bury_date(db, person, estimate=False):
# check role on burial event
for event_ref in person.get_event_ref_list():
event = find_event(db, event_ref.ref)
if event and event.get_type() == EventType.BURIAL and \
event_ref.get_role() == EventRoleType.PRIMARY:
return get_date_from_event_type(db, person,
EventType.BURIAL, estimate)
def get_birth_date(db, person, estimate=False):
if not person:
return 0
birth_ref = person.get_birth_ref()
if not birth_ref:
ret = 0
else:
ret = get_date_from_event_handle(db,birth_ref.ref,estimate)
if estimate and (ret == 0):
ret = get_bapt_date(db,person,estimate)
return ret
def get_death_date(db, person, estimate=False):
if not person:
return 0
death_ref = person.get_death_ref()
if not death_ref:
ret = 0
else:
ret = get_date_from_event_handle(db,death_ref.ref,estimate)
if estimate and (ret == 0):
ret = get_bury_date(db,person,estimate)
return ret
def get_age_at_death(db, person, estimate):
birth_date = get_birth_date(db,person,estimate)
death_date = get_death_date(db,person,estimate)
if (birth_date > 0) and (death_date > 0):
return death_date - birth_date
return 0
def get_father(db, family):
if not family:
return None
father_handle = family.get_father_handle()
if father_handle:
return find_person(db,father_handle)
return None
def get_mother(db, family):
if not family:
return None
mother_handle = family.get_mother_handle()
if mother_handle:
return find_person(db, mother_handle)
return None
def get_child_birth_dates(db, family, estimate):
dates = []
for child_ref in family.get_child_ref_list():
child = find_person(db,child_ref.ref)
child_birth_date = get_birth_date(db, child, estimate)
if child_birth_date > 0:
dates.append(child_birth_date)
return dates
def get_n_children(db, person):
n = 0
for family_handle in person.get_family_handle_list():
family = find_family(db,family_handle)
if family:
n += len(family.get_child_ref_list())
return n
def get_marriage_date(db, family):
if not family:
return 0
for event_ref in family.get_event_ref_list():
event = find_event(db,event_ref.ref)
if event.get_type() == EventType.MARRIAGE and \
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY ):
date_obj = event.get_date_object()
return date_obj.get_sort_value()
return 0
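# Hypothetical usage sketch (not in the original plugin): the helpers above
# compose directly, e.g.
#   age_days = get_age_at_death(db, person, estimate=True)
#   age_years = age_days / 365
# Dates are compared via Gramps sort values, so differences are in days,
# which is why the rules below divide by 365.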
#-------------------------------------------------------------------------
#
# Actual tool
#
#-------------------------------------------------------------------------
class Verify(tool.Tool, ManagedWindow, UpdateCallback):
def __init__(self, dbstate, uistate, options_class, name, callback=None):
self.label = _('Data Verify tool')
self.vr = None
tool.Tool.__init__(self, dbstate, options_class, name)
ManagedWindow.__init__(self, uistate,[], self.__class__)
if uistate:
UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
self.dbstate = dbstate
if uistate:
self.init_gui()
else:
self.add_results = self.add_results_cli
self.run_tool(cli=True)
def add_results_cli(self, results):
# print data for the user, no GUI
(msg,gramps_id, name, the_type, rule_id, severity, handle) = results
if severity == Rule.WARNING:
print("W: %s, %s: %s, %s" % (msg,the_type, gramps_id, name))
elif severity == Rule.ERROR:
print("E: %s, %s: %s, %s" % (msg,the_type,gramps_id, name))
else:
print("S: %s, %s: %s, %s" % (msg,the_type,gramps_id, name))
def init_gui(self):
# Draw dialog and make it handle everything
self.vr = None
self.top = Glade()
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_verify_ok_clicked" : self.on_apply_clicked,
"on_delete_event" : self.close,
})
window = self.top.toplevel
self.set_window(window,self.top.get_object('title'),self.label)
for option in self.options.handler.options_dict:
if option in ['estimate_age', 'invdate']:
self.top.get_object(option).set_active(
self.options.handler.options_dict[option]
)
else:
self.top.get_object(option).set_value(
self.options.handler.options_dict[option]
)
self.window.show()
def build_menu_names(self, obj):
return (_("Tool settings"),self.label)
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def on_apply_clicked(self, obj):
run_button = self.top.get_object('button4')
close_button = self.top.get_object('button5')
run_button.set_sensitive(False)
close_button.set_sensitive(False)
for option in self.options.handler.options_dict:
if option in ['estimate_age', 'invdate']:
self.options.handler.options_dict[option] = \
self.top.get_object(option).get_active()
else:
self.options.handler.options_dict[option] = \
self.top.get_object(option).get_value_as_int()
try:
self.vr = VerifyResults(self.dbstate, self.uistate, self.track)
self.add_results = self.vr.add_results
self.vr.load_ignored(self.db.full_name)
except WindowActiveError:
pass
self.uistate.set_busy_cursor(True)
self.uistate.progress.show()
self.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
try:
self.vr.window.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
except AttributeError:
pass
self.run_tool(cli=False)
self.uistate.progress.hide()
self.uistate.set_busy_cursor(False)
try:
self.window.get_window().set_cursor(None)
self.vr.window.get_window().set_cursor(None)
except AttributeError:
pass
run_button.set_sensitive(True)
close_button.set_sensitive(True)
self.reset()
# Save options
self.options.handler.save_options()
def run_tool(self,cli=False):
person_handles = self.db.iter_person_handles()
        # Bind each option name (oldage, hwdif, ...) as a local variable
        # for use by the rule constructors below.
        for option, value in \
            self.options.handler.options_dict.items():
            exec('%s = %s' % (option, value))
if self.vr:
self.vr.real_model.clear()
self.set_total(self.db.get_number_of_people() +
self.db.get_number_of_families())
for person_handle in person_handles:
person = find_person(self.db,person_handle)
rule_list = [
BirthAfterBapt(self.db,person),
DeathBeforeBapt(self.db,person),
BirthAfterBury(self.db,person),
DeathAfterBury(self.db,person),
BirthAfterDeath(self.db,person),
BaptAfterBury(self.db,person),
OldAge(self.db,person, oldage,estimate_age),
OldAgeButNoDeath(self.db,person, oldage,estimate_age),
UnknownGender(self.db,person),
MultipleParents(self.db,person),
MarriedOften(self.db,person,wedder),
OldUnmarried(self.db,person, oldunm,estimate_age),
TooManyChildren(self.db,person,mxchilddad,mxchildmom),
Disconnected(self.db,person),
InvalidBirthDate(self.db,person,invdate),
InvalidDeathDate(self.db,person,invdate),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
# Family-based rules
for family_handle in self.db.iter_family_handles():
family = find_family(self.db,family_handle)
rule_list = [
SameSexFamily(self.db,family),
FemaleHusband(self.db,family),
MaleWife(self.db,family),
SameSurnameFamily(self.db,family),
LargeAgeGapFamily(self.db,family, hwdif,estimate_age),
MarriageBeforeBirth(self.db,family,estimate_age),
MarriageAfterDeath(self.db,family,estimate_age),
EarlyMarriage(self.db,family,yngmar,estimate_age),
LateMarriage(self.db,family, oldmar,estimate_age),
OldParent(self.db,family, oldmom, olddad,estimate_age),
YoungParent(self.db,family,yngmom,yngdad,estimate_age),
UnbornParent(self.db,family,estimate_age),
DeadParent(self.db,family,estimate_age),
LargeChildrenSpan(self.db,family,cbspan,estimate_age),
LargeChildrenAgeDiff(self.db,family,cspace,estimate_age),
MarriedRelation(self.db,family),
]
for rule in rule_list:
if rule.broken():
self.add_results(rule.report_itself())
clear_cache()
if not cli:
self.update()
#-------------------------------------------------------------------------
#
# Display the results
#
#-------------------------------------------------------------------------
class VerifyResults(ManagedWindow):
IGNORE_COL = 0
WARNING_COL = 1
OBJ_ID_COL = 2
OBJ_NAME_COL = 3
OBJ_TYPE_COL = 4
RULE_ID_COL = 5
OBJ_HANDLE_COL = 6
FG_COLOR_COL = 7
TRUE_COL = 8
SHOW_COL = 9
def __init__(self,dbstate,uistate,track):
self.title = _('Data Verification Results')
ManagedWindow.__init__(self,uistate,track,self.__class__)
self.dbstate = dbstate
self.top = Glade(toplevel="verify_result")
window = self.top.toplevel
self.set_window(window,self.top.get_object('title2'),self.title)
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_verify_ok_clicked" : self.__dummy,
"on_help_clicked" : self.__dummy,
})
self.warn_tree = self.top.get_object('warn_tree')
self.warn_tree.connect('button_press_event', self.double_click)
self.selection = self.warn_tree.get_selection()
self.hide_button = self.top.get_object('hide_button')
self.hide_button.connect('toggled',self.hide_toggled)
self.mark_button = self.top.get_object('mark_all')
self.mark_button.connect('clicked',self.mark_clicked)
self.unmark_button = self.top.get_object('unmark_all')
self.unmark_button.connect('clicked',self.unmark_clicked)
self.invert_button = self.top.get_object('invert_all')
self.invert_button.connect('clicked',self.invert_clicked)
self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING, object,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN)
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
self.renderer = Gtk.CellRendererText()
self.img_renderer = Gtk.CellRendererPixbuf()
self.bool_renderer = Gtk.CellRendererToggle()
self.bool_renderer.connect('toggled', self.selection_toggled)
# Add ignore column
ignore_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
active=VerifyResults.IGNORE_COL)
ignore_column.set_sort_column_id(VerifyResults.IGNORE_COL)
self.warn_tree.append_column(ignore_column)
# Add image column
img_column = Gtk.TreeViewColumn(None, self.img_renderer )
img_column.set_cell_data_func(self.img_renderer,self.get_image)
self.warn_tree.append_column(img_column)
# Add column with the warning text
warn_column = Gtk.TreeViewColumn(_('Warning'), self.renderer,
text=VerifyResults.WARNING_COL,
foreground=VerifyResults.FG_COLOR_COL)
warn_column.set_sort_column_id(VerifyResults.WARNING_COL)
self.warn_tree.append_column(warn_column)
# Add column with object gramps_id
id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
text=VerifyResults.OBJ_ID_COL,
foreground=VerifyResults.FG_COLOR_COL)
id_column.set_sort_column_id(VerifyResults.OBJ_ID_COL)
self.warn_tree.append_column(id_column)
# Add column with object name
name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
text=VerifyResults.OBJ_NAME_COL,
foreground=VerifyResults.FG_COLOR_COL)
name_column.set_sort_column_id(VerifyResults.OBJ_NAME_COL)
self.warn_tree.append_column(name_column)
self.window.show()
self.window_shown = False
def __dummy(self, obj):
"""dummy callback, needed because VerifyResults is in same glade file
as Verify, so callbacks of Verify must be defined.
"""
pass
def load_ignored(self,db_filename):
        md5sum = md5(db_filename.encode('utf-8')) # md5 needs bytes on Python 3
self.ignores_filename = os.path.join(
VERSION_DIR,md5sum.hexdigest() + os.path.extsep + 'vfm')
if not self._load_ignored(self.ignores_filename):
self.ignores = {}
def _load_ignored(self,filename):
try:
            f = open(filename, 'rb') # pickle data is binary
self.ignores = pickle.load(f)
f.close()
return True
except IOError:
return False
def save_ignored(self, new_ignores):
self.ignores = new_ignores
self._save_ignored(self.ignores_filename)
def _save_ignored(self,filename):
try:
            f = open(filename, 'wb') # pickle data is binary
pickle.dump(self.ignores,f,1)
f.close()
return True
except IOError:
return False
def get_marking(self, handle,rule_id):
if handle in self.ignores:
return (rule_id in self.ignores[handle])
else:
return False
def get_new_marking(self):
new_ignores = {}
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
ignore = row[VerifyResults.IGNORE_COL]
if ignore:
handle = row[VerifyResults.OBJ_HANDLE_COL]
rule_id = row[VerifyResults.RULE_ID_COL]
if handle not in new_ignores:
new_ignores[handle] = set()
new_ignores[handle].add(rule_id)
return new_ignores
def close(self, *obj):
new_ignores = self.get_new_marking()
self.save_ignored(new_ignores)
ManagedWindow.close(self,*obj)
def hide_toggled(self, button):
if button.get_active():
button.set_label(_("_Show all"))
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.SHOW_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
else:
self.filt_model = self.real_model.filter_new()
self.filt_model.set_visible_column(VerifyResults.TRUE_COL)
self.sort_model = self.filt_model.sort_new_with_model()
self.warn_tree.set_model(self.sort_model)
button.set_label(_("_Hide marked"))
def selection_toggled(self, cell, path_string):
sort_path = tuple(map(int, path_string.split(':')))
filt_path = self.sort_model.convert_path_to_child_path(Gtk.TreePath(sort_path))
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.IGNORE_COL]
self.real_model.row_changed(real_path,row.iter)
def mark_clicked(self, mark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = True
row[VerifyResults.SHOW_COL] = False
self.filt_model.refilter()
def unmark_clicked(self, unmark_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = False
row[VerifyResults.SHOW_COL] = True
self.filt_model.refilter()
def invert_clicked(self, invert_button):
for row_num in range(len(self.real_model)):
path = (row_num,)
row = self.real_model[path]
row[VerifyResults.IGNORE_COL] = not row[VerifyResults.IGNORE_COL]
row[VerifyResults.SHOW_COL] = not row[VerifyResults.SHOW_COL]
self.filt_model.refilter()
def double_click(self, obj, event):
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
(model, node) = self.selection.get_selected()
if not node:
return
sort_path = self.sort_model.get_path(node)
filt_path = self.sort_model.convert_path_to_child_path(sort_path)
real_path = self.filt_model.convert_path_to_child_path(filt_path)
row = self.real_model[real_path]
the_type = row[VerifyResults.OBJ_TYPE_COL]
handle = row[VerifyResults.OBJ_HANDLE_COL]
if the_type == 'Person':
try:
person = self.dbstate.db.get_person_from_handle(handle)
EditPerson(self.dbstate, self.uistate, [], person)
except WindowActiveError:
pass
elif the_type == 'Family':
try:
family = self.dbstate.db.get_family_from_handle(handle)
EditFamily(self.dbstate, self.uistate, [], family)
except WindowActiveError:
pass
def get_image(self, column, cell, model, iter, user_data=None):
the_type = model.get_value(iter, VerifyResults.OBJ_TYPE_COL)
if the_type == 'Person':
cell.set_property('stock-id', 'gramps-person' )
elif the_type == 'Family':
cell.set_property('stock-id', 'gramps-family' )
def add_results(self,results):
(msg,gramps_id, name,the_type,rule_id,severity, handle) = results
ignore = self.get_marking(handle,rule_id)
if severity == Rule.ERROR:
fg = 'red'
# fg = '#8b008b' # purple
# elif severity == Rule.WARNING:
# fg = '#008b00' # green
else:
fg = None
self.real_model.append(row=[ignore,msg,gramps_id, name,
the_type,rule_id, handle,fg,
True, not ignore])
if not self.window_shown:
self.window.show()
self.window_shown = True
def build_menu_names(self, obj):
return (self.title,None)
#------------------------------------------------------------------------
#
# VerifyOptions
#
#------------------------------------------------------------------------
class VerifyOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name,person_id=None):
tool.ToolOptions.__init__(self, name,person_id)
# Options specific for this report
self.options_dict = {
'oldage' : 90,
'hwdif' : 30,
'cspace' : 8,
'cbspan' : 25,
'yngmar' : 17,
'oldmar' : 50,
'oldmom' : 48,
'yngmom' : 17,
'yngdad' : 18,
'olddad' : 65,
'wedder' : 3,
'mxchildmom' : 12,
'mxchilddad' : 15,
'lngwdw' : 30,
'oldunm' : 99,
'estimate_age' : 0,
'invdate' : 1,
}
self.options_help = {
'oldage' : ("=num","Maximum age","Age in years"),
'hwdif' : ("=num","Maximum husband-wife age difference",
"Age difference in years"),
'cspace' : ("=num",
"Maximum number of years between children",
"Number of years"),
'cbspan' : ("=num",
"Maximum span of years for all children",
"Span in years"),
'yngmar' : ("=num","Minimum age to marry","Age in years"),
'oldmar' : ("=num","Maximum age to marry","Age in years"),
'oldmom' : ("=num","Maximum age to bear a child",
"Age in years"),
'yngmom' : ("=num","Minimum age to bear a child",
"Age in years"),
'yngdad' : ("=num","Minimum age to father a child",
"Age in years"),
'olddad' : ("=num","Maximum age to father a child",
"Age in years"),
'wedder' : ("=num","Maximum number of spouses for a person",
"Number of spouses"),
'mxchildmom' : ("=num","Maximum number of children for a woman",
"Number of children"),
'mxchilddad' : ("=num","Maximum number of children for a man",
"Number of chidlren"),
'lngwdw' : ("=num","Maximum number of consecutive years "
"of widowhood before next marriage",
"Number of years"),
            'oldunm'       : ("=num","Maximum age for an unmarried person",
                              "Number of years"),
'estimate_age' : ("=0/1","Whether to estimate missing or inexact dates",
["Do not estimate","Estimate dates"],
True),
            'invdate'      : ("=0/1","Whether to check for invalid dates",
                              ["Do not identify invalid dates",
                               "Identify invalid dates"], True),
}
#-------------------------------------------------------------------------
#
# Base classes for different tests -- the rules
#
#-------------------------------------------------------------------------
class Rule(object):
"""
Basic class for use in this tool.
Other rules must inherit from this.
"""
ID = 0
TYPE = ''
ERROR = 1
WARNING = 2
SEVERITY = WARNING
def __init__(self,db, obj):
self.db = db
self.obj = obj
def broken(self):
"""
Return boolean indicating whether this rule is violated.
"""
return False
def get_message(self):
assert False, "Need to be overriden in the derived class"
def get_name(self):
assert False, "Need to be overriden in the derived class"
def get_handle(self):
return self.obj.handle
def get_id(self):
return self.obj.gramps_id
def get_level(self):
return Rule.WARNING
def get_rule_id(self):
params = self._get_params()
return (self.ID,params)
def _get_params(self):
return tuple()
def report_itself(self):
handle = self.get_handle()
the_type = self.TYPE
rule_id = self.get_rule_id()
severity = self.SEVERITY
name = self.get_name()
gramps_id = self.get_id()
msg = self.get_message()
return (msg,gramps_id, name,the_type,rule_id,severity, handle)
class PersonRule(Rule):
"""
Person-based class.
"""
TYPE = 'Person'
def get_name(self):
return self.obj.get_primary_name().get_name()
class FamilyRule(Rule):
"""
Family-based class.
"""
TYPE = 'Family'
def get_name(self):
return family_name(self.obj,self.db)
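# A hypothetical illustration (not part of the shipped tool) of how a new
# check would plug into the framework above: subclass PersonRule or
# FamilyRule, pick an unused ID, and implement broken() and get_message().
class ExampleNoSurname(PersonRule):
    """Hypothetical example rule: flag people with an empty surname."""
    ID = 999  # placeholder; the real rules below use unique small IDs
    SEVERITY = Rule.WARNING
    def broken(self):
        return len(self.obj.get_primary_name().get_surname()) == 0
    def get_message(self):
        return _("Person without a surname")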
#-------------------------------------------------------------------------
#
# Actual rules for testing
#
#-------------------------------------------------------------------------
class BirthAfterBapt(PersonRule):
ID = 1
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db,self.obj)
bapt_date = get_bapt_date(self.db,self.obj)
birth_ok = birth_date > 0
bapt_ok = bapt_date > 0
        birth_after_bapt = birth_date > bapt_date
        return (birth_ok and bapt_ok and birth_after_bapt)
def get_message(self):
return _("Baptism before birth")
class DeathBeforeBapt(PersonRule):
ID = 2
SEVERITY = Rule.ERROR
def broken(self):
death_date = get_death_date(self.db,self.obj)
bapt_date = get_bapt_date(self.db,self.obj)
bapt_ok = bapt_date > 0
death_ok = death_date > 0
death_before_bapt = bapt_date > death_date
return (death_ok and bapt_ok and death_before_bapt)
def get_message(self):
return _("Death before baptism")
class BirthAfterBury(PersonRule):
ID = 3
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db,self.obj)
bury_date = get_bury_date(self.db,self.obj)
birth_ok = birth_date > 0
bury_ok = bury_date > 0
birth_after_bury = birth_date > bury_date
return (birth_ok and bury_ok and birth_after_bury)
def get_message(self):
return _("Burial before birth")
class DeathAfterBury(PersonRule):
ID = 4
SEVERITY = Rule.ERROR
def broken(self):
death_date = get_death_date(self.db,self.obj)
bury_date = get_bury_date(self.db,self.obj)
death_ok = death_date > 0
bury_ok = bury_date > 0
death_after_bury = death_date > bury_date
return (death_ok and bury_ok and death_after_bury)
def get_message(self):
return _("Burial before death")
class BirthAfterDeath(PersonRule):
ID = 5
SEVERITY = Rule.ERROR
def broken(self):
birth_date = get_birth_date(self.db,self.obj)
death_date = get_death_date(self.db,self.obj)
birth_ok = birth_date > 0
death_ok = death_date > 0
birth_after_death = birth_date > death_date
return (birth_ok and death_ok and birth_after_death)
def get_message(self):
return _("Death before birth")
class BaptAfterBury(PersonRule):
ID = 6
SEVERITY = Rule.ERROR
def broken(self):
bapt_date = get_bapt_date(self.db,self.obj)
bury_date = get_bury_date(self.db,self.obj)
bapt_ok = bapt_date > 0
bury_ok = bury_date > 0
bapt_after_bury = bapt_date > bury_date
return (bapt_ok and bury_ok and bapt_after_bury)
def get_message(self):
return _("Burial before baptism")
class OldAge(PersonRule):
ID = 7
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_age,est):
PersonRule.__init__(self,db,person)
self.old_age = old_age
self.est = est
def _get_params(self):
return (self.old_age,self.est)
def broken(self):
age_at_death = get_age_at_death(self.db, self.obj, self.est)
return (age_at_death/365 > self.old_age)
def get_message(self):
return _("Old age at death")
class UnknownGender(PersonRule):
ID = 8
SEVERITY = Rule.WARNING
def broken(self):
female = self.obj.get_gender() == Person.FEMALE
male = self.obj.get_gender() == Person.MALE
return not (male or female)
def get_message(self):
return _("Unknown gender")
class MultipleParents(PersonRule):
ID = 9
SEVERITY = Rule.WARNING
def broken(self):
n_parent_sets = len(self.obj.get_parent_family_handle_list())
return (n_parent_sets>1)
def get_message(self):
return _("Multiple parents")
class MarriedOften(PersonRule):
ID = 10
SEVERITY = Rule.WARNING
def __init__(self,db,person,wedder):
PersonRule.__init__(self,db,person)
self.wedder = wedder
def _get_params(self):
return (self.wedder,)
def broken(self):
n_spouses = len(self.obj.get_family_handle_list())
return (n_spouses>self.wedder)
def get_message(self):
return _("Married often")
class OldUnmarried(PersonRule):
ID = 11
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_unm,est):
PersonRule.__init__(self,db,person)
self.old_unm = old_unm
self.est = est
def _get_params(self):
return (self.old_unm,self.est)
def broken(self):
age_at_death = get_age_at_death(self.db,self.obj,self.est)
n_spouses = len(self.obj.get_family_handle_list())
return (age_at_death/365 > self.old_unm and n_spouses==0)
def get_message(self):
return _("Old and unmarried")
class TooManyChildren(PersonRule):
ID = 12
SEVERITY = Rule.WARNING
def __init__(self,db, obj,mx_child_dad,mx_child_mom):
PersonRule.__init__(self,db, obj)
self.mx_child_dad = mx_child_dad
self.mx_child_mom = mx_child_mom
def _get_params(self):
return (self.mx_child_dad,self.mx_child_mom)
def broken(self):
n_child = get_n_children(self.db,self.obj)
        if (self.obj.get_gender() == Person.MALE
                and n_child > self.mx_child_dad):
            return True
        if (self.obj.get_gender() == Person.FEMALE
                and n_child > self.mx_child_mom):
            return True
return False
def get_message(self):
return _("Too many children")
class SameSexFamily(FamilyRule):
ID = 13
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
same_sex = (mother and father and
(mother.get_gender() == father.get_gender()))
unknown_sex = (mother and
(mother.get_gender() == Person.UNKNOWN))
return (same_sex and not unknown_sex)
def get_message(self):
return _("Same sex marriage")
class FemaleHusband(FamilyRule):
ID = 14
SEVERITY = Rule.WARNING
def broken(self):
father = get_father(self.db,self.obj)
return (father and (father.get_gender() == Person.FEMALE))
def get_message(self):
return _("Female husband")
class MaleWife(FamilyRule):
ID = 15
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db,self.obj)
return (mother and (mother.get_gender() == Person.MALE))
def get_message(self):
return _("Male wife")
class SameSurnameFamily(FamilyRule):
ID = 16
SEVERITY = Rule.WARNING
def broken(self):
mother = get_mother(self.db, self.obj)
father = get_father(self.db, self.obj)
_broken = False
# Make sure both mother and father exist.
if mother and father:
mname = mother.get_primary_name()
fname = father.get_primary_name()
# Only compare birth names (not married names).
if mname.get_type() == NameType.BIRTH and \
fname.get_type() == NameType.BIRTH:
# Empty names don't count.
if len(mname.get_surname()) != 0 and \
len(fname.get_surname()) != 0:
# Finally, check if the names are the same.
if mname.get_surname() == fname.get_surname():
_broken = True
return _broken
def get_message(self):
return _("Husband and wife with the same surname")
class LargeAgeGapFamily(FamilyRule):
ID = 17
SEVERITY = Rule.WARNING
def __init__(self,db, obj, hw_diff,est):
FamilyRule.__init__(self,db, obj)
self.hw_diff = hw_diff
self.est = est
def _get_params(self):
return (self.hw_diff,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
large_diff = \
abs(father_birth_date-mother_birth_date)/365 > self.hw_diff
return (mother_birth_date_ok and father_birth_date_ok and large_diff)
def get_message(self):
return _("Large age difference between spouses")
class MarriageBeforeBirth(FamilyRule):
ID = 18
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok
and (father_birth_date > marr_date))
mother_broken = (mother_birth_date_ok and marr_date_ok
and (mother_birth_date > marr_date))
return (father_broken or mother_broken)
def get_message(self):
return _("Marriage before birth")
class MarriageAfterDeath(FamilyRule):
ID = 19
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_death_date = get_death_date(self.db,mother,self.est)
father_death_date = get_death_date(self.db,father,self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
father_broken = (father_death_date_ok and marr_date_ok
and (father_death_date < marr_date))
mother_broken = (mother_death_date_ok and marr_date_ok
and (mother_death_date < marr_date))
return (father_broken or mother_broken)
def get_message(self):
return _("Marriage after death")
class EarlyMarriage(FamilyRule):
ID = 20
SEVERITY = Rule.WARNING
def __init__(self,db, obj,yng_mar,est):
FamilyRule.__init__(self,db, obj)
self.yng_mar = yng_mar
self.est = est
def _get_params(self):
return (self.yng_mar,self.est,)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok and
father_birth_date < marr_date and
((marr_date - father_birth_date)/365 < self.yng_mar))
mother_broken = (mother_birth_date_ok and marr_date_ok and
mother_birth_date < marr_date and
((marr_date - mother_birth_date)/365 < self.yng_mar))
return (father_broken or mother_broken)
def get_message(self):
return _("Early marriage")
class LateMarriage(FamilyRule):
ID = 21
SEVERITY = Rule.WARNING
def __init__(self,db, obj, old_mar,est):
FamilyRule.__init__(self,db, obj)
self.old_mar = old_mar
self.est = est
def _get_params(self):
return (self.old_mar,self.est)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
father_broken = (father_birth_date_ok and marr_date_ok and
((marr_date - father_birth_date)/365 > self.old_mar))
mother_broken = (mother_birth_date_ok and marr_date_ok and
((marr_date - mother_birth_date)/365 > self.old_mar))
return (father_broken or mother_broken)
def get_message(self):
return _("Late marriage")
## class MarriageBeforePreviousMarrChild(PersonRule):
## def broken(self):
## marr_date = get_marriage_date(self.obj)
## prev_marr_child_date = get_prev_marr_child_date(self.obj)
## return (prev_marr_child_date>marr_date)
## def get_message(self):
## return _("Marriage before having a child from previous marriage")
## class LongWidowhood(FamilyRule):
## def broken(self):
## marr_date = get_marriage_date(self.obj)
## prev_marr_spouse_death_date = get_prev_marr_spouse_death_date(self.obj)
## birth_date = get_birth_date(self.obj)
## return (marr_date-prev_marr_spouse_death_date>lngwdw)
## def get_message(self):
## return _("Long Windowhood")
class OldParent(FamilyRule):
ID = 22
SEVERITY = Rule.WARNING
def __init__(self,db, obj, old_mom, old_dad,est):
FamilyRule.__init__(self,db, obj)
self.old_mom = old_mom
self.old_dad = old_dad
self.est = est
def _get_params(self):
return (self.old_mom,self.old_dad,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok and
((child_birth_date - father_birth_date)/365 > self.old_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok and
((child_birth_date - mother_birth_date)/365 > self.old_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
return _("Old father")
def mother_message(self):
return _("Old mother")
class YoungParent(FamilyRule):
ID = 23
SEVERITY = Rule.WARNING
def __init__(self,db, obj,yng_mom,yng_dad,est):
FamilyRule.__init__(self,db, obj)
self.yng_dad = yng_dad
self.yng_mom = yng_mom
self.est = est
def _get_params(self):
return (self.yng_mom,self.yng_dad,self.est)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok and
((child_birth_date - father_birth_date)/365 < self.yng_dad))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok and
((child_birth_date - mother_birth_date)/365 < self.yng_mom))
if mother_broken:
self.get_message = self.mother_message
return True
return False
def father_message(self):
return _("Young father")
def mother_message(self):
return _("Young mother")
class UnbornParent(FamilyRule):
ID = 24
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_birth_date = get_birth_date(self.db,mother,self.est)
father_birth_date = get_birth_date(self.db,father,self.est)
mother_birth_date_ok = mother_birth_date > 0
father_birth_date_ok = father_birth_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
father_broken = (father_birth_date_ok
and (father_birth_date > child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (mother_birth_date_ok
and (mother_birth_date > child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
return _("Unborn father")
def mother_message(self):
return _("Unborn mother")
class DeadParent(FamilyRule):
ID = 25
SEVERITY = Rule.ERROR
def __init__(self,db, obj,est):
FamilyRule.__init__(self,db, obj)
self.est = est
def _get_params(self):
return (self.est,)
def broken(self):
mother = get_mother(self.db,self.obj)
father = get_father(self.db,self.obj)
mother_death_date = get_death_date(self.db,mother,self.est)
father_death_date = get_death_date(self.db,father,self.est)
mother_death_date_ok = mother_death_date > 0
father_death_date_ok = father_death_date > 0
for child_ref in self.obj.get_child_ref_list():
child = find_person(self.db,child_ref.ref)
child_birth_date = get_birth_date(self.db,child,self.est)
child_birth_date_ok = child_birth_date > 0
if not child_birth_date_ok:
continue
hasBirthRelToMother = child_ref.mrel == ChildRefType.BIRTH
hasBirthRelToFather = child_ref.frel == ChildRefType.BIRTH
            father_broken = (hasBirthRelToFather
                             and father_death_date_ok
                             # allow ~42 weeks (294 days) of gestation
                             # after the father's death
                             and ((father_death_date + 294) < child_birth_date))
if father_broken:
self.get_message = self.father_message
return True
mother_broken = (hasBirthRelToMother
and mother_death_date_ok
and (mother_death_date < child_birth_date))
if mother_broken:
self.get_message = self.mother_message
return True
def father_message(self):
return _("Dead father")
def mother_message(self):
return _("Dead mother")
class LargeChildrenSpan(FamilyRule):
ID = 26
SEVERITY = Rule.WARNING
def __init__(self,db, obj,cb_span,est):
FamilyRule.__init__(self,db, obj)
self.cb_span = cb_span
self.est = est
def _get_params(self):
return (self.cb_span,self.est)
def broken(self):
        child_birth_dates = get_child_birth_dates(self.db,self.obj,self.est)
        child_birth_dates.sort()
        return (child_birth_dates and ((child_birth_dates[-1]
                                        - child_birth_dates[0])/365
                                       > self.cb_span))
def get_message(self):
return _("Large year span for all children")
class LargeChildrenAgeDiff(FamilyRule):
ID = 27
SEVERITY = Rule.WARNING
def __init__(self,db, obj,c_space,est):
FamilyRule.__init__(self,db, obj)
self.c_space = c_space
self.est = est
def _get_params(self):
return (self.c_space,self.est)
def broken(self):
        child_birth_dates = get_child_birth_dates(self.db,self.obj,self.est)
        child_birth_dates_diff = [child_birth_dates[i+1] - child_birth_dates[i]
                                  for i in range(len(child_birth_dates)-1)]
        return (child_birth_dates_diff and
                max(child_birth_dates_diff)/365 > self.c_space)
def get_message(self):
return _("Large age differences between children")
class Disconnected(PersonRule):
ID = 28
SEVERITY = Rule.WARNING
def broken(self):
return (len(self.obj.get_parent_family_handle_list())
+ len(self.obj.get_family_handle_list()) == 0)
def get_message(self):
return _("Disconnected individual")
class InvalidBirthDate(PersonRule):
ID = 29
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
if not self._invdate: return False # should we check?
# if so, let's get the birth date
person = self.obj
birth_ref = person.get_birth_ref()
if birth_ref:
birth_event = self.db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
if birth_date and not birth_date.get_valid():
return True
return False
def get_message(self):
return _("Invalid birth date")
class InvalidDeathDate(PersonRule):
ID = 30
SEVERITY = Rule.ERROR
def __init__(self, db, person, invdate):
PersonRule.__init__(self, db, person)
self._invdate = invdate
def broken(self):
if not self._invdate: return False # should we check?
# if so, let's get the death date
person = self.obj
death_ref = person.get_death_ref()
if death_ref:
death_event = self.db.get_event_from_handle(death_ref.ref)
death_date = death_event.get_date_object()
if death_date and not death_date.get_valid():
return True
return False
def get_message(self):
return _("Invalid death date")
class MarriedRelation(FamilyRule):
ID = 31
SEVERITY = Rule.WARNING
def __init__(self,db, obj):
FamilyRule.__init__(self,db, obj)
def broken(self):
marr_date = get_marriage_date(self.db,self.obj)
marr_date_ok = marr_date > 0
married = self.obj.get_relationship() == FamilyRelType.MARRIED
        return (not married and marr_date_ok)
def get_message(self):
return _("Marriage date but not married")
class OldAgeButNoDeath(PersonRule):
ID = 32
SEVERITY = Rule.WARNING
def __init__(self,db,person, old_age,est):
PersonRule.__init__(self,db,person)
self.old_age = old_age
self.est = est
def _get_params(self):
return (self.old_age,self.est)
def broken(self):
birth_date = get_birth_date(self.db,self.obj,self.est)
dead = get_death_date(self.db,self.obj,True) # if no death use burial
if dead or not birth_date:
return 0
age = ( _today - birth_date ) / 365
return ( age > self.old_age )
def get_message(self):
return _("Old age but no death")
|
Forage/Gramps
|
gramps/plugins/tool/verify.py
|
Python
|
gpl-2.0
| 55,484
|
[
"Brian"
] |
9a5a7255f3820a70858d5883e6079a27ffaaf83791aa91dae80d3a72eb4b33f4
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import threading
import py_utils
from devil.android import device_utils
from systrace import trace_result
from systrace import tracing_agents
from py_trace_event import trace_time as trace_time_module
TRACE_FILE_PATH = \
'/sdcard/Android/data/org.chromium.latency.walt/files/trace.txt'
CLOCK_DOMAIN_MARKER = '# clock_type=LINUX_CLOCK_MONOTONIC\n'
def try_create_agent(options):
if options.is_walt_enabled:
return WaltAgent()
return None
class WaltConfig(tracing_agents.TracingConfig):
def __init__(self, device_serial_number, is_walt_enabled):
tracing_agents.TracingConfig.__init__(self)
self.device_serial_number = device_serial_number
self.is_walt_enabled = is_walt_enabled
def add_options(parser):
options = optparse.OptionGroup(parser, 'WALT trace options')
options.add_option('--walt', dest='is_walt_enabled', default=False,
action='store_true', help='Use the WALT tracing agent. '
'WALT is a device for measuring latency of physical '
'sensors on phones and computers. '
'See https://github.com/google/walt')
return options
def get_config(options):
return WaltConfig(options.device_serial_number, options.is_walt_enabled)
class WaltAgent(tracing_agents.TracingAgent):
"""
This tracing agent requires the WALT app to be installed on the Android phone,
and requires the WALT device to be attached to the phone. WALT is a device
for measuring latency of physical sensors and outputs on phones and
computers. For more information, visit https://github.com/google/walt
"""
def __init__(self):
super(WaltAgent, self).__init__()
self._trace_contents = None
self._config = None
self._device_utils = None
self._clock_sync_marker = None
self._collection_thread = None
def __repr__(self):
return 'WaltAgent'
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
del timeout # unused
self._config = config
self._device_utils = device_utils.DeviceUtils(
self._config.device_serial_number)
if self._device_utils.PathExists(TRACE_FILE_PATH):
# clear old trace events so they are not included in the current trace
self._device_utils.WriteFile(TRACE_FILE_PATH, '')
return True
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
del timeout # unused
self._collection_thread = threading.Thread(
target=self._collect_trace_data)
self._collection_thread.start()
return True
def _collect_trace_data(self):
self._trace_contents = self._device_utils.ReadFile(TRACE_FILE_PATH)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_clock_sync_callback):
    # /proc/timer_list reports the kernel monotonic clock as
    # "now at <N> nsecs"; grep that line and take the third field below.
    cmd = 'cat /proc/timer_list | grep now'
t1 = trace_time_module.Now()
command_result = self._device_utils.RunShellCommand(cmd, shell=True)
nsec = command_result[0].split()[2]
self._clock_sync_marker = format_clock_sync_marker(sync_id, nsec)
did_record_clock_sync_callback(t1, sync_id)
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
del timeout # unused
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('waltTrace', self._get_trace_result())
def _get_trace_result(self):
result = '# tracer: \n' + CLOCK_DOMAIN_MARKER + self._trace_contents
if self._clock_sync_marker is not None:
result += self._clock_sync_marker
return result
def format_clock_sync_marker(sync_id, nanosec_time):
return ('<0>-0 (-----) [001] ...1 ' + str(float(nanosec_time) / 1e9)
+ ': tracing_mark_write: trace_event_clock_sync: name='
+ sync_id + '\n')
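# Hypothetical smoke test (not in the original module): show what a
# rendered clock-sync marker line looks like for a made-up sync id.
if __name__ == '__main__':
  print(format_clock_sync_marker('abc123', 1234567890))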
|
endlessm/chromium-browser
|
third_party/catapult/systrace/systrace/tracing_agents/walt_agent.py
|
Python
|
bsd-3-clause
| 4,178
|
[
"VisIt"
] |
0a5db0939fda812a023ce0ab76cfeb581d1c9e8acfe7a44cba7e0151761202cb
|
import pdb
import sys, bisect, scipy.stats, gc, os
from string import maketrans
import getNew1000GSNPAnnotations, InBindingSite, GetCNVAnnotations, Mapping2
import binom
MAXREADLEN=75
#tmp_trans={"paternal":0, "maternal":1}
# This is unfortunate; Alex's map files used REF, PAT, MAT, but everywhere else we use paternal, maternal.
hap_trans={"paternal":"PAT", "maternal":"MAT"}
class chromRec(object):
def __init__(self):
self.reads=[]
self.ends=[]
def testOverlap(self, a1, a2, b):
assert a1<a2
return a1<=b and b<a2
def getOverlappingReads(self, pos):
readset = self.reads[bisect.bisect_left(self.ends, pos-MAXREADLEN):bisect.bisect_right(self.ends, pos+MAXREADLEN)]
readset=[r for r in readset if self.testOverlap(int(r[1]), int(r[0]), pos)] # first part only approximate
return readset
def parse_bowtie(f, mapper):
d={}
for l in open(f):
mms=""
vals=l.rstrip().split('\t')
ident, strand, chrom, start, seq, qual, dummy = vals[0:7]
chrom, hap = chrom.split('_')
if len(vals)==8:
mms=vals[7]
nmm=len(mms.split(','))
# end and start are hap coords!
start=int(start)+1 # bowtie is 0-based.
end=start+len(seq)
if not d.has_key(chrom):
d[chrom]=chromRec()
rec=d[chrom]
# now we sort by end in hap coords. Both hap and ref coords are included in the record.
ref_start=mapper.trans(mapper.mt[hap_trans[hap]], mapper.mt["REF"], start)
ref_end =mapper.trans(mapper.mt[hap_trans[hap]], mapper.mt["REF"], end)
if ref_start!=0 and ref_end!=0:
rec.reads.append((ref_end, ref_start, end, start, nmm, seq, strand, ident, hap))
for rec in d.itervalues():
rec.reads.sort()
rec.ends=[e[0] for e in rec.reads]
return d
# FIX This is currently broken
def parse_eland(f):
d={}
for l in open(f):
elems=l.rstrip().split()
ident, seq, mapcode = elems[0:3]
if mapcode in ['U0', 'U1', 'U2']:
chrom, pos, strand = elems[6:9]
pos=int(pos)
end=pos+len(seq)
if not d.has_key(chrom):
d[chrom]=chromRec()
rec=d[chrom]
rec.reads.append((end, pos, mapcode, seq, strand, ident))
for rec in d.itervalues():
rec.reads.sort()
rec.ends=[e[0] for e in rec.reads]
return d
#NOTE: in the output P->Father, M->Mother, C->child
THRESH1=0.90
THRESH2=0.05
SYMMETRIC="Sym"
ASYMMETRIC="Asym"
HOMOZYGOUS="Homo"
WEIRD="Weird"
tbl={
'a':('a','a'),
'c':('c','c'),
'g':('g','g'),
't':('t','t'),
'r':('a','g'),
'y':('c','t'),
's':('c','g'),
'w':('a','t'),
'k':('g','t'),
'm':('a','c')
}
def convert(a):
return tbl[a.lower()]
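# The table above decodes IUPAC ambiguity codes into the two underlying
# alleles; a couple of illustrative calls:
#   convert('R') == ('a', 'g')   # purine, A or G (het site)
#   convert('a') == ('a', 'a')   # unambiguous, homozygous call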
def testCounts(counts, snp):
winningParent='?'
snpchr, snppos, snprec = snp
mat_genotype, pat_genotype, child_genotype, mat_allele, pat_allele, typ, ref, hetSNP = snprec
# first, make sure that the expected alleles are the bulk of the counts
total = counts['a']+counts['c']+counts['g']+counts['t']
a1,a2=convert(child_genotype)
if a1==a2:
allelecnts = counts[a1]
else:
allelecnts = counts[a1]+counts[a2]
both=counts[a1]+counts[a2]
sortedCounts=sorted([(counts['a'], 'a'), (counts['c'],'c'), (counts['g'], 'g'), (counts['t'], 't')], reverse=True)
majorAllele=sortedCounts[0][1]
smaller=min(counts[a1], counts[a2])
#pval=binomialDist.cdf(smaller, both, 0.5)*2 # This had problems for large sample sizes. Switched to using scipy
pval = binom.binomtest(smaller, both, 0.5) # scipy.binom_test was unstable for large counts
if float(allelecnts)/total < THRESH1:
print >>LOGFP, "WARNING %s:%d failed thresh 1 %d %d" % (snpchr, snppos, allelecnts, total)
return (WEIRD, pval, a1, a2, counts, winningParent)
# if the snp was phased
if mat_allele and pat_allele:
if mat_allele.lower()==majorAllele.lower():
winningParent='M'
elif pat_allele.lower()==majorAllele.lower():
winningParent='P'
else:
winningParent='?'
if a1!=a2:
# we expect roughly 50/50.
if pval < THRESH2:
print >>LOGFP, "NOTE %s:%d Looks interesting: failed thresh 2 %d %d %f" % (snpchr, snppos, both, smaller, pval)
print >>LOGFP, "SNPS %s/%s, COUNTS a:%d c:%d g:%d t:%d" % (a1, a2, counts['a'], counts['c'], counts['g'], counts['t'])
print >>LOGFP, "Phasing P:%s M:%s D:%s" % (pat_allele, mat_allele, snprec)
print >>LOGFP, "\n"
return (ASYMMETRIC, pval, a1, a2, counts, winningParent)
else:
return (SYMMETRIC, pval, a1, a2, counts, winningParent)
else:
return (HOMOZYGOUS, pval, a1, a2, counts, winningParent)
TABLE=maketrans('ACGTacgt', 'TGCAtgca')
def reverseComplement(seq):
tmp=seq[::-1]
return tmp.translate(TABLE)
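# Quick sanity check (illustrative): the sequence is reversed, then each base
# is complemented through TABLE, preserving case:
#   reverseComplement('AACG') == 'CGTT'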
def doChrom(chrm, readfile, maptmplt):
hetSnps=0
interestingSnps=0
readlen=None
g=_1000G.getAnnotationsGenerator(chrm) #'22'
# FIX abs path
mapper = Mapping2.Mapping(maptmplt % ('chr%s' % chrm))
try:
reads=parse_bowtie(readfile, mapper)
except IOError:
print >> sys.stderr, "Failed to open %s, skipping" % readfile
return
for snp in g: # skip non het?
KSVals=[]
counts={'a':0, 'c':0, 'g':0, 't':0, 'n':0}
snpchrm, snppos, snprec = snp
mat_genotype, pat_genotype, child_genotype, mat_allele, pat_allele, typ, ref, hetSNP = snprec
if not hetSNP:
print >>LOGFP, "Position %s %d failed het test" % (chrm, snppos)
continue # skip non-Het snps
snppos=int(snppos)
chrrec=reads['chr%s'%chrm] # 'chr22.fa'
readset=chrrec.getOverlappingReads(snppos)
if len(readset)<mindepth:
print >>LOGFP, "Position %s %d failed depth test with %d" % (chrm, snppos, len(readset))
continue
passed=0
for read in readset:
end, start, hap_end, hap_start, nmm, seq, strand, ident, hap = read
readlen=len(seq)
start=int(start)
end=int(end)
hap_start=int(hap_start)
hap_end=int(hap_end)
hap_snppos=mapper.trans(mapper.mt["REF"], mapper.mt[hap_trans[hap]], snppos)
if hap_snppos==0: # This hap occurs in a gap in this haplotype.
continue
            # bowtie appears to handle this internally
'''
if strand=='-':
seq=reverseComplement(seq)
KSVals.append(end)
else:
KSVals.append(start)
'''
# changed to use % location of snppos within read
p=float(hap_snppos-hap_start)/readlen
assert(p>=0.0 and p<=1.0)
KSVals.append(p)
allele=seq[hap_snppos-hap_start]
allele=allele.lower()
counts[allele]=counts[allele]+1
passed+=1
if passed==0: continue # FIX not needed??
np=1.0
if len(KSVals):
nd, np = scipy.stats.kstest(KSVals, 'uniform', (0.0, 1.0))
t, pval, a1, a2, counts, winningParent = testCounts(counts, snp)
if t==ASYMMETRIC or t==SYMMETRIC:
hetSnps+=1
if t==ASYMMETRIC:
interestingSnps+=1
if BShandler:
inBS=1 if BShandler.check("chr%s"%chrm, snppos) else 0
else:
inBS=-1
cnv=CNVhandler.getAnnotation("chr%s"%chrm, snppos)
if cnv:
cnv=cnv[2]
else:
cnv='1.0'
print >>OUTFP, "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%d\t%d\t%s\t%s\t%f\t%f\t%d\t%s\t%s" % (chrm, snppos, ref, mat_genotype, pat_genotype, child_genotype, typ, mat_allele, pat_allele, counts['a'], counts['c'], counts['g'], counts['t'], winningParent, t, pval, np, inBS, "DUMMY", cnv)
print >>KSFP, "%s,%d,%d,%d,%s" % (chrm, snppos, len(KSVals), readlen, ",".join(map(str, KSVals)))
OUTFP.flush()
chrms=[str(c) for c in range(1, 23)] + ['X'] # +['X', 'Y', 'M']
#chrms=['22']
USAGE="%s mindepth snpfile readfiletmplt maptmplt bindingsites cnvfile outfile logfile ksfile"
if __name__=='__main__':
if len(sys.argv) != 10:
print USAGE % sys.argv[0]
sys.exit(-1)
mindepth=int(sys.argv[1])
snpfile=sys.argv[2]
readfiletmplt=sys.argv[3]
maptmplt=sys.argv[4]
BindingSitefile=sys.argv[5]
CNVFile=sys.argv[6]
OUTFP = open(sys.argv[7], 'w')
LOGFP = open(sys.argv[8], 'w')
KSFP = open(sys.argv[9], 'w')
gc.disable()
print >>OUTFP, '\t'.join(('chrm', 'snppos ', 'ref', 'mat_gtyp', 'pat_gtyp', 'c_gtyp', 'phase', 'mat_all', 'pat_all', 'cA', 'cC', 'cG', 'cT', 'winning', 'SymCls', 'SymPval', 'KSPval', 'BindingSite', 'SymQval', 'cnv'))
_1000G=getNew1000GSNPAnnotations.Handler(snpfile, hasHeader=True)
# absence of bs file signals that we don't do this test
if os.access(BindingSitefile, os.R_OK):
BShandler=InBindingSite.BSHandler(BindingSitefile)
else:
BShandler=None
CNVhandler=GetCNVAnnotations.Handler(CNVFile)
for chrm in chrms:
try:
readfile=readfiletmplt%chrm
except TypeError:
readfile=readfiletmplt
doChrom(chrm, readfile, maptmplt)
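# Example invocation (hypothetical file names, following USAGE above; the
# read and map arguments are %s templates filled in per chromosome):
#   python GetSnpCounts.py 8 snps.txt reads.%s.bowtie map.%s.txt \
#       bindingsites.txt cnv.txt counts.out run.log ks.csv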
|
gersteinlab/AlleleDB
|
alleledb_pipeline/GetSnpCounts.py
|
Python
|
cc0-1.0
| 9,440
|
[
"Bowtie"
] |
e828f4a3436d5d8c1a9f2366a30a1fa56759564bcfc0b81dc080d63d2ac6816b
|
#!/usr/bin/env python
"""
An animated image of current velocities interpolated from an SGrid (ROMS) dataset.
"""
from copy import copy
import netCDF4 as nc4
import numpy as np
import pysgrid
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FFMpegWriter
from matplotlib.widgets import Slider, Button, RadioButtons
from pysgrid.processing_2d import rotate_vectors, vector_sum
import cartopy.crs as ccrs
from cartopy.io import shapereader
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
url = (
'http://geoport-dev.whoi.edu/thredds/dodsC/clay/usgs/users/zdefne/run076/his/00_dir_roms_display.ncml')
# url =
# ('C:\Users\Jay.Hennen\Documents\Code\pygnome\py_gnome\scripts\script_curv_field\TBOFS.nc')
lons, lats = np.mgrid[-74.38:-74.26:600j, 39.45:39.56:600j]
# lons, lats = np.mgrid[-82.8:-82.5:600j, 27.5:27.75:600j]
maxslice = 3
fps = 10
def interpolated_velocities(grid, points, ind, timeobj, tindex, u, v, depth=-1.):
    '''
    Finds velocities at the points at the time specified, interpolating in 2D
    over the u and v grids and linearly in time.
    :param grid: the SGrid object holding the u and v variables
    :param points: a numpy array of points to find interpolated velocities for
    :param ind: numpy array of cell indices of the points, if already known
    :param timeobj: Time object providing the time interpolation alphas
    :param tindex: floating point index into the time axis
    :return: (u, v) interpolated velocities at the specified points
    '''
t_alphas = timeobj.ialphas(tindex)
t_index = int(np.floor(tindex))
mem = True
_hash = grid._hash_of_pts(points)
u0 = grid.interpolate_var_to_points(points, grid.u, slices=[t_index, -1], memo=mem, _hash=_hash)
u1 = grid.interpolate_var_to_points(points, grid.u, slices=[t_index + 1, -1], memo=mem, _hash=_hash)
v0 = grid.interpolate_var_to_points(points, grid.v, slices=[t_index, -1], memo=mem, _hash=_hash)
v1 = grid.interpolate_var_to_points(points, grid.v, slices=[t_index + 1, -1], memo=mem, _hash=_hash)
u_vels = u0 + (u1 - u0) * t_alphas
v_vels = v0 + (v1 - v0) * t_alphas
return u_vels, v_vels
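# The time handling above is plain linear blending between the two time
# slices bracketing tindex (illustrative arithmetic):
#   tindex = 3.25  ->  t_index = 3, t_alphas = 0.25
#   u = u0 + (u1 - u0) * 0.25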
class Time(object):
def __init__(self, data, base_dt_str=None):
"""
:param data: A netCDF, biggus, or dask source for time data
:return:
"""
self.time = nc4.num2date(data[:], units=data.units)
def ialphas(self, index):
'''
given a floating point index between 0 and max index, give interpolation alphas for that time
'''
        i0 = np.floor(index)
        frac = index - i0
        return frac
def time_str(self, index):
        i0 = int(np.floor(index))
        i1 = int(np.ceil(index))
        frac = index - i0
t0 = self.time[i0]
t1 = self.time[i1]
time = t0 + timedelta(seconds=(t1 - t0).total_seconds() * frac)
return time.strftime('%c')
def f(time):
    '''
    time: float index
    '''
    u_rot, v_rot = interpolated_velocities(
        sgrid, points, ind, timeobj, time, sgrid.u, sgrid.v)
    u_rot, v_rot = rotate_vectors(u_rot, v_rot, angles)
    u_rot = u_rot.reshape(600, -1)
    v_rot = v_rot.reshape(600, -1)
    uv_vector_sum = vector_sum(u_rot, v_rot)
    return uv_vector_sum
def make_map(projection=ccrs.PlateCarree(), figsize=(9, 9)):
fig, ax = plt.subplots(figsize=figsize,
subplot_kw=dict(projection=projection))
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
return fig, ax
nc = nc4.Dataset(url)
timeobj = sgrid = None
if ('ocean_time' in nc.variables.keys()):
timeobj = Time(nc['ocean_time'])
else:
timeobj = Time(nc['time'])
if 'grid' in nc.variables.keys():
sgrid = pysgrid.load_grid(nc)
else:
sgrid = pysgrid.SGrid(node_lon=nc['lon_psi'],
node_lat=nc['lat_psi'],
edge1_lon=nc['lon_u'],
edge1_lat=nc['lat_u'],
edge2_lon=nc['lon_v'],
edge2_lat=nc['lat_v'],
)
sgrid.u = pysgrid.variables.SGridVariable(data=nc['u'])
sgrid.v = pysgrid.variables.SGridVariable(data=nc['v'])
sgrid.angles = pysgrid.variables.SGridVariable(data=nc['angle'])
points = np.stack((lons, lats), axis=-1).reshape(-1, 2)
ind = sgrid.locate_faces(points)
ang_ind = ind + [1, 1]
angles = sgrid.angles[:][ang_ind[:, 0], ang_ind[:, 1]]
# gen_map (below) redraws the pcolormesh, the quiver and the time label for
# each frame; FuncAnimation calls it once per frame index.
fig, ax = make_map()
print fig
print ax
index = 0
ax.coastlines('10m')
t = np.linspace(0, maxslice, maxslice * fps)
cs = qv = tl = None
time_str = timeobj.time_str(0)
tl = ax.text(0, 1, time_str, bbox=dict(
facecolor='white', alpha=0.8), transform=ax.transAxes)
def gen_map(k):
global t, index, cs, qv, tl, timeobj
tindex = t[index]
if cs is not None:
cs.remove()
qv.remove()
time_str = timeobj.time_str(tindex)
tl.set_text(time_str)
mscale = 1
vscale = 15
scale = 0.04
lon_data = lons
lat_data = lats
print tindex
print time_str
u_rot, v_rot = interpolated_velocities(sgrid, points, ind, timeobj, tindex, sgrid.u, sgrid.v)
u_rot, v_rot = rotate_vectors(u_rot, v_rot, angles)
u_rot = u_rot.reshape(600, -1)
v_rot = v_rot.reshape(600, -1)
uv_vector_sum = vector_sum(u_rot, v_rot)
kw = dict(scale=1.0 / scale, pivot='middle', width=0.003, color='black')
cs = plt.pcolormesh(lon_data[::mscale, ::mscale],
lat_data[::mscale, ::mscale],
uv_vector_sum[::mscale, ::mscale], zorder=1, cmap=plt.cm.rainbow)
qv = plt.quiver(lon_data[::vscale, ::vscale], lat_data[::vscale, ::vscale],
u_rot[::vscale, ::vscale], v_rot[::vscale, ::vscale], zorder=2, **kw)
index += 1
return cs, qv, tl
print 'creating animation'
ani = animation.FuncAnimation(
fig, gen_map, frames=maxslice * fps - 1, interval=100, blit=True, repeat=False)
writer = FFMpegWriter(fps=fps, bitrate=1500)
# plt.show()
print 'saving'
ani.save('currents_movie.mp4', writer=writer)
print 'done'
|
NOAA-ORR-ERD/pysgrid
|
demos/demo_matlabanim.py
|
Python
|
bsd-3-clause
| 6,456
|
[
"NetCDF"
] |
44b60a61c9d5a58666dda809870e371bf36c0845145056f72024cc07cc0a8242
|
#! /bin/env python
# -*- coding: utf-8 -*-
# Brief documentation:
# Pathperf client, estimate path BTC from vantage website to this client
# Written and tested under Fedora11 with Python 2.6
# parameter list:
# [-t1/--thread=1] [-i any/--interface=any] [-b/--bypassCDN] [-c/--crawl] [-q/--quota] [-v/--verbose] [-h/--help] IPv4/IPv6_addr
# Copyright [2012] [Kun Yu yukun2005@gmail.com]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys,string,re,threading,subprocess,math,time,socket,logging,getopt,glob,shlex
from urlparse import urlparse
from easyprocess import EasyProcess
import dns.resolver #install dnspython first http://www.dnspython.org/
def usage():
print "usage: python",sys.argv[0],"[-h/--help] [-v/--verbose] [-i any/--interface=any] [-b/bypassCDN] [-c/--crawl] [-q/---quota] [-t1/--thread=1] IP_address\n"
print "Estimate BTC from vantage website to this client."
print "Vantage website is decided by pathperf from any IP address in the same AS."
print "For full description of pathperf, visit http://search.sasm3.net/documentation.html\n"
print "-i: interface tcpdump listens to"
print "-b: bypass Local DNS, use DNS provided by pathperf"
print "-c: use wget as a simple crawler, using tcpdump to estimate bw at the same time"
print "-q: set download quota for wget in crawler mode"
print "-t: number of threads used to download webpage"
print "eg:",sys.argv[0]," 166.111.1.1"
print "or:",sys.argv[0],"-cvt1 -i eth0 166.111.1.1\n"
def dnslookup(querydomain,type='A'):
type=type.upper()
response=[]
resolver = dns.resolver.Resolver()
resolver.nameservers=[socket.gethostbyname('ip2.sasm4.net')]
try:
answer=resolver.query(querydomain,type)
#except dns.exception.DNSException:
except dns.resolver.NXDOMAIN:
#Local DNS may return "NXDOMAIN" error, if this happens, pathperf queries authoritative DNS server for answer
resolver.nameservers=[socket.gethostbyname('ip2.sasm4.net')]
answer=resolver.query(querydomain,type)
#dir(answer)
if(type=='A' or type=='AAAA'):
if( str(answer.response.answer[0][0])[-1]=='.'): #CNAME
response.append(str(answer.response.answer[0][0])[:-1]) #CNAME
else:
response.append(str(answer.response.answer[0][0])) #CNAME
response.append(str(answer.response.answer[1][0])) #A/AAAA
return response
if(type=='TXT'):
response.append(str(answer.response.answer[0][0])) #ASN or URL
return response
def ipv6exp(ip6addr):
"""ipv6 address expanding function
replace :: in an IPv6address with zeros
return the list after split(':')
"""
ast2=ip6addr.count('::')
if(ast2==0): return ip6addr.split(':')
ast1=ip6addr.count(':')-2*ast2
num=7-ast1
i=1
pad=':'
while i<num:
pad=pad+'0:'
i=i+1
ip6full=ip6addr.replace('::',pad)
if ip6full[-1]==':':ip6full=ip6full+'0'
if ip6full[0]==':':ip6full='0'+ip6full
#print ip6full
return ip6full.split(':')
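# Example (illustrative): '2001:db8::1' contains one '::' standing in for six
# zero groups, so it expands to
#   ['2001', 'db8', '0', '0', '0', '0', '0', '1']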
class mea_thread(threading.Thread):
def __init__(self, ip, domain, url, version, verbose, number, bypassCDN, crawl, quota):
threading.Thread.__init__(self)
self.ip=ip
self.domain=domain
self.url=url
self.version=str(int(version))
self.verbose=int(verbose)
self.bypassCDN=int(bypassCDN)
self.crawl=int(crawl)
self.quota=int(quota)
self.number=int(number)
def run(self):
global est_result
realurl=self.url
realip=self.ip
realdomain=self.domain
verbose=self.verbose
number=self.number
name=str(time.time())+'-'+str(number)
filepath=path+os.path.sep+name
if(self.crawl):
cmd = "wget -r -p -e robots=off --delete-after -Q"+str(self.quota)+"m -U Mozilla "
else:
cmd = "wget "
if(self.bypassCDN):
#wget --header="Host: ak.buy.com" http://206.132.122.75/PI/0/500/207502093.jpg
cmd += '--header="Host: '+self.domain+'" '
cmd += '-'+self.version+' -T 10 -t 1 -o '+filepath+'.txt -O '+filepath+'.html http://'
if(self.version=='6'):
cmd=cmd+'['+self.ip+']'
else:
cmd=cmd+self.ip
else:
cmd += '-'+self.version+' -T 10 -t 1 -o '+filepath+'.txt -O '+filepath+'.html http://'\
+self.domain
if not self.crawl:
cmd += self.url
print cmd
stdout=EasyProcess(cmd).call(timeout=15).stdout #timeout after 15s
try:
#print filepath
file=open(filepath+'.txt','r')
log=file.read()
file.close()
except :
logging.error("Reading log failed.\nDownload command: %s",cmd)
est_result=0
exit()
file_list=glob.glob(filepath+'*')
for f in file_list:
os.remove(f)
if verbose: print '-'*70,'\nParsing Wget log:\n'
if(log.count(' saved [')==0):
logging.error("Wget failed to download any files. Wget log:\n%s",log)
est_result=2
exit()
loglist=log.split('\n')
for linenum,logline in enumerate(loglist):
#print logline
if(logline.count('Location: ')): #HTTP redirect
                logging.warning('HTTP redirection found.\nThe file may not come from '
                                'the vantage website designated by pathperf.\nThe result could be inaccurate.')
if(loglist[linenum+1].count('Warning: ')):
nextline=loglist[linenum+2]
else:
nextline=loglist[linenum+1]
nextlist=nextline.split(' ')
realurl=nextlist[1]
try:
urlist=urlparse(nextlist[1])
realurl=urlist.path
logging.warning('Actual URL: %s',nextlist[1])
except:
logging.error("URL extraction error. Wget log:\n%s",logline)
elif(logline.count('Connecting to ') and logline.count('connected')):
#print 'con****, ',logline
if(logline.count(self.ip)==0):
pattern=re.compile('Connecting to (\S+).*\|(\S+)\|\S+.+connected\.')
reout=pattern.search(logline)
if reout:
realdomain=reout.group(1)
realip=reout.group(2)
logging.error('Connection info: %s',logline)
elif(logline.count('saved [')):
#print 'save***, ',logline
a1=logline.split(' saved [')
a2=a1[1].split(']')
if(a2[0].count('/')):
a7=a2[0].split('/')
a2[0]=a7[0]
pagesize=int(a2[0])
a3=a1[0].split('\'')
a4=a3[len(a3)-2].split('`')
a5=a4[0].split('(')
a6=a5[1].split(')')
if(a6[0].count('MB/s')):
a8=a6[0].split(' ')
literal=float(a8[0])*1024*1024
elif(a6[0].count('KB/s')):
a8=a6[0].split(' ')
literal=float(a8[0])*1024
else:
a8=a6[0].split(' ')
literal=float(a8[0])
global total_size,max_time
total_size+=int(pagesize)
down_time=int(pagesize)/literal
if max_time<down_time: max_time=down_time
bandwidth=a6[0]
#print ip4[0],'\n',ipnum,'\n',tmp1,'\n',directory,'\n',asn,'\n',pagesize,'\n',bandwidth
break
else:
continue
if self.verbose:
print "{0}".format(log)
print '-'*70
print "Input website: {0}|{1}|{2}".format(self.domain,self.ip,self.url)
print "Download from: {0}|{1}|{2}".format(realdomain,realip,realurl)
print "Download size: {0}, Bulk Throughput Capacity: {1}".format(pagesize,bandwidth)
'''
if(self.ip==realip):
print "Estimation succeeds!"
else:
print "Estimation fails! Add '-b' and try again."
est_result=0
'''
start=time.time()
path= '.'
'''
#test code
estimation=mea_thread('206.132.122.75','ak.buy.com','/db_assets/large_images/093/207502093.jpg',4) #ip,domain,url,version
estimation.start()
while estimation.isAlive():
time.sleep(0.5)
exit()
'''
if __name__=='__main__':
try:
opts,args = getopt.gnu_getopt(sys.argv[1:],"hvt:i:bcq:",["help", "verbose", "thread=", "interface=", "bypassCDN", "crawl", "quota="])
except getopt.GetoptError as err:
print "\n",str(err),"\n"
usage()
sys.exit(2)
thread = 1
multi_speed = 0
total_size = 0
max_time = 0
verbose = 0
bypassCDN = 0
crawl = 0
quota = 2
interface='any'
ipaddr='166.111.1.1' #default IP address belongs to AS4538
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-v", "--verbose"):
verbose = 1
elif o in ("-t", "--thread"):
thread = int(a)
elif o in ("-i", "--interface"):
interface = a
elif o in ("-b", "--bypassCDN"):
bypassCDN = 1
elif o in ("-c", "--crawl"):
crawl = 1
print "Crawl mode"
elif o in ("-q", "--quota"):
try:
quota = int(a)
except:
quota = 2
if quota>10: quota = 10
else:
assert False, "unhandled option"
if args:
ipaddr=args[0]
try:
socket.inet_pton(socket.AF_INET6,ipaddr)
ipv=6
except:
try:
socket.inet_pton(socket.AF_INET,ipaddr)
ipv=4
except:
logging.error("Invaild IP address.")
sys.exit(1)
else:
logging.error("IP address missing!")
usage()
sys.exit(2)
if ipv==4:
ip4list=ipaddr.split('.')
ip4list.reverse()
#IP address to AS number mapping
#ip4asn='.'.join(ip4list)+'.ip2asn.sasm4.net'
#print dnslookup(ip4asn,'txt')
ip4ser='.'.join(ip4list)+'.ip2server.sasm4.net'
arg1=dnslookup(ip4ser,'a')
ip4url='.'.join(ip4list)+'.ip2url.sasm4.net'
arg2=dnslookup(ip4url,'txt')
else:
ip6list=ipv6exp(ipaddr)
ip6list.reverse()
#IP address to AS number mapping
#ip6asn='.'.join(ip6list)+'.ip6asn.sasm4.net'
#print dnslookup(ip6asn,'txt')
ip6ser='.'.join(ip6list[4:])+'.ip6server.sasm4.net'
arg1=dnslookup(ip6ser,'aaaa')
ip6url='.'.join(ip6list[4:])+'.ip6url.sasm4.net'
arg2=dnslookup(ip6url,'txt')
if arg1[0] in ('Error.','No-IP-Record.','No-Web-Server-in-that-AS.', 'nowebsite.wind.sasm4.net.'):
logging.error("Vantage website localization failed: %s",arg1[0])
sys.exit(3)
thr_list=[]
cmd1 = ['tcpdump', '-i', interface, '-tt','-nn', 'src', arg1[1]]
#print cmd1
#starts tcpdump to capture packets
print '-'*70
est_result=1
filepath=path+os.path.sep+str(time.time())+'.'+'tcpdump.log'
file_handle=open(filepath,'w')
p1 = subprocess.Popen(cmd1, stdout=file_handle, stderr=file_handle)
for i in range(thread):
thr_list.append(mea_thread(arg1[1],arg1[0],arg2[0].strip('"'), ipv, verbose, i, bypassCDN, crawl, quota) ) #ip,domain,url,version,verbose,num
for estimation in thr_list:
estimation.start()
for estimation in thr_list:
while estimation.isAlive():
time.sleep(0.5)
end=time.time()
p1.terminate() #stops tcpdump
file_handle.flush()
file_handle.close()
print '-'*70,'\n'
if(est_result):
search=re.compile('^(\d*\.\d*).*\((\d+)\).*')
#search=re.compile('^(\d*\.\d*).*length\s(\d+)$')
#tcpdump for linux and Mac have different output format
time_list=[];size_list=[]
#for line in iter(p1.stdout.readline, ""):
for line in open(filepath,'r'):
out=search.search(line)
if out:
time_list.append(float(out.group(1)))
size_list.append(int(out.group(2)))
#print out.group(1),out.group(2)
#print time_list
os.remove(filepath)
down_size=sum(size_list)
if(len(time_list)):
time_total=time_list[-1]-time_list[0]
if time_total>4:
part=int(time_total)
else:
part=2
size=int(len(time_list)/part)
bw=[]
for i in range(part):
data_part=sum(size_list[i*size:(i+1)*size])
time_part=time_list[(i+1)*size-1]-time_list[i*size]
if(time_part<0.2):
continue
bw.append(data_part/time_part)
max_bw=max(bw)
if max_bw>1024*1024:
max_bw/=(1024*1024)
unit='MB/s'
elif max_bw>1024:
max_bw/=1024
unit='KB/s'
else:
unit='B/s'
print 'tcpdump statistics: download size',down_size,'B, Peak BTC:',round(max_bw),unit
print '-'*70,'\n'
print "Time used: {0} s".format(round(end-start,2))
|
neoyk/search
|
pathperf_client.py
|
Python
|
apache-2.0
| 14,117
|
[
"VisIt"
] |
8034b6f2b3df3986856eff54f45e5da04416be87454cf8aa5b9fded274c2426c
|
#!/usr/bin/env python
import os
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from ase.io import read
from ase.units import Bohr
from pyDFTutils.ase_utils.ase_utils import symbol_number
from pyDFTutils.ase_utils.kpoints import ir_kpts
from .wannier import read_basis
from pyDFTutils.wannier90 import pythtb_forj as pythtb
from pyDFTutils.vasp.vasp_utils import read_efermi
#import pythtb
from pyDFTutils.plot.wannier_band_plot import plot_band_weight
class anatb():
"""
analyze the tight binding model.
"""
def __init__(self, tbmodel, kpts=None, kweights=None):
self.tbmodel = tbmodel
self.kpts = kpts
self.kweights = kweights
if kpts is not None and kweights is None:
nkpts = len(kpts)
self.kweights = np.ones(nkpts, dtype='float') / nkpts
self.norbs = self.tbmodel.get_num_orbitals()
def calc_cohp_k(self, ham_k, evec_kj):
"""
calculate COHP for a wavefunction at a point.
Parameters:
--------------
ham_k: The hamiltonian at k. (ndim*ndim), where ndim is the number of wannier functions.
evec_kj: the jth eigenvector at k point
Return:
--------------
a matrix huv, u&v are the indices of wannier functions.
"""
cohp_kj = np.outer(np.conj(evec_kj), evec_kj) * ham_k
#cohp_kj = np.outer(evec_kj, evec_kj) * ham_k
return np.real(cohp_kj)
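    # In index form, the outer product above computes (sketch for clarity):
    #   COHP_uv(k, j) = Re[ conj(c_u) * c_v * H_uv(k) ]
    # where c is the j-th eigenvector at k and H(k) the k-space Hamiltonian.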
def calc_cohp_allk(self, kpts=None, iblock=None, jblock=None):
"""
calculate all COHPs.
"""
nkpts = len(self.kpts)
if iblock is None:
iblock = range(self.norbs)
if jblock is None:
jblock = range(self.norbs)
self.evals = np.zeros(( self.norbs,nkpts))
self.cohp = np.zeros((nkpts, self.norbs, len(iblock), len(jblock)))
## [ikpt,iband, iorb, iorb]
for ik, k in enumerate(self.kpts):
ham_k = self.tbmodel._gen_ham(k)
evals_k, evecs_k = self.tbmodel._sol_ham(ham_k, eig_vectors=True)
self.evals[:,ik] = evals_k
## for each kpt,band there is a cohp matrix.
for iband in range(self.norbs):
## note that evec[i,:] is the ith eigenvector
evec = evecs_k[iband, :]
self.cohp[ik, iband] = self.calc_cohp_k(ham_k, evec)
return self.cohp
def get_cohp_pair(self, i, j):
return self.cohp[:, :, i, j]
def get_cohp_block_pair(self, iblock, jblock):
iblock = np.array(iblock, dtype=int)
jblock = np.array(jblock, dtype=int)
iiblock=np.array(list(set(iblock)&set(jblock)),dtype=int) # for removing diagonal terms.
I, J = np.meshgrid(iblock, jblock)
# print(self.cohp.shape)
#return np.sum(self.cohp[:, :, I, J], axis=(2,3))
return np.einsum('ijkl->ij', self.cohp[:,:,I,J]) - np.einsum('ijk->ij',
self.cohp[:,:,iiblock,iiblock])
def get_cohp_all_pair(self):
return self.get_cohp_block_pair(range(self.norbs), range(self.norbs))
    def get_cohp_density(self, kpts=None, kweights=None, emin=-20, emax=20):
        """
        cohp(E)= sum_k cohp(k) (\delta(Ek-E))
        """
        if kpts is None:
            kpts = self.kpts
        if kweights is None:
            kweights = self.kweights
        # not implemented yet: fail loudly rather than silently returning None
        raise NotImplementedError(
            'COHP density has not been implemented yet.')
def get_COHP_energy(self):
"""
COHP as function of energy.
"""
# raise NotImplementedError('COHP density has not been implemented yet.')
pass
def plot_COHP_fatband(self,kpts=None,k_x=None,iblock=None,jblock=None,show=False,efermi=None, axis=None,**kwargs):
self.calc_cohp_allk(kpts=kpts)
if iblock is None:
wks = self.get_cohp_all_pair()
else:
wks = self.get_cohp_block_pair(iblock,jblock)
wks = np.moveaxis(wks, 0, -1)
kslist = [k_x] * self.norbs
ekslist = self.evals
axis = plot_band_weight(
kslist,
ekslist,
wkslist=wks,
efermi=efermi,
yrange=None,
style='color',
color='blue',
width=10,
axis=axis,
**kwargs)
axis.set_ylabel('Energy (eV)')
if show:
plt.show()
return axis
class wann_ham():
def __init__(self,
path,
atoms=None,
min_hopping_norm=1e-3,
max_distance=None,
nelect=0,
efermi=0):
self.path = path
self.tb_up = pythtb.w90(path, 'wannier90.up')
self.tb_dn = pythtb.w90(path, 'wannier90.dn')
self.tbmodel_up = self.tb_up.model(
min_hopping_norm=min_hopping_norm, max_distance=max_distance)
self.tbmodel_dn = self.tb_dn.model(
min_hopping_norm=min_hopping_norm, max_distance=max_distance)
self.efermi = efermi
self.nelect = nelect
self.atoms = atoms
self.basis = None
self.kpts = None
self.kweights = None
basis_fname = os.path.join(self.path, 'basis.txt')
if os.path.exists(basis_fname):
self.basis = read_basis(basis_fname)
self.nwann = len(self.basis.keys())
self.block_dict = basis_to_block(self.basis)
def set_kpts(self, mpgrid=[6, 6, 6], ir=False):
"""
set the kpoint mesh.
"""
assert (
self.atoms is not None
), 'should set atomic structure first (use self.set_atoms(atoms)).'
self.kpts, self.kweights = ir_kpts(
self.atoms, mpgrid, is_shift=[0, 0, 0], ir=ir)
def set_basis(self, basis):
self.basis = basis
def set_atoms(self, atoms):
"""
set the atomic structure.
"""
self.atoms = atoms
def set_efermi(self, efermi):
self.efermi = efermi
def write_system(self):
"""
write system.am
"""
# 1. hash. removed from exchange code
text = '&hash\n' + ' 0\n\n'
# 2: cell.
cell_text = '&cell\n'
cell_text += '1.0\n'
cell = self.atoms.get_cell()
for l in cell:
cell_text += ' ' + ' '.join(
['%.5f' % (x / Bohr) for x in l]) + '\n'
text += cell_text + '\n'
# 3. positions.
atoms_text = '&atoms\n'
atoms_text += str(len(self.atoms)) + '\n'
sdict = symbol_number(self.atoms)
for s in sdict:
atoms_text += s + ' '
pos = self.atoms.get_positions()[sdict[s]] / Bohr
atoms_text += ' '.join(['%.5f' % x for x in pos]) + '\n'
text += atoms_text + '\n'
#4. nelect
text += '&nelec\n %s\n\n' % (self.nelect)
#5. efermi
text += '&efermi\n %s\n\n' % (self.efermi)
#6. basis
basis_text = '&basis\n'
## number of wannier basis, number of atoms with wannier functions.
basis_text += str(self.nwann) + ' ' + str(
len(self.block_dict.keys())) + '\n'
block_dict = self.block_dict
block_start = 1
for key in block_dict:
atom_sym, l_sym = key
atom_num = sdict[atom_sym] + 1
orbitals = block_dict[key]
block_dim = len(orbitals)
basis_text += '%s %s %s %s %s ' % (
atom_sym, atom_num, l_sym, block_dim, block_start) + ' '.join(
[str(orb) for orb in orbitals]) + '\n'
block_start += block_dim
text += basis_text
print(text)
with open('system.am', 'w') as myfile:
myfile.write(text)
def write_hamil(self):
"""
write 'hamilt.am'
"""
text = ''
## 1. hash
text += '&hash\n 0\n\n'
## 2. spin
text += '&nspin\n2\n\n'
## 3. number of kpts.
text += '&nkp\n %s\n\n' % (len(self.kpts))
## 4. number of wannier basis.
text += '&dim\n %s\n\n' % (self.nwann)
## 5. kpoints
text += '&kpoints\n'
for w, k in zip(self.kweights, self.kpts):
text += ' %s ' % (w) + ' '.join(map(str, k)) + '\n'
text += '\n'
## 6. hamiltonian.
text += '&hamiltonian\n'
print(text)
for model in [self.tbmodel_up, self.tbmodel_dn]:
for k in self.kpts:
ham = model._gen_ham(k)
for i in range(self.nwann):
for j in range(i, self.nwann):
hij = ham[i, j]
text += '%s %s\n' % (hij.real, hij.imag)
with open('hamilt.am', 'w') as myfile:
myfile.write(text)
def write_input(self):
"""
write input file to exchanges.def.in
"""
with open('exchange.def.in', 'w+') as myfile:
myfile.write("""&exchanges
emin = -15
emax = 0.1
height = 0.1
nz1 = 250
nz2 = 450
nz3 = 250
mode = 'distance'
distance = 1.5
/
""")
def run_exchange(self):
os.system('exchanges.x < exchange.def.in | tee exchanges.def.out')
def basis_to_block(bdict):
orbs = [
's', 'y', 'z', 'x', 'xy', 'yz', '3z^2-1', 'xz', 'x^2-y^2',
'y(3x^2-y^2)', 'xyz', 'y(5z^2-1)', 'z(5z^2-3)', 'x(5z^2-1)',
'z(x^2-y^2)', 'x(3y^2-x^2)'
]
orbs_name = [
's', 'py', 'pz', 'px', 'dxy', 'dyz', 'dz2', 'dxz', 'dx2-y2',
'fy(3x^2-y^2)', 'fxyz', 'fy(5z^2-1)', 'fz(5z^2-3)', 'fx(5z^2-1)',
'fz(x^2-y^2)', 'fx(3y^2-x^2)'
]
orbs_dict = OrderedDict()
for i, o in enumerate(orbs_name):
orbs_dict[o] = i + 1
blocks_dict = OrderedDict()
for symbol in bdict:
atoms_sym, orb_sym, _, _ = symbol.split('|')
l_sym = orb_sym[0]
key = (atoms_sym, l_sym)
if key in blocks_dict:
blocks_dict[key].append(orbs_dict[orb_sym])
else:
blocks_dict[key] = [orbs_dict[orb_sym]]
return blocks_dict
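# Example (illustrative): basis entries naming two Fe d orbitals, e.g.
# 'Fe1|dxy|...' and 'Fe1|dz2|...', map to 1-based indices into orbs_name,
# grouped per (atom, l):
#   OrderedDict([(('Fe1', 'd'), [5, 7])])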
#print(basis_to_block(read_basis('basis.txt')))
def exchange():
    atoms = read('./POSCAR')
    efermi = read_efermi('SCF/OUTCAR')
    exchTB = wann_ham('./', nelect=164, efermi=efermi)
    exchTB.set_atoms(atoms)
    exchTB.set_kpts(mpgrid=[5, 5, 5])
    exchTB.write_system()
    exchTB.write_input()
    exchTB.write_hamil()
    exchTB.run_exchange()
|
mailhexu/pyDFTutils
|
pyDFTutils/wannier90/wann_ham.py
|
Python
|
lgpl-3.0
| 10,371
|
[
"ASE",
"VASP",
"Wannier90"
] |
c6f4dc76a6900a009f6337ddabb58eb2e4667ef913c0a31c2316f898c077ab1c
|
""" DIRAC FileCatalog Database """
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryMetadata import DirectoryMetadata
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileMetadata import FileMetadata
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectorySimpleTree import DirectorySimpleTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryNodeTree import DirectoryNodeTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryLevelTree import DirectoryLevelTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryFlatTree import DirectoryFlatTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.WithFkAndPs.DirectoryClosure import DirectoryClosure
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerFlat import FileManagerFlat
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManager import FileManager
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.WithFkAndPs.FileManagerPs import FileManagerPs
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.SEManager import SEManagerCS,SEManagerDB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.SecurityManager import NoSecurityManager, DirectorySecurityManager, FullSecurityManager, DirectorySecurityManagerWithDelete, PolicyBasedSecurityManager
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.UserAndGroupManager import UserAndGroupManagerCS,UserAndGroupManagerDB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DatasetManager import DatasetManager
#############################################################################
class FileCatalogDB( DB ):
def __init__( self, databaseLocation = 'DataManagement/FileCatalogDB' ):
""" Standard Constructor
"""
# The database location can be specified in System/Database form or in just the Database name
# in the DataManagement system
db = databaseLocation
if db.find( '/' ) == -1:
db = 'DataManagement/' + db
DB.__init__( self, 'FileCatalogDB', db )
def setConfig( self, databaseConfig ):
self.directories = {}
# In memory storage of the various parameters
self.users = {}
self.uids = {}
self.groups = {}
self.gids = {}
self.seNames = {}
self.seids = {}
self.seDefinitions = {}
# Obtain some general configuration of the database
self.uniqueGUID = databaseConfig['UniqueGUID']
self.globalReadAccess = databaseConfig['GlobalReadAccess']
self.lfnPfnConvention = databaseConfig['LFNPFNConvention']
if self.lfnPfnConvention == "None":
self.lfnPfnConvention = False
self.resolvePfn = databaseConfig['ResolvePFN']
self.umask = databaseConfig['DefaultUmask']
self.validFileStatus = databaseConfig['ValidFileStatus']
self.validReplicaStatus = databaseConfig['ValidReplicaStatus']
self.visibleFileStatus = databaseConfig['VisibleFileStatus']
self.visibleReplicaStatus = databaseConfig['VisibleReplicaStatus']
try:
# Obtain the plugins to be used for DB interaction
self.ugManager = eval( "%s(self)" % databaseConfig['UserGroupManager'] )
self.seManager = eval( "%s(self)" % databaseConfig['SEManager'] )
self.securityManager = eval( "%s(self)" % databaseConfig['SecurityManager'] )
self.dtree = eval( "%s(self)" % databaseConfig['DirectoryManager'] )
self.fileManager = eval( "%s(self)" % databaseConfig['FileManager'] )
self.datasetManager = eval( "%s(self)" % databaseConfig['DatasetManager'] )
self.dmeta = eval( "%s(self)" % databaseConfig['DirectoryMetadata'] )
self.fmeta = eval( "%s(self)" % databaseConfig['FileMetadata'] )
except Exception, x:
gLogger.fatal( "Failed to create database objects", x )
return S_ERROR( "Failed to create database objects" )
return S_OK()
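  # A minimal databaseConfig sketch (hypothetical values; the real dictionary
  # is assembled from the CS by the FileCatalog service). Each manager entry
  # names one of the classes imported above and is instantiated via eval():
  #   databaseConfig = {
  #     'UniqueGUID': True, 'GlobalReadAccess': True,
  #     'LFNPFNConvention': 'Strong', 'ResolvePFN': True,
  #     'DefaultUmask': 0775,
  #     'ValidFileStatus': ['AprioriGood'], 'ValidReplicaStatus': ['AprioriGood'],
  #     'VisibleFileStatus': ['AprioriGood'], 'VisibleReplicaStatus': ['AprioriGood'],
  #     'UserGroupManager': 'UserAndGroupManagerDB', 'SEManager': 'SEManagerDB',
  #     'SecurityManager': 'NoSecurityManager', 'DirectoryManager': 'DirectoryLevelTree',
  #     'FileManager': 'FileManager', 'DatasetManager': 'DatasetManager',
  #     'DirectoryMetadata': 'DirectoryMetadata', 'FileMetadata': 'FileMetadata',
  #   }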
def setUmask( self, umask ):
self.umask = umask
########################################################################
#
# SE based write methods
#
def addSE( self, seName, credDict ):
"""
Add a new StorageElement
    :param str seName: name of the StorageElement
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.seManager.addSE( seName )
def deleteSE( self, seName, credDict ):
"""
Delete a StorageElement
    :param str seName: name of the StorageElement
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.seManager.deleteSE( seName )
########################################################################
#
# User/groups based write methods
#
def addUser( self, userName, credDict ):
"""
Add a new user
    :param str userName: name of the user
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.addUser( userName )
def deleteUser( self, userName, credDict ):
"""
Delete a user
    :param str userName: name of the user
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.deleteUser( userName )
def addGroup( self, groupName, credDict ):
"""
Add a new group
    :param str groupName: name of the group
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.addGroup( groupName )
def deleteGroup( self, groupName, credDict ):
"""
Delete a group
    :param str groupName: name of the group
    :param dict credDict: credentials of the caller
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.deleteGroup( groupName )
########################################################################
#
# User/groups based read methods
#
def getUsers( self, credDict ):
"""
Returns the list of users
    :param dict credDict: credentials of the caller
    :return: dictionary indexed on the user name
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.getUsers()
def getGroups( self, credDict ):
"""
Returns the list of groups
    :param dict credDict: credentials of the caller
    :return: dictionary indexed on the group name
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
return self.ugManager.getGroups()
########################################################################
#
# Path based read methods
#
def exists(self, lfns, credDict):
res = self._checkPathPermissions( 'exists', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
res = self.fileManager.exists( res['Value']['Successful'] )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
notExist = []
for lfn in res['Value']['Successful'].keys():
if not successful[lfn]:
notExist.append( lfn )
successful.pop( lfn )
if notExist:
res = self.dtree.exists( notExist )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
return S_OK( {'Successful':successful,'Failed':failed} )
def getPathPermissions(self, lfns, credDict):
""" Get permissions for the given user/group to manipulate the given lfns
"""
return self.securityManager.getPathPermissions( lfns.keys(), credDict )
def hasAccess( self, opType, paths, credDict ):
""" Get permissions for the given user/group to execute the given operation
on the given paths
returns Successful dict with True/False
"""
return self.securityManager.hasAccess( opType, paths, credDict )
########################################################################
#
  # Path based write methods
#
def changePathOwner( self, paths, credDict, recursive = False ):
""" Bulk method to change Owner for the given paths
:param dict paths: dictionary < lfn : owner >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions( 'changePathOwner', paths, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction( res['Value']['Successful'], credDict,
self.dtree.changeDirectoryOwner,
self.fileManager.changeFileOwner,
recursive = recursive )
failed.update( result['Value']['Failed'] )
successful = result['Value']['Successful']
return S_OK( { 'Successful':successful, 'Failed':failed } )
def changePathGroup( self, paths, credDict, recursive = False ):
""" Bulk method to change Group for the given paths
:param dict paths: dictionary < lfn : group >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions( 'changePathGroup', paths, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction( res['Value']['Successful'], credDict,
self.dtree.changeDirectoryGroup,
self.fileManager.changeFileGroup,
recursive = recursive )
failed.update( result['Value']['Failed'] )
successful = result['Value']['Successful']
return S_OK( { 'Successful':successful, 'Failed':failed } )
def changePathMode( self, paths, credDict, recursive = False ):
""" Bulk method to change Mode for the given paths
:param dict paths: dictionary < lfn : mode >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions( 'changePathMode', paths, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction( res['Value']['Successful'], credDict,
self.dtree.changeDirectoryMode,
self.fileManager.changeFileMode,
recursive = recursive )
failed.update( result['Value']['Failed'] )
successful = result['Value']['Successful']
return S_OK( { 'Successful':successful, 'Failed':failed } )
def __changePathFunction( self, paths, credDict,
change_function_directory,
change_function_file,
recursive = False ):
""" A generic function to change Owner, Group or Mode for the given paths
:param dict paths: dictionary < lfn : parameter_value >
:param dict credDict: dictionary of the caller credentials
:param function change_function_directory: function to change directory parameters
:param function change_function_file: function to change file parameters
:param boolean recursive: flag to apply the operation recursively
"""
dirList = []
result = self.isDirectory( paths, credDict )
if not result['OK']:
return result
for di in result['Value']['Successful']:
if result['Value']['Successful'][di]:
dirList.append( di )
fileList = []
if len( dirList ) < len( paths ):
result = self.isFile( paths, credDict )
if not result['OK']:
return result
for fi in result['Value']['Successful']:
if result['Value']['Successful'][fi]:
fileList.append( fi )
successful = {}
failed = {}
dirArgs = {}
fileArgs = {}
for path in paths:
if ( not path in dirList ) and ( not path in fileList ):
failed[path] = 'No such file or directory'
if path in dirList:
dirArgs[path] = paths[path]
elif path in fileList:
fileArgs[path] = paths[path]
if dirArgs:
result = change_function_directory( dirArgs, recursive = recursive )
if not result['OK']:
return result
successful.update( result['Value']['Successful'] )
failed.update( result['Value']['Failed'] )
if fileArgs:
result = change_function_file( fileArgs )
if not result['OK']:
return result
successful.update( result['Value']['Successful'] )
failed.update( result['Value']['Failed'] )
return S_OK( {'Successful':successful, 'Failed':failed} )
########################################################################
#
# File based write methods
#
def addFile( self, lfns, credDict ):
"""
Add a new File
    :param dict lfns: indexed on the file LFN; the values are dictionaries which contain
                      the attributes of the files (PFN, SE, Size, GUID, Checksum)
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'addFile', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.addFile(res['Value']['Successful'],credDict)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
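  # All bulk methods in this class share the Successful/Failed contract above;
  # a caller-side sketch (hypothetical LFN and attribute values):
  #   res = fcDB.addFile( { '/vo/user/file.txt': { 'PFN': 'pfn', 'SE': 'Disk-SE',
  #                                                'Size': 42, 'GUID': guid,
  #                                                'Checksum': cks } }, credDict )
  #   if res['OK']:
  #     ok, bad = res['Value']['Successful'], res['Value']['Failed']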
def setFileStatus( self, lfns, credDict ):
"""
Set the status of a File
:param dict lfns: dict indexed on the LFNs. The values are the status (should be in config['ValidFileStatus'])
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'setFileStatus', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.setFileStatus( res['Value']['Successful'], credDict )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def removeFile( self, lfns, credDict ):
"""
Remove files
:param list lfns: list of LFNs to remove
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'removeFile', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.removeFile(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def addReplica( self, lfns, credDict ):
"""
Add a replica to a File
:param dict lfns: keys are LFN. The values are dict with key PFN and SE
(e.g. {myLfn : {"PFN" : "myPfn", "SE" : "mySE"}})
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'addReplica', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.addReplica(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def removeReplica( self, lfns, credDict ):
"""
Remove replicas
:param dict lfns: keys are LFN. The values are dict with key PFN and SE
(e.g. {myLfn : {"PFN" : "myPfn", "SE" : "mySE"}})
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'removeReplica', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.removeReplica(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def setReplicaStatus( self, lfns, credDict ):
"""
Set the status of a Replicas
:param dict lfns: dict indexed on the LFNs. The values are dict with keys
"SE" and "Status" (that has to be in config['ValidReplicaStatus'])
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'setReplicaStatus', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.setReplicaStatus(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def setReplicaHost(self, lfns, credDict):
res = self._checkPathPermissions( 'setReplicaHost', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.setReplicaHost(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def addFileAncestors( self, lfns, credDict ):
""" Add ancestor information for the given LFNs
"""
res = self._checkPathPermissions( 'addFileAncestors', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.addFileAncestors(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
########################################################################
#
# File based read methods
#
def isFile( self, lfns, credDict ):
"""
    Checks whether a list of LFNs are files or not
    :param list lfns: list of LFNs to check
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
The values of the successful dict are True or False whether it's a file or not
"""
res = self._checkPathPermissions( 'isFile', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.isFile(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def getFileSize( self, lfns, credDict ):
"""
Gets the size of a list of lfns
    :param list lfns: list of LFNs to check
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'getFileSize', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getFileSize(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def getFileMetadata( self, lfns, credDict ):
"""
Gets the metadata of a list of lfns
    :param list lfns: list of LFNs to check
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'getFileMetadata', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getFileMetadata( res['Value']['Successful'] )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def getReplicas( self, lfns, allStatus, credDict ):
"""
Gets the list of replicas of a list of lfns
    :param list lfns: list of LFNs to check
    :param allStatus: whether all replica statuses are returned, or only those in config['ValidReplicaStatus']
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
Successful is indexed on the LFN, and the values are dictionary with the SEName as keys
"""
res = self._checkPathPermissions( 'getReplicas', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getReplicas(res['Value']['Successful'],allStatus=allStatus)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed, 'SEPrefixes': res['Value'].get( 'SEPrefixes', {} ) } )
def getReplicaStatus( self, lfns, credDict ):
"""
Gets the status of a list of replicas
:param dict lfns: <lfn, se name>
    :param dict credDict: credentials of the caller
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'getReplicaStatus', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getReplicaStatus(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful,'Failed':failed} )
def getFileAncestors(self, lfns, depths, credDict):
res = self._checkPathPermissions( 'getFileAncestors', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getFileAncestors(res['Value']['Successful'],depths)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful,'Failed':failed} )
def getFileDescendents(self, lfns, depths, credDict):
res = self._checkPathPermissions( 'getFileDescendents', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.fileManager.getFileDescendents(res['Value']['Successful'],depths)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def getFileDetails( self, lfnList, credDict ):
""" Get all the metadata for the given files
"""
connection = False
result = self.fileManager._findFiles( lfnList, connection = connection )
if not result['OK']:
return result
resultDict = {}
fileIDDict = {}
lfnDict = result['Value']['Successful']
for lfn in lfnDict:
fileIDDict[lfnDict[lfn]['FileID']] = lfn
result = self.fileManager._getFileMetadataByID( fileIDDict.keys(), connection = connection )
if not result['OK']:
return result
for fileID in result['Value']:
resultDict[ fileIDDict[fileID] ] = result['Value'][fileID]
result = self.fmeta._getFileUserMetadataByID( fileIDDict.keys(), credDict, connection = connection )
if not result['OK']:
return result
for fileID in fileIDDict:
resultDict[ fileIDDict[fileID] ].setdefault( 'Metadata', {} )
if fileID in result['Value']:
resultDict[ fileIDDict[fileID] ]['Metadata'] = result['Value'][fileID]
return S_OK( resultDict )
def getLFNForGUID( self, guids, credDict ):
"""
Gets the lfns that match a list of guids
    :param list guids: list of GUIDs to look for
    :param dict credDict: credentials of the caller
    :return: S_OK({guid:lfn}) dict.
"""
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
res = self.fileManager.getLFNForGUID( guids )
return res
########################################################################
#
# Directory based Write methods
#
def createDirectory( self, lfns, credDict ):
"""
Create new directories
:param list lfns: list of directories
    :param dict credDict: credentials
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'createDirectory', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.createDirectory(res['Value']['Successful'],credDict)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def removeDirectory( self, lfns, credDict ):
"""
Remove directories
:param list lfns: list of directories
    :param dict credDict: credentials
    :return: Successful/Failed dict.
"""
res = self._checkPathPermissions( 'removeDirectory', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.removeDirectory( res['Value']['Successful'], credDict )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
if not successful:
return S_OK( {'Successful':successful,'Failed':failed} )
# Remove the directory metadata now
dirIdList = [ successful[p]['DirID'] for p in successful if 'DirID' in successful[p] ]
result = self.dmeta.removeMetadataForDirectory( dirIdList, credDict )
if not result['OK']:
return result
failed.update( result['Value']['Failed'] )
    # Remove from the successful results those that failed in the metadata removal
    for lfn in failed:
      successful.pop( lfn, None )
    # Merge in the results of the metadata removal
    successful.update( result["Value"]["Successful"] )
return S_OK( {'Successful':successful, 'Failed':failed} )
########################################################################
#
# Directory based read methods
#
def listDirectory( self, lfns, credDict, verbose = False ):
"""
List directories
:param list lfns: list of directories
    :param dict credDict: credentials
    :return: Successful/Failed dict.
    The successful values are dictionaries indexed by "Files", "Datasets", "Subdirs" and "Links"
"""
res = self._checkPathPermissions( 'listDirectory', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.listDirectory(res['Value']['Successful'],verbose=verbose)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
def isDirectory( self, lfns, credDict ):
"""
Checks whether a list of LFNS are directories or not
:param list lfns: list of LFN to check
    :param dict credDict: credentials
    :return: Successful/Failed dict.
    The values of the successful dict are True or False depending on whether the path is a directory
"""
res = self._checkPathPermissions( 'isDirectory', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.isDirectory(res['Value']['Successful'])
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( {'Successful':successful, 'Failed':failed} )
  def getDirectoryReplicas( self, lfns, allStatus, credDict ):
    """
    Get the replicas of the files contained in the given directories
    :param dict lfns: directory dictionary
    :param bool allStatus: if True, consider replicas of any status
    :param dict credDict: credentials
    :return: Successful/Failed dict.
    """
res = self._checkPathPermissions( 'getDirectoryReplicas', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.getDirectoryReplicas(res['Value']['Successful'],allStatus)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful':successful, 'Failed':failed, 'SEPrefixes': res['Value'].get( 'SEPrefixes', {} )} )
def getDirectorySize( self, lfns, longOutput, fromFiles, credDict ):
"""
Get the sizes of a list of directories
:param list lfns: list of LFN to check
    :param dict credDict: credentials
    :return: Successful/Failed dict.
    The successful values are dictionaries indexed by "LogicalFiles" (number of files),
    "LogicalDirectories" (number of directories) and "LogicalSize" (sum of the file sizes)
"""
res = self._checkPathPermissions( 'getDirectorySize', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK( {'Successful':{}, 'Failed':failed} )
res = self.dtree.getDirectorySize(res['Value']['Successful'],longOutput,fromFiles)
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
queryTime = res['Value'].get( 'QueryTime', -1. )
return S_OK( {'Successful':successful, 'Failed':failed, 'QueryTime':queryTime} )
def getDirectoryMetadata( self, lfns, credDict ):
''' Get standard directory metadata
:param list lfns: list of directory paths
:param dict credDict: credentials
:return: Successful/Failed dict.
'''
res = self._checkPathPermissions( 'getDirectoryMetadata', lfns, credDict )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
result = self.dtree.getDirectoryParameters( lfn )
if result['OK']:
successful[lfn] = result['Value']
else:
failed[lfn] = result['Message']
return S_OK( { 'Successful': successful, 'Failed': failed } )
def rebuildDirectoryUsage( self ):
""" Rebuild DirectoryUsage table from scratch
"""
result = self.dtree._rebuildDirectoryUsage()
return result
  def repairCatalog( self, directoryFlag = True, credDict = None ):
    """ Repair catalog inconsistencies
    """
    # Avoid a mutable default argument for the credentials dictionary
    if credDict is None:
      credDict = {}
    result = S_OK()
    if directoryFlag:
result = self.dtree.recoverOrphanDirectories( credDict )
return result
#######################################################################
#
# Catalog metadata methods
#
def setMetadata( self, path, metadataDict, credDict ):
""" Add metadata to the given path
"""
res = self._checkPathPermissions( 'setMetadata', path, credDict )
if not res['OK']:
return res
if not res['Value']['Successful']:
return S_ERROR( 'Permission denied' )
if not res['Value']['Successful'][path]:
return S_ERROR( 'Permission denied' )
result = self.dtree.isDirectory( {path:True} )
if not result['OK']:
return result
if not result['Value']['Successful']:
return S_ERROR( 'Failed to determine the path type' )
if result['Value']['Successful'][path]:
# This is a directory
return self.dmeta.setMetadata( path, metadataDict, credDict )
else:
# This is a file
return self.fmeta.setMetadata( path, metadataDict, credDict )
def setMetadataBulk( self, pathMetadataDict, credDict ):
""" Add metadata for the given paths
"""
successful = {}
failed = {}
for path, metadataDict in pathMetadataDict.items():
result = self.setMetadata( path, metadataDict, credDict )
if result['OK']:
successful[path] = True
else:
failed[path] = result['Message']
return S_OK( { 'Successful': successful, 'Failed': failed } )
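  # Hedged usage sketch (the paths and metadata names below are illustrative
  # assumptions, not real catalog content):
  #
  #   result = fcDB.setMetadataBulk( { '/vo/data/run1': { 'RunNumber': 1234 },
  #                                    '/vo/data':      { 'Campaign': 'Test' } },
  #                                  credDict )
  #   # result['Value']['Successful'] maps each path to True, while
  #   # result['Value']['Failed'] maps each failing path to an error message.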
def removeMetadata( self, pathMetadataDict, credDict ):
""" Remove metadata for the given paths
"""
successful = {}
failed = {}
for path, metadataDict in pathMetadataDict.items():
result = self.__removeMetadata( path, metadataDict, credDict )
if result['OK']:
successful[path] = True
else:
failed[path] = result['Message']
return S_OK( { 'Successful': successful, 'Failed': failed } )
def __removeMetadata( self, path, metadata, credDict ):
""" Remove metadata from the given path
"""
res = self._checkPathPermissions( '__removeMetadata', path, credDict )
if not res['OK']:
return res
if not res['Value']['Successful']:
return S_ERROR( 'Permission denied' )
if not res['Value']['Successful'][path]:
return S_ERROR( 'Permission denied' )
result = self.dtree.isDirectory( {path:True} )
if not result['OK']:
return result
if not result['Value']['Successful']:
return S_ERROR( 'Failed to determine the path type' )
if result['Value']['Successful'][path]:
# This is a directory
return self.dmeta.removeMetadata( path, metadata, credDict )
else:
# This is a file
return self.fmeta.removeMetadata( path, metadata, credDict )
#######################################################################
#
# Catalog admin methods
#
  def getCatalogCounters( self, credDict ):
    """ Get the global counters of the catalog (files, replicas, directories)
    :param dict credDict: credentials
    :return: S_OK(counterDict)
    """
counterDict = {}
res = self._checkAdminPermission( credDict )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Permission denied" )
res = self.fileManager.getFileCounters()
if not res['OK']:
return res
counterDict.update( res['Value'] )
res = self.fileManager.getReplicaCounters()
if not res['OK']:
return res
counterDict.update( res['Value'] )
res = self.dtree.getDirectoryCounters()
if not res['OK']:
return res
counterDict.update( res['Value'] )
return S_OK( counterDict )
########################################################################
#
# Security based methods
#
def _checkAdminPermission( self, credDict ):
return self.securityManager.hasAdminAccess( credDict )
  def _checkPathPermissions( self, operation, lfns, credDict ):
    """ Filter the lfns dictionary, keeping only the paths on which the given
        operation is allowed for the given credentials
    """
res = self.securityManager.hasAccess( operation, lfns.keys(), credDict )
if not res['OK']:
return res
# Do not consider those paths for which we failed to determine access
failed = res['Value']['Failed']
for lfn in failed.keys():
lfns.pop( lfn )
# Do not consider those paths for which access is denied
successful = {}
for lfn, access in res['Value']['Successful'].items():
if not access:
failed[lfn] = 'Permission denied'
else:
successful[lfn] = lfns[lfn]
return S_OK( {'Successful':successful, 'Failed':failed} )
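  # Illustrative sketch (not part of the class): every bulk method above
  # follows the same Successful/Failed contract, so a caller typically
  # unpacks results as below. 'fcDB' and the lfn are assumptions made for
  # this example only.
  #
  #   result = fcDB.getReplicas( { '/vo/data/file1': {} }, credDict )
  #   if result['OK']:
  #     for lfn, replicas in result['Value']['Successful'].items():
  #       pass  # replicas maps SE names to PFNs
  #     for lfn, reason in result['Value']['Failed'].items():
  #       pass  # reason is a human-readable error message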
|
vmendez/DIRAC
|
DataManagementSystem/DB/FileCatalogDB.py
|
Python
|
gpl-3.0
| 38,872
|
[
"DIRAC"
] |
0050e5418f2dd14b6fbf07d0e73f71a5695fde956fe0d372673048157c281d09
|
''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal)
from scipy.io.netcdf import netcdf_file
from scipy._lib._numpy_compat import suppress_warnings
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
def check_simple(ncfileobj):
    '''Example fileobj tests'''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
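# Illustrative sketch (not one of the tests): assert_mask_matches accepts both
# plain ndarrays (treated as fully unmasked) and MaskedArrays.
def _example_assert_mask_matches():
    assert_mask_matches(np.arange(3), [False, False, False])
    assert_mask_matches(np.ma.masked_values([1, -1, 3], -1),
                        [False, True, False])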
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
    finally:
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
        f.createDimension('x', 4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
        # open again in 'a', read the att and add a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_append_recordDimension():
dataSize = 100
with in_tempdir():
# Create file with record time dimension
with netcdf_file('withRecordDimension.nc', 'w') as f:
f.createDimension('time', None)
f.createVariable('time', 'd', ('time',))
f.createDimension('x', dataSize)
x = f.createVariable('x', 'd', ('x',))
x[:] = np.array(range(dataSize))
f.createDimension('y', dataSize)
y = f.createVariable('y', 'd', ('y',))
y[:] = np.array(range(dataSize))
f.createVariable('testData', 'i', ('time', 'x', 'y'))
f.flush()
f.close()
for i in range(2):
# Open the file in append mode and add data
with netcdf_file('withRecordDimension.nc', 'a') as f:
f.variables['time'].data = np.append(f.variables["time"].data, i)
f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i
f.flush()
# Read the file and check that append worked
with netcdf_file('withRecordDimension.nc') as f:
assert_equal(f.variables['time'][-1], i)
assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)
assert_equal(f.variables['time'].data.shape[0], i+1)
assert_equal(f.variables['testData'].data.shape[0], i+1)
# Read the file and check that 'data' was not saved as user defined
# attribute of testData variable during append operation
with netcdf_file('withRecordDimension.nc') as f:
with assert_raises(KeyError) as ar:
f.variables['testData']._attributes['data']
ex = ar.exception
assert_equal(ex.args[0], 'data')
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
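# Recap (illustrative comment, not a test): with maskandscale=True the reader
# first masks raw values matching _FillValue/missing_value and then applies
# scale_factor/add_offset, which is the behaviour the assertions above rely on.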
|
apbard/scipy
|
scipy/io/tests/test_netcdf.py
|
Python
|
bsd-3-clause
| 16,677
|
[
"NetCDF"
] |
374cfdf27f2495e01d078b1ca8ff5cf0170644e57d7a9d4c9777f12d7225cc8d
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
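# Hedged sketch (the helper name is an assumption, not part of the public
# API): when Y stays fixed across repeated calls, its squared row norms can
# be precomputed once and passed via Y_norm_squared, as the docstring notes.
def _euclidean_distances_cached_y(X_chunks, Y):
    YY = row_norms(Y, squared=True)[np.newaxis, :]  # shape (1, n_samples_2)
    return [euclidean_distances(X, Y, Y_norm_squared=YY) for X in X_chunks]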
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
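# Illustrative usage (toy data; the function name is an assumption for this
# sketch only): the chunked computation agrees with the naive argmin/min over
# the full distance matrix.
def _example_pairwise_distances_argmin_min():
    X = np.array([[0., 0.], [3., 3.]])
    Y = np.array([[1., 1.], [2., 2.]])
    argmin, dist = pairwise_distances_argmin_min(X, Y)
    # argmin == [0, 1] and dist == [sqrt(2), sqrt(2)]
    return argmin, dist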
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
    ----------
    X : array-like
        Array containing points. Shape (n_samples1, n_features).
    Y : array-like
        Array containing points. Shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
    -------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
    --------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
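# Illustrative check (sketch only; tiny dense input is an assumption): the
# RBF kernel of a vector with itself is exp(0) == 1, so the diagonal of K
# is all ones.
def _example_rbf_kernel_diagonal():
    X = np.array([[0., 1.], [1., 3.]])
    K = rbf_kernel(X, gamma=0.5)
    return np.allclose(np.diag(K), 1.0)  # True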
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
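# Note (illustrative): gen_even_slices(10, 3) yields slice(0, 4), slice(4, 7)
# and slice(7, 10); the per-slice results computed above are re-assembled
# column-wise with np.hstack.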
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
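# Hedged usage sketch (the lambda and the helper name are assumptions for
# this example): a user-supplied callable is evaluated on each pair of rows
# via _pairwise_callable.
def _example_callable_metric():
    X = np.array([[0., 0.], [1., 2.]])
    D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    # D is the symmetric L1 distance matrix [[0., 3.], [3., 0.]]
    return D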
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
    Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'sigmoid', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating the kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
        Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
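# Illustrative usage sketch (hypothetical values): filter_params=True drops any
# keyword argument that is not listed in KERNEL_PARAMS for the chosen metric.
#   >>> import numpy as np
#   >>> X = np.array([[1., 0.], [0., 1.]])
#   >>> K = pairwise_kernels(X, metric="rbf", filter_params=True,
#   ...                      gamma=0.5, degree=3)  # 'degree' is filtered out
#   >>> K.shape
#   (2, 2)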
|
mehdidc/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 41,636
|
[
"Gaussian"
] |
db75e7ebba216b1faa89154c957352889b2a070c599a493cc7b74a741520949f
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`elasticsearch.py`
ElasticSearch backend implementation.
"""
import logging
from elasticsearch import Elasticsearch
LOGGER = logging.getLogger(__name__)
DEFAULT_DATE_FIELD = '@timestamp'
class ElasticsearchBackend:
"""Backend for querying metrics from ElasticSearch.
Args:
client (elasticsearch.ElasticSearch): Existing ES client.
es_config (dict): ES client configuration.
"""
def __init__(self, client=None, **es_config):
self.client = client
if self.client is None:
self.client = Elasticsearch(**es_config)
# pylint: disable=unused-argument
def good_bad_ratio(self, timestamp, window, slo_config):
"""Query two timeseries, one containing 'good' events, one containing
'bad' events.
Args:
timestamp (int): UNIX timestamp.
window (int): Window size (in seconds).
slo_config (dict): SLO configuration.
Returns:
tuple: A tuple (good_event_count, bad_event_count)
"""
measurement = slo_config['backend']['measurement']
index = measurement['index']
query_good = measurement['query_good']
query_bad = measurement.get('query_bad')
query_valid = measurement.get('query_valid')
date_field = measurement.get('date_field', DEFAULT_DATE_FIELD)
# Build ELK request bodies
good = ES.build_query(query_good, window, date_field)
bad = ES.build_query(query_bad, window, date_field)
valid = ES.build_query(query_valid, window, date_field)
# Get good events count
response = self.query(index, good)
good_events_count = ES.count(response)
# Get bad events count
if query_bad is not None:
response = self.query(index, bad)
bad_events_count = ES.count(response)
elif query_valid is not None:
response = self.query(index, valid)
bad_events_count = ES.count(response) - good_events_count
else:
            raise Exception("`query_bad` or `query_valid` is required.")
return (good_events_count, bad_events_count)
def query(self, index, body):
"""Query ElasticSearch server.
Args:
index (str): Index to query.
body (dict): Query body.
Returns:
dict: Response.
"""
return self.client.search(index=index, body=body)
@staticmethod
def count(response):
"""Count event in Prometheus response.
Args:
response (dict): Prometheus query response.
Returns:
int: Event count.
"""
try:
return response['hits']['total']['value']
except KeyError as exception:
LOGGER.warning("Couldn't find any values in timeseries response")
LOGGER.debug(exception)
return 0
@staticmethod
def build_query(query, window, date_field=DEFAULT_DATE_FIELD):
"""Build ElasticSearch query.
Add window to existing query.
Replace window for different error budget steps on-the-fly.
Args:
            query (dict): Existing query body (content of the `bool` clause).
            window (int): Window in seconds.
            date_field (str): Field to filter time on (must be an ElasticSearch
                field of type `date`). Defaults to `@timestamp` (the
                Logstash-generated date field).
Returns:
dict: Query body with range clause added.
"""
if query is None:
return None
body = {"query": {"bool": query}, "track_total_hits": True}
range_query = {
f"{date_field}": {
"gte": f"now-{window}s/s",
"lt": "now/s"
}
}
        # If a 'filter' clause already exists, add the range query to it,
        # otherwise create the 'filter' clause.
        if "filter" in body["query"]["bool"]:
            body["query"]["bool"]["filter"]["range"] = range_query
        else:
            body["query"]["bool"]["filter"] = {"range": range_query}
return body
ES = ElasticsearchBackend
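# Illustrative sketch (assumed index name and queries, not part of the module):
# the backend expects an SLO config shaped roughly like the following, where
# 'query_good' / 'query_bad' are ElasticSearch bool-clause bodies.
#   slo_config = {
#       'backend': {
#           'measurement': {
#               'index': 'logstash-*',
#               'query_good': {'must': {'match': {'status': 200}}},
#               'query_bad': {'must': {'match': {'status': 500}}},
#           }
#       }
#   }
#   backend = ElasticsearchBackend(hosts=['localhost:9200'])
#   good, bad = backend.good_bad_ratio(None, 3600, slo_config)
# With query_valid instead of query_bad, bad is computed as valid - good.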
|
CloudVLab/professional-services
|
tools/slo-generator/slo_generator/backends/elasticsearch.py
|
Python
|
apache-2.0
| 4,740
|
[
"Elk"
] |
855916e96d5dd1407f9196c445a959490d958b447fe72f50e22fecb130516f44
|
"""
DIRAC.StorageManagementSystem.Agent package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
yujikato/DIRAC
|
src/DIRAC/StorageManagementSystem/Agent/__init__.py
|
Python
|
gpl-3.0
| 164
|
[
"DIRAC"
] |
f3b0e976c215154d28cf842ad97a7705f7967d230bd6b0b0a6d369f4466537a3
|
# -*- coding: utf-8 -*-
#
# DeBaCl documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 29 19:11:02 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import debacl
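# Register the submodules below under their dotted names so that autodoc and
# autosummary can import 'debacl.level_set_tree' and 'debacl.utils' even though
# they are only exposed as attributes of the top-level package.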
for module in ['level_set_tree', 'utils']:
module_path = 'debacl.{}'.format(module)
sys.modules[module_path] = eval(module_path)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../debacl/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'numpydoc']
numpydoc_show_class_members = False
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DeBaCl'
copyright = u'2013-2016, Brian P. Kent'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeBaCldoc'
# -- Options for LaTeX output --------------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# latex_documents = [
# ('index', 'DeBaCl.tex', u'DeBaCl Documentation',
# u'Brian P. Kent', 'manual'),
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ('index', 'debacl', u'DeBaCl Documentation',
# [u'Brian P. Kent'], 1)
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# ('index', 'DeBaCl', u'DeBaCl Documentation',
# u'Brian P. Kent', 'DeBaCl', 'One line description of project.',
# 'Miscellaneous'),
# ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
CoAxLab/DeBaCl
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,305
|
[
"Brian"
] |
f71aad2fd000e7eed978c9d46351c35c3d3f9de24846175aad3f5150346d6015
|
from Bio import SeqIO
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
import sys
dna = set("ATGC-actg")
def validate(seq, alphabet=dna):
"Checks that a sequence only contains values from an alphabet"
leftover = set(seq.upper()) - alphabet
return not leftover
file = open('result.txt', 'w')
extension = sys.argv[1].split(".")[-1]
# Map recognized file extensions to Bio.SeqIO parser format names
# ('sam' and 'bam' are passed through as-is, though Bio.SeqIO may not
# support them).
formats = {'fasta': 'fasta', 'fa': 'fasta', 'fastq': 'fastq',
           'gbk': 'genbank', 'gb': 'genbank', 'sff': 'sff',
           'sam': 'sam', 'bam': 'bam'}
if extension in formats:
    records = list(SeqIO.parse(sys.argv[1], formats[extension]))
    print("Found %i reads" % len(records))
    file.write("Found," + str(len(records)) + "\n")
    # Total sequence length across all records.
    total = 0
    for t in records:
        total = total + len(t)
    # Check the first record to decide between nucleotides and amino acids.
    if validate(records[0].seq[:10]):
        print "Number of nucleotides: ", total
        file.write("nucleotides," + str(total) + "\n")
    else:
        print "Number of amino acids: ", total
        file.write("acids," + str(total) + "\n")
else:
    print "unknown format"
file.close()
|
gigascience/gigadb-website
|
protected/scripts/read_stat_script.py
|
Python
|
gpl-3.0
| 3,143
|
[
"BLAST"
] |
9855a31670d5612bf14c1a10185de2282df5ec1742bf5c7474198f1810709ae2
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import collections
# FIXME: can we store these as something else before we ship it?
import sys
import time
import json
from ansible import constants as C
from ansible.plugins.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
print "The 'redis' python module is required, 'pip install redis'"
sys.exit(1)
class CacheModule(BaseCacheModule):
"""
A caching module backed by redis.
Keys are maintained in a zset with their score being the timestamp
when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used instead of a pattern matched
    'scan' for performance.
"""
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(':')
else:
connection = []
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = StrictRedis(*connection)
self._keys_set = 'ansible_cache_keys'
def _make_key(self, key):
return self._prefix + key
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the zset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return json.loads(value)
def set(self, key, value):
value2 = json.dumps(value)
if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
self._cache.setex(self._make_key(key), int(self._timeout), value2)
else:
self._cache.set(self._make_key(key), value2)
self._cache.zadd(self._keys_set, time.time(), key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
def keys(self):
self._expire_keys()
return self._cache.zrange(self._keys_set, 0, -1)
def contains(self, key):
self._expire_keys()
        return self._cache.zrank(self._keys_set, key) is not None
def delete(self, key):
self._cache.delete(self._make_key(key))
self._cache.zrem(self._keys_set, key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
# FIXME: there is probably a better way to do this in redis
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
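# Illustrative sketch of the zset bookkeeping described in the class docstring
# (hypothetical key, using only calls that appear in this module):
#   cache = CacheModule()
#   cache.set('host1', {'facts': 1})  # SETEX/SET + ZADD to 'ansible_cache_keys'
#   cache.contains('host1')           # prunes expired scores, then ZRANK
#   cache.delete('host1')             # DEL the prefixed key + ZREM from the zset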
|
wulczer/ansible
|
v2/ansible/plugins/cache/redis.py
|
Python
|
gpl-3.0
| 3,394
|
[
"Brian"
] |
bd8c169d40f86eb03653a38a5eb018c9c8a4ba5b7ca9788077ece7be24439a6e
|
#!/usr/bin/env python
"""
Test colormaps
@author alexander@gokliya.net
"""
from __future__ import print_function
from icqsol.shapes.icqShapeManager import ShapeManager
from icqsol import util
shape_mgr = ShapeManager()
s = shape_mgr.createShape('box', origin=(0., 0., 0.), lengths=[10., 1., 1.])
s2 = shape_mgr.refineShape(s, refine=4)
# add a field
pdata = shape_mgr.addSurfaceFieldFromExpressionToShape(s2, 'myField', 'x', [0.0])
# color
pdataHot = shape_mgr.colorSurfaceField(pdata, 'hot', field_name='myField')
pdataCold = shape_mgr.colorSurfaceField(pdata, 'cold', field_name='myField')
pdataGnu = shape_mgr.colorSurfaceField(pdata, 'gnu', field_name='myField')
pdataBlackbody = shape_mgr.colorSurfaceField(pdata, 'blackbody', field_name='myField')
# write files
shape_mgr.setWriter(file_format='vtk', vtk_dataset_type='POLYDATA')
shape_mgr.saveVtkPolyData(pdataHot, file_name='hot.vtk', file_type='ascii')
shape_mgr.saveVtkPolyData(pdataCold, file_name='cold.vtk', file_type='ascii')
shape_mgr.saveVtkPolyData(pdataGnu, file_name='gnu.vtk', file_type='ascii')
shape_mgr.saveVtkPolyData(pdataBlackbody, file_name='blackbody.vtk', file_type='ascii')
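# The four saved files differ only in the color map applied to 'myField';
# opening e.g. hot.vtk in a VTK viewer such as ParaView shows the field
# colored along the box's x axis.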
|
gregvonkuster/icqsol
|
tests/testColormaps.py
|
Python
|
mit
| 1,160
|
[
"VTK"
] |
89daaa82548068c29a50b09ffe450a4a6bc3617ab3f8b4985b3a05d8c7435272
|
import os
import time
from page_objects import *
def go_to_web_pro(context):
context.browser.visit(context.base_url+ '/login/')
time.sleep(1)
def hide_tool_bar(context):
time.sleep(1)
context.browser.find_by_id('djHideToolBarButton').click()
def go_to_login_page(context):
time.sleep(1)
return context.browser.visit(context.base_url + '/login/')
def go_to_profile_page(context):
time.sleep(1)
return context.browser.visit(context.base_url + '/profile/')
def go_to_shout_page(context):
time.sleep(1)
return context.browser.visit(context.base_url + '/shout/')
def go_to_register_page(context):
time.sleep(1)
return context.browser.visit(context.base_url + '/register/')
def login_to_web_pro(context, email, password):
time.sleep(1)
context.browser.visit(context.base_url + '/login/')
context.browser.fill('email', email)
context.browser.fill('password', password)
time.sleep(1)
send_submit_btn(context.browser).click()
def deactivate_profile(browser):
time.sleep(1)
browser.find_link_by_href('/profile/deactivate/').click()
time.sleep(2)
browser.find_by_css('.btn.btn-danger').click()
time.sleep(3)
def send_a_message(browser, message):
browser.fill('message', message)
time.sleep(1)
send_submit_btn(browser).click()
time.sleep(1)
def log_out_of_web_pro(context):
time.sleep(2)
assert logout_link(context.browser), 'Logout link not found'
logout_link(context.browser).click()
time.sleep(1)
go_to_login_page(context)
time.sleep(1)
def get_become_reporter_btn(context):
time.sleep(1)
return register_link.first(context.browser)
def reset_password(browser, password, confirm_password):
time.sleep(1)
browser.fill('password', password)
browser.fill('confirm_password', confirm_password)
time.sleep(2)
browser.find_by_css('.wp-send.btn').click()
def send_forgot_password_email(browser, email):
time.sleep(2)
browser.fill('email', email)
browser.find_by_css('.wp-send.btn').click()
time.sleep(1)
|
rapidpro/ureport-web-participation
|
features/steps/page_actions.py
|
Python
|
agpl-3.0
| 2,079
|
[
"VisIt"
] |
9ede17d2bb51fa6d402187cdfad9d3eefb742d9da8aa91a32c5c35d5db48811f
|
import numpy as np
from .interp_dens_map import interp_dens_map
from .xyz_coords import xyz_coords
from .best_fit_angles import get_angles
from .method1 import m1_ccc_map
from .method2 import m2_fix_plane_perp_dist
from .method3 import m3_min_perp_distance
def draw_rand_dep_dist(d_g, e_d_g):
'''
Take each 3D distance between a cluster and the center of the galaxy,
and normally draw a random value using its error as the standard deviation.
'''
rand_dist = []
for mu, std in zip(*[d_g, e_d_g]):
r_dist = np.random.normal(mu, std)
# Don't store negative distances.
rand_dist.append(max(0., r_dist))
return rand_dist
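# Illustrative example (hypothetical values): with d_g = [10., 50.] and
# e_d_g = [2., 5.], each call returns one normal draw per cluster, e.g.
# [9.3, 47.8], with any negative draw clipped to 0.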
def draw_rand_dist_mod(dm_g, e_dm_g):
"""
Draw random distance moduli assuming a normal distribution of the errors.
"""
r_dist = np.random.normal(np.asarray(dm_g), np.asarray(e_dm_g))
return r_dist
def monte_carlo_errors(N_maps, method, params):
"""
Obtain N_maps angles-CCC density maps, by randomly sampling
the distance to each cluster before calculating the CCC.
"""
# Unpack params.
if method == 'deproj_dists':
inc_lst, pa_lst, xi, yi, d_f, e_d_f, dep_dist_i_PA_vals =\
params
elif method == 'perp_d_fix_plane':
inc_lst, pa_lst, xi, yi, dm_f, e_dm_f, rho_f, phi_f, gal_dist,\
plane_abc = params
elif method == 'perp_d_free_plane':
dm_f, e_dm_f, rho_f, phi_f, gal_dist, N_min = params
inc_pa_mcarlo = []
milestones = [0.1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    for i in range(N_maps):
if method == 'deproj_dists':
# Draw random deprojected distances (in Kpc), obtained via the
# ASteCA distance moduli + astropy.
rand_dist = draw_rand_dep_dist(d_f, e_d_f)
# Obtain density map of CCC (z) values.
z = m1_ccc_map(dep_dist_i_PA_vals, rand_dist)
# Obtain (finer) interpolated angles-CCC density map.
# Rows in zi correspond to inclination values.
# Columns correspond to position angle values.
zi = interp_dens_map(inc_lst, pa_lst, xi, yi, z)
# Store values of inclination and position angles for the
# interpolated map.
best_angles_pars = [xi, yi, zi]
elif method == 'perp_d_fix_plane':
# Draw random distances moduli, obtained via ASteCA.
rand_dist = draw_rand_dist_mod(dm_f, e_dm_f)
# Positions in the (x,y,z) system.
x, y, z = xyz_coords(rho_f, phi_f, gal_dist, rand_dist)
# Obtain density map (z), composed of the sum of the absolute
# values of the distances to each plane.
z = m2_fix_plane_perp_dist(plane_abc, x, y, z)
zi = interp_dens_map(inc_lst, pa_lst, xi, yi, z)
best_angles_pars = [xi, yi, zi]
elif method == 'perp_d_free_plane':
# Draw random distances moduli, obtained via ASteCA.
rand_dist = draw_rand_dist_mod(dm_f, e_dm_f)
# Obtain coords in the (x, y, z) system, using the random
# distance moduli values.
x, y, z = xyz_coords(rho_f, phi_f, gal_dist, rand_dist)
# Store params used to obtain the Monte Carlo errors.
best_angles_pars = m3_min_perp_distance(x, y, z, N_min)
inc_b, pa_b, _ = get_angles(method, best_angles_pars)
inc_pa_mcarlo.append([inc_b, pa_b])
# Print percentage done.
        percentage_complete = (100. * (i + 1) / N_maps)
while len(milestones) > 0 and \
percentage_complete >= milestones[0]:
# print " {:>3}% done".format(milestones[0])
# Remove that milestone from the list.
milestones = milestones[1:]
# import matplotlib.pyplot as plt
# x, y = list(zip(*inc_pa_mcarlo))
# plt.subplot(121)
# plt.hist(x, bins=50)
# plt.subplot(122)
# plt.hist(y, bins=50)
# plt.show()
return inc_pa_mcarlo
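# The returned list holds N_maps (inclination, position angle) pairs; their
# spread (e.g. percentiles, or the commented-out histograms above) gives the
# Monte Carlo uncertainty on the best-fit angles.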
|
Gabriel-p/mcs_rot_angles
|
modules/mc_errors.py
|
Python
|
gpl-3.0
| 4,014
|
[
"Galaxy"
] |
84edfe20cd8292fa6a275ca2471f0eeb560be5a07d4df1f2c16cf69415fb5027
|
# -*- coding: utf-8 -*-
"""
Authors: Gonzalo E. Espinoza-Dávalos
IHE Delft 2017
Contact: g.espinoza@un-ihe.org
Repository: https://github.com/gespinoza/waterpix
Module: waterpix
"""
from __future__ import division
import os
import tempfile
from .davgis import (Spatial_Reference, Buffer, Feature_to_Raster, Resample,
Clip, Raster_to_Array, Get_Extent)
import pandas as pd
import netCDF4
def create_input_nc(start_date, years,
cellsize, basin_shp,
p_path, et_path, eto_path, lai_path,
swi_path, swio_path, swix_path,
qratio_path, rainydays_path,
thetasat_ras, rootdepth_ras,
input_nc, epsg=4326, bbox=None):
"""
Creates the input netcdf file required to run waterpix
"""
# Script parameters
print "Variable\tRaster"
if bbox:
latlim = [bbox[1], bbox[3]]
lonlim = [bbox[0], bbox[2]]
else:
xmin, ymin, xmax, ymax = Get_Extent(basin_shp)
latlim = [ymin, ymax]
lonlim = [xmin, xmax]
time_range = pd.date_range(start_date, periods=12*years, freq='MS')
time_ls = [d.strftime('%Y%m') for d in time_range]
time_dt = [pd.to_datetime(i, format='%Y%m')
for i in time_ls]
time_n = len(time_ls)
    # Unique years, in chronological order.
    years_ls = sorted(set(i.year for i in time_dt))
time_indeces = {}
for j, item in enumerate(years_ls):
temp_ls = [int(i.strftime('%Y%m')) for i in
pd.date_range(str(item) + '0101',
str(item) + '1231', freq='MS')]
time_indeces[item] = [time_ls.index(str(i)) for i in temp_ls]
for key in time_indeces.keys():
if time_indeces[key] != range(time_indeces[key][0],
time_indeces[key][-1] + 1):
            raise Exception('The year {0} in the netcdf file is incomplete'
                            ' or the dates are non-consecutive'.format(key))
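    # For example, start_date='2007-01-01' with years=1 gives
    # time_ls = ['200701', ..., '200712'] and time_indeces == {2007: [0, ..., 11]}.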
all_paths = {'p': p_path, 'et': et_path, 'eto': eto_path, 'lai': lai_path,
'swi': swi_path, 'swio': swio_path, 'swix': swix_path,
'qratio': qratio_path, 'rainydays': rainydays_path}
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
    lat_ls = lat_ls[::-1]  # flip so latitudes run north to south (raster row order)
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
projection = Spatial_Reference(epsg, True)
ll_corner = [lonlim[0], latlim[0]]
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Temp directory
temp_dir1 = tempfile.mkdtemp()
# Basin mask
basin_ras = os.path.join(temp_dir1, 'bas.tif')
buff_shp = os.path.join(temp_dir1, 'bas.shp')
Buffer(basin_shp, buff_shp, 2*cellsize)
Feature_to_Raster(buff_shp, basin_ras, cellsize, False, -9999)
# Create NetCDF file
nc_file = netCDF4.Dataset(input_nc, 'w', format="NETCDF4")
nc_file.set_fill_on()
# Create dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
month_dim = nc_file.createDimension('time_yyyymm', time_n)
year_dim = nc_file.createDimension('time_yyyy', len(years_ls))
# Create NetCDF variables
crs_var = nc_file.createVariable('crs', 'i', (), fill_value=-9999)
crs_var.standard_name = 'crs'
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = projection
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=-9999)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=-9999)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
month_var = nc_file.createVariable('time_yyyymm', 'l', ('time_yyyymm'),
fill_value=-9999)
month_var.standard_name = 'time'
month_var.format = 'YYYYMM'
year_var = nc_file.createVariable('time_yyyy', 'l', ('time_yyyy'),
fill_value=-9999)
year_var.standard_name = 'time'
year_var.format = 'YYYY'
# Variables
p_var = nc_file.createVariable('Precipitation_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
p_var.long_name = 'Precipitation'
p_var.units = 'mm/month'
py_var = nc_file.createVariable('Precipitation_Y', 'f8',
('time_yyyy', 'latitude', 'longitude'),
fill_value=-9999)
py_var.long_name = 'Precipitation'
py_var.units = 'mm/year'
et_var = nc_file.createVariable('Evapotranspiration_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
et_var.long_name = 'Evapotranspiration'
et_var.units = 'mm/month'
ety_var = nc_file.createVariable('Evapotranspiration_Y', 'f8',
('time_yyyy', 'latitude', 'longitude'),
fill_value=-9999)
ety_var.long_name = 'Evapotranspiration'
ety_var.units = 'mm/year'
eto_var = nc_file.createVariable('ReferenceET_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
eto_var.long_name = 'Reference Evapotranspiration'
eto_var.units = 'mm/month'
lai_var = nc_file.createVariable('LeafAreaIndex_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
lai_var.long_name = 'Leaf Area Index'
lai_var.units = 'm2/m2'
swi_var = nc_file.createVariable('SWI_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
swi_var.long_name = 'Soil Water Index - Monthly mean'
swi_var.units = '%'
swio_var = nc_file.createVariable('SWIo_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
swio_var.long_name = 'Soil water index - First day of the month'
swio_var.units = '%'
swix_var = nc_file.createVariable('SWIx_M', 'f8',
('time_yyyymm', 'latitude', 'longitude'),
fill_value=-9999)
swix_var.long_name = 'Soil water index - Last day of the month'
swix_var.units = '%'
qratio_var = nc_file.createVariable('RunoffRatio_Y', 'f8',
('time_yyyy',
'latitude', 'longitude'),
fill_value=-9999)
qratio_var.long_name = 'Runoff ratio'
qratio_var.units = '-'
rainydays_var = nc_file.createVariable('RainyDays_M', 'f8',
('time_yyyymm',
'latitude', 'longitude'),
fill_value=-9999)
rainydays_var.long_name = 'Number of rainy days per month'
rainydays_var.units = 'No. rainy days/month'
thetasat_var = nc_file.createVariable('SaturatedWaterContent', 'f8',
('latitude', 'longitude'),
fill_value=-9999)
thetasat_var.long_name = 'Saturated water content (top soil)'
thetasat_var.units = 'cm3/cm3'
rootdepth_var = nc_file.createVariable('RootDepth', 'f8',
('latitude', 'longitude'),
fill_value=-9999)
rootdepth_var.long_name = 'Root depth'
rootdepth_var.units = 'mm'
basinmask_var = nc_file.createVariable('BasinBuffer', 'l',
('latitude', 'longitude'),
fill_value=0)
basinmask_var.long_name = 'Basin buffer'
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
month_var[:] = time_ls
year_var[:] = years_ls
# Static variables
# Theta sat
print "{0}\t{1}".format('thetasat', thetasat_ras)
thetasat_temp1 = os.path.join(temp_dir1, 'thetasat1.tif')
thetasat_temp2 = os.path.join(temp_dir1, 'thetasat2.tif')
Resample(thetasat_ras, thetasat_temp1, cellsize, 'NearestNeighbour')
Clip(thetasat_temp1, thetasat_temp2, bbox)
array_thetasat = Raster_to_Array(thetasat_temp2, ll_corner, lon_n, lat_n)
thetasat_var[:, :] = array_thetasat[:, :]
# Root depth
print "{0}\t{1}".format('rootdepth', rootdepth_ras)
rootdepth_temp1 = os.path.join(temp_dir1, 'rootdepth1.tif')
rootdepth_temp2 = os.path.join(temp_dir1, 'rootdepth2.tif')
Resample(rootdepth_ras, rootdepth_temp1, cellsize, 'NearestNeighbour')
Clip(rootdepth_temp1, rootdepth_temp2, bbox)
array_rootdepth = Raster_to_Array(rootdepth_temp2, ll_corner, lon_n, lat_n)
rootdepth_var[:, :] = array_rootdepth[:, :]
# Basin mask
print "{0}\t{1}".format('basin_mask', basin_ras)
basinmask_temp1 = os.path.join(temp_dir1, 'basinmask1.tif')
basinmask_temp2 = os.path.join(temp_dir1, 'basinmask2.tif')
Resample(basin_ras, basinmask_temp1, cellsize, 'NearestNeighbour')
Clip(basinmask_temp1, basinmask_temp2, bbox)
array_basinmask = Raster_to_Array(basinmask_temp2, ll_corner, lon_n, lat_n)
array_basinmask[pd.np.isnan(array_basinmask)] = 0.0
basinmask_var[:, :] = array_basinmask[:, :]
# Dynamic variables
for var in ['p', 'et', 'eto', 'lai',
'swi', 'swio', 'swix', 'rainydays']:
# Make temp directory
temp_dir2 = tempfile.mkdtemp()
temp_dir3 = tempfile.mkdtemp()
for yyyymm in time_ls:
yyyy = yyyymm[:4]
mm = yyyymm[-2:]
ras = all_paths[var].format(yyyy=yyyy, mm=mm)
print "{0}\t{1}".format(var, ras)
Resample(ras, os.path.join(temp_dir2, os.path.basename(ras)),
cellsize, 'NearestNeighbour')
Clip(os.path.join(temp_dir2, os.path.basename(ras)),
os.path.join(temp_dir3, os.path.basename(ras)),
bbox)
array = Raster_to_Array(os.path.join(temp_dir3,
os.path.basename(ras)),
ll_corner, lon_n, lat_n)
t_index = time_ls.index(yyyymm)
exec('{0}_var[t_index, :, :] = array[:, :]'.format(var))
# Runoff ratio
temp_dir2 = tempfile.mkdtemp()
temp_dir3 = tempfile.mkdtemp()
    for yyyy in years_ls:
        ras = all_paths['qratio'].format(yyyy=yyyy)
        print "{0}\t{1}".format('qratio', ras)
        Resample(ras, os.path.join(temp_dir2, os.path.basename(ras)),
                 cellsize, 'NearestNeighbour')
        Clip(os.path.join(temp_dir2, os.path.basename(ras)),
             os.path.join(temp_dir3, os.path.basename(ras)),
             bbox)
        array = Raster_to_Array(os.path.join(temp_dir3,
                                             os.path.basename(ras)),
                                ll_corner, lon_n, lat_n)
        y_index = years_ls.index(yyyy)
        qratio_var[y_index, :, :] = array[:, :]
# Calculate yearly rasters
for yyyy in years_ls:
yyyyi = years_ls.index(yyyy)
ti1 = time_indeces[yyyy][0]
ti2 = time_indeces[yyyy][-1] + 1
py_var[yyyyi, :, :] = pd.np.sum(p_var[ti1:ti2, :, :], axis=0)
ety_var[yyyyi, :, :] = pd.np.sum(et_var[ti1:ti2, :, :], axis=0)
# Close file
nc_file.close()
# Return
return input_nc
|
wateraccounting/wa
|
Models/waterpix/wp_gdal/create_input_nc.py
|
Python
|
apache-2.0
| 12,191
|
[
"NetCDF"
] |
2e8e62c4b8e1746092508b852846f44362fc0b6f825687cac305d9b4a511cef3
|
#!/usr/bin/python2
# tf_train.py
#
# Collection of ML algorithms to fingerprint radio devices using Tensorflow.
# A high level overview of the functionality provided by this code is given in
# the paper entitled "Physical-Layer Fingerprinting of LoRa devices using
# Supervised and Zero-Shot Learning", which was presented at WiSec 2017. A VM
# containing the training data and scripts required to reproduce the results
# from our paper will be published on Zenodo. Please contact one of the authors
# for more information.
#
# The code provides an abstraction layer on top of Tensorflow, consisting of
# "Models" and "Layers", in order to build a "Classifier" for raw radio signals.
# If you plan on using this framework for your research, I would recommend using
# the library "Keras" to build the models instead of "raw" Tensorflow. Keras was
# developed concurrently with this work, and provides a more concise and mature
# implementation for the same types of models that are used here.
#
# Author: Pieter Robyns
# Contact: pieter.robyns@uhasselt.be
import tensorflow as tf
import colorama
import random
import numpy as np
import scipy.io as sio
import os
import configparser
import argparse
import preprocessing
import visualization
import pickle
import json
import sklearn
import utilities
from colorama import Fore,Back,Style
from pymongo import MongoClient
from pymongo.errors import OperationFailure, AutoReconnect
from scipy import stats
from sklearn.manifold import TSNE
from sklearn.svm import SVC
from mapping import Mapping
from cache import GenericCache
from datetime import datetime
from random import randint
from tensorflow.contrib.tensorboard.plugins import projector
from sklearn.cluster import DBSCAN
from itertools import combinations
from collections import defaultdict
# ----------------------------------------------------
# Globals
# ----------------------------------------------------
colorama.init(autoreset=True)
EPSILON = 0.00000000001
defaults = {
'exclude_classes': '',
'epochs': -1,
'num_zs_test_samples': 40,
}
cp = configparser.RawConfigParser(defaults)
flags = tf.app.flags
FLAGS = flags.FLAGS
# ----------------------------------------------------
# Static functions
# ----------------------------------------------------
def load_conf(conf): # Configure the classifier using settings from conf file
cp.read(conf)
# Flags
flags.DEFINE_string('logdir', '/tmp/tensorboard', 'Tensorboard summaries directory')
flags.DEFINE_string('trainedmodelsdir', cp.get("DEFAULT", "trained_models_path"), 'Trained models directory')
flags.DEFINE_string('dataset', cp.get("DEFAULT", "dataset"), 'Dataset type (mongo or matlab)')
flags.DEFINE_string('classifier', cp.get("DEFAULT", "classifier"), 'Type of classifier to use')
flags.DEFINE_string('clustering', cp.get("DEFAULT", "clustering"), 'Type of clustering to use if doing open set classification')
flags.DEFINE_string('model_name', cp.get("DEFAULT", "model_name"), 'Name of the experiment / model. Used for saving it')
flags.DEFINE_integer('limit', cp.getint("DEFAULT", "limit"), 'Limit input tensor to n samples')
flags.DEFINE_integer('num_train_samples', cp.getint("DEFAULT", "num_train_samples"), 'Number of training samples')
flags.DEFINE_integer('num_test_samples', cp.getint("DEFAULT", "num_test_samples"), 'Number of test samples')
flags.DEFINE_integer('num_zs_test_samples', cp.getint("DEFAULT", "num_zs_test_samples"), 'Number of zero shot test samples')
flags.DEFINE_integer('batch_size', cp.getint("DEFAULT", "batch_size"), 'Training batch size')
flags.DEFINE_integer('print_step', cp.getint("DEFAULT", "print_step"), 'Print step')
flags.DEFINE_integer('epochs', cp.getint("DEFAULT", "epochs"), 'Epochs to train')
flags.DEFINE_integer('sampling_freq', cp.getint("DEFAULT", "sampling_freq"), 'Sampling frequency')
flags.DEFINE_string('mode', cp.get("DEFAULT", "mode"), 'Analysis mode (ifreq, iphase, or fft)')
flags.DEFINE_float('keep_prob', cp.getfloat("DEFAULT", "keep_prob"), 'Probability to keep neuron when using CNN')
flags.DEFINE_integer('retrain_batch', cp.getint("DEFAULT", "retrain_batch"), 'Number of times to retrain the same batch (speeds up, but also overfits)')
flags.DEFINE_string('exclude_classes', cp.get("DEFAULT", "exclude_classes"), 'Classes to exclude from training')
# Mode specific options
if cp.get("DEFAULT", "dataset") == 'matlab': # TODO: Bug in Tensorflow: once FLAGS.dataset is accessed it's no longer possible to define new strings
flags.DEFINE_string('matlabfile', cp.get("matlab", "matlabfile"), 'MATLAB LoRa database')
flags.DEFINE_integer('chirp_length', cp.getint("matlab", "chirp_length"), 'Length of a single chirp')
elif cp.get("DEFAULT", "dataset") == 'mongo':
flags.DEFINE_string ('ip', cp.get("mongo", "ip"), 'MongoDB server IP')
        flags.DEFINE_integer('port', cp.getint("mongo", "port"), 'MongoDB server port')
flags.DEFINE_string ('db', cp.get("mongo", "db"), 'MongoDB database name')
flags.DEFINE_string ('collection', cp.get("mongo", "collection"), 'MongoDB chirp collection name')
flags.DEFINE_string ('test_collection', cp.get("mongo", "test_collection"), 'MongoDB test chirp collection name')
flags.DEFINE_integer ('random_mode', RandomMode.s2e(cp.get("mongo", "random_mode")), 'Data randomization approach')
flags.DEFINE_string ('random_date', cp.get("mongo", "random_date"), 'Date for split date mode')
flags.DEFINE_string ('filter', cp.get("mongo", "filter"), 'Query filter for "find" queries')
elif cp.get("DEFAULT", "dataset") == 'random':
        flags.DEFINE_integer('num_classes', cp.getint("random", "num_classes"), 'Number of random classes')
        flags.DEFINE_integer('num_samples', cp.getint("random", "num_samples"), 'Number of random samples')
# Classifier specific options
if cp.get("DEFAULT", "classifier") == 'mlp':
flags.DEFINE_integer('num_hidden_layers', cp.getint("mlp", "num_hidden_layers"), 'Number of hidden layers')
flags.DEFINE_integer('num_hidden_neurons', cp.getint("mlp", "num_hidden_neurons"), 'Number of hidden neurons in a hidden layer')
elif cp.get("DEFAULT", "classifier") == 'cnn':
flags.DEFINE_integer('conv_kernel_width', cp.getint("cnn", "conv_kernel_width"), 'Convolution kernel width')
flags.DEFINE_integer('pooling_kernel_width', cp.getint("cnn", "pooling_kernel_width"), 'Max pooling kernel width')
elif cp.get("DEFAULT", "classifier") == 'mdn':
flags.DEFINE_integer('num_hidden_layers', cp.getint("mdn", "num_hidden_layers"), 'Number of hidden layers')
flags.DEFINE_integer('num_hidden_neurons', cp.getint("mdn", "num_hidden_neurons"), 'Number of hidden neurons in a hidden layer')
def print_conf(cp): # Print settings to terminal
for e in cp.defaults():
print("[+] " + Fore.YELLOW + Style.BRIGHT + e + ": " + str(cp.get("DEFAULT", e)))
def select_cols(matrix, c1, c2): # Select two columns from a numpy matrix
return matrix[:, [c1, c2]]
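# e.g. select_cols(np.arange(9).reshape(3, 3), 0, 2) -> [[0, 2], [3, 5], [6, 8]]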
# ----------------------------------------------------
# Dataset classes
# ----------------------------------------------------
class TensorIO():
def __init__(self, x, y):
self.x = x # Input
self.y = y # Output
class Dataset(): # Dataset base class
def __init__(self):
self.num_training_samples = FLAGS.num_train_samples
self.num_test_samples = FLAGS.num_test_samples
# Based on the tag, get the LoRa ID
def _determine_id(self, tag):
if 'lora' in tag:
lora_id = int(tag[4:])
return lora_id
print("[!] Warning: unable to determine lora_id for entry " + str(tag))
return None
# Preprocess an input so that it can be learned by Tensorflow
def _data_to_tf_record(self, lora_id, chirp, debug=False):
features = []
#visualization.dbg_plot(preprocessing.iphase(chirp), title='Preprocessed chirp')
chirp = preprocessing.roll_to_base(chirp)
for m in FLAGS.mode.split(','):
if m == 'iphase':
features.append(preprocessing.iphase(chirp))
elif m == 'fft':
features.append(preprocessing.fft(chirp))
elif m == 'ifreq':
features.append(preprocessing.ifreq(chirp, FLAGS.sampling_freq))
elif m == 'iamp':
features.append(preprocessing.iamp(chirp))
elif m == 'raw':
features.append(preprocessing.normalize(chirp))
else:
print(Fore.RED + Style.BRIGHT + "[-] Analysis mode must be configured to be either 'fft', 'iphase', 'ifreq', or a comma separated combination.")
exit(1)
if debug:
if lora_id == 1:
visualization.dbg_plot(features[0], title='First feature vector of LoRa 1 chirp')
tf_record = {"lora_id": lora_id, "iq": features}
return tf_record
class GNURadioDataset(Dataset): # Convert pmt of IQ samples to numpy complex 64
def __init__(self, pmt, symbol_length):
self.pmt = pmt
self.symbol_length = symbol_length
def get(self):
data = []
frame = np.frombuffer(self.pmt, dtype=np.complex64)
symbols = [frame[i:i+self.symbol_length] for i in range(0, len(frame), self.symbol_length)]
for symbol in symbols:
tf_record = self._data_to_tf_record(None, symbol)
data.append(tf_record)
return data
class FakeSampleDataset(Dataset):
def __init__(self, host='localhost', port=27017, name="chirps"):
Dataset.__init__(self)
self.name = name
def get(self, projection={}, num_records=500):
return [{"lora_id": 1, "iq": [0+0j] * 74200}] * num_records
class UniformRandomDataset(Dataset): # Sanity check dataset
def __init__(self):
Dataset.__init__(self)
self.num_classes = FLAGS.num_classes
self.lora_ids = set()
for i in range(1, self.num_classes+1):
self.lora_ids.add(i)
def get(self, projection={}):
data = []
for i in range(0, FLAGS.num_samples):
record = {"lora_id": random.randint(1,self.num_classes), "iq": [random.random() for x in range(0, FLAGS.limit)]}
data.append(record)
return data
class MatlabDataset(Dataset):
def __init__(self):
Dataset.__init__(self)
self.path = FLAGS.matlabfile
self.data = []
self.lora_ids = set()
# Load the file and contents
mat_contents = sio.loadmat(self.path)
self.all_samples = mat_contents['all_samples']
# Determine number of classes
for entry in self.all_samples:
entry_name = os.path.basename(entry[0][0])
lora_id = self._determine_id(entry_name)
if lora_id is None:
continue
self.lora_ids.add(lora_id)
def _determine_id(self, filename):
for elem in filename.split('-'):
if 'lora' in elem:
return Dataset._determine_id(self, elem)
def get(self, projection={}, num_records=0): # TODO: projection
data = []
# Parse class data
for entry in self.all_samples:
entry_name = os.path.basename(entry[0][0])
entry_data = entry[1]
lora_id = self._determine_id(entry_name)
if lora_id is None:
continue
print("Parsing " + entry_name + " (class " + str(lora_id) + ", " + str(len(entry_data)) + " samples)")
for record in entry_data:
for i in range(0, 8):
chirp = record[i*FLAGS.chirp_length:(i+1)*FLAGS.chirp_length]
tf_record = self._data_to_tf_record(lora_id, chirp, debug=args.debug)
data.append(tf_record)
return data
class RandomMode:
RANDOMIZE_SYMBOLS = 0
RANDOMIZE_FRAMES = 1
SPLIT_DATE = 2
SPLIT_COLLECTION = 3
_STR_RANDOMIZE_SYMBOLS = 'randomize_symbols'
_STR_RANDOMIZE_FRAMES = 'randomize_frames'
_STR_SPLIT_DATE = 'split_date'
_STR_SPLIT_COLLECTION = 'split_collection'
@staticmethod
def e2s(enum):
if enum == RandomMode.RANDOMIZE_SYMBOLS:
return RandomMode._STR_RANDOMIZE_SYMBOLS
elif enum == RandomMode.RANDOMIZE_FRAMES:
return RandomMode._STR_RANDOMIZE_FRAMES
elif enum == RandomMode.SPLIT_DATE:
return RandomMode._STR_SPLIT_DATE
elif enum == RandomMode.SPLIT_COLLECTION:
return RandomMode._STR_SPLIT_COLLECTION
else:
print(Fore.YELLOW + Style.BRIGHT + "[!] Warning: unknown enum %d. Defaulting to 0." % enum)
return 0
@staticmethod
def s2e(string):
if string == RandomMode._STR_RANDOMIZE_SYMBOLS:
return RandomMode.RANDOMIZE_SYMBOLS
elif string == RandomMode._STR_RANDOMIZE_FRAMES:
return RandomMode.RANDOMIZE_FRAMES
elif string == RandomMode._STR_SPLIT_DATE:
return RandomMode.SPLIT_DATE
elif string == RandomMode._STR_SPLIT_COLLECTION:
return RandomMode.SPLIT_COLLECTION
else:
print(Fore.YELLOW + Style.BRIGHT + "[!] Warning: unknown randomization mode '%s'. Defaulting to randomize_symbols." % string)
return RandomMode.RANDOMIZE_SYMBOLS
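    # e.g. RandomMode.s2e('split_date') == RandomMode.SPLIT_DATE == 2 and
    # RandomMode.e2s(2) == 'split_date'.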
class MongoDataset(Dataset):
def __init__(self):
Dataset.__init__(self)
self.ip = FLAGS.ip
self.port = FLAGS.port
self.client = MongoClient(self.ip, self.port)
self.db = self.client[FLAGS.db]
self.collection = self.db[FLAGS.collection]
self.collection_test = self.db[FLAGS.test_collection]
self.lora_ids = set()
self.random_mode = FLAGS.random_mode
self.filter = json.loads(FLAGS.filter)
self.num_samples = self.collection.find(self.filter).count()
print(Fore.MAGENTA + Style.BRIGHT + "[+] Filter: %s" % str(self.filter))
self.sort = '$natural' if args.natural else 'rand'
# Randomize mongo set
self.randomize()
# Randomize all symbols and divide into training and test set
if self.random_mode == RandomMode.RANDOMIZE_SYMBOLS:
self.cursor_train = self.collection.find(self.filter).sort(self.sort, 1).skip(0).limit(self.num_training_samples)
self.cursor_test = self.collection.find(self.filter).sort(self.sort, 1).skip(self.num_training_samples).limit(self.num_test_samples)
elif self.random_mode == RandomMode.RANDOMIZE_FRAMES:
self.collection.create_index("fn")
# Find out how many test frames we need
frames_for_test = int(self.num_test_samples / 36) # 36 = number of symbols in frame
# Find highest frame number
print("[+] Finding highest frame number")
last_fn = self.collection.find(self.filter).sort("fn", -1).limit(1)[0]['fn']
# Generate list of random frame numbers to be used as test set
test_fns = []
for i in range(0, frames_for_test):
test_fns.append(randint(0, last_fn))
# Assign the cursors
train_query = self.filter.copy()
train_query["fn"] = {"$nin": test_fns}
self.cursor_train = self.collection.find(train_query).sort(self.sort, 1).limit(self.num_training_samples)
test_query = self.filter.copy()
test_query["fn"] = {"$in": test_fns}
self.cursor_test = self.collection.find(test_query).sort(self.sort, 1).limit(self.num_test_samples)
elif self.random_mode == RandomMode.SPLIT_DATE:
self.collection.create_index("date")
print("[+] Splitting test set after date: %s" % FLAGS.random_date)
the_date = datetime.strptime(FLAGS.random_date,'%Y-%m-%dT%H:%M:%SZ')
train_query = self.filter.copy()
train_query["date"] = {"$lt": the_date}
self.cursor_train = self.collection.find(train_query).sort(self.sort, 1).limit(self.num_training_samples)
test_query = self.filter.copy()
test_query["date"] = {"$gte": the_date}
self.cursor_test = self.collection.find(test_query).sort(self.sort, 1).limit(self.num_test_samples)
elif self.random_mode == RandomMode.SPLIT_COLLECTION:
self.cursor_train = self.collection.find(self.filter).sort(self.sort, 1).limit(self.num_training_samples)
self.cursor_test = self.collection_test.find(self.filter).sort(self.sort, 1).limit(self.num_test_samples)
# Determine number of classes
print("[+] Determining number of classes")
for tag in self.cursor_train.distinct('tag'):
lora_id = self._determine_id(tag)
if lora_id is None:
continue
self.lora_ids.add(lora_id)
self.cursor_train.rewind()
# Create caches
self.cache_train = GenericCache(name="train")
self.cache_test = GenericCache(name="test")
def randomize(self):
if os.path.isfile('/tmp/randomized_mongo'):
print("[+] MongoDB dataset is already randomized")
return
self._randomize(self.collection, "")
if self.random_mode == RandomMode.SPLIT_COLLECTION: # If random mode is set to split collection, also randomize this collection
self._randomize(self.collection_test, "(test set)")
with open('/tmp/randomized_mongo', "w") as f:
f.write('')
def _randomize(self, collection, label=""):
print("[+] Randomizing MongoDB dataset %s" % label)
progress = 0
for entry in collection.find(self.filter):
collection.update({"_id": entry["_id"]}, {"$set": {"rand": random.random()}}, upsert=False, multi=False)
progress += 1
print("\r[+] Progress: %d / %d (estimation) " % (progress, self.num_samples)),
print("")
print("[+] Creating index")
collection.create_index("rand")
def get(self, train=True, projection={}, num_records=1000):
data = []
set_in_memory = False
if train:
cursor = self.cursor_train
cache = self.cache_train
num_records_total = self.num_training_samples
else:
cursor = self.cursor_test
cache = self.cache_test
num_records_total = self.num_test_samples
if len(cache) == num_records_total:
set_in_memory = True
# Set is already loaded in cache memory
if set_in_memory:
for i in range(0, num_records):
try:
tf_record = cache.next()
except StopIteration:
cache.rewind()
tf_record = cache.next()
data.append(tf_record)
else: # Go through each record in the MongoDB
for i in range(0, num_records):
try:
record = cursor.next()
except StopIteration:
cursor.rewind()
record = cursor.next()
except (OperationFailure, AutoReconnect) as e:
print("[!] Warning: Got other exception than StopIteration: "),
print(e)
cursor.rewind()
record = cursor.next()
lora_id = self._determine_id(record['tag'])
if lora_id is None:
continue
tf_record = cache.get(record['_id'])
if tf_record is None:
chirp = np.frombuffer(record['chirp'], dtype=np.complex64)
tf_record = self._data_to_tf_record(lora_id, chirp, debug=args.debug)
cache.store(record['_id'], tf_record)
data.append(tf_record)
return data
# The Instances class is responsible for providing:
# - Preprocessing of the raw chirp data into features
# - Separation of dataset into training and test sets
# - Random shuffling of training and test data
class Instances():
def __init__(self, limit=None, exclude_classes=[], name="", mapping=None):
self.name = name
self.num_excluded_samples = 0
self.limit = limit
self.exclude_classes = exclude_classes
# Select dataset type
if cp.get("DEFAULT", "dataset") == 'matlab':
self.dataset = MatlabDataset()
elif cp.get("DEFAULT", "dataset") == 'mongo':
self.dataset = MongoDataset()
elif cp.get("DEFAULT", "dataset") == 'random':
self.dataset = UniformRandomDataset()
else:
print(Fore.RED + Style.BRIGHT + "[-] Unknown dataset type '" + cp.get("DEFAULT", "dataset") + "'. Exiting")
exit(1)
        # Make sure we don't request more samples than are available
print("[+] Got " + Fore.GREEN + Style.BRIGHT + str(self.dataset.num_samples) + Style.RESET_ALL + " samples")
if self.dataset.num_test_samples + self.dataset.num_training_samples > self.dataset.num_samples:
print(Fore.RED + Style.BRIGHT + "[-] Sum of training and test samples exceeds available samples. Exiting")
exit(1)
# Get length of input samples (= number of features) and configure limit
print("[+] Getting number of features (1 record get from test set)")
self.num_features = self._get_num_features(self.dataset.get(train=False, num_records=1))
if self.limit == -1 or self.limit is None:
self.limit = self.num_features
print("[+] First sample contains %d features (limited to %d)" % (self.num_features, self.limit))
# Create mapping from LoRa ID to One Hot Vector if necessary
if mapping is None:
self.mapping = Mapping(self.dataset.lora_ids, exclude_classes=self.exclude_classes)
self.mapping.display()
else: # Update existing map with any new entries found
self.mapping = mapping
self.mapping.update(self.dataset.lora_ids, exclude_classes=self.exclude_classes)
self.mapping.display()
def next_batch(self, train, size):
temp = list(self.dataset.get(train=train, num_records=size))
if len(temp) > 0:
# Randomize (already done in Mongo, but not for other datasets)
random.shuffle(temp)
# Create instances
instances_x = []
instances_y = []
for i in range(0, size):
processed_record = self.process_record(temp[i])
if not (processed_record is None):
instances_x.append(processed_record.x[0:self.limit])
instances_y.append(processed_record.y)
instances_x = np.array(instances_x, dtype=np.float32)
instances_y = np.array(instances_y, dtype=np.float32)
# Done!
#if len(self.exclude_classes) > 0:
# print(Fore.GREEN + Style.BRIGHT + "[+] EXCLUDING %d samples" % self.num_excluded_samples)
else:
print("[-] No samples found in dataset. Exiting")
exit(1)
        if len(instances_x) == 0:
            raise Exception("All records in this batch were excluded during preprocessing")
return instances_x, instances_y
def _get_num_features(self, x):
return len(np.array(x[0]["iq"]).flatten())
def process_record(self, record):
# Do some preprocessing on the records here
if record["lora_id"] in self.exclude_classes:
self.num_excluded_samples += 1
return None
one_hot_vector = self.mapping.lora_id_to_oh(record["lora_id"])
features = np.array(record["iq"]).flatten()
return TensorIO(features, one_hot_vector)
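# Minimal sketch (illustrative) of the preprocessing performed by
# process_record() above; `record` is assumed to be a dict with "lora_id"
# and "iq" keys, and `mapping` to behave like the Mapping class used here.
def _example_record_to_instance(record, mapping, limit):
    features = np.array(record["iq"]).flatten()[0:limit]  # flatten IQ samples, truncate to feature limit
    one_hot = mapping.lora_id_to_oh(record["lora_id"])    # label as one-hot vector
    return features, one_hot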
# ----------------------------------------------------
# ML models
# Some of these models are based on the reference im-
# plementations provided by Aymeric Damien. See
# https://github.com/aymericdamien/TensorFlow-Examples
# for more information.
# ----------------------------------------------------
class MLModel(): # Base class for ML models
def __init__(self):
self.learning_rate = None
self.layers = []
self.output_layer = None
self.cost_function = None
self.correct_prediction = None
class MLPModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, hidden_layers=0, hidden_neurons=0, name='mlp'):
MLModel.__init__(self)
        self.learning_rate = 0.0001 # 0.001 works pretty well too
next_layer = x
next_layer_size = num_inputs
for i in range(0, hidden_layers):
self.layers.append(LinearReluLayer(next_layer, next_layer_size, hidden_neurons, name=name+'lin' + str(i)))
self.output_layer = self.layers[-1]
next_layer = self.output_layer.h
next_layer_size = hidden_neurons
self.layers.append(LinearLayer(next_layer, next_layer_size, num_classes, name=name+'clin', init_zero=True)) # Since it will be softmaxed later, init to zero. Seems to affect training speed and making the weights align on a diagonal faster
self.output_layer = self.layers[-1]
#self.cost_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.nn.softmax(self.output_layer.h)+EPSILON), reduction_indices=[1])) # Doesn't deal with edge cases so we need to add EPSILON
self.cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.h, labels=y))
#self.cost_function = tf.reduce_mean(tf.reduce_sum(tf.square(y - tf.nn.softmax(self.output_layer.h)), reduction_indices=[1]))
self.correct_prediction = tf.equal(tf.argmax(self.output_layer.h,1), tf.argmax(y,1))
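# Why the fused op above is preferred over the commented-out manual
# formulation: log(softmax(h)) underflows to log(0) = -inf for confident
# logits unless an EPSILON is added, while softmax_cross_entropy_with_logits
# applies the log-sum-exp shift internally. Minimal numpy sketch (illustrative):
def _example_stable_cross_entropy(logits, labels):
    shifted = logits - np.max(logits)                     # log-sum-exp shift for stability
    log_softmax = shifted - np.log(np.sum(np.exp(shifted)))
    return -np.sum(labels * log_softmax)                  # cross-entropy against one-hot labels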
class ConvNeuralNetModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, keep_prob=None, name='cnn'):
MLModel.__init__(self)
self.learning_rate = 0.001 # 0.0001
# Make image
x_shaped = tf.reshape(x, shape=[-1, 1, num_inputs, 1])
# Append convolution layers
self.layers.append(NNLayer(x_shaped, [1, FLAGS.conv_kernel_width, 1, 32], [32], name=name+'wc1'))
self.output_layer = self.layers[-1]
self.layers.append(NNLayer(self.output_layer.h, [1, FLAGS.conv_kernel_width, 32, 64], [64], name=name+'wc2'))
self.output_layer = self.layers[-1]
# Reshape conv2 output to fit fully connected layer input
        relu_inputs = (num_inputs/pow(FLAGS.pooling_kernel_width, 2))*64 # 64 = output channels from the last conv layer; the width is divided by the pooling kernel width k once per max-pooling layer, hence k squared for the two layers
relu_outputs = num_inputs
out_shaped = tf.reshape(self.output_layer.h, [-1, relu_inputs])
# Append fully connected layer
self.layers.append(LinearReluDropLayer(out_shaped, relu_inputs, relu_outputs, keep_prob))
self.output_layer = self.layers[-1]
# Output, class prediction
self.layers.append(LinearLayer(self.output_layer.h, relu_outputs, num_classes, name=name+'lin'))
self.output_layer = self.layers[-1]
self.cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.h, labels=y))
self.correct_prediction = tf.equal(tf.argmax(self.output_layer.h,1), tf.argmax(y,1))
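# Sanity check (illustrative) for the fully connected layer size above: each
# of the two max-pooling layers divides the width by the pooling kernel
# width k, and the last conv layer emits 64 channels, so the flattened size
# is (num_inputs / k^2) * 64; e.g. num_inputs=1024, k=2 gives 16384.
def _example_fc_input_size(num_inputs, k, out_channels=64):
    return (num_inputs // (k ** 2)) * out_channels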
class MDNModel(MLModel):
def __init__(self, x, num_inputs, y, num_classes, hidden_layers=0, hidden_neurons=0, name='mdn'):
MLModel.__init__(self)
self.num_classes = num_classes
self.learning_rate = 0.001
next_layer = x
next_layer_size = num_inputs
# Hidden layers
for i in range(0, hidden_layers):
self.layers.append(LinearLayer(next_layer, next_layer_size, hidden_neurons, name=name+'lin' + str(i)))
self.output_layer = self.layers[-1]
next_layer = self.output_layer.h
next_layer_size = hidden_neurons
# MDN layer
self.layers.append(MixtureLayer(next_layer, next_layer_size, num_classes, name=name+"mix"))
self.output_layer = self.layers[-1]
self.pi, self.mu, self.sigma = self._get_components(self.output_layer)
self.gauss = tf.contrib.distributions.Normal(mu=self.mu, sigma=self.sigma)
# Cost function
self.cost_function = self._get_cost_function(y)
# Evaluation
self.correct_prediction = tf.equal(tf.argmax(tf.mul(self.pi,self.gauss.mean()), 1), tf.argmax(y,1))
def _get_components(self, layer):
        pi, mu, sigma = tf.split(1, layer.num_components, layer.h)
pi = tf.nn.softmax(pi)
#assert_op = tf.Assert(tf.equal(tf.reduce_sum(pi), 1.), [pi])
#pi = tf.with_dependencies([assert_op], pi)
sigma = tf.exp(sigma)
return pi, mu, sigma
def _get_cost_function(self, y):
return tf.reduce_mean(-tf.log(tf.reduce_sum(tf.mul(self.pi, self.gauss.pdf(y)), 1, keep_dims=True)))
def _sample(self, n):
# Randomly sample x times according to pi distribution
mixture_indices = tf.reshape(tf.multinomial(tf.log(self.pi), n), [-1]) # Pi must be a log probability
# Sample all gaussian distributions x times
samples = tf.reshape(self.gauss.sample(n), [-1, self.num_classes])
# Select only the one according to pi
select_gaussians = tf.reduce_sum(tf.one_hot(mixture_indices, self.num_classes) * samples, 1)
return select_gaussians
def _mean(self):
        # Get the indices of the most likely mixtures belonging to each x
mixture_indices = tf.argmax(self.pi, 1)
# Get the expected values of all gaussians
exp_values = self.gauss.mean()
# Get expected value of most likely mixture
select_exp = tf.reduce_sum(tf.one_hot(mixture_indices, self.num_classes) * exp_values, 1)
return select_exp
class ModelType:
MLP = 0
CONVNET = 1
MDN = 2
@staticmethod
def str2type(string):
if string == "mlp":
return ModelType.MLP
elif string == "cnn":
return ModelType.CONVNET
elif string == "mdn":
return ModelType.MDN
else:
print(Fore.RED + Style.BRIGHT + "[-] Model type "+ string +" does not exist.")
exit(1)
# ----------------------------------------------------
# ML classifiers
# ----------------------------------------------------
class SVM():
def __init__(self, name="svc"):
print("[+] SVM Classifier")
self.m = SVC()
self.name = name
def _get_lora_id_labels(self, instances, oh_labels):
result = []
for i in range(0, len(oh_labels)):
result.append(instances.mapping.oh_to_lora_id(oh_labels[i]))
return result
def _to_vendor(self, instances, lora_id_labels):
result = []
for i in range(0, len(lora_id_labels)):
result.append(instances.mapping.lora_id_to_vendor_id(lora_id_labels[i]))
return result
def train(self, instances, batch_size=2500):
print("[+] Getting %d training samples" % batch_size)
train_samples_x, train_samples_y = instances.next_batch(True, batch_size)
train_samples_y = self._get_lora_id_labels(instances, train_samples_y)
print("[+] Training model")
self.m.fit(train_samples_x, train_samples_y)
def save(self):
path = FLAGS.trainedmodelsdir + self.name + "/"
if not os.path.exists(path):
os.makedirs(path)
# Save model
pickle.dump(self.m, open(path + 'svc_model.p', "wb"))
@staticmethod
def load():
path = FLAGS.trainedmodelsdir + FLAGS.model_name + "/"
# Set up classifier based on config and stored data
net = SVM()
net.m = pickle.load(open(path + 'svc_model.p', "rb"))
return net
def bin_class_per_sample(self, instances, limit=200, adv_detect=True, vendor_only=False):
print("[+] Getting %d test samples" % limit)
test_samples_x, test_samples_y = instances.next_batch(False, limit)
test_samples_y = self._get_lora_id_labels(instances, test_samples_y)
print("[+] Evaluating model")
predicted_y = self.m.predict(test_samples_x)
if vendor_only:
metrics = utilities.get_eval_metrics_percent(self._to_vendor(instances, test_samples_y), self._to_vendor(instances, predicted_y))
else:
metrics = utilities.get_eval_metrics_percent(test_samples_y, predicted_y)
utilities.print_metrics(metrics)
return
def visualize_embeddings(self, instances, limit=200, train=True):
print("[!] Warning: visualize_embeddings not implemented for SVM")
return
class Classifier():
# Build the classifier
def __init__(self, num_inputs, num_classes, name, modeltype=ModelType.MLP):
self.num_inputs = num_inputs
self.num_classes = num_classes
self.name = name
self.step = 0
self.modeltype = modeltype
self.expected_values = None
self.std = None
self.distance_threshold = np.zeros(num_classes)
self.sess = None
self.instances_mapping = None
model_summaries = []
self.x = tf.placeholder("float", [None, self.num_inputs], name='inputs')
self.y = tf.placeholder("float", [None, self.num_classes], name='map-id-oh')
self.keep_prob = tf.placeholder(tf.float32, name='dropout')
if modeltype == ModelType.MLP:
self.m = MLPModel(self.x, self.num_inputs, self.y, self.num_classes, hidden_layers=FLAGS.num_hidden_layers, hidden_neurons=FLAGS.num_hidden_neurons, name="mlp") # Build MLP model
elif modeltype == ModelType.CONVNET:
self.m = ConvNeuralNetModel(self.x, self.num_inputs, self.y, self.num_classes, keep_prob=self.keep_prob, name="cnn") # Build Convolutional Neural Network model
elif modeltype == ModelType.MDN:
self.m = MDNModel(self.x, self.num_inputs, self.y, self.num_classes, hidden_layers=FLAGS.num_hidden_layers, hidden_neurons=FLAGS.num_hidden_neurons, name="mdn") # Build MDN model
else:
raise Exception("No model type specified")
# Define optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.m.learning_rate).minimize(self.m.cost_function)
# Define accuracy model
self.accuracy = tf.reduce_mean(tf.cast(self.m.correct_prediction, tf.float32))
# Merge TensorBoard summaries for the model
model_summaries.append(tf.summary.scalar('accuracy', self.accuracy))
model_summaries.append(tf.summary.scalar('cost', self.m.cost_function))
self.merged_model_summaries = tf.summary.merge(model_summaries, collections=None, name=None)
# Define session object and summary writers
self.sess = tf.Session()
self.train_writer = tf.summary.FileWriter(FLAGS.logdir + '/train', graph=self.sess.graph)
self.test_writer = tf.summary.FileWriter(FLAGS.logdir + '/test')
def __del__(self):
if not (self.sess is None):
self.sess.close()
self.train_writer.close()
self.test_writer.close()
# Plot sample data to Tensorboard
def _plot_samples(self, samples_x, samples_y):
# Register plot summaries
plot_summaries = []
plots_to_show = 5
learned_weights_tensor = tf.identity(self.m.output_layer.W)
learned_weights = self.sess.run(learned_weights_tensor)
plot_summaries.append(visualization.plot_values(samples_x[0], self.instances_mapping, height=500, width=self.num_inputs, tag="weights", title="Weights", label=np.argmax(samples_y[0]), backdrop=learned_weights))
        for i in range(1, plots_to_show + 1):
label = np.argmax(samples_y[i])
guess = self.get_accuracy([samples_x[i]], [samples_y[i]])
plot_summaries.append(visualization.plot_values(samples_x[i], self.instances_mapping, height=500, width=self.num_inputs, tag="trd" + str(i) + "c" + str(label) + "g" + str(guess), title="Training data", label=label))
# Merge TensorBoard summaries for plots
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
# Plot kernel data to Tensorboard
def _plot_kernels(self):
plot_summaries = []
# TODO go through layers and check .startswith("wc")
kernels_tensor = self.m.layers[0].W
kernels_shaped_tensor = tf.reshape(kernels_tensor, [-1, FLAGS.conv_kernel_width]) # Arrange kernels so that there is one per row
kernels_shaped = self.sess.run(kernels_shaped_tensor)
plot_summaries.append(visualization.plot_kernels(kernels_shaped, FLAGS.conv_kernel_width, height=4096, width=1024, tag="kernels", title="CNN Kernels"))
# Merge TensorBoard summaries for plots TODO dup code
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
def get_output_weights(self, samples_x):
return self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
def _plot_output_weights_2d(self, samples_x, samples_y, predictions_y, instances, metrics): # Do not use new samples from instances
plot_summaries = []
# Get the output weight values for all classes
output_weights = self.get_output_weights(samples_x)
# OLD: Get first two weights to visualize
# weights = select_cols(output_weights, 0, 1)
# Reduce dimensionality of weights to 2
tsne = TSNE(n_components=2, init='pca', n_iter=5000)
weights = tsne.fit_transform(output_weights)
#xlabel = "Weight #" + str(0) + " values"
#ylabel = "Weight #" + str(1) + " values"
xlabel = "t-SNE dimension 1"
ylabel = "t-SNE dimension 2"
plot_summaries.append(visualization.plot_weights(weights, samples_y, predictions_y, self.expected_values, self.distance_threshold, instances.mapping, tag=self.name+"-w", title="2D projection of output feature weights", xlabel=xlabel, ylabel=ylabel, metrics=metrics))
# Merge TensorBoard summaries for plots TODO dup code
merged_plot_summaries = tf.summary.merge(plot_summaries, collections=None, name=None)
summary_plot = self.sess.run(merged_plot_summaries)
self.train_writer.add_summary(summary_plot)
def train(self, instances, batch_size=2500):
# Let's go
print("[+] Training")
self.sess.run(tf.global_variables_initializer())
# Start learning weights
try:
while True:
train_batch_x, train_batch_y = instances.next_batch(True, batch_size)
test_batch_x, test_batch_y = instances.next_batch(False, batch_size)
# Execute training step(s) on batch
#print(self.sess.run(self.m.tmp, feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: FLAGS.keep_prob})) # To test something inside model with the same data
for i in range(0, FLAGS.retrain_batch):
self.sess.run(self.optimizer, feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: FLAGS.keep_prob})
# Print progress
if self.step % FLAGS.print_step == 0:
# Print stats about step
summary_train, c_train, a_train = self.sess.run([self.merged_model_summaries, self.m.cost_function, self.accuracy], feed_dict={self.x: train_batch_x, self.y: train_batch_y, self.keep_prob: 1.0})
summary_test = self.sess.run(self.merged_model_summaries, feed_dict={self.x: test_batch_x, self.y: test_batch_y, self.keep_prob: 1.0})
# Add summaries
self.train_writer.add_summary(summary_train, self.step)
self.test_writer.add_summary(summary_test, self.step)
# Print info about training
print("Epoch {:d}: cost={:.6f}, tr_acc={:.6f}, W0_0={:.6f}".format(self.step, c_train, a_train, self.sess.run(self.m.output_layer.W)[0][0]))
# Next step
self.step += 1
if self.step == FLAGS.epochs:
raise KeyboardInterrupt
except KeyboardInterrupt:
pass
# Save the mapping used during training from LoRa ID to Map ID
self.instances_mapping = instances.mapping
# Mixture components
self.expected_values, self.std = self.calculate_mixture_components(instances)
# Show results
print(Fore.GREEN + Style.BRIGHT + "[+] Done training!")
if self.modeltype == ModelType.MLP:
print(Fore.GREEN + Style.BRIGHT + "[+] Plotting training samples")
self._plot_samples(train_batch_x, train_batch_y)
else:
print(Fore.GREEN + Style.BRIGHT + "[+] Plotting model kernels")
self._plot_kernels()
# Evaluation
print("[+] Training set accuracy")
print(self.get_accuracy(train_batch_x, train_batch_y))
print("[+] Test set accuracy")
print(self.get_accuracy(test_batch_x, test_batch_y))
# Assert that nothing unexpected happened during the whole process
GenericCache.assert_disjunction(instances.dataset.cache_train, instances.dataset.cache_test)
print(Fore.GREEN + Style.BRIGHT + "[+] Training assertions passed")
def determine_ideal_threshold(self, map_id, samples_x, expected_values):
output_weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
threshold = 0.0
for output_weight in output_weights:
#threshold = max(np.linalg.norm(output_weight - expected_values), threshold)
#threshold = (np.linalg.norm(output_weight - expected_values) + threshold) / 2.0
threshold += np.linalg.norm(output_weight - expected_values)
threshold /= len(output_weights)
return threshold
def calculate_mixture_components(self, instances, num_samples_to_use=10000):
print("[+] Determining mixture model components")
train_batch_x, train_batch_y = instances.next_batch(True, num_samples_to_use)
expected_values = np.ndarray(shape=(self.num_classes,self.num_classes), dtype=np.float32)
std = np.ndarray(shape=(self.num_classes,self.num_classes), dtype=np.float32)
for lora_id in instances.mapping.keys():
map_id = instances.mapping.lora_to_map_id(lora_id)
samples_x = []
# Collect samples belonging to class map_id
for i in range(0, len(train_batch_x)):
if np.argmax(train_batch_y[i]) == map_id:
samples_x.append(train_batch_x[i])
if len(samples_x) == 0:
print(train_batch_y)
print("[-] Error: no samples in training set for LoRa %d. Dumped y training set" % lora_id)
exit()
# Determine mean and std deviation for all features
nn_output_weights = self.sess.run(tf.identity(self.m.output_layer.h), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
expected_values[map_id] = np.mean(nn_output_weights, axis=0)
std[map_id] = np.std(nn_output_weights, axis=0)
# Determine ideal threshold based on expected values
# this threshold is used when doing nearest neighbor classification
# as the outlier detection (not discussed in paper)
if args.distance_threshold == 'auto':
print("\r[+] Determining expected value distance threshold for LoRa %d " % lora_id),
self.distance_threshold[map_id] = self.determine_ideal_threshold(map_id, samples_x, expected_values[map_id])
else:
self.distance_threshold[map_id] = args.distance_threshold
# Clean up
del samples_x
print("")
return expected_values, std
    # Calculates, for each sample, the distances between its output weights
    # and every class centroid (the expected output weight vectors)
    def calculate_expected_values_distance(self, samples_x):
if self.expected_values is None or self.distance_threshold is None:
raise Exception("Tried to evaluate expected value MSE without training values")
output_weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
distances = []
for output_weight_v in output_weights:
distances.append(np.linalg.norm(output_weight_v - self.expected_values, axis=1)) # Distance from E(X) for each class to X
return distances
def get_accuracy(self, samples_x, samples_y):
return self.sess.run(self.accuracy, feed_dict={self.x: samples_x, self.y: samples_y, self.keep_prob: 1.0})
def save(self):
path = FLAGS.trainedmodelsdir + self.name + "/"
if not os.path.exists(path):
os.makedirs(path)
# Save number of inputs
np.save(path + 'value-inputs', self.num_inputs)
# Save number of classes
np.save(path + 'value-classes', self.num_classes)
# Save layers
for layer in self.m.layers:
filename = path + 'layer-' + layer.name
layer.saver.save(self.sess, filename, global_step=0)
# Save expected classification output
np.save(path + 'value-expected', self.expected_values)
np.save(path + 'value-std', self.std)
# Save distance threshold
np.save(path + 'value-dt', self.distance_threshold)
# Save instance mapping
pickle.dump(self.instances_mapping, open(path + 'value-mapping.p', "wb"))
@staticmethod
    def load(step=0):
path = FLAGS.trainedmodelsdir + FLAGS.model_name + "/"
# Load inputs and classes. Required to set up models.
num_inputs = np.load(path + 'value-inputs' + '.npy')
num_classes = np.load(path + 'value-classes' + '.npy')
# Set up classifier based on config and stored data
net = Classifier(num_inputs=num_inputs, num_classes=num_classes, name=FLAGS.model_name, modeltype=ModelType.str2type(FLAGS.classifier))
for layer in net.m.layers:
filename = path + 'layer-' + layer.name + '-' + str(step)
layer.saver.restore(net.sess, filename)
try:
net.expected_values = np.load(path + 'value-expected' + '.npy')
net.std = np.load(path + 'value-std' + '.npy')
except IOError:
print("[!] Warning: model does not have 'value-expected' and/or 'value-std', and will not be able to perform zero shot classification as a result.")
pass
net.distance_threshold = np.load(path + 'value-dt' + '.npy')
net.instances_mapping = pickle.load(open(path + 'value-mapping.p', "rb"))
return net
def test(self, instances, limit=200):
test_samples_x, test_samples_y = instances.next_batch(False, limit)
# Metrics
accuracy = self.get_accuracy(test_samples_x, test_samples_y)
print(Fore.GREEN + Style.BRIGHT + "[+] Evaluation accuracy for %d samples: %.2f percent" % (limit, accuracy * 100.0))
    # Determine to which class a (set of) symbols belongs.
    # If clustering is used, a frame that does not fall within any cluster is attributed to an attacker
def _predict(self, samples_x, adv_detect):
if FLAGS.clustering == "l1nn":
return self._predict_nearest_neighbor_l1(samples_x, adv_detect)
elif FLAGS.clustering == "argmax" or FLAGS.clustering == "none":
if adv_detect: # TODO: Threshold in this case?
print("[!] Warning: adv_detect cannot be used with argmax clustering at the moment")
return self._predict_argmax(samples_x)
        else: # Unknown clustering approach: warn and fall back to argmax
print("[!] Warning: unknown clustering approach '%s'; defaulting to 'none'" % FLAGS.clustering)
return self._predict_argmax(samples_x)
# Predict class with least L1 distance to expected weight
def _predict_nearest_neighbor_l1(self, samples_x, adv_detect):
expected_values_distance = self.calculate_expected_values_distance(samples_x)
idmap_predictions = []
for ed in expected_values_distance:
map_id = np.argmin(ed)
if adv_detect and (ed[map_id] > self.distance_threshold[map_id]):
map_id = -1
idmap_predictions.append(map_id)
most_probable = stats.mode(idmap_predictions)[0][0]
return most_probable, idmap_predictions
# Predict class with highest weight
def _predict_argmax(self, samples_x):
idmap_predictions = self.sess.run(tf.argmax(self.m.output_layer.h, 1), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
most_probable = stats.mode(idmap_predictions)[0][0]
return most_probable, idmap_predictions
def _predict_zeroshot(self, samples_x):
weights = self.sess.run(self.m.output_layer.h, feed_dict={self.x: samples_x, self.keep_prob: 1.0})
probabilities = self.sess.run(tf.nn.softmax(self.m.output_layer.h), feed_dict={self.x: samples_x, self.keep_prob: 1.0})
return weights, probabilities
# Function to visualize confusion matrix and calculate the metrics ourselves
def _print_statistics(self, confusion_matrix):
num_classes = confusion_matrix.shape[0]
true_positives = np.zeros(num_classes)
false_positives = np.zeros(num_classes)
false_negatives = np.zeros(num_classes)
true_negatives = np.zeros(num_classes)
precision = np.zeros(num_classes)
recall = np.zeros(num_classes)
accuracy = np.zeros(num_classes)
# Calculate metrics
        for i in range(num_classes):
            true_positives[i] = confusion_matrix[i,i]
            false_positives[i] = np.sum(confusion_matrix[:,i]) - true_positives[i]
            false_negatives[i] = np.sum(confusion_matrix[i,:]) - true_positives[i]
            true_negatives[i] = np.sum(confusion_matrix) - (false_positives[i] + false_negatives[i] + true_positives[i])
            precision[i] = true_positives[i] / (true_positives[i] + false_positives[i])
            recall[i] = true_positives[i] / (true_positives[i] + false_negatives[i])
            accuracy[i] = (true_positives[i] + true_negatives[i]) / (true_positives[i] + false_positives[i] + false_negatives[i] + true_negatives[i])
np.set_printoptions(threshold='nan', linewidth=200)
print("Confusion matrix")
print(confusion_matrix)
print("TP")
print(true_positives)
print("FP")
print(false_positives)
print("FN")
print(false_negatives)
print("TN")
print(true_negatives)
print("Precision")
print(precision)
print("Recall")
print(recall)
print("Accuracy")
print(accuracy)
# Accuracy according to Wikipedia. This metric is not correct because
# it counts partially correct samples in the true negatives part of the
# confusion matrix. For example: when class 5 is a true negative with
# respect to a class 3 one-v-all classifier, it is considered correct
# even though the true class is 7.
model_accuracy_partial_correct = np.mean(accuracy)
# Decent metrics
model_accuracy = np.sum(true_positives) / np.sum(confusion_matrix)
model_precision_macro = np.mean(precision)
model_recall_macro = np.mean(recall)
print("Macc_PARTIAL : %.2f" % (model_accuracy_partial_correct*100.0))
print("Macc : %.2f" % (model_accuracy*100.0))
print("Mprec (macro): %.2f" % (model_precision_macro*100.0))
print("Mrec (macro) : %.2f" % (model_recall_macro*100.0))
# Perform a per-sample classification of whether it belongs to a class or not
# This is done by calculating the distance to the expected value (mode) of the
# Gaussian distribution of output weights for each class, and choosing the shortest
# distance.
def bin_class_per_sample(self, instances, limit=200, adv_detect=True, vendor_only=False):
test_samples_x, test_samples_y = instances.next_batch(False, limit)
num_samples = len(test_samples_x)
num_classes = instances.mapping.size+1 if adv_detect else instances.mapping.size # If adv_detect: use extra class for unknown
# Metrics
predicted_y = []
true_y = []
true_y_vis = []
confusion_matrix = np.zeros(shape=(num_classes,num_classes))
print('[+] Predicting %d samples...' % num_samples)
for i in range(0, num_samples):
true_class_map = np.argmax(test_samples_y[i]) # Get the true map ID from the dataset
predicted_class_map,_ = self._predict([test_samples_x[i]], adv_detect) # Get the map ID according to the model
true_class = instances.mapping.map_to_lora_id(true_class_map) # Get the LoRa ID from the dataset
predicted_class = self.instances_mapping.map_to_lora_id(predicted_class_map) # Get the LoRa ID according to the model
if predicted_class is None:
predicted_class = -1
if vendor_only:
true_class = instances.mapping.lora_id_to_vendor_id(true_class)
predicted_class = self.instances_mapping.lora_id_to_vendor_id(predicted_class)
predicted_y.append(predicted_class)
if adv_detect:
if not true_class in self.instances_mapping.keys(): # self.instances_mapping = learned mapping from model
true_y_vis.append(true_class)
true_y.append(-1)
confusion_matrix[0, predicted_class_map+1] += 1 # Make it so adv class(=-1) becomes class 0
else:
true_y.append(true_class)
true_y_vis.append(true_class)
confusion_matrix[true_class_map+1, predicted_class_map+1] += 1
else:
true_y.append(true_class)
true_y_vis.append(true_class)
confusion_matrix[true_class_map, predicted_class_map] += 1
print("[+] True classes encountered: %s" % len(set(true_y)))
self._print_statistics(confusion_matrix) # For debugging
assert(np.sum(confusion_matrix) == num_samples)
metrics = utilities.get_eval_metrics_percent(true_y, predicted_y)
utilities.print_metrics(metrics)
print('[+] Plotting output weights for first %d samples' % num_samples)
self._plot_output_weights_2d(test_samples_x, true_y_vis, predicted_y, instances, metrics)
def bin_class_per_frame(self, frame, symbol_length, adv_detect=True):
dataset = GNURadioDataset(frame, symbol_length)
data_x = [np.array(x["iq"]).flatten() for x in dataset.get()]
map_id, all_map_id_predictions = self._predict(data_x, adv_detect)
# Debug
lora_id_predictions = []
for map_id in all_map_id_predictions:
lora_id_predictions.append(self.instances_mapping.map_to_lora_id(map_id))
print("%s: %s" % (FLAGS.clustering, str(lora_id_predictions)))
return stats.mode(lora_id_predictions)[0][0]
def _labels_to_tsv_file(self, labels, mapping, out=None):
result = ""
for i in range(len(labels)):
result += str(mapping.oh_to_lora_id(labels[i])) + "\n"
if out:
with open(out, "w") as f:
f.write(result)
# TODO: Actually doesn't need to be inside the Classifier class
def visualize_embeddings(self, instances, limit=200, train=True):
print("[+] Gathering instances...")
samples_x, samples_y = instances.next_batch(train, limit)
        weights = self.get_output_weights(samples_x)
print(Fore.GREEN + Style.BRIGHT + "[+] Visualizing embeddings for %d samples" % limit)
embeddings_instances = tf.Variable(tf.stack(samples_x, axis=0), trainable=False, name='instances')
embeddings_weights = tf.Variable(tf.stack(weights, axis=0), trainable=False, name='weights')
self.sess.run(tf.variables_initializer([embeddings_instances, embeddings_weights]))
embeddings_saver = tf.train.Saver([embeddings_instances, embeddings_weights])
embeddings_writer = tf.summary.FileWriter(FLAGS.logdir + '/projector', self.sess.graph)
conf = projector.ProjectorConfig()
# Add embeddings
# Instances
e = conf.embeddings.add()
e.tensor_name = embeddings_instances.name
self._labels_to_tsv_file(samples_y, instances.mapping, out=FLAGS.logdir + '/projector/metadata.tsv')
e.metadata_path = FLAGS.logdir + '/projector/metadata.tsv'
# Generate sprite, save to tmp and assign here
#e.sprite.image_path = FLAGS.logdir +
#e.sprite.single_image_dim.extend([1024, 768])
# Weights
e = conf.embeddings.add()
e.tensor_name = embeddings_weights.name
self._labels_to_tsv_file(samples_y, instances.mapping, out=FLAGS.logdir + '/projector/metadata.tsv')
e.metadata_path = FLAGS.logdir + '/projector/metadata.tsv'
projector.visualize_embeddings(embeddings_writer, conf)
embeddings_saver.save(self.sess, FLAGS.logdir + '/projector/model_embeddings.ckpt')
# Calculates distance between pairs of centroids
def _intercluster_distance(self, centroids, method='min'):
num_centroids = len(centroids)
if not method in ['min','mean','mean_of_min']:
print("[!] Warning: _intercluster_distance: no such method '%s'. Defaulting to 'min'." % method)
method = 'min'
print("[+] Finding %s distance between %d centroids" % ("minimum" if method == 'min' else ("mean" if method == "mean" else "mean of minimum"), num_centroids))
if method == 'mean_of_min':
minimums = []
for i in range(len(centroids)):
first = centroids[i]
distances = []
for j in range(len(centroids)):
if i == j:
continue
second = centroids[j]
distance = np.linalg.norm(second - first)
distances.append(distance)
minimums.append(np.min(distances))
return np.mean(minimums)
else:
distances = []
for pair in combinations(range(num_centroids), 2):
distance = np.linalg.norm(centroids[pair[0]] - centroids[pair[1]])
distances.append(distance)
if method == 'min':
return np.min(distances)
elif method == 'mean':
return np.mean(distances)
# Convert predicted labels to real labels so that they can be compared
# in terms of accuracy
def _get_zeroshot_labels(self, dbscan_labels, real_labels):
counts = defaultdict(list)
# Get dbscan labels for each real label
for i in range(len(real_labels)):
counts[real_labels[i]].append(dbscan_labels[i])
# Get most frequent dbscan label for each real label
# and use dbscan label as key for lookup dict
keys = {}
keys_counts = defaultdict(lambda: 0)
for key in set(real_labels):
mode_count = stats.mode(counts[key])[1][0]
mode_value = stats.mode(counts[key])[0][0]
if mode_count > keys_counts[mode_value]:
keys[mode_value] = key
keys_counts[mode_value] = mode_count
# Apply lookup dict to transform labels
result = []
for i in range(len(dbscan_labels)):
try:
result.append(keys[dbscan_labels[i]])
except KeyError: # No prevalent real label for this dbscan label found, so use outlier
result.append(-1)
return np.array(result)
def classify_zeroshot(self, instances, limit=40, threshold_outlier=0.0001, vendor_only=False):
num_mixtures = len(self.std)
mixtures = []
outlier_points = []
outlier_labels = []
print("[+] Gathering test samples")
test_samples_x, test_samples_y = instances.next_batch(False, limit)
num_samples = len(test_samples_x)
print("[+] Building %d gaussian mixtures based on trained parameters" % num_mixtures)
from scipy.stats import multivariate_normal
for i in range(num_mixtures):
# TF method
#g = tf.contrib.distributions.Normal(mu=self.expected_values[i], sigma=self.std[i])
# Numpy method
#g = multivariate_normal(self.expected_values[i], np.diag(np.power(self.std[i], 2)))
g = NumpyNormWrapper(mu=self.expected_values[i], sigma=self.std[i])
mixtures.append(g)
print("[+] Finding inter-cluster distance of training samples")
icd = self._intercluster_distance(self.expected_values, method='mean_of_min')
print("[+] ICD is %f" % icd)
print("[+] Calculating weights and probabilities")
weights, probabilities = self._predict_zeroshot(test_samples_x)
print("[+] Calculating marginals")
marginals = np.zeros(shape=(num_samples, num_mixtures))
for i in range(num_samples):
point = weights[i]
pi = probabilities[i]
for j in range(num_mixtures):
# TF method
#marginals[i] += pi[j] * self.sess.run(mixtures[j].pdf(point))
# Numpy method
marginals[i] += pi[j] * mixtures[j].pdf(point)
outlier = False
for j in range(num_mixtures):
if marginals[i][j] < threshold_outlier:
outlier = True
outlier_points.append(point)
lora_id = instances.mapping.oh_to_lora_id(test_samples_y[i])
if vendor_only: # If we only care about classifying correct vendor
lora_id = instances.mapping.lora_id_to_vendor_id(lora_id)
outlier_labels.append(lora_id)
break
#print("%02d: %s | marg:%s, pi:%s, meanmarg:%s (%d/%d)" % (instances.mapping.oh_to_lora_id(test_samples_y[i]), str(outlier), str(marginals[i]), pi, str(np.mean(marginals[i])),i,num_samples))
print("[+] Finding nearest neighbors based on inter-cluster distance of training data")
db = DBSCAN(eps=icd, min_samples=1).fit(outlier_points)
zeroshot_labels = self._get_zeroshot_labels(db.labels_, outlier_labels)
guess_clusters = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
print(db.labels_)
print(np.array(outlier_labels))
print(zeroshot_labels)
print(guess_clusters)
metrics = utilities.get_eval_metrics_percent(outlier_labels, zeroshot_labels)
utilities.print_metrics(metrics)
# Reduce dimensionality of weights to 2
tsne = TSNE(n_components=2, init='pca', n_iter=5000)
vis = tsne.fit_transform(outlier_points)
visualization.plot_weights(vis, outlier_labels, zeroshot_labels, None, None, instances.mapping, tag=self.name+"-zero-w", metrics=metrics, tf=True)
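# Minimal sketch (illustrative) of the outlier-clustering step above: DBSCAN
# with eps set to the training-set inter-cluster distance groups unseen
# devices whose output weights fall outside all known mixtures.
#   from sklearn.cluster import DBSCAN
#   db = DBSCAN(eps=icd, min_samples=1).fit(points)  # points: [n, d] output weights
#   labels = db.labels_  # one cluster index per point; DBSCAN marks noise as -1,
#                        # though with min_samples=1 every point is a core point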
# Class to make numpy normal distribution act the same as TF normal distribution
class NumpyNormWrapper():
def __init__(self, mu, sigma):
from scipy.stats import norm
if len(mu) != len(sigma):
raise Exception
# Initialize
self.num_distributions = len(mu)
self.distributions = []
for i in range(self.num_distributions):
self.distributions.append(norm(mu[i], sigma[i]))
def pdf(self, values):
if len(values) != self.num_distributions:
raise Exception
result = []
for i in range(self.num_distributions):
result.append(self.distributions[i].pdf(values[i]))
return np.array(result)
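# Minimal usage sketch (illustrative): NumpyNormWrapper evaluates each
# dimension's univariate pdf independently, returning a vector of
# per-dimension densities rather than a single joint density.
#   w = NumpyNormWrapper(mu=[0.0, 1.0], sigma=[1.0, 2.0])
#   w.pdf([0.0, 1.0])  # -> array([0.3989..., 0.1994...])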
# ----------------------------------------------------
# ML layers
# ----------------------------------------------------
class NNLayer():
def __init__(self, inputs, Wshape, bshape, name=''): # input features and outputs
self.inputs = inputs
self.Wshape = Wshape
self.bshape = bshape
self.name = name
# Define model
self.W = tf.Variable(tf.random_normal(Wshape)) # Filter kernel
self.b = tf.Variable(tf.random_normal(bshape))
# Input: [batch, height, width, channels]
# Kernel: [filter_height, filter_width, in_channels, out_channels]
k = FLAGS.pooling_kernel_width
s = 1
self.conv = tf.nn.conv2d(inputs, self.W, strides=[1, 1, s, 1], padding='SAME') # Convolution Layer
self.conv_b = tf.nn.bias_add(self.conv, self.b) # Convolution layer bias
self.relu = tf.nn.relu(self.conv_b) # ReLU activation layer
self.h = tf.nn.max_pool(self.relu, ksize=[1, 1, k, 1], strides=[1, 1, k, 1], padding='SAME') # Max pooling layer (down-sampling)
self.saver = tf.train.Saver([self.W, self.b])
class LinearLayer():
def __init__(self, inputs, num_inputs, num_outputs, name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.add(tf.matmul(inputs, self.W), self.b)
self.saver = tf.train.Saver([self.W, self.b])
class LinearReluLayer():
def __init__(self, inputs, num_inputs, num_outputs, name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.nn.relu(tf.add(tf.matmul(inputs, self.W), self.b))
self.saver = tf.train.Saver([self.W, self.b])
class MixtureLayer():
def __init__(self, inputs, num_inputs, num_mixtures, mixture_type='gaussian', name='', init_zero=False): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_mixtures = num_mixtures
self.num_components = 3
self.num_outputs = self.num_mixtures * self.num_components
self.name = name
# Define model
if init_zero:
self.W = tf.Variable(tf.zeros([self.num_inputs, self.num_outputs]))
self.b = tf.Variable(tf.zeros([self.num_outputs]))
else:
self.W = tf.Variable(tf.random_normal([self.num_inputs, self.num_outputs], stddev=0.1))
self.b = tf.Variable(tf.random_normal([self.num_outputs], stddev=0.1))
# Mixture model hypothesis
tanh_inputs = tf.nn.tanh(inputs)
self.h = tf.add(tf.matmul(tanh_inputs, self.W), self.b)
self.saver = tf.train.Saver([self.W, self.b])
class LinearReluDropLayer():
def __init__(self, inputs, num_inputs, num_outputs, keep, name=''):
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
self.W = tf.Variable(tf.random_normal([num_inputs, num_outputs]))
self.b = tf.Variable(tf.random_normal([num_outputs]))
self.h = tf.add(tf.matmul(inputs, self.W), self.b)
self.h = tf.nn.relu(self.h)
self.h = tf.nn.dropout(self.h, keep)
self.saver = tf.train.Saver([self.W, self.b])
class SoftmaxLayer():
def __init__(self, inputs, num_inputs, num_outputs, name=''): # input features and outputs
self.inputs = inputs
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.name = name
# Define model
self.W = tf.Variable(tf.zeros([num_inputs, num_outputs]))
self.b = tf.Variable(tf.zeros([num_outputs]))
self.h = tf.nn.softmax(tf.add(tf.matmul(inputs, self.W), self.b)) # Hypothesis
# If requested, save weights W and biases b
self.saver = tf.train.Saver([self.W, self.b])
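# Illustrative composition of the layer classes above (assumes the TF 1.x
# API used throughout this file; shown as comments so nothing is added to
# the graph at import time):
#   x = tf.placeholder("float", [None, 128])
#   hidden = LinearReluLayer(x, 128, 64, name='ex_lin0')
#   logits = LinearLayer(hidden.h, 64, 10, name='ex_clin', init_zero=True)
#   # logits.h is the [None, 10] tensor fed to softmax_cross_entropy_with_logits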
# ----------------------------------------------------
# Standalone run code
# ----------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tensorflow based fingerprinting of devices implementing the LoRa PHY layer')
parser.add_argument('action', type=str, choices=['train', 'test', 'train_embeddings', 'test_embeddings', 'zeroshot'], help='Action to perform')
parser.add_argument('configfile', type=str, help='Path to the config file to use')
parser.add_argument('--dt', dest='distance_threshold', type=str, help='Distance threshold to determine whether a device is an adversary. Set to "auto" to calculate automatically', default='auto')
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Debug mode')
parser.add_argument('--save', dest='save', action='store_true', default=False, help='Save trained network')
parser.add_argument('--adv', dest='adv', action='store_true', default=False, help='Treat excluded classes as attackers')
parser.add_argument('--vendor', dest='vendor', action='store_true', default=False, help='Test on chip model only')
parser.add_argument('--natural', dest='natural', action='store_true', default=False, help='Natural sorting')
args, unknown = parser.parse_known_args()
    # Argument preprocessing
if args.distance_threshold != 'auto': # Define distance threshold
args.distance_threshold = float(args.distance_threshold)
# Conf stuff
load_conf(args.configfile)
print_conf(cp)
if tf.gfile.Exists(FLAGS.logdir):
tf.gfile.DeleteRecursively(FLAGS.logdir) # Clean tmp dir
if type(FLAGS.exclude_classes) == str and FLAGS.exclude_classes != '': # Exclude classes from training
exclude_classes = [int(x) for x in FLAGS.exclude_classes.split(',')]
else:
exclude_classes = []
# Let's go
if args.action == 'train':
print("[+] Excluding %s" % str(exclude_classes))
instances = Instances(limit=FLAGS.limit, exclude_classes=exclude_classes, name="train")
if cp.get("DEFAULT", "classifier") == 'svm':
net = SVM(name=FLAGS.model_name)
else:
net = Classifier(num_inputs=instances.limit, num_classes=instances.mapping.size, name=FLAGS.model_name, modeltype=ModelType.str2type(FLAGS.classifier))
net.train(instances, batch_size=FLAGS.batch_size)
if args.save:
net.save()
net.bin_class_per_sample(instances, limit=1000, adv_detect=False, vendor_only=False) # Never adv detect during training
net.visualize_embeddings(instances, limit=1000, train=True)
elif args.action == 'test':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
if cp.get("DEFAULT", "classifier") == 'svm':
net = SVM.load()
else:
net = Classifier.load(0)
print("[+] Testing...")
net.bin_class_per_sample(instances, limit=1500, adv_detect=args.adv, vendor_only=args.vendor)
net.visualize_embeddings(instances, limit=1000, train=False)
elif args.action == 'train_embeddings':
instances = Instances(limit=FLAGS.limit, exclude_classes=exclude_classes, name="train")
print("[+] Loading model...")
net = Classifier.load(0)
net.visualize_embeddings(instances, limit=1000, train=True)
elif args.action == 'test_embeddings':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
print("[+] Loading model...")
net = Classifier.load(0)
net.visualize_embeddings(instances, limit=1000, train=False)
elif args.action == 'zeroshot':
instances = Instances(limit=FLAGS.limit, exclude_classes=[], name="test")
net = Classifier.load(0)
net.classify_zeroshot(instances, FLAGS.num_zs_test_samples, vendor_only=args.vendor)
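    # Example invocations (config path is hypothetical):
    #   python tf_train.py train conf/lora.cfg --save
    #   python tf_train.py test conf/lora.cfg --adv --vendor
    #   python tf_train.py zeroshot conf/lora.cfg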
|
rpp0/lora-phy-fingerprinting
|
tf_train.py
|
Python
|
bsd-3-clause
| 73,515
|
[
"Gaussian",
"NEURON"
] |
a35d1a98e46dfe2615afcd9af475197d49a2fdb7893cc009609f215783193139
|
import numpy as np
from alis import almsgs
from alis import alfunc_base
msgs=almsgs.msgs()
class ThAr(alfunc_base.Base) :
"""
Returns a 1-dimensional gaussian of form:
p[0] = amplitude
p[1] = centroid of the gaussian
p[2] = FWHM
"""
def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
self._idstr = 'thar' # ID string for this class
self._pnumr = 3 # Total number of parameters fed in
self._keywd = dict({'specid':[], 'continuum':False, 'blind':False}) # Additional arguments to describe the model --- 'input' cannot be used as a keyword
self._keych = dict({'specid':0, 'continuum':0, 'blind':0}) # Require keywd to be changed (1 for yes, 0 for no)
self._keyfm = dict({'specid':"", 'continuum':"", 'blind':""}) # Format for the keyword. "" is the Default setting
self._parid = ['amplitude', 'centre', 'fwhm'] # Name of each parameter
self._defpar = [ 0.0, 0.0, 3.0 ] # Default values for parameters that are not provided
self._fixpar = [ None, None, None ] # By default, should these parameters be fixed?
self._limited = [ [1 ,0 ], [0 ,0 ], [1 ,0 ] ] # Should any of these parameters be limited from below or above
self._limits = [ [0.0,0.0], [0.0,0.0], [1.0E-2,0.0] ] # What should these limiting values be
self._svfmt = [ "{0:.8g}", "{0:.8g}", "{0:.8g}"] # Specify the format used to print or save output
self._prekw = [] # Specify the keywords to print out before the parameters
# DON'T CHANGE THE FOLLOWING --- it tells ALIS what parameters are provided by the user.
tempinput = self._parid+list(self._keych.keys()) #
self._keywd['input'] = dict(zip((tempinput),([0]*np.size(tempinput)))) #
########################################################################
self._verbose = verbose
# Set the atomic data
self._atomic = atomic
if getinst: return
def call_CPU(self, x, p, ae='em', mkey=None, ncpus=1):
"""
Define the functional form of the model
--------------------------------------------------------
x : array of wavelengths
p : array of parameters for this model
--------------------------------------------------------
"""
def model(par):
"""
Define the model here
"""
return par[0]*np.exp(-(x-par[1])**2/(2.0*(par[2]**2)))
#############
yout = np.zeros((p.shape[0],x.size))
for i in range(p.shape[0]):
yout[i,:] = model(p[i,:])
if ae == 'em': return yout.sum(axis=0)
else: return yout.prod(axis=0)
def parin(self, i, par, parb):
"""
This routine converts a parameter in the input model file
to the parameter used in 'call'
--------------------------------------------------------
When writing a new function, one should change how each
input parameter 'par' is converted into a parameter used
in the function specified by 'call'
--------------------------------------------------------
"""
if i == 0: pin = par
elif i == 1: pin = par
elif i == 2: pin = par/(2.0*np.sqrt(2.0*np.log(2.0)))
return pin
def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, ddpid=None, getinfl=False):
"""
Return the parameters for a Gaussian function to be used by 'call'
The only thing that should be changed here is the parb values
and possibly the nexbin details...
"""
levadd=0
params=np.zeros(self._pnumr)
parinf=[]
for i in range(self._pnumr):
lnkprm = None
parb = dict({})
if mp['mtie'][ival][i] >= 0:
getid = mp['tpar'][mp['mtie'][ival][i]][1]
elif mp['mtie'][ival][i] <= -2:
if len(mp['mlnk']) == 0:
lnkprm = mp['mpar'][ival][i]
else:
for j in range(len(mp['mlnk'])):
if mp['mlnk'][j][0] == mp['mtie'][ival][i]:
cmd = 'lnkprm = ' + mp['mlnk'][j][1]
namespace = dict({'p': p})
exec(cmd, namespace)
lnkprm = namespace['lnkprm']
levadd += 1
else:
getid = level+levadd
levadd+=1
if lnkprm is None:
params[i] = self.parin(i, p[getid], parb)
if mp['mfix'][ival][i] == 0: parinf.append(getid)
else:
params[i] = lnkprm
if ddpid is not None:
if ddpid not in parinf: return []
if nexbin is not None:
if params[2] == 0.0: msgs.error("Cannot calculate "+self._idstr+" subpixellation -- width = 0.0")
if nexbin[0] == "km/s": return params, int(round(parb['ap_2a']*nexbin[1]/(299792.458*params[2]) + 0.5))
elif nexbin[0] == "A" : return params, int(round(nexbin[1]/params[2] + 0.5))
else: msgs.bug("bintype "+nexbin[0]+" should not have been specified in model function: "+self._idstr, verbose=self._verbose)
elif getinfl: return params, parinf
else: return params
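# Quick numeric check (illustrative) of the FWHM -> sigma conversion in
# parin() above: a gaussian with FWHM f has sigma = f / (2*sqrt(2*ln 2)),
# so the default FWHM of 3.0 corresponds to sigma ~= 1.2740:
#   >>> 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
#   1.2739827...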
|
rcooke-ast/ALIS
|
alis/alfunc_thar.py
|
Python
|
gpl-3.0
| 5,501
|
[
"Gaussian"
] |
612f293ef3c9786fcf1f53cb9d9d0a803bfed9342601e7f53ec21c5ca5e3e966
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Do not modify this file! It is auto-generated by the document_options_and_tests
# script, from psi4topdir/psi4/include/psi4/physconst.h
h = 6.62606896E-34 # The Planck constant (Js)
c = 2.99792458E8 # Speed of light (ms$^{-1}$)
kb = 1.3806504E-23 # The Boltzmann constant (JK$^{-1}$)
R = 8.314472 # Universal gas constant (JK$^{-1}$mol$^{-1}$)
bohr2angstroms = 0.52917720859 # Bohr to Angstroms conversion factor
bohr2m = 0.52917720859E-10 # Bohr to meters conversion factor
bohr2cm = 0.52917720859E-8 # Bohr to centimeters conversion factor
amu2g = 1.660538782E-24 # Atomic mass units to grams conversion factor
amu2kg = 1.660538782E-27 # Atomic mass units to kg conversion factor
au2amu = 5.485799097E-4 # Atomic units (m$@@e$) to atomic mass units conversion factor
hartree2J = 4.359744E-18 # Hartree to joule conversion factor
hartree2aJ = 4.359744 # Hartree to attojoule (10$^{-18}$J) conversion factor
cal2J = 4.184 # Calorie to joule conversion factor
dipmom_au2si = 8.47835281E-30 # Atomic units to SI units (Cm) conversion factor for dipoles
dipmom_au2debye = 2.54174623 # Atomic units to Debye conversion factor for dipoles
dipmom_debye2si = 3.335640952E-30 # Debye to SI units (Cm) conversion factor for dipoles
c_au = 137.035999679 # Speed of light in atomic units
hartree2ev = 27.21138 # Hartree to eV conversion factor
hartree2wavenumbers = 219474.6 # Hartree to cm$^{-1}$ conversion factor
hartree2kcalmol = 627.5095 # Hartree to kcal mol$^{-1}$ conversion factor
hartree2kJmol = 2625.500 # Hartree to kilojoule mol$^{-1}$ conversion factor
hartree2MHz = 6.579684E9 # Hartree to MHz conversion factor
kcalmol2wavenumbers = 349.7551 # kcal mol$^{-1}$ to cm$^{-1}$ conversion factor
e0 = 8.854187817E-12 # Vacuum permittivity (Fm$^{-1}$)
na = 6.02214179E23 # Avogadro's number
me = 9.10938215E-31 # Electron rest mass (in kg)
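# Example usage (illustrative): converting an energy difference of
# -0.0016 hartree to kcal/mol with the factors above:
#   >>> -0.0016 * hartree2kcalmol
#   -1.0040152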
|
rmcgibbo/psi4public
|
psi4/driver/constants/physconst.py
|
Python
|
lgpl-3.0
| 3,542
|
[
"Psi4"
] |
3e822fa9d868e89b4ff3accc1eed2ff8cbebd93db900c84b48843e8a8e7040b4
|
#!/usr/bin/env python
# Copyright 2016 Daniel Nunes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from traceback import print_tb
from io import StringIO
from os.path import join
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QPixmap
from . import __version__, cur_folder
def excepthook(exc_type, exc_value, tracebackobj):
"""
Global function to catch unhandled exceptions.
:param exc_type: exception type
:param exc_value: exception value
:param tracebackobj: traceback object
"""
notice = (
"An unhandled exception occurred. Please report the problem"
" at <a href = https://github.com/GandaG/fomod-designer/issues>Github</a>,"
" <a href = http://www.nexusmods.com/skyrim/?>Nexus</a> or"
" <a href = http://forum.step-project.com/index.php>STEP</a>.")
version_info = __version__
tbinfofile = StringIO()
print_tb(tracebackobj, None, tbinfofile)
tbinfofile.seek(0)
tbinfo = tbinfofile.read()
errmsg = 'Error information:\n\nVersion: {}\n{}: {}\n'.format(version_info, str(exc_type), str(exc_value))
sections = [errmsg, tbinfo]
msg = '\n'.join(sections)
errorbox = QMessageBox()
errorbox.setText(notice)
errorbox.setWindowTitle("Nobody Panic!")
errorbox.setDetailedText(msg)
errorbox.setIconPixmap(QPixmap(join(cur_folder, "resources/logos/logo_admin.png")))
errorbox.exec_()
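# Typical wiring (illustrative): install this handler as the global hook
# before the Qt event loop starts, so unhandled exceptions show the dialog:
#   import sys
#   sys.excepthook = excepthook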
class DesignerError(Exception):
"""
Base class for all exceptions.
"""
def __init__(self):
self.title = "Generic Error"
self.detailed = ""
Exception.__init__(self, "Something happened...")
class MissingFileError(DesignerError):
"""
Exception raised when the export/import functions could not find a file/folder.
"""
def __init__(self, fname):
self.title = "I/O Error"
self.message = "{} is missing.".format(fname.capitalize())
self.detailed = ""
self.file = fname
Exception.__init__(self, self.message)
class ParserError(DesignerError):
"""
Exception raised when the parser was unable to properly parse the file.
It tries to locate the line where the error occurred if lxml provides it.
"""
def __init__(self, msg):
self.title = "Parser Error"
self.detailed = ""
if len(msg.split(",")) <= 2:
self.msg = "The parser couldn't read the installer file. If you need help visit " \
"<a href = http://www.w3schools.com/xml/xml_syntax.asp>W3Schools</a>."
else:
self.msg = "The parser couldn't read the installer file, there was an error around" + \
msg.split(",")[len(msg.split(",")) - 2] + \
". If you need help visit <a href = http://www.w3schools.com/xml/xml_syntax.asp>W3Schools</a>."
Exception.__init__(self, self.msg)
class TagNotFound(DesignerError):
"""
Exception raised when the element factory did not match the element tag.
"""
def __init__(self, element):
self.title = "Tag Lookup Error"
self.message = "Tag {} at line {} could not be matched.".format(element.tag, element.sourceline)
self.detailed = ""
Exception.__init__(self, self.message)
class BaseInstanceException(Exception):
"""
    Exception raised when trying to instantiate base classes (not meant to be used directly).
"""
def __init__(self, base_instance):
self.title = "Instance Error"
self.message = "{} is not meant to be instanced. A subclass should be used instead.".format(type(base_instance))
self.detailed = ""
Exception.__init__(self, self.message)
|
GandaG/fomod-designer
|
src/exceptions.py
|
Python
|
apache-2.0
| 4,178
|
[
"VisIt"
] |
af254bde8e9030b8fc8859de335e5378576c9bad91ff3c8c9836ac3a2bf6bedf
|
import cPickle as pickle
from sympy.matrices import Matrix
from sympy import sympify
import sys
from ...utils.misc import extract_model
from ...utils.misc import get_filename_from_caller
from ...modeltools import make_path, get_file_path
from ...latextools import LatexExpr
from .symca_toolbox import SymcaToolBox as SMCAtools
from numpy import savetxt, array
from ...utils import ConfigReader
import warnings
__all__ = ['Symca']
class Symca(object):
"""
A class that performs Symbolic Metabolic Control Analysis.
This class takes pysces model as an input and performs symbolic inversion
of the ``E matrix`` using ``Sympy`` by calculating the determinant and
adjoint matrices of this ``E matrix``.
Parameters
----------
mod : PysMod
The pysces model on which to perform symbolic control analysis.
auto_load : boolean
        If true, a previously saved session is loaded on instantiation
        (see ``load_session``).
"""
def __init__(self, mod, auto_load=False, internal_fixed=False, ignore_steady_state=False, keep_zero_elasticities=True):
super(Symca, self).__init__()
ConfigReader.get_config()
self._ignore_steady_state = ignore_steady_state
self._keep_zero_ecs = keep_zero_elasticities
self.mod, obj_type = extract_model(mod)
if not self._ignore_steady_state:
self.mod.doMca()
else:
warnings.warn_explicit("\nIgnoring steady-state solution: Steady-state variables set to 1. Note that parameter scan functionality is unavailable.",
Warning,
filename=get_filename_from_caller(),
lineno=36)
SMCAtools.populate_with_fake_elasticities(mod)
SMCAtools.populate_with_fake_fluxes(mod)
SMCAtools.populate_with_fake_ss_concentrations(mod)
self._analysis_method = 'symca'
self._internal_filename = 'object_data'
self._working_dir = make_path(self.mod, self._analysis_method)
self._ltxe = LatexExpr(self.mod)
self.cc_results = None
self._nmatrix = None
self._species = None
self._num_ind_species = None
self._species_independent = None
self._species_dependent = None
self._fluxes = None
self._num_ind_fluxes = None
self._fluxes_independent = None
self._fluxes_dependent = None
self._kmatrix = None
self._lmatrix = None
self._subs_fluxes = None
self._scaled_k = None
self._scaled_l = None
self._scaled_k0 = None
self._scaled_l0 = None
self._es_matrix = None
self._esL = None
self._ematrix = None
self.internal_fixed = internal_fixed
if obj_type == 'RateCharData':
self.internal_fixed = True
if auto_load:
try:
self.load_session()
            except Exception:
                print 'Nothing to load_session: Run `do_symca` first'
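    # The read-only properties below follow a lazy-initialization pattern:
    # each matrix or vector is computed from the model on first access and
    # then cached in the corresponding underscore attribute.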
@property
def nmatrix(self):
if not self._nmatrix:
self._nmatrix = SMCAtools.get_nmatrix(self.mod)
return self._nmatrix
@property
def num_ind_species(self):
if not self._num_ind_species:
self._num_ind_species = SMCAtools.get_num_ind_species(self.mod)
return self._num_ind_species
@property
def species(self):
if not self._species:
self._species = SMCAtools.get_species_vector(self.mod)
return self._species
@property
def species_independent(self):
if not self._species_independent:
self._species_independent = Matrix(
self.species[:self.num_ind_species]
)
return self._species_independent
@property
def species_dependent(self):
if not self._species_dependent:
self._species_dependent = Matrix(
self.species[self.num_ind_species:]
)
return self._species_dependent
@property
def num_ind_fluxes(self):
if not self._num_ind_fluxes:
self._num_ind_fluxes = SMCAtools.get_num_ind_fluxes(self.mod)
return self._num_ind_fluxes
@property
def fluxes(self):
if not self._fluxes:
self._fluxes = SMCAtools.get_fluxes_vector(self.mod)
return self._fluxes
@property
def fluxes_independent(self):
if not self._fluxes_independent:
self._fluxes_independent = Matrix(
self.fluxes[:self.num_ind_fluxes]
)
return self._fluxes_independent
@property
def fluxes_dependent(self):
if not self._fluxes_dependent:
self._fluxes_dependent = Matrix(
self.fluxes[self.num_ind_fluxes:]
)
return self._fluxes_dependent
@property
def kmatrix(self):
if not self._kmatrix:
self._kmatrix = Matrix(self.mod.kmatrix)
return self._kmatrix
@property
def lmatrix(self):
if not self._lmatrix:
self._lmatrix = Matrix(self.mod.lmatrix)
return self._lmatrix
@property
def subs_fluxes(self):
if not self._subs_fluxes:
self._subs_fluxes = SMCAtools.substitute_fluxes(
self.fluxes,
self.kmatrix
)
return self._subs_fluxes
@property
def scaled_l(self):
if not self._scaled_l:
self._scaled_l = SMCAtools.scale_matrix(
self.species,
self.lmatrix,
self.species_independent
)
return self._scaled_l
@property
def scaled_k(self):
if not self._scaled_k:
self._scaled_k = SMCAtools.scale_matrix(
self.subs_fluxes,
self.kmatrix,
self.fluxes_independent
)
return self._scaled_k
@property
def scaled_l0(self):
if not self._scaled_l0:
self._scaled_l0 = self.scaled_l[self.num_ind_species:, :]
return self._scaled_l0
@property
def scaled_k0(self):
if not self._scaled_k0:
self._scaled_k0 = self.scaled_k[self.num_ind_fluxes:, :]
return self._scaled_k0
@property
def es_matrix(self):
if not self._es_matrix:
if self._ignore_steady_state or self._keep_zero_ecs:
es_method = SMCAtools.get_es_matrix_no_mca
else:
es_method = SMCAtools.get_es_matrix
self._es_matrix = es_method(
self.mod,
self.nmatrix,
self.fluxes,
self.species
)
return self._es_matrix
@property
def esL(self):
if not self._esL:
self._esL = self.es_matrix * self.scaled_l
return self._esL
@property
def ematrix(self):
if not self._ematrix:
self._ematrix = SMCAtools.simplify_matrix(
self.scaled_k.row_join(
-self.esL
)
)
return self._ematrix
def path_to(self, path):
full_path = make_path(self.mod, self._analysis_method, [path])
return full_path
def save_session(self, file_name=None):
file_name = get_file_path(working_dir=self._working_dir,
internal_filename=self._internal_filename,
fmt='pickle',
file_name=file_name,
write_suffix=False)
assert self.cc_results, 'Nothing to save_session, run ``do_symca`` method first'
main_cc_dict = SMCAtools.make_inner_dict(self.cc_results, 'cc_results')
counter = 0
while True:
cc_container_name = 'cc_results_{0}'.format(counter)
try:
cc_container = getattr(self, cc_container_name)
main_cc_dict.update(
SMCAtools.make_inner_dict(cc_container, cc_container_name))
counter += 1
            except AttributeError:
break
to_save = main_cc_dict
with open(file_name, 'w') as f:
pickle.dump(to_save, f)
def load_session(self, file_name=None):
file_name = get_file_path(working_dir=self._working_dir,
internal_filename=self._internal_filename,
fmt='pickle',
file_name=file_name,
write_suffix=False)
with open(file_name, 'r') as f:
main_cc_dict = pickle.load(f)
cc_containers = {}
for key, value in main_cc_dict.iteritems():
common_denom_exp = value.pop('common_denominator')
cc_container = SMCAtools.spawn_cc_objects(self.mod,
value.keys(),
[exp for exp in
value.values()],
common_denom_exp,
self._ltxe)
cc_containers[key] = SMCAtools.make_CC_dot_dict(cc_container)
for key, value in cc_containers.iteritems():
setattr(self, key, value)
def save_results(self, file_name=None, separator=',',fmt='%.9f'):
file_name = get_file_path(working_dir=self._working_dir,
internal_filename='cc_summary',
fmt='csv',
file_name=file_name, )
rows = []
cc_counter = 0
cc_dicts = [self.cc_results]
max_len = 0
while True:
try:
next_dict = getattr(self, 'cc_results_%s' % cc_counter)
cc_dicts.append(next_dict)
cc_counter += 1
            except AttributeError:
break
sep = ('######################', 0, '', '')
cc_counter = -1
for cc_dict in cc_dicts:
result_name = '# results from cc_results'
if cc_counter >= 0:
result_name += '_%s' % cc_counter
head = (result_name, 0, '', '')
rows.append(head)
for cc_name in sorted(cc_dict.keys()):
cc_obj = cc_dict[cc_name]
row_1 = (cc_obj.name,
cc_obj.value,
cc_obj.latex_name,
cc_obj.latex_expression)
expr_len = len(cc_obj.latex_expression)
if expr_len > max_len:
max_len = expr_len
rows.append(row_1)
if not cc_obj.name == 'common_denominator':
for cp in cc_obj.control_patterns.itervalues():
cols = (cp.name,
cp.value,
cp.latex_name,
cp.latex_expression)
rows.append(cols)
rows.append(sep)
cc_counter += 1
str_fmt = 'S%s' % max_len
head = ['name', 'value', 'latex_name', 'latex_expression']
X = array(rows,
dtype=[(head[0], str_fmt),
(head[1], 'float'),
(head[2], str_fmt),
(head[3], str_fmt)])
try:
savetxt(fname=file_name,
X=X,
header=separator.join(head),
delimiter=separator,
fmt=['%s', fmt, '%s', '%s'],)
except IOError as e:
print e.strerror
def do_symca(self, internal_fixed=None, auto_save_load=False):
if internal_fixed is None:
internal_fixed = self.internal_fixed
def do_symca_internals(self):
CC_i_num, common_denom_expr = SMCAtools.invert(
self.ematrix,
self.path_to('temp')
)
cc_sol = SMCAtools.solve_dep(
CC_i_num,
self.scaled_k0,
self.scaled_l0,
self.num_ind_fluxes,
self.path_to('temp')
)
cc_sol, common_denom_expr = SMCAtools.fix_expressions(
cc_sol,
common_denom_expr,
self.lmatrix,
self.species_independent,
self.species_dependent
)
cc_names = SMCAtools.build_cc_matrix(
self.fluxes,
self.fluxes_independent,
self.species_independent,
self.fluxes_dependent,
self.species_dependent
)
cc_objects = SMCAtools.spawn_cc_objects(self.mod,
cc_names,
cc_sol,
common_denom_expr,
self._ltxe)
self.cc_results = SMCAtools.make_CC_dot_dict(cc_objects)
if internal_fixed:
simpl_dic = SMCAtools.make_internals_dict(cc_sol,
cc_names,
common_denom_expr,
self.path_to('temp'))
CC_block_counter = 0
for each_common_denom_expr, name_num in simpl_dic.iteritems():
name_num[1], \
each_common_denom_expr = SMCAtools.fix_expressions(
name_num[1],
each_common_denom_expr,
self.lmatrix,
self.species_independent,
self.species_dependent
)
simpl_cc_objects = SMCAtools.spawn_cc_objects(self.mod,
name_num[0],
name_num[1],
each_common_denom_expr,
self._ltxe, )
CC_dot_dict = SMCAtools.make_CC_dot_dict(simpl_cc_objects)
setattr(self, 'cc_results_%s' %
CC_block_counter, CC_dot_dict)
CC_block_counter += 1
self.CC_i_num = CC_i_num
if auto_save_load:
try:
self.load_session()
            except Exception:
do_symca_internals(self)
self.save_session()
else:
do_symca_internals(self)
|
exe0cdc/PyscesToolbox
|
psctb/analyse/_symca/_symca.py
|
Python
|
bsd-3-clause
| 14,914
|
[
"PySCeS"
] |
dc37f7a6bc958199546177f3c63228366ca09dc194d15a82aed7ca81a9523282
|
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: Brian Cherinka, José Sánchez-Gallego, Brett Andrews
# @Date: Oct 25, 2017
# @Filename: base.py
# @License: BSD 3-Clause
# @Copyright: Brian Cherinka, José Sánchez-Gallego, Brett Andrews
from __future__ import absolute_import, division, print_function
from astropy import units as u
from marvin.utils.datamodel.maskbit import get_maskbits
from .base import RSS, DataCube, DRPCubeDataModel, DRPCubeDataModelList, Spectrum
spaxel_unit = u.Unit('spaxel', represents=u.pixel, doc='A spatial pixel', parse_strict='silent')
fiber_unit = u.Unit('fiber', represents=u.pixel, doc='Spectroscopic fibre', parse_strict='silent')
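# For example, the MPL-4 flux datacube below carries units of
# 1e-17 erg / (s * cm**2 * Angstrom * spaxel), i.e. flux density per spaxel.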
MPL4_datacubes = [
DataCube('flux', 'FLUX', 'WAVE', extension_ivar='IVAR',
extension_mask='MASK', unit=u.erg / u.s / (u.cm ** 2) / u.Angstrom / spaxel_unit,
scale=1e-17, formats={'string': 'Flux'},
description='3D rectified cube')
]
MPL4_spectra = [
Spectrum('spectral_resolution', 'SPECRES', extension_wave='WAVE', extension_std='SPECRESD',
unit=u.Angstrom, scale=1, formats={'string': 'Median spectral resolution'},
description='Median spectral resolution as a function of wavelength '
'for the fibers in this IFU'),
]
MPL6_datacubes = [
DataCube('dispersion', 'DISP', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.Angstrom,
scale=1, formats={'string': 'Dispersion'},
description='Broadened dispersion solution (1sigma LSF)'),
DataCube('dispersion_prepixel', 'PREDISP', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.Angstrom,
scale=1, formats={'string': 'Dispersion pre-pixel'},
description='Broadened pre-pixel dispersion solution (1sigma LSF)')
]
MPL6_spectra = [
Spectrum('spectral_resolution_prepixel', 'PRESPECRES', extension_wave='WAVE',
extension_std='PRESPECRESD', unit=u.Angstrom, scale=1,
formats={'string': 'Median spectral resolution pre-pixel'},
description='Median pre-pixel spectral resolution as a function of '
'wavelength for the fibers in this IFU'),
]
RSS_extensions = [
RSS('xpos', 'XPOS', extension_wave='WAVE', unit=u.arcsec, db_table='rssfiber',
formats={'string': 'Fiber X-positions from the IFU center'},
description='Array of fiber X-positions relative to the IFU center'),
RSS('ypos', 'YPOS', extension_wave='WAVE', unit=u.arcsec, db_table='rssfiber',
formats={'string': 'Fiber Y-positions from the IFU center'},
description='Array of fiber Y-positions relative to the IFU center'),
]
MPL10_datacubes = [
DataCube('dispersion', 'LSFPOST', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.Angstrom, db_column='disp',
scale=1, formats={'string': 'Dispersion'},
description='Broadened dispersion solution (1sigma LSF)'),
DataCube('dispersion_prepixel', 'LSFPRE', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.Angstrom, db_column='predisp',
scale=1, formats={'string': 'Dispersion pre-pixel'},
description='Broadened pre-pixel dispersion solution (1sigma LSF)')
]
MPL4 = DRPCubeDataModel('MPL-4', aliases=['MPL4', 'v1_5_1'],
datacubes=MPL4_datacubes,
spectra=MPL4_spectra,
bitmasks=get_maskbits('MPL-4'),
qual_flag='DRP3QUAL')
MPL5 = DRPCubeDataModel('MPL-5', aliases=['MPL5', 'v2_0_1'],
datacubes=MPL4_datacubes,
spectra=MPL4_spectra,
bitmasks=get_maskbits('MPL-5'),
qual_flag='DRP3QUAL')
MPL6 = DRPCubeDataModel('MPL-6', aliases=['MPL6', 'v2_3_1'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-6'),
qual_flag='DRP3QUAL')
MPL7 = DRPCubeDataModel('MPL-7', aliases=['MPL7', 'v2_4_3', 'DR15'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-7'),
qual_flag='DRP3QUAL')
DR15 = DRPCubeDataModel('DR15', aliases=['DR15', 'v2_4_3'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-7'),
qual_flag='DRP3QUAL')
MPL8 = DRPCubeDataModel('MPL-8', aliases=['MPL8', 'v2_5_3'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-8'),
qual_flag='DRP3QUAL')
DR16 = DRPCubeDataModel('DR16', aliases=['DR16', 'v2_4_3'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-7'),
qual_flag='DRP3QUAL')
MPL9 = DRPCubeDataModel('MPL-9', aliases=['MPL9', 'v2_7_1'],
datacubes=MPL4_datacubes + MPL6_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-9'),
qual_flag='DRP3QUAL')
MPL10 = DRPCubeDataModel('MPL-10', aliases=['MPL10', 'v3_0_1'],
datacubes=MPL4_datacubes + MPL10_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-10'),
qual_flag='DRP3QUAL')
MPL11 = DRPCubeDataModel('MPL-11', aliases=['MPL11', 'v3_1_1', 'DR17'],
datacubes=MPL4_datacubes + MPL10_datacubes,
spectra=MPL4_spectra + MPL6_spectra,
bitmasks=get_maskbits('MPL-11'),
qual_flag='DRP3QUAL')
# The DRP Cube Datamodel
datamodel = DRPCubeDataModelList([MPL4, MPL5, MPL6, MPL7, DR15, MPL8, DR16, MPL9, MPL10, MPL11])
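# Lookup sketch (access pattern inferred from the RSS loop below):
#   cube_dm = datamodel['MPL-11']   # datamodel for a single release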
# Define the RSS Datamodel. Start by copying the Cube datamodel for convenience.
datamodel_rss = datamodel.copy()
for release in datamodel_rss:
datamodel_rss[release] = datamodel_rss[release].to_rss()
datamodel_rss[release].rss += RSS_extensions
flux = datamodel_rss[release].rss.flux
flux.description = 'Row-stacked spectra from all exposures for the target'
flux.unit = flux.unit * spaxel_unit / fiber_unit
locals()
|
sdss/marvin
|
python/marvin/utils/datamodel/drp/MPL.py
|
Python
|
bsd-3-clause
| 6,763
|
[
"Brian"
] |
fa54529610dd0c06be506a4ee4eed4ad45a336406be0ed0a259cee1e86d7c563
|
#!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#In this example I will use the class DroneLanding to generate a 3D world
#in which the drone will move. Using the Q-learning algorithm I
#will estimate the state-action matrix.
import numpy as np
from drone_landing import DroneLanding
import matplotlib.pyplot as plt
def update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
action, reward, alpha, gamma):
"""Return the updated state-action matrix
@param state_action_matrix the matrix before the update
@param observation the state obsrved at t
@param new_observation the state observed at t+1
@param action the action at t
@param new_action the action at t+1
@param reward the reward observed after the action
@param alpha the ste size (learning rate)
@param gamma the discount factor
@return the updated state action matrix
"""
#Getting the values of Q at t and estimating q
x = observation[0]
y = observation[1]
z = observation[2]
q = state_action_matrix[x,y,z,action] # Estimating q
# Estimating the q_t1 using observation at t+1
x_t1 = new_observation[0]
y_t1 = new_observation[1]
z_t1 = new_observation[2]
q_t1 = np.amax(state_action_matrix[x_t1,y_t1,z_t1,:])
    #Calculate alpha based on how many times this pair has been visited
alpha_counted = 1.0 / (1.0 + visit_counter_matrix[x,y,z,action])
#Applying the update rule
#Here you can change "alpha" with "alpha_counted" if you want
#to take into account how many times that particular state-action
#pair has been visited until now.
state_action_matrix[x,y,z,action] = state_action_matrix[x,y,z,action] + alpha * (reward + gamma * q_t1 - q)
return state_action_matrix
def update_visit_counter(visit_counter_matrix, observation, action):
"""Update the visit counter
Counting how many times a state-action pair has been
visited. This information can be used during the update.
@param visit_counter_matrix a matrix initialised with zeros
@param observation the state observed
    @param action the action taken
    @return the updated visit counter matrix
    """
x = observation[0]
y = observation[1]
z = observation[2]
visit_counter_matrix[x,y,z,action] += 1.0
return visit_counter_matrix
def update_policy(policy_matrix, state_action_matrix, observation):
"""Return the updated policy matrix (q-learning)
@param policy_matrix the matrix before the update
@param state_action_matrix the state-action matrix
    @param observation the state observed at t
    @return the updated policy matrix
"""
x = observation[0]
y = observation[1]
z = observation[2]
#Getting the index of the action with the highest utility
best_action = np.argmax(state_action_matrix[x,y,z,:])
#Updating the policy
policy_matrix[x,y,z] = best_action
return policy_matrix
def return_epsilon_greedy_action(policy_matrix, observation, epsilon=0.1):
x = observation[0]
y = observation[1]
z = observation[2]
# Get the total number of actions
tot_actions = int(np.nanmax(policy_matrix) + 1)
# Return a random action or the one with highest utility
if np.random.uniform(0, 1) <= epsilon:
action = np.random.randint(low=0, high=tot_actions)
else:
action = int(policy_matrix[x,y,z])
return action
def return_decayed_value(starting_value, global_step, decay_step):
"""Returns the decayed value.
    decayed_value = starting_value * 0.1 ^ (global_step / decay_step)
@param starting_value the value before decaying
@param global_step the global step to use for decay (positive integer)
@param decay_step the step at which the value is decayed
"""
decayed_value = starting_value * np.power(0.1, (global_step/decay_step))
return decayed_value
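# Worked example: with starting_value=0.1 the value falls by a factor of 10
# every decay_step steps, e.g. 0.01 at global_step == decay_step and 0.001 at
# global_step == 2 * decay_step (assuming true/float division of the steps).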
def plot_curve(data_list, filepath="./my_plot.png",
x_label="X", y_label="Y",
x_range=(0, 1), y_range=(0,1), color="-r", kernel_size=50, alpha=0.4, grid=True):
"""Plot a graph using matplotlib
"""
if(len(data_list) <=1):
print("[WARNING] the data list is empty, no plot will be saved.")
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=x_range, ylim=y_range)
ax.grid(grid)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
    ax.plot(data_list, color, alpha=alpha) # The original data is shown in the background
kernel = np.ones(int(kernel_size))/float(kernel_size) # Smooth the graph using a convolution
tot_data = len(data_list)
lower_boundary = int(kernel_size/2.0)
upper_boundary = int(tot_data-(kernel_size/2.0))
data_convolved_array = np.convolve(data_list, kernel, 'same')[lower_boundary:upper_boundary]
#print("arange: " + str(np.arange(tot_data)[lower_boundary:upper_boundary]))
#print("Convolved: " + str(np.arange(tot_data).shape))
ax.plot(np.arange(tot_data)[lower_boundary:upper_boundary], data_convolved_array, color, alpha=1.0) # Convolved plot
fig.savefig(filepath)
fig.clear()
plt.close(fig)
# print(plt.get_fignums()) # print the number of figures opened in background
def main():
world_size = 21
env = DroneLanding(world_size)
tot_actions = 6
#Define the state matrix
state_matrix = np.zeros((world_size,world_size,world_size,tot_actions))
state_matrix[0, 3] = 1
state_matrix[1, 3] = 1
state_matrix[1, 1] = -1
print("State Matrix:")
print(state_matrix)
#Random policy
policy_matrix = np.random.randint(low=0, high=6, size=(world_size,world_size,world_size)).astype(np.float32)
# Q-table and visit counter
state_action_matrix = np.zeros((world_size,world_size,world_size,tot_actions))
visit_counter_matrix = np.zeros((world_size,world_size,world_size,tot_actions))
# Hyperparameters
gamma = 0.999
alpha = 0.001 #constant step size
tot_episode = 2500000
print_episode = 1000
render_episode = 100000
save_log_episode = 10
reward_list = list()
for episode in range(tot_episode+1):
#Reset and return the first observation
observation = env.reset(exploring_starts=True)
#epsilon = return_decayed_value(0.1, episode, decay_step=50000)
epsilon = 0.1
cumulated_reward = 0
for step in range(50):
#Take the action from the action matrix
#action = policy_matrix[observation[0], observation[1]]
#Take the action using epsilon-greedy
action = return_epsilon_greedy_action(policy_matrix, observation, epsilon=epsilon)
#Move one step in the environment and get obs and reward
new_observation, reward, done = env.step(action)
#Updating the state-action matrix
state_action_matrix = update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
action, reward, alpha, gamma)
#Updating the policy
policy_matrix = update_policy(policy_matrix, state_action_matrix, observation)
#Increment the visit counter
visit_counter_matrix = update_visit_counter(visit_counter_matrix, observation, action)
observation = new_observation
cumulated_reward += reward
if done: break
if(episode % save_log_episode == 0):
reward_list.append(cumulated_reward)
if(episode % print_episode == 0):
print("")
print("Episode: " + str(episode) + " of " + str(tot_episode))
print("Epsilon: " + str(epsilon))
print("Cumulated reward: " + str(cumulated_reward))
print("Q-max: " + str(np.amax(state_action_matrix)))
print("Q-mean: " + str(np.mean(state_action_matrix)))
print("Q-min: " + str(np.amin(state_action_matrix)))
if episode % render_episode == 0:
print("Saving the gif in ./drone_landing.gif")
env.render(file_path='./drone_landing.gif', mode='gif')
print("Done!")
print("Saving the reward graph in ./reward.png")
plot_curve(reward_list, filepath="./reward.png",
x_label="Episode", y_label="Reward",
x_range=(0, len(reward_list)), y_range=(-1.55,1.05),
color="red", kernel_size=500,
alpha=0.4, grid=True)
print("Done!")
#Training complete
print("Finished!!!")
if __name__ == "__main__":
main()
|
mpatacchiola/dissecting-reinforcement-learning
|
src/6/drone-landing/qlearning_drone_landing.py
|
Python
|
mit
| 9,824
|
[
"VisIt"
] |
77aaddf01498cb19c8a1272442471645077493994d4f7e41457e5af2a353d128
|
from sklearn.utils import shuffle
import numpy as np
import cv2
import csv
def get_data_path(data_folders, minimum_speed=10.0, angle_correction=0.25, target_avg_factor = 1.0, num_bins=40):
'''
    This function gets the paths to the training samples, adjusts the angles for the left/right images and discards
    samples that were taken while traveling below the minimum speed.
    It also tries to balance the distribution of training samples by analyzing the histogram of steering angles and
    thinning out the bins that are over-represented.
'''
image_paths = []
steering_angles = []
# Get path to training images and angles
for data_folder in data_folders:
with open(data_folder + '/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
if line[0] == 'center':
# Ignore header
continue
else:
# Remove those samples which were taken when car wasn't moving too fast
if float(line[6]) > minimum_speed:
# Center image path and angle
source_path = data_folder + '/IMG/'+line[0].split('/')[-1]
image_paths.append(source_path)
steering_angles.append(float(line[3]))
# Left image path and angle
source_path = data_folder + '/IMG/'+line[1].split('/')[-1]
image_paths.append(source_path)
steering_angles.append(float(line[3])+angle_correction)
# Right image path and angle
source_path = data_folder + '/IMG/'+line[2].split('/')[-1]
image_paths.append(source_path)
steering_angles.append(float(line[3])-angle_correction)
image_paths = np.array(image_paths)
steering_angles = np.array(steering_angles)
# Try to normalize the distribution of training samples as shown in jupyter notebook (data_preprocessing.ipynb)
hist, bins = np.histogram(steering_angles, num_bins)
avg_samples_per_bin = np.mean(hist)
    # Compute a keep probability for each bin: samples are kept in proportion to how over- or under-represented each category is.
new_target_avg = avg_samples_per_bin * target_avg_factor
keep_probs = []
for i in range(num_bins):
if hist[i] < new_target_avg:
keep_probs.append(1.)
else:
keep_probs.append(1./(hist[i]/new_target_avg))
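    # Worked example (illustrative numbers): if new_target_avg is 100 and a bin
    # holds 400 samples, its keep probability is 100/400 = 0.25, so roughly 75%
    # of that bin's samples are removed below.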
# Remove samples according to probability of each bin
idx_to_remove = []
for i in range(len(steering_angles)):
for j in range(num_bins):
if steering_angles[i] >= bins[j] and steering_angles[i] <= bins[j+1]:
# Delete with probability 1-keep_prob
if np.random.random_sample() > keep_probs[j]:
idx_to_remove.append(i)
image_paths = np.delete(image_paths, idx_to_remove, axis=0)
steering_angles = np.delete(steering_angles, idx_to_remove)
return image_paths, steering_angles
def preprocess_image(img):
'''
Adds gaussian blur and transforms BGR to YUV.
'''
new_img = cv2.GaussianBlur(img, (3,3), 0)
new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2YUV)
return new_img
def random_distortion(img):
'''
Adds random distortion to training dataset: random brightness, shadows and a random vertical shift
of the horizon position
'''
new_img = img.astype(float)
# Add random brightness
value = np.random.randint(-28, 28)
    new_img[:,:,0] = np.minimum(np.maximum(new_img[:,:,0] + value, 0), 255)
# Add random shadow covering the entire height but random width
img_height, img_width = new_img.shape[0:2]
middle_point = np.random.randint(0,img_width)
darkening = np.random.uniform(0.6,0.8)
if np.random.rand() > .5:
new_img[:,0:middle_point,0] *= darkening
else:
new_img[:,middle_point:img_width,0] *= darkening
    # Apply a perspective transform that vertically shifts the horizon line
horizon = 2*img_height/5 # Assumes horizon to be located at 2/5 of image height
v_shift = np.random.randint(-img_height/8,img_height/8) # Shifting horizon by up to 1/8
# First points correspond to a rectangle surrounding the image below the horizon line
pts1 = np.float32([[0,horizon],[img_width,horizon],[0,img_height],[img_width,img_height]])
# Second set of points correspond to same rectangle plus a random vertical shift
pts2 = np.float32([[0,horizon+v_shift],[img_width,horizon+v_shift],[0,img_height],[img_width,img_height]])
# Getting the perspective transformation
M = cv2.getPerspectiveTransform(pts1,pts2)
    # Applying the perspective transformation
new_img = cv2.warpPerspective(new_img,M,(img_width,img_height), borderMode=cv2.BORDER_REPLICATE)
return new_img.astype(np.uint8)
def generator(image_paths, steering_angles, batch_size=32, validation_flag=False):
'''
Training batches generator. Does not distort the images if "validation_flag" is set to True
'''
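    # Typical use (hypothetical Keras-style call, not part of this module):
    #   train_gen = generator(train_paths, train_angles, batch_size=32)
    #   model.fit_generator(train_gen, steps_per_epoch=len(train_paths) // 32)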
num_samples = len(image_paths)
while 1:
image_paths, steering_angles = shuffle(image_paths, steering_angles)
for offset in range(0, num_samples, batch_size):
batch_images = image_paths[offset:offset+batch_size]
batch_angles = steering_angles[offset:offset+batch_size]
images = []
angles = []
for batch_angle ,batch_image in zip(batch_angles,batch_images):
img = cv2.imread(batch_image)
img = preprocess_image(img)
if not validation_flag:
img = random_distortion(img)
# Randomly flipping the image to augment data
# Only augmenting rare examples (angle > ~0.3)
if abs(batch_angle) > 0.3 and np.random.random_sample() >= 0.5:
img = cv2.flip(img, 1)
batch_angle *= -1
images.append(img)
angles.append(batch_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train)
|
camigord/Self-Driving-Car-Nanodegree
|
P3-Behavioral-Cloning/utils/utils.py
|
Python
|
mit
| 6,328
|
[
"Gaussian"
] |
a59067e491f86df029e5672f92cf7c11252eb625bcdf37f9875f19db08e24386
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import multiprocessing
import re
import shutil
import subprocess
import logging
import livereload
import mooseutils
import MooseDocs
from MooseDocs.MooseMarkdown import MooseMarkdown
from MooseDocs import common
LOG = logging.getLogger(__name__)
def build_options(parser):
"""
Command-line options for build command.
"""
parser.add_argument('--config-file', type=str, default='website.yml',
help="The configuration file to use for building the documentation using "
"MOOSE. (Default: %(default)s)")
parser.add_argument('--content', type=str, default='content.yml',
help="The YAML file containing the locations containing the markdown "
"files (Default: %(default)s). If the file doesn't exists the default "
"is {'default':{'base':'docs/content', 'include':'docs/content/*'}}")
if MooseDocs.ROOT_DIR == MooseDocs.MOOSE_DIR:
parser.add_argument('--init', action='store_true', help="Initialize and/or update the "
"large media submodule if needed.")
parser.add_argument('--dump', action='store_true',
help="Display the website file tree that will be created without "
"performing a build.")
parser.add_argument('--clean', action='store_true',
help="Clean the 'site-dir', this happens by default when the '--serve' "
"command is used.")
parser.add_argument('--num-threads', '-j', type=int, default=multiprocessing.cpu_count(),
help="Specify the number of threads to build pages with.")
parser.add_argument('--template', type=str, default='website.html',
help="The template html file to utilize (Default: %(default)s).")
parser.add_argument('--host', default='127.0.0.1', type=str,
help="The local host location for live web server (default: %(default)s).")
parser.add_argument('--port', default='8000', type=str,
help="The local host port for live web server (default: %(default)s).")
parser.add_argument('--site-dir', type=str, default=os.path.join(MooseDocs.ROOT_DIR, 'site'),
help="The location to build the website content (Default: %(default)s).")
parser.add_argument('--serve', action='store_true',
help="Serve the presentation with live reloading, the 'site_dir' is "
"ignored for this case.")
parser.add_argument('--no-livereload', action='store_true',
help="When --serve is used this flag disables the live reloading.")
def submodule_status():
"""
    Return the status of each git submodule.
"""
out = dict()
result = subprocess.check_output(['git', 'submodule', 'status'], cwd=MooseDocs.MOOSE_DIR)
regex = re.compile(r'(?P<status>[\s\-\+U])(?P<sha1>[a-f0-9]{40})\s(?P<name>.*?)\s')
for match in regex.finditer(result):
out[match.group('name')] = match.group('status')
return out
class WebsiteBuilder(common.Builder):
"""
Builder object for creating websites.
"""
def __init__(self, content=None, **kwargs):
super(WebsiteBuilder, self).__init__(**kwargs)
if (content is None) or (not os.path.isfile(content)):
LOG.info("Using default content directory configuration "
"(i.e., --content does not include a valid filename).")
content = dict(default=dict(base=os.path.join(os.getcwd(), 'content'),
include=[os.path.join(os.getcwd(), 'content', '*')]))
else:
content = MooseDocs.yaml_load(content)
self._content = content
def buildNodes(self):
return common.moose_docs_file_tree(self._content)
class MooseDocsWatcher(livereload.watcher.Watcher):
"""
A livereload watcher for MooseDocs that rebuilds the entire content if markdown files are
added or removed.
"""
def __init__(self, builder, num_threads, *args, **kwargs):
super(MooseDocsWatcher, self).__init__(*args, **kwargs)
self._builder = builder
self._num_threads = num_threads
self.init()
def init(self):
"""
Define the content to watch.
"""
self._count = self._builder.count()
for page in self._builder:
self.watch(page.filename, lambda p=page: self._builder.buildPage(p), delay=2)
def reset(self):
"""
Perform a complete build and establish the items to watch.
"""
# Clear the current tasks
self._tasks = dict()
self.filepath = None
# Re-build
LOG.info('START: Complete re-build of markdown content.')
self._builder.build(num_threads=self._num_threads)
self.init()
LOG.info('FINISH: Complete re-build of markdown content.')
def examine(self):
"""
Override the default function to investigate if the number of markdown files changed.
"""
self._builder.init()
if self._count != self._builder.count():
self.reset()
else:
super(MooseDocsWatcher, self).examine()
return self.filepath, None
def build(config_file=None, site_dir=None, num_threads=None, no_livereload=False, content=None,
dump=False, clean=False, serve=False, host=None, port=None, template=None, init=False,
**template_args):
"""
The main build command.
"""
if serve:
clean = True
site_dir = os.path.abspath(os.path.join(MooseDocs.TEMP_DIR, 'site'))
# Clean/create site directory
if clean and os.path.exists(site_dir):
LOG.info('Cleaning build directory: %s', site_dir)
shutil.rmtree(site_dir)
# Create the "temp" directory
if not os.path.exists(site_dir):
os.makedirs(site_dir)
# Check submodule for large_media
if MooseDocs.ROOT_DIR == MooseDocs.MOOSE_DIR:
status = submodule_status()
if status['docs/content/media/large_media'] == '-':
if init:
subprocess.call(['git', 'submodule', 'update', '--init',
'docs/content/media/large_media'], cwd=MooseDocs.MOOSE_DIR)
else:
LOG.warning("The 'large_media' submodule for storing images above 1MB is not "
"initialized, thus some images will not be visible within the "
"generated website. Run the build command with the --init flag to "
"initialize the submodule.")
# Check media files size
if MooseDocs.ROOT_DIR == MooseDocs.MOOSE_DIR:
media = os.path.join(MooseDocs.MOOSE_DIR, 'docs', 'content', 'media')
ignore = set()
for base, _, files in os.walk(os.path.join(media, 'large_media')):
for name in files:
ignore.add(os.path.join(base, name))
large = mooseutils.check_file_size(base=media, ignore=ignore)
if large:
msg = "Media files above the limit of 1 MB detected, these files should be stored in " \
"large media repository (docs/content/media/large_media):"
for name, size in large:
msg += '\n{}{} ({:.2f} MB)'.format(' '*4, name, size)
LOG.error(msg)
# Create the markdown parser
config = MooseDocs.load_config(config_file, template=template, template_args=template_args)
parser = MooseMarkdown(config)
# Create the builder object and build the pages
builder = WebsiteBuilder(parser=parser, site_dir=site_dir, content=content)
builder.init()
if dump:
print builder
return None
builder.build(num_threads=num_threads)
# Serve
if serve:
if not no_livereload:
server = livereload.Server(watcher=MooseDocsWatcher(builder, num_threads))
else:
server = livereload.Server()
server.serve(root=site_dir, host=host, port=port, restart_delay=0)
return 0
|
liuwenf/moose
|
python/MooseDocs/commands/build.py
|
Python
|
lgpl-2.1
| 9,630
|
[
"MOOSE"
] |
689fc33f80d528ca554c5b21ed4ae25eaffdcd9c2b560794e0a787ec01f92e19
|
# opts.py - module to parse command line options and output what parameters will be used for this run
import os
import os.path
import json
import sys
import yaml
from argparse import ArgumentParser
# fs-drift module dependencies
from common import OK, NOTOK, FsDriftException, FileAccessDistr, USEC_PER_SEC
from common import FileAccessDistr2str
from parser_data_types import boolean, positive_integer, non_negative_integer, bitmask
from parser_data_types import positive_float, non_negative_float, positive_percentage
from parser_data_types import host_set, file_access_distrib
from parser_data_types import FsDriftParseException, TypeExc
def getenv_or_default(var_name, var_default):
v = os.getenv(var_name)
if v == None:
v = var_default
return v
# command line parameter variables here
class FsDriftOpts:
def __init__(self):
self.input_yaml = None
self.output_json_path = None # filled in later
self.host_set = [] # default is local test
self.top_directory = '/tmp/foo'
self.threads = 2 # number of subprocesses per host
self.is_slave = False
self.duration = 1
self.max_files = 200
self.max_file_size_kb = 10
self.max_record_size_kb = 1
self.max_random_reads = 2
self.max_random_writes = 2
self.fdatasync_probability_pct = 10
self.fsync_probability_pct = 20
self.levels = 2
self.subdirs_per_dir = 3
self.rsptimes = False
self.workload_table_csv_path = None
self.stats_report_interval = max(self.duration // 60, 5)
self.pause_between_ops = 100
self.pause_secs = self.pause_between_ops / float(USEC_PER_SEC)
self.incompressible = False
# new parameters related to gaussian filename distribution
self.random_distribution = FileAccessDistr.uniform
self.mean_index_velocity = 1.0 # default is a fixed mean for the distribution
        # just a guess: most accesses are limited to ~1% of total files,
# so more cache-friendly
self.gaussian_stddev = self.max_files * 0.01
if self.max_files < 1000:
self.gaussian_stddev = self.max_files * 0.1
# just a guess, most files will be created before they are read
self.create_stddevs_ahead = 3.0
self.drift_time = -1
self.mount_command = None
self.fullness_limit_pct = 85
# not settable
self.is_slave = False
self.as_host = None # filled in by worker host
self.verbosity = 0
self.tolerate_stale_fh = False
self.launch_as_daemon = False
self.python_prog = getenv_or_default('PYTHONPROG', '/usr/bin/python')
self.fsd_remote_dir = getenv_or_default('FSD_REMOTE_DIR', '/usr/local/bin')
def kvtuplelist(self):
return [
('input YAML', self.input_yaml),
('top directory', self.top_directory),
('JSON output file', self.output_json_path),
('save response times?', self.rsptimes),
('stats report interval', self.stats_report_interval),
('workload table csv path', self.workload_table_csv_path),
('host set', ','.join(self.host_set)),
('threads', self.threads),
('test duration', self.duration),
('maximum file count', self.max_files),
('maximum file size (KB)', self.max_file_size_kb),
('maximum record size (KB)', self.max_record_size_kb),
('maximum random reads per op', self.max_random_reads),
('maximum random writes per op', self.max_random_writes),
('fsync probability pct', self.fsync_probability_pct),
('fdatasync probability pct', self.fdatasync_probability_pct),
('directory levels', self.levels),
('subdirectories per directory', self.subdirs_per_dir),
('incompressible data', self.incompressible),
('pause between ops (usec)', self.pause_between_ops),
('distribution', FileAccessDistr2str(self.random_distribution)),
('mean index velocity', self.mean_index_velocity),
('gaussian std. dev.', self.gaussian_stddev),
('create stddevs ahead', self.create_stddevs_ahead),
('mount command', self.mount_command),
('verbosity', self.verbosity),
('pause path', self.pause_path),
('abort path', self.abort_path),
('tolerate stale file handles', self.tolerate_stale_fh),
('fullness limit percent', self.fullness_limit_pct),
('launch using daemon', self.launch_as_daemon),
('python program', self.python_prog),
('fs-drift-remote.py directory', self.fsd_remote_dir),
]
def __str__(self, use_newline=True, indentation=' '):
kvlist = [ '%-40s = %s' % (k, str(v)) for (k, v) in self.kvtuplelist() ]
if use_newline:
return indentation + ('\n%s' % indentation).join(kvlist)
else:
return ' , '.join(kvlist)
def to_json_obj(self):
d = {}
for (k, v) in self.kvtuplelist():
d[k] = v
return d
def validate(self):
if len(self.top_directory) < 6:
raise FsDriftException(
'top directory %s too short, may be system directory' %
self.top_directory)
if not os.path.isdir(self.top_directory):
raise FsDriftException(
'top directory %s does not exist, so please create it' %
self.top_directory)
if self.workload_table_csv_path == None:
self.workload_table_csv_path = os.path.join(self.top_directory,
'example_workload_table.csv')
workload_table = [
'read, 2',
'random_read, 1',
'random_write, 1',
'append, 4',
'delete, 0.1',
'hardlink, 0.01',
'softlink, 0.02',
'truncate, 0.05',
'rename, 1',
'readdir, 0.1',
'create, 4']
with open(self.workload_table_csv_path, 'w') as w_f:
w_f.write( '\n'.join(workload_table))
def parseopts(cli_params=sys.argv[1:]):
o = FsDriftOpts()
parser = ArgumentParser(description='parse fs-drift parameters')
add = parser.add_argument
add('--input-yaml', help='input YAML file containing parameters',
default=None)
add('--output-json', help='output file containing results in JSON format',
default=None)
add('--workload-table', help='.csv file containing workload mix',
default=None)
add('--duration', help='seconds to run test',
type=positive_integer,
default=o.duration)
add('--host-set', help='comma-delimited list of host names/ips',
type=host_set,
default=o.host_set)
add('--top', help='directory containing all file accesses',
default=o.top_directory)
add('--threads', help='number of subprocesses per host',
type=positive_integer,
default=o.threads)
add('--max-files', help='maximum number of files to access',
type=positive_integer,
default=o.max_files)
add('--max-file-size-kb', help='maximum file size in KB',
type=positive_integer,
default=o.max_file_size_kb)
add('--pause-between-ops', help='delay between ops in microsec',
type=non_negative_integer,
default=o.pause_between_ops)
add('--max-record-size-kb', help='maximum read/write size in KB',
type=positive_integer,
default=o.max_record_size_kb)
add('--max-random-reads', help='maximum consecutive random reads',
type=positive_integer,
default=o.max_random_reads)
add('--max-random-writes', help='maximum consecutive random writes',
type=positive_integer,
default=o.max_random_writes)
add('--fdatasync-pct', help='probability of fdatasync after write',
type=positive_percentage,
default=o.fdatasync_probability_pct)
add('--fsync-pct', help='probability of fsync after write',
type=positive_percentage,
default=o.fsync_probability_pct)
add('--levels', help='number of directory levels in tree',
type=non_negative_integer,
default=o.levels)
add('--dirs-per-level', help='number of subdirectories per directory',
type=non_negative_integer,
default=o.subdirs_per_dir)
add('--report-interval', help='seconds between counter output',
type=positive_integer,
default=o.stats_report_interval)
add('--response-times', help='if True then save response times to CSV file',
type=boolean,
default=o.rsptimes)
add('--incompressible', help='if True then write incompressible data',
type=boolean,
default=o.incompressible)
add('--random-distribution', help='either "uniform" or "gaussian"',
type=file_access_distrib,
default=FileAccessDistr.uniform)
add('--mean-velocity', help='rate at which mean advances through files',
type=float,
default=o.mean_index_velocity)
add('--gaussian-stddev', help='std. dev. of file number',
type=float,
default=o.gaussian_stddev)
add('--create-stddevs-ahead', help='file creation ahead of other opts by this many stddevs',
type=float,
default=o.create_stddevs_ahead)
add('--mount-command', help='command to mount the filesystem containing top directory',
default=o.mount_command)
add('--tolerate-stale-file-handles', help='if true, do not throw exception on ESTALE',
type=boolean,
default=o.tolerate_stale_fh)
add('--fullness-limit-percent', help='stop adding to filesystem when it gets this full',
type=positive_percentage,
default=o.fullness_limit_pct)
add('--verbosity', help='decimal or hexadecimal integer bitmask controlling debug logging',
type=bitmask,
default=o.verbosity)
add('--launch-as-daemon', help='launch remote/containerized fs-drift without ssh',
type=boolean,
default=o.launch_as_daemon)
# parse the command line and update opts
args = parser.parse_args(cli_params)
o.top_directory = args.top
o.output_json_path = args.output_json
o.rsptimes = args.response_times
o.stats_report_interval = args.report_interval
o.host_set = args.host_set
o.threads = args.threads
o.report_interval = args.report_interval
o.workload_table_csv_path = args.workload_table
o.duration = args.duration
o.max_files = args.max_files
o.max_file_size_kb = args.max_file_size_kb
o.max_record_size_kb = args.max_record_size_kb
o.max_random_reads = args.max_random_reads
o.max_random_writes = args.max_random_writes
o.fdatasync_probability_pct = args.fdatasync_pct
o.fsync_probability_pct = args.fsync_pct
o.levels = args.levels
o.subdirs_per_dir = args.dirs_per_level
o.incompressible = args.incompressible
o.pause_between_ops = args.pause_between_ops
o.pause_secs = o.pause_between_ops / float(USEC_PER_SEC)
o.response_times = args.response_times
o.random_distribution = args.random_distribution
o.mean_index_velocity = args.mean_velocity
o.gaussian_stddev = args.gaussian_stddev
o.create_stddevs_ahead = args.create_stddevs_ahead
o.mount_command = args.mount_command
o.tolerate_stale_fh = args.tolerate_stale_file_handles
o.fullness_limit_pct = args.fullness_limit_percent
o.launch_as_daemon = args.launch_as_daemon
o.verbosity = args.verbosity
if args.input_yaml:
print('parsing input YAML file %s' % args.input_yaml)
parse_yaml(o, args.input_yaml)
# some fields derived from user inputs
o.network_shared_path = os.path.join(o.top_directory, 'network-shared')
nsjoin = lambda fn : os.path.join(o.network_shared_path, fn)
o.starting_gun_path = nsjoin('starting-gun.tmp')
o.stop_file_path = nsjoin('stop-file.tmp')
o.param_pickle_path = nsjoin('params.pickle')
o.rsptime_path = nsjoin('host-%s_thrd-%s_rsptimes.csv')
o.abort_path = nsjoin('abort.tmp')
o.pause_path = nsjoin('pause.tmp')
o.checkerflag_path = nsjoin('checkered_flag.tmp')
#o.remote_pgm_dir = os.path.dirname(sys.argv[0])
#if o.remote_pgm_dir == '.':
# o.remote_pgm_dir = os.getcwd()
o.is_slave = sys.argv[0].endswith('fs-drift-remote.py')
return o
# module to parse YAML input file containing fs-drift parameters
# YAML parameter names are identical to CLI parameter names
# except that the leading "--" is removed and single '-' characters
# must be changed to underscore '_' characters
# modifies test_params object with contents of YAML file
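# Illustrative mapping: the CLI option '--max-file-size-kb 1000000' becomes
# the YAML line 'max_file_size_kb: 1000000'.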
def parse_yaml(options, input_yaml_file):
with open(input_yaml_file, 'r') as f:
try:
y = yaml.safe_load(f)
            if y is None:
y = {}
except yaml.YAMLError as e:
emsg = "YAML parse error: " + str(e)
raise FsDriftParseException(emsg)
try:
for k in y.keys():
v = y[k]
if k == 'input_yaml':
raise FsDriftParseException('cannot specify YAML input file from within itself!')
elif k == 'top':
options.top_directory = v
elif k == 'output_json':
options.output_json = v
elif k == 'workload_table':
options.workload_table = v
elif k == 'duration':
options.duration = positive_integer(v)
elif k == 'host_set':
options.host_set = host_set(v)
elif k == 'threads':
options.threads = positive_integer(v)
elif k == 'max_files':
options.max_files = positive_integer(v)
elif k == 'max_file_size_kb':
options.max_file_size_kb = positive_integer(v)
elif k == 'pause_between_ops':
options.pause_between_ops = non_negative_integer(v)
elif k == 'max_record_size_kb':
options.max_record_size_kb = positive_integer(v)
elif k == 'max_random_reads':
options.max_random_reads = positive_integer(v)
elif k == 'max_random_writes':
options.max_random_writes = positive_integer(v)
elif k == 'fdatasync_pct':
options.fdatasync_probability_pct = non_negative_integer(v)
elif k == 'fsync_pct':
options.fsync_probability_pct = non_negative_integer(v)
elif k == 'levels':
options.levels = positive_integer(v)
elif k == 'dirs_per_level':
options.dirs_per_level = positive_integer(v)
elif k == 'report_interval':
options.stats_report_interval = positive_integer(v)
elif k == 'response_times':
options.rsptimes = boolean(v)
elif k == 'incompressible':
options.incompressible = boolean(v)
elif k == 'random_distribution':
options.random_distribution = file_access_distrib(v)
elif k == 'mean_velocity':
options.mean_velocity = float(v)
elif k == 'gaussian_stddev':
options.gaussian_stddev = float(v)
elif k == 'create_stddevs_ahead':
options.create_stddevs_ahead = float(v)
elif k == 'tolerate_stale_file_handles':
options.tolerate_stale_fh = boolean(v)
elif k == 'fullness_limit_percent':
options.fullness_limit_pct = positive_percentage(v)
elif k == 'verbosity':
options.verbosity = bitmask(v)
elif k == 'launch_as_daemon':
options.launch_as_daemon = boolean(v)
else:
raise FsDriftParseException('unrecognized parameter name %s' % k)
except TypeExc as e:
emsg = 'YAML parse error for key "%s" : %s' % (k, str(e))
raise FsDriftParseException(emsg)
if __name__ == "__main__":
# if user supplies command line parameters
if len(sys.argv) > 2:
# accept CLI and parse it without doing anything else
options = parseopts()
options.validate()
print(options)
print('json format:')
print(json.dumps(options.to_json_obj(), indent=2, sort_keys=True))
sys.exit(0)
# otherwise run unit test
import unittest2
class YamlParseTest(unittest2.TestCase):
def setUp(self):
self.params = FsDriftOpts()
def test_parse_all(self):
params = []
params.extend(['--top', '/var/tmp'])
params.extend(['--output-json', '/var/tmp/x.json'])
params.extend(['--workload-table', '/var/tmp/x.csv'])
params.extend(['--duration', '60'])
params.extend(['--threads', '30'])
params.extend(['--max-files', '10000'])
params.extend(['--max-file-size-kb', '1000000'])
params.extend(['--pause-between-ops', '100'])
params.extend(['--max-record-size-kb', '4096'])
params.extend(['--max-random-reads', '4'])
params.extend(['--max-random-writes', '6'])
params.extend(['--fdatasync-pct', '2'])
params.extend(['--fsync-pct', '3'])
params.extend(['--levels', '4'])
params.extend(['--dirs-per-level', '50'])
params.extend(['--report-interval', '60'])
params.extend(['--response-times', 'Y'])
params.extend(['--incompressible', 'false'])
params.extend(['--random-distribution', 'gaussian'])
params.extend(['--mean-velocity', '4.2'])
params.extend(['--gaussian-stddev', '100.2'])
params.extend(['--create-stddevs-ahead', '3.2'])
params.extend(['--tolerate-stale-file-handles', 'y'])
params.extend(['--fullness-limit-percent', '80'])
params.extend(['--verbosity', '0xffffffff'])
params.extend(['--launch-as-daemon', 'Y'])
options = parseopts(cli_params=params)
options.validate()
print(options)
print('json format:')
print(json.dumps(options.to_json_obj(), indent=2, sort_keys=True))
def test_parse_all_from_yaml(self):
fn = '/tmp/sample_parse.yaml'
with open(fn, 'w') as f:
w = lambda s: f.write(s + '\n')
w('top: /tmp')
w('output_json: /var/tmp/x.json')
w('workload_table: /var/tmp/x.csv')
w('duration: 60')
w('threads: 30')
w('max_files: 10000')
w( 'max_file_size_kb: 1000000')
w('pause_between_ops: 100')
w('max_record_size_kb: 4096')
w('max_random_reads: 4')
w('max_random_writes: 6')
w('fdatasync_pct: 2')
w('fsync_pct: 3')
w('levels: 4')
w('dirs_per_level: 50')
w('report_interval: 60')
w('response_times: Y')
w('incompressible: false')
w('random_distribution: gaussian')
w('mean_velocity: 4.2')
w('gaussian_stddev: 100.2')
w('create_stddevs_ahead: 3.2')
w('tolerate_stale_file_handles: y')
w('fullness_limit_percent: 80')
w('verbosity: 0xffffffff')
w('launch_as_daemon: Y')
p = self.params
parse_yaml(p, fn)
assert(p.top_directory == '/tmp')
assert(p.output_json == '/var/tmp/x.json')
assert(p.workload_table == '/var/tmp/x.csv')
assert(p.duration == 60)
assert(p.threads == 30)
assert(p.max_files == 10000)
assert(p.max_file_size_kb == 1000000)
assert(p.pause_between_ops == 100)
assert(p.max_record_size_kb == 4096)
assert(p.max_random_reads == 4)
assert(p.max_random_writes == 6)
assert(p.fdatasync_probability_pct == 2)
assert(p.fsync_probability_pct == 3)
assert(p.levels == 4)
assert(p.dirs_per_level == 50)
assert(p.stats_report_interval == 60)
assert(p.rsptimes == True)
assert(p.incompressible == False)
assert(p.random_distribution == FileAccessDistr.gaussian)
assert(p.mean_velocity == 4.2)
assert(p.gaussian_stddev == 100.2)
assert(p.create_stddevs_ahead == 3.2)
assert(p.tolerate_stale_fh == True)
assert(p.fullness_limit_pct == 80)
assert(p.verbosity == 0xffffffff)
assert(p.launch_as_daemon == True)
def test_parse_negint(self):
fn = '/tmp/sample_parse_negint.yaml'
with open(fn, 'w') as f:
f.write('max_files: -3\n')
try:
parse_yaml(self.params, fn)
except FsDriftParseException as e:
msg = str(e)
                if 'greater than zero' not in msg:
raise e
def test_parse_hostset(self):
fn = '/tmp/sample_parse_hostset.yaml'
with open(fn, 'w') as f:
f.write('host_set: host-foo,host-bar\n')
parse_yaml(self.params, fn)
assert(self.params.host_set == [ 'host-foo', 'host-bar' ])
unittest2.main()
|
bengland2/fsstress
|
opts.py
|
Python
|
apache-2.0
| 22,098
|
[
"Gaussian"
] |
481aab4c3513c1f4f430b34b361bf7413b9f90b6235106d286957d138c027108
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.ops import control_flow_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
real_op = GetRealOp(op)
# pylint: disable=protected-access
ctxt = real_op._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(ctxt, WhileContext):
merge_op = op.grad_state.switch_map.get(real_op)
if merge_op:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO: Need to perform shape inference with this new input.
# pylint: disable=protected-access
merge_op._update_input(1, control_flow_ops._NextIteration(grad[1]))
# pylint: enable=protected-access
return None, None
else:
# This is the first time this Switch is visited. It always comes
# from the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_fn = control_flow_ops._Merge # pylint: disable=protected-access
merge_op = merge_fn([grad[0], grad[0]], name="b_switch")[0]
op.grad_state.switch_map[real_op] = merge_op.op
return merge_op, None
elif isinstance(ctxt, CondContext):
good_grad = grad[ctxt.branch]
zero_grad = grad[1 - ctxt.branch]
# If this Switch is wrapped, it is part of a cond within a loop. In
# this case, we have called ControlFlowState.ZeroLike() so grad is
# ready for merge. Otherwise, we need a switch to control zero_grad.
if not isinstance(op, ControlFlowOpWrapper):
dtype = good_grad.dtype
zero_grad = switch(zero_grad, ctxt.pred, dtype=dtype)[1 - ctxt.branch]
return merge([good_grad, zero_grad], name="cond_grad")[0], None
else:
false_grad = switch(grad[0], real_op.inputs[1])[0]
true_grad = switch(grad[1], real_op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
real_op = GetRealOp(op)
input_op = real_op.inputs[0].op
# pylint: disable=protected-access
ctxt = input_op._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(ctxt, WhileContext):
grad_ctxt = op.grad_state.grad_context
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(ctxt, CondContext):
pred = ctxt.pred
if isinstance(op, ControlFlowOpWrapper):
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = op.grad_state
real_pred = grad_state.history_map.get(pred.name)
if not real_pred:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(real_op.inputs)
cond = [math_ops.equal(real_op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
"""Gradients for an exit op are calculated using an Enter op."""
real_op = GetRealOp(op)
# pylint: disable=protected-access
forward_ctxt = real_op._get_control_flow_context()
# pylint: enable=protected-access
if not forward_ctxt.back_prop:
# No gradient computation for this loop.
return None
grad_ctxt = op.grad_state.grad_context
grad_ctxt.AddName(grad.name)
enter_fn = control_flow_ops._Enter # pylint: disable=protected-access
grad_ctxt.Enter()
result = enter_fn(grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
real_op = GetRealOp(op)
# pylint: disable=protected-access
forward_ctxt = real_op._get_control_flow_context()
# pylint: enable=protected-access
if not forward_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If `back_prop` is false, no gradient
# is computed and the incoming gradient is passed through unchanged.
return grad
grad_ctxt = op.grad_state.grad_context
if real_op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
result = grad_ctxt.AddBackPropAccumulator(grad)
else:
result = exit(grad)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
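# A minimal usage sketch (illustration only; assumes TensorFlow 1.x graph
# mode). A tensor captured by the loop body is loop-invariant and enters the
# loop through an Enter op with is_constant=True, so its gradient takes the
# accumulator path above:
#
#   import tensorflow as tf
#   w = tf.constant(2.0)  # loop invariant
#   i0, x0 = tf.constant(0), tf.constant(1.0)
#   _, y = tf.while_loop(lambda i, x: i < 3,
#                        lambda i, x: (i + 1, w * x), (i0, x0))
#   dy_dw, = tf.gradients(y, w)  # accumulated via AddBackPropAccumulator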
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
| DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/python/ops/control_flow_grad.py | Python | mit | 8,308 | ["VisIt"] | f62953b8aa171e9cfd82990bf135ed4b7b9eed10bd43dc130a5da503ec17a84c |
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import time
import os
import subprocess
from os.path import join, abspath
from zeroinstall.injector import model, qdom
from repo import incoming, build, catalog, cmd
DAY = 60 * 60 * 24
TIME_TO_GRADUATE = 14 * DAY
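# (For reference: 14 * 24 * 60 * 60 = 1,209,600 seconds.)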
def handle(args):
cmd.find_config()
config = cmd.load_config()
messages = incoming.process_incoming_dir(config)
do_update(config, messages)
def do_update(config, messages = None):
feeds, files = build.build_public_feeds(config)
files += [f.public_rel_path for f in feeds]
files += catalog.write_catalogs(config, feeds)
feeds_dir = abspath('feeds')
os.chdir('public')
# Add default styles, if missing
resources_dir = join('resources')
if not os.path.isdir(resources_dir):
os.mkdir(resources_dir)
for resource in ['catalog.xsl', 'catalog.xsl.de', 'catalog.css', 'feed.xsl', 'feed.xsl.de', 'feed.css']:
target = join('resources', resource)
files.append(target)
if not os.path.exists(target):
with open(join(config.default_resources, resource), 'rt') as stream:
data = stream.read()
data = data.replace('@REPOSITORY_BASE_URL@', config.REPOSITORY_BASE_URL)
with open(target, 'wt') as stream:
stream.write(data)
if not messages:
messages = ['0repo update']
config.upload_public_dir(files, message = ', '.join(messages))
out = subprocess.check_output(['git', 'status', '--porcelain'], cwd = feeds_dir, encoding = 'utf-8').strip('\n')
if out:
print("Note: you have uncommitted changes in {feeds}:".format(feeds = feeds_dir))
print(out)
print("Run 'git commit -a' from that directory to save your changes.")
if getattr(config, 'TRACK_TESTING_IMPLS', True):
graduation_check(feeds, feeds_dir)
def graduation_check(feeds, feeds_dir):
# Warn about releases that are still 'testing' a while after release
now = time.time()
def age(impl):
released = impl.metadata.get('released', None)
if not released:
return 0
released_time = time.mktime(time.strptime(released, '%Y-%m-%d'))
return now - released_time
shown_header = False
for feed in feeds:
with open(feed.source_path, 'rb') as stream:
zfeed = model.ZeroInstallFeed(qdom.parse(stream))
if zfeed.implementations:
# Find the latest version number (note that there may be several implementations with this version number)
latest_version = max(impl.version for impl in list(zfeed.implementations.values()))
testing_impls = [impl for impl in list(zfeed.implementations.values())
if impl.version == latest_version and
impl.upstream_stability == model.stability_levels['testing'] and
age(impl) > TIME_TO_GRADUATE]
if testing_impls:
if not shown_header:
print("Releases which are still marked as 'testing' after {days} days:".format(
days = TIME_TO_GRADUATE / DAY))
shown_header = True
print("- {name} v{version}, {age} days ({path})".format(
age = int(age(testing_impls[0]) / DAY),
name = zfeed.get_name(),
path = os.path.relpath(feed.source_path, feeds_dir),
version = model.format_version(latest_version)))
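# Worked example (hypothetical data, for illustration only): an implementation
# with released='2024-01-01' and upstream stability 'testing' that is checked
# 20 days later has age(impl) = 20 * DAY > TIME_TO_GRADUATE, so it is listed
# as overdue for graduation; checked after only 10 days, it is not.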
| 0install/0repo | repo/cmd/update.py | Python | lgpl-2.1 | 3,147 | ["VisIt"] | 5fb0773792f068c1c0e4bd77a1d2399840652552e87cf554dbe574f3869dafd1 |
""" This is a test of the chain
ReportsClient -> ReportsGeneratorHandler -> AccountingDB
It supposes that the DB is present, and that the service is running.
Also the service DataStore has to be up and running.
this is pytest!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position
import datetime
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.tests.Utilities.Accounting import createDataOperationAccountingRecord
from DIRAC.tests.Utilities.Accounting import createStorageOccupancyAccountingRecord
gLogger.setLevel("DEBUG")
def test_addAndRemoveDataOperation():
# just inserting one record
record = createDataOperationAccountingRecord()
record.setStartTime()
record.setEndTime()
res = gDataStoreClient.addRegister(record)
assert res["OK"]
res = gDataStoreClient.commit()
assert res["OK"]
rc = ReportsClient()
res = rc.listReports("DataOperation")
assert res["OK"]
res = rc.listUniqueKeyValues("DataOperation")
assert res["OK"]
res = rc.getReport(
"DataOperation",
"Successful transfers",
datetime.datetime.utcnow(),
datetime.datetime.utcnow(),
{},
"Destination",
)
assert res["OK"]
# now removing that record
res = gDataStoreClient.remove(record)
assert res["OK"]
def test_addAndRemoveStorageOccupancy():
# just inserting one record
record = createStorageOccupancyAccountingRecord()
record.setStartTime()
record.setEndTime()
res = gDataStoreClient.addRegister(record)
assert res["OK"]
res = gDataStoreClient.commit()
assert res["OK"]
rc = ReportsClient()
res = rc.listReports("StorageOccupancy")
assert res["OK"]
res = rc.listUniqueKeyValues("StorageOccupancy")
assert res["OK"]
res = rc.getReport(
"StorageOccupancy",
"Free and Used Space",
datetime.datetime.utcnow(),
datetime.datetime.utcnow(),
{},
"StorageElement",
)
assert res["OK"]
# now removing that record
res = gDataStoreClient.remove(record)
assert res["OK"]
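# Example invocation (a sketch; per the module docstring it assumes the
# AccountingDB, the ReportsGenerator service and the DataStore service are
# all up and running):
#
#   pytest tests/Integration/AccountingSystem/Test_ReportsClient.py -v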
| ic-hep/DIRAC | tests/Integration/AccountingSystem/Test_ReportsClient.py | Python | gpl-3.0 | 2,505 | ["DIRAC"] | 59c7e86c2b235237938e7fb19e9f1409dffc0f82c5fe1801ca545c1a7434d4ba |
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy.integrate import IntegrationWarning
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from .test_continuous_basic import distcont
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
'foldcauchy', 'gamma', 'gengamma', 'loggamma',
'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma', 'moyal',
'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang',
'tukeylambda', 'vonmises', 'vonmises_line', 'pearson3', 'gennorm',
'halfgennorm', 'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus',
'crystalball']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
with suppress_warnings() as sup:
# frechet_l and frechet_r are deprecated, so all their
# methods generate DeprecationWarnings.
sup.filter(category=DeprecationWarning, message=".*frechet_")
D, pval = stats.kstest(dist, '', args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
assert_(pval > alpha,
msg="D = {}; pval = {}; alpha = {}; args = {}".format(
D, pval, alpha, args))
def cases_test_all_distributions():
np.random.seed(1234)
for dist in dists:
distfunc = getattr(stats, dist)
nargs = distfunc.numargs
alpha = 0.01
if dist == 'fatiguelife':
alpha = 0.001
if dist == 'trapz':
args = tuple(np.sort(np.random.random(nargs)))
elif dist == 'triang':
args = tuple(np.random.random(nargs))
elif dist == 'reciprocal' or dist == 'truncnorm':
vals = np.random.random(nargs)
vals[1] = vals[0] + 1.0
args = tuple(vals)
elif dist == 'vonmises':
yield dist, (10,), alpha
yield dist, (101,), alpha
args = tuple(1.0 + np.random.random(nargs))
else:
args = tuple(1.0 + np.random.random(nargs))
yield dist, args, alpha
@pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())
def test_all_distributions(dist, args, alpha):
check_distribution(dist, args, alpha)
def check_vonmises_pdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
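# The property asserted by the two helpers above: the von Mises pdf and the
# cdf (mod 1) are periodic with period 2*pi*scale. A quick illustration
# (values here are not precomputed, just the analytic identity):
#
#   vm = stats.vonmises(1.0, loc=0, scale=1)
#   vm.pdf(0.5) == vm.pdf(0.5 + 2*numpy.pi)  # equal up to floating point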
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
@pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())
def test_retrieving_support(dist, args, alpha):
""""""
dist = getattr(stats, dist)
loc, scale = 1, 2
supp = dist.support(*args)
supp_loc_scale = dist.support(*args, loc=loc, scale=scale)
assert_almost_equal(np.array(supp)*scale + loc, np.array(supp_loc_scale))
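# The identity checked above: under a loc/scale transform the support maps
# affinely, i.e. support(*args, loc, scale) == loc + scale * support(*args).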
class TestRandInt(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = np.linspace(0, 36, 100)
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
# no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestBradford(object):
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestNBinom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestNormInvGauss(object):
def setup_method(self):
np.random.seed(1234)
def test_cdf_R(self):
# test pdf and cdf vals against R
# require("GeneralizedHyperbolic")
# x_test <- c(-7, -5, 0, 8, 15)
# r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
# r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
9.988650664e-01, 9.999848769e-01])
x_test = np.array([-7, -5, 0, 8, 15])
vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
assert_allclose(vals_cdf, r_cdf, atol=1e-9)
def test_pdf_R(self):
# values from R as defined in test_cdf_R
r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
7.450485342e-04, 8.917889931e-06])
x_test = np.array([-7, -5, 0, 8, 15])
vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
assert_allclose(vals_pdf, r_pdf, atol=1e-9)
def test_stats(self):
a, b = 1, 0.5
gamma = np.sqrt(a**2 - b**2)
v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
3.0 * (1 + 4 * b**2 / a**2) / gamma)
assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
def test_ppf(self):
a, b = 1, 0.5
x_test = np.array([0.001, 0.5, 0.999])
vals = stats.norminvgauss.ppf(x_test, a, b)
assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck(object):
def setup_method(self):
np.random.seed(1234)
def test_sf(self):
vals = stats.planck.sf([1, 2, 3], 5.)
expected = array([4.5399929762484854e-05,
3.0590232050182579e-07,
2.0611536224385579e-09])
assert_array_almost_equal(vals, expected)
def test_logsf(self):
vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
expected = array([-1001000., -2001000., -3001000.])
assert_array_almost_equal(vals, expected)
class TestGennorm(object):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(object):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(object):
def setup_method(self):
np.random.seed(1234)
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_moments(self):
m, v, s, k = stats.truncnorm.stats(-30, 30, moments='mvsk')
assert_almost_equal(m, 0)
assert_almost_equal(v, 1)
assert_almost_equal(s, 0.0)
assert_almost_equal(k, 0.0)
@pytest.mark.xfail(reason="truncnorm rvs is known to fail at extreme tails")
def test_gh_2477_large_values(self):
# Check a case that fails because of extreme tailness.
low, high = 100, 101
with np.errstate(divide='ignore'):
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
# Results compare with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
class TestLoggamma(object):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic(object):
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
class TestLogser(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
assert_allclose(m, 2.5e-61)
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
# >>> mpmath.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
assert_allclose(m, 1.000000005)
class TestNorm(object):
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestPareto(object):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
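# Derivation of the expected value: for Pareto with shape b and loc=0,
# sf(x) = (scale/x)**b for x >= scale, so here (1.5 / 1e9)**2 = 2.25e-18.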
class TestGenpareto(object):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
a, b = stats.genpareto._get_support(c)
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose(stats.genpareto._get_support(c), [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
class TestPearson3(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(object):
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
# shape parameter is opposite what is expected
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson(object):
def setup_method(self):
np.random.seed(1234)
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
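# Closed forms used above: for Poisson(mu), mean = var = mu,
# skewness = 1/sqrt(mu) and excess kurtosis = 1/mu (both infinite at mu=0).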
class TestZipf(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvGamma(object):
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
# The invgamma sf/isf roundtrip on 32-bit systems has relative
# accuracy ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF(object):
def test_endpoints(self):
# Compute the pdf at the left endpoint dst.a.
data = [[stats.f, (2, 1), 1.0]]
for _f, _args, _correct in data:
ans = _f.pdf(_f.a, *_args)
print(_f, (_args), ans, _correct, ans == _correct)
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
@pytest.mark.xfail(reason='f stats does not properly broadcast')
def test_stats_broadcast(self):
# stats do not fully broadcast just yet
mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
def test_moments_t():
# regression test for #8786
assert_equal(stats.t.stats(df=1, moments='mvsk'),
(np.inf, np.nan, np.nan, np.nan))
assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
(0.0, 2.01/(2.01-2.0), np.nan, np.inf))
assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
class TestRvDiscrete(object):
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_expect(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
class TestSkewNorm(object):
def setup_method(self):
np.random.seed(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon(object):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
class TestExponNorm(object):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
# where it is listed as an exponentially modified gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(1, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
class TestGenExpon(object):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF should always be positive
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(object):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
class TestSkellam(object):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(object):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(object):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
plate="shrimp")
def test_fit_duplicated_fixed_parameter(self):
# At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
# More than one raises a ValueError.
x = [0.1, 0.5, 0.6]
assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
class TestBetaPrime(object):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(object):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
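# (Provenance note, stated as an assumption: scale=1/5 in scipy corresponds
# to rate=5 in R, so the reference values match something like
# dgamma(90, shape=394, rate=5) and dgamma(3, shape=10, rate=5).)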
def test_logpdf(self):
# Regression test for Ticket #1326: corner case, avoid nan from the
# 0*log(0) situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
class TestChi2(object):
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
def test_ppf(self):
# Expected values computed with mpmath.
df = 4.8
x = stats.chi2.ppf(2e-47, df)
assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
x = stats.chi2.ppf(0.5, df)
assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
df = 13
x = stats.chi2.ppf(2e-77, df)
assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
x = stats.chi2.ppf(0.1, df)
assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
class TestGumbelL(object):
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestLevyStable(object):
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [-.05413,-.05413,
0.,0.,0.,0.,
.00533,.00533,.00533,.00533,.00533,
.03354,.03354,.03354,.03354,.03354,
.05309,.05309,.05309,.05309,.05309]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
assert_almost_equal(loc1, 0.00233, 2) # to 2 dps due to rounding error in McCulloch86
# cover alpha=2 scenario
x2 = x + [.05309,.05309,.05309,.05309,.05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.slow
def test_pdf_nolan_samples(self):
""" Test pdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-pdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
# support numpy 1.8.2 for travis
npisin = np.isin if hasattr(np, "isin") else np.in1d
tests = [
# best selects
['best', None, 8, None],
# quadrature is accurate for most alpha except 0.25; perhaps a limitation of Nolan's stablec?
# we reduce the size of x to speed up computation, as numerical integration is slow.
['quadrature', None, 8, lambda r: (r['alpha'] > 0.25) & (npisin(r['x'], [-10,-5,0,5,10]))],
# zolotarev is accurate except at alpha==1, beta != 0
['zolotarev', None, 8, lambda r: r['alpha'] != 1],
['zolotarev', None, 8, lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
['zolotarev', None, 1, lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
# fft accuracy reduces as alpha decreases, fails at low values of alpha and x=0
['fft', 0, 4, lambda r: r['alpha'] > 1],
['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)], # not useful here
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, "Density calculation unstable for alpha=1 and beta!=0.*")
sup.record(RuntimeWarning, "Density calculations experimental for FFT method.*")
p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "pdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
@pytest.mark.slow
def test_cdf_nolan_samples(self):
""" Test cdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-cdf-sample-data.npy')))
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
tests = [
# zolotarev is accurate for all values
['zolotarev', None, 8, None],
# fft accuracy is poor, and very poor for alpha < 1
['fft', 0, 2, lambda r: r['alpha'] > 1],
]
for ix, (default_method, fft_min_points, decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = data[filter_func(data)] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(RuntimeWarning, 'FFT method is considered ' +
'experimental for cumulative distribution ' +
'function evaluations.*')
p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'], subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
failures = subdata2[(np.abs(p-subdata['p']) >= 1.5*10.**(-decimal_places)) | np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places, "cdf test %s failed with method '%s'\n%s" % (ix, default_method, failures), verbose=False)
def test_pdf_alpha_equals_one_beta_non_zero(self):
""" sample points extracted from Tables and Graphs of Stable Probability
Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array([0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4])
density = np.array([.3183, .3096, .2925, .2622,
.1591, .1587, .1599, .1635,
.0637, .0729, .0812, .0955,
.0318, .0390, .0458, .0586,
.0187, .0236, .0285, .0384])
betas = np.array([0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1])
tests = [
['quadrature', None, 4],
#['fft', 0, 4],
['zolotarev', None, 1],
]
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=RuntimeWarning, message="Density calculation unstable.*")
for default_method, fft_min_points, decimal_places in tests:
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
#stats.levy_stable.fft_grid_spacing = 0.0001
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(pdf, density, decimal_places, default_method)
def test_stats(self):
param_sets = [
[(1.48,-.22, 0, 1), (0,np.inf,np.NaN,np.NaN)],
[(2,.9, 10, 1.5), (10,4.5,0,0)]
]
for args, exp_stats in param_sets:
calc_stats = stats.levy_stable.stats(args[0], args[1], loc=args[2], scale=args[3], moments='mvsk')
assert_almost_equal(calc_stats, exp_stats)
class TestArrayArgument(object): # test for ticket:992
def setup_method(self):
np.random.seed(1234)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(object):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
assert_("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
assert_("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(object):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
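# For reference, stats.entropy(pk) computes S = -sum(pk * log(pk)) with
# pk normalized to sum to 1, and stats.entropy(pk, qk) computes the
# Kullback-Leibler divergence sum(pk * log(pk / qk)). A minimal NumPy
# sketch of the one-argument case:
# >>> pk = np.asarray([0.5, 0.2, 0.3]); pk = pk / pk.sum()
# >>> float(-np.sum(pk * np.log(pk)))
# 1.0296530140645737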
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
def test_argsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf']
def setup_method(self):
np.random.seed(1234)
@pytest.mark.slow
@pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())
def test_fit(self, dist, args, alpha):
if dist in self.skip:
pytest.skip("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=DeprecationWarning, message=".*frechet_")
res = distfunc.rvs(*args, **{'size': 200})
vals = distfunc.fit(res)
vals2 = distfunc.fit(res, optimizer='powell')
# Only check the length of the return value.
# FIXME: should check the actual results to see if we are 'close'
# to what was created --- but what is 'close' enough?
assert_(len(vals) == 2+len(args))
assert_(len(vals2) == 2+len(args))
@pytest.mark.slow
@pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions())
def test_fix_fit(self, dist, args, alpha):
# Not sure why 'ncf' and 'beta' are failing.
# frechet has a different len(args) than distfunc.numargs.
if dist in self.skip + ['frechet']:
pytest.skip("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=DeprecationWarning, message=".*frechet_")
res = distfunc.rvs(*args, **{'size': 200})
vals = distfunc.fit(res, floc=0)
vals2 = distfunc.fit(res, fscale=1)
assert_(len(vals) == 2+len(args))
assert_(vals[-2] == 0)
assert_(vals2[-1] == 1)
assert_(len(vals2) == 2+len(args))
if len(args) > 0:
vals3 = distfunc.fit(res, f0=args[0])
assert_(len(vals3) == 2+len(args))
assert_(vals3[0] == args[0])
if len(args) > 1:
vals4 = distfunc.fit(res, f1=args[1])
assert_(len(vals4) == 2+len(args))
assert_(vals4[1] == args[1])
if len(args) > 2:
vals5 = distfunc.fit(res, f2=args[2])
assert_(len(vals5) == 2+len(args))
assert_(vals5[2] == args[2])
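# For reference: in rv_continuous.fit, keywords of the form f0, f1, ...
# (or f<shape-name> / fix_<shape-name>), plus floc and fscale, pin the
# corresponding parameter and remove it from the optimization, e.g.:
# >>> data = stats.gamma.rvs(2.0, size=200, random_state=1234)
# >>> a, loc, scale = stats.gamma.fit(data, floc=0)  # loc held at 0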
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
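# For reference, the relations asserted above are the gamma MLE
# equations with loc fixed: maximizing the per-observation
# log-likelihood (a-1)*log(x) - x/scale - a*log(scale) - gammaln(a)
# gives scale = mean(x)/a and
# log(a) - digamma(a) = log(mean(x)) - mean(log(x)),
# and with scale also fixed, digamma(a) = mean(log(x)) - log(scale).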
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, x.ptp())
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(object):
def setup_method(self):
np.random.seed(1234)
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
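# Freezing just captures the shape/loc/scale arguments once so the
# distribution can be reused without repeating them; a minimal sketch:
# >>> frozen = stats.gamma(2.0, loc=1.0, scale=3.0)
# >>> frozen.mean() == stats.gamma.mean(2.0, loc=1.0, scale=3.0)
# True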
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c=c)
assert_equal(rv.dist._get_support(c), [0, np.inf])
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
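# For reference, expect(func, lb=lb, ub=ub, conditional=True)
# approximates
# E[func(X) | lb <= X <= ub]
#     = integral(func(x)*pdf(x), lb, ub) / P(lb <= X <= ub)
# (a sum over the support for discrete distributions), which is why the
# frozen and unfrozen calls above must agree.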
class TestExpect(object):
# Test for expect method.
#
# Uses the normal distribution, the beta distribution for finite bounds,
# and hypergeom for a discrete distribution with finite support.
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
# rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
def test_skellam(self):
# Use a discrete distribution w/ bi-infinite support. Compute the first
# two moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
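# For reference, the raw moments of a Poisson(mu) variable are the
# Touchard polynomials E[X**n] = sum_k S(n, k) * mu**k with Stirling
# numbers of the second kind S(n, k); for n = 5 these are
# 1, 15, 25, 10, 1, which yields the polynomial used in poiss_moment5.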
class TestNct(object):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(object):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
# see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(object):
def setup_method(self):
np.random.seed(1234)
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestRayleigh(object):
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
class TestExponWeib(object):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestWeibull(object):
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value of x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
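# For reference, these closed forms follow from
# weibull_min.sf(x, a, scale=b) = exp(-(x/b)**a); e.g.
# sf(30, 2, scale=3) = exp(-(30/3)**2) = exp(-100), and the weibull_max
# values are the mirror image x -> -x.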
class TestRdist(object):
@pytest.mark.slow
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
class TestTrapz(object):
def test_reduces_to_triang(self):
modes = [0, 0.3, 0.5, 1]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapz.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapz.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x))
def test_cases(self):
# edge cases
assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),
1.11111111111111111)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),
1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
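# For reference: with corner parameters c <= d, the trapz plateau
# height is u = 2 / (1 + d - c) (unit area), the rising edge is u*x/c
# and the falling edge u*(1-x)/(1-d); e.g. u = 2/1.6 = 1.25 for
# (c, d) = (0.2, 0.8), and pdf(0.1) = 1.25*0.1/0.2 = 0.625, matching
# the values asserted above.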
def test_trapz_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapz.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapz.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
class TestTriang(object):
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke(object):
def test_moments(self):
k, s = 4.642, 0.597
# n-th moment exists only if n < s
assert_equal(stats.mielke(k, s).moment(1), np.inf)
assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
def test_burr_equivalence(self):
x = np.linspace(0.01, 100, 50)
k, s = 2.45, 5.32
assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr(object):
def test_endpoints_7491(self):
# gh-7491
# Compute the pdf at the left endpoint dst.a.
data = [
[stats.fisk, (1,), 1],
[stats.burr, (0.5, 2), 1],
[stats.burr, (1, 1), 1],
[stats.burr, (2, 0.5), 1],
[stats.burr12, (1, 0.5), 0.5],
[stats.burr12, (1, 1), 1.0],
[stats.burr12, (1, 2), 2.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
correct = [np.log(_correct_) for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_burr_stats_9544(self):
# gh-9544. Test from gh-9978
c, d = 5.0, 3
mean, variance = stats.burr(c, d).stats()
# mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
# var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 - (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
assert_allclose(mean, mean_hc)
assert_allclose(variance, variance_hc)
def test_burr_nan_mean_var_9544(self):
# gh-9544. Test from gh-9978
c, d = 0.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isnan(mean))
assert_(np.isnan(variance))
c, d = 1.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isfinite(mean))
assert_(np.isnan(variance))
c, d = 0.5, 3
e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
assert_(np.isnan(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 1.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 2.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 3.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isnan(e4))
c, d = 4.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isfinite(e4))
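# For reference, burr(c, d) has a finite n-th raw moment only when
# n < c, which is exactly the finite/nan pattern checked above as c
# increases from 0.5 to 4.5 with n = 1..4.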
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
g = stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjusted to avoid nan from the 0*log(0) situation
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
try:
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
finally:
np.seterr(**olderr)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
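# For reference: for lam > 0 the Tukey lambda distribution has bounded
# support [-1/lam, 1/lam], so with lam = 2 part of the x grid above
# falls outside the support and the pdf is legitimately zero there,
# while for lam <= 0 the support is the whole real line.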
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
olderr = np.seterr(divide='ignore')
try:
params = np.array(stats.lognorm.fit(x, floc=0.))
finally:
np.seterr(**olderr)
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some tests for the variance and kurtosis of the Tukey Lambda distr.
# See test_tukeylambda_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
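# For reference, the _munp values follow from
# E[X**n] = gamma(a + n/c) / gamma(a) for gengamma(a, c); with c = 1
# and n = -2 this is 1/((a-1)*(a-2)), i.e. 1/(199*198) for a = 200 and
# 1/(9*8) for a = 10, as asserted above.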
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
try:
olderr = np.seterr(invalid='ignore')
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The maximum number of subdivisions .50. has been "
"achieved.")
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
stats.ksone.fit(d)
finally:
np.seterr(**olderr)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
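# The derivative check above uses the complex-step trick: for an
# analytic f, Im(f(x + i*h))/h approximates f'(x) to O(h**2) without
# subtractive cancellation, so d(logcdf)/dx can be compared against
# pdf/cdf = exp(logpdf - logcdf) even at h = 1e-10.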
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
np.random.seed(1234)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
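# For reference: positional arguments beyond the shape parameters are
# consumed as loc and then scale, so e.g.
# >>> stats.gamma.pdf(x, 2, 3, 4)   # a=2, loc=3, scale=4
# and any further positional argument, or a keyword repeating one of
# them, raises TypeError as checked above.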
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
# ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in log")
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
# inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
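# For reference: when ``shapes`` is not passed explicitly,
# rv_continuous infers the shape parameters by inspecting the signature
# of _pdf (or _cdf); a minimal sketch (class name illustrative only):
# >>> class _scaled_norm_gen(stats.rv_continuous):
# ...     def _pdf(self, x, a):
# ...         return a * stats.norm._pdf(x)
# >>> _scaled_norm_gen(name='snorm').shapes
# 'a'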
class TestSubclassingExplicitShapes(object):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(object):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
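# Sketch of the inference the tests above rely on (illustration only): with no
# explicit `shapes`, rv_continuous inspects the _pdf signature to derive the
# shape parameters and `numargs`.
def _inferred_shapes_sketch():
    from scipy import stats
    class _inferred_gen(stats.rv_continuous):
        def _pdf(self, x, a):
            return 42.0
    dist = _inferred_gen(name='dummy')
    return dist.numargs, dist.shapes  # (1, 'a'), inferred from _pdf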
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
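# The four regression tests above all exercise the same round-trip identity in
# an extreme tail; a generic sketch of the check (illustration only):
def _tail_roundtrip_sketch():
    import numpy as np
    from scipy import stats
    x = 1e-100
    assert np.isclose(stats.lomax.ppf(stats.lomax.cdf(x, 1), 1), x,
                      rtol=1e-7, atol=0)
    assert np.isclose(stats.gompertz.ppf(stats.gompertz.cdf(x, 1), 1), x,
                      rtol=1e-7, atol=0)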
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
p = stats.genextreme.cdf(.5, 0)
p = stats.genextreme.pdf(.5, 0)
p = stats.genextreme.ppf(.5, 0)
p = stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
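# The expected entropies above are all consistent with the closed form
#   H(c; scale) = log(scale) + euler_gamma*(1 - c) + 1
# (read off from the test data; stated as an observation, not documented API):
def _genextreme_entropy_sketch(c, scale=1.0):
    import numpy as np
    euler_gamma = 0.5772156649015329
    return np.log(scale) + euler_gamma * (1.0 - c) + 1.0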
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
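# The mpmath recipe above is the closed-form inverse of the Burr XII CDF,
# F(x; c, d) = 1 - (1 + x**c)**(-d), i.e. ppf(p) = ((1-p)**(-1/d) - 1)**(1/c).
# A naive double-precision sketch of the same formula (illustration only; for
# p ~ 1e-16 the (1 - p) term cancels catastrophically, which is exactly why
# the reference value is computed with mpmath at high precision):
def _burr12_ppf_sketch(p, c, d):
    return ((1.0 - p)**(-1.0 / d) - 1.0)**(1.0 / c)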
def test_crystalball_function():
"""
All values are calculated using the independent implementation of the
ROOT framework (see https://root.cern.ch/).
Corresponding ROOT code is given in the comments.
"""
X = np.linspace(-5.0, 5.0, 21)[:-1]
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
0.013226, 0.00334407, 0.000658486, 0.000100982,
1.20606e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
0.0530497, 0.0172227, 0.00435458, 0.000857469,
0.000131497, 1.57051e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
0.0265249])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
"""
All values are calculated using the pdf formula and the integrate function
of Mathematica
"""
    # The last two (beta, m) pairs test the special case m == beta**2
beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
# The distribution should be correctly normalised
expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
calculated_0th_moment = stats.crystalball._munp(0, beta, m)
assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
# calculated using wolframalpha.com
# e.g. for beta = 2 and m = 3 we calculate the norm like this:
# integrate exp(-x^2/2) from -2 to infinity +
# integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
expected_1th_moment = a / norm
calculated_1th_moment = stats.crystalball._munp(1, beta, m)
assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
expected_2th_moment = a / norm
calculated_2th_moment = stats.crystalball._munp(2, beta, m)
assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
expected_3th_moment = a / norm
calculated_3th_moment = stats.crystalball._munp(3, beta, m)
assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
expected_4th_moment = a / norm
calculated_4th_moment = stats.crystalball._munp(4, beta, m)
assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
expected_5th_moment = a / norm
calculated_5th_moment = stats.crystalball._munp(5, beta, m)
assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
def test_argus_function():
# There is no usable reference implementation.
# (RootFit implementation returns unreasonable results which are not
# normalized correctly.)
# Instead we do some tests if the distribution behaves as expected for
# different shapes and scales.
for i in range(1, 10):
for j in range(1, 10):
assert_equal(stats.argus.pdf(i + 0.001, chi=j, scale=i), 0.0)
assert_(stats.argus.pdf(i - 0.001, chi=j, scale=i) > 0.0)
assert_equal(stats.argus.pdf(-0.001, chi=j, scale=i), 0.0)
assert_(stats.argus.pdf(+0.001, chi=j, scale=i) > 0.0)
for i in range(1, 10):
assert_equal(stats.argus.cdf(1.0, chi=i), 1.0)
assert_equal(stats.argus.cdf(1.0, chi=i),
1.0 - stats.argus.sf(1.0, chi=i))
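# With no trusted reference implementation (see the comment above), one more
# sanity check along the same lines is numerical normalization of the pdf over
# its support [0, 1]; a hedged sketch, not part of the suite:
def _argus_normalization_sketch(chi=2.0):
    from scipy import stats, integrate
    total, _ = integrate.quad(lambda x: stats.argus.pdf(x, chi=chi), 0.0, 1.0)
    return total  # should be close to 1.0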
class TestHistogram(object):
def setup_method(self):
np.random.seed(1234)
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
        # Test the corner cases explicitly:
        # as stated above, the pdf in the bin [8,9) is greater than one would
        # naively expect, because np.histogram put the 9 into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
        # 9 is outside our defined bins [8,9), hence the pdf is already 0;
        # for a continuous distribution this is fine, because a single value
        # does not have finite probability.
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
        # First three and last two values in cdf_values are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
        # Test that cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm._munp(n, 1.0, 2.5), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
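# Minimal usage sketch of the template tested above (illustration only): build
# a distribution from binned data with np.histogram and query it like any
# other scipy distribution.
def _rv_histogram_sketch():
    import numpy as np
    from scipy import stats
    data = stats.norm.rvs(size=1000, random_state=0)
    dist = stats.rv_histogram(np.histogram(data, bins=20))
    return dist.pdf(0.0), dist.cdf(0.0), dist.rvs(size=5, random_state=0)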
| lhilt/scipy | scipy/stats/tests/test_distributions.py | Python | bsd-3-clause | 148,441 | ["Gaussian"] | ba842f6c0967d69947fe9aef48a6b75b60558f90891bffbf051fbbb5f1ac0a39 |
#!/usr/bin/env python
##!/Users/pamram/anaconda/bin/python
#import math
#math.pi
import scipy
import scipy.constants as const
import time
import datetime
import sys
"""
THIS PROGRAM COMPUTES A SCANNING SEQUENCE FOR PF/SAM/SOAR
Philippe Amram
previous update: 2015, March, 19
last update: 2016, September, 30
NOTATIONS:
epais = distance between the two plates
gap = the maximum tuning gap
QGC = Queensgate Constant
BCV = Binary Control Value
INTERACTIVE
In interactive mode, interactive = True, in non-interactive mode, interactive = False
"""
print("\n{}".format("-"*100))
print("\n WELCOME ! ")
#print(time.gmtime())
print(" ",time.strftime('%a, %d %b %Y %H:%M:%S GMT'))
print("\n This program prepares your script to run on FP/SAMI")
#print("\n START OF THE PROGRAM")
#interactive = True
interactive = False
def main():
"""
CONSTANTS
"""
#celerite_plus=const.physical_constants["speed of light in vacuum"]
celerite = const.physical_constants["speed of light in vacuum"][0] / 1000. # in km/s
#print("celerite = ",celerite)
#celerite = 299792.458
lamb_halpha = 6562.78
lamb_SIIf = 6716.47
lamb_SIIF = 6730.85
lamb_NIIf = 6548.03
lamb_NIIF = 6583.41
lamb_Ne = 6598.9529
bcv_max = 4095 # 4096 value starting from 0 up to 4095
"""
INITIALISATION OF THE TWO SCRIPT FILES
"""
""" 1) INITIALISATION OF THE SCANNING SCRIPT """
tt = time.strftime('%Y-%m-%dT%Hh%Mm%Ss')
#time.struct_time(tm_year=2015, tm_mon=1, tm_mday=28, tm_hour=19, tm_min=1, tm_sec=11, tm_wday=2, tm_yday=28, tm_isdst=0)
""" 2) INITIALISATION OF THE RUNNING DIRECTORY """
#dirtime = time.strftime('%Y%m%d')
dirtime = "20161002"
if interactive:
print("\n Data of the observing run. ")
print(" The date of the run (e.g. 20150130) will be automatically added to the address of the directory you will give now")
sdir = input(" Please, input the directory name (e.g.:/home2/images/): ")
# sdir = '/home2/images/'
print(" Your input is: {}".format(sdir))
print(" The data of the day will go into the directory : ",sdir + dirtime + '/')
running = input("\n Give the running directory name where you will put and run the script (e.g. 001): ")
sdir = sdir + dirtime + "/" + running
print(" The name of the directory where the script will be ran is : ",sdir)
else:
print(dirtime)
running = input("\n Give the running directory name where you will put and run the script (e.g. 001): ")
#running="012"
sdir = "/home2/images/" + dirtime + '/' + running
""" 3) SCRIPT TO RUN TO COPY THE SCANNING SCRIPT FROM MY COMPUTER TO BTFI COMPUTER """
tt = running
ttsh = tt+'.sh'
Fichier = open(ttsh,'w')
Fichier.write("#!/bin/csh -f\n\n")
Fichier0 = open('scpbtfidr.sh','w')
Fichier0.write("#!/bin/csh -f\n\n")
# Fichier0.write("sshpass -p \"btfi88\" scp {} btfidr@139.229.18.227:/data/{}/{}/\n".format(ttsh,dirtime,running))
Fichier0.write("sshpass -p \"btfi88\" scp {} btfidr@139.229.18.227:/data/{}/scripts/\n".format(ttsh,dirtime))
# btfidr@btfidr:/home2/images/20150317
Fichier0.close()
"""
FABRY-PEROT TO USE
"""
dico0 = {}
dico0[1,1] = "Thickness = 44 microns; tunable gap = 2 microns; p=134 @ Halpha"
dico0[1,2] = "Thickness = 200 microns; tunable gap = 2 microns; p=609 @ Halpha"
if interactive:
print("\n Please, input the name of the Fabry-Perot you wanted to use: ")
print(" For TF ({}) put (1) ".format(dico0[1,1]))
print(" For PF ({}) put (2) ".format(dico0[1,2]))
pftf = int(input(" Your choise : "))
else:
pftf = 1
if pftf > 2:
print(" Sorry, you input a value not allowed, choose between (1) and (2), please restart ")
sys.exit(0)
else:
if pftf == 1:
print(" Your input is : {}".format(dico0[1,1]))
if pftf == 2:
print(" Your input is : {}".format(dico0[1,2]))
#epais = float(input(" Please, input the thickness (not the gap) of the interferometer in $mu m$ (it should be a float, eg epais=350): "))
#epais = p_order * lamb*1e-4 /2
if pftf == 1:
epais = 44.
if pftf == 2:
epais = 200.
"""
CALIBRATION OR OBSERVATION
"""
if interactive:
calibration = int(input("\n Please, input if you make a calibration (0) or an observation (1): "))
#calibration = 0
if calibration == 0:
lamb = lamb_Ne # Ne I
print(" The wavelength of the Neon line will be: {:g}".format(lamb))
else:
calibration = int(input("\n Please, input if you make a calibration (0) or an observation (1): "))
#calibration = 1
if calibration > 1:
print(" Sorry, you input a value not allowed, choose between (1) and (2), please restart ")
sys.exit(0)
if interactive:
if calibration == 0:
print(" You requested a calibration.")
lamb = lamb_Ne
if calibration == 1:
print(" You resqueted an observation.")
#lamb = lamb_halpha
print("\n You have to give the wavelength at rest of the line you will observe.")
lamb = float(input(" Please, input this wavelength (in Angstrom, it should be a float, e.g. Hapha = 6562.78) : "))
#lamb = lamb_halpha
if not interactive:
if calibration == 0:
lamb = lamb_Ne # Ne I
#lamb = 5460.742 # Green Hg
#lamb = 4358.343 # Indigo Hg
#lamb = 6506.5281 # Ne I
#lamb = 6532.8822 # Ne I
if calibration == 1:
#lamb = 6590.4
#lamb = 6571.64
lamb = lamb_halpha
#lamb = lamb_NIIF
#lamb = 7000.
if lamb < 0:
print(" Sorry, you input a value not allowed because {} should be greater than 0, please restart ".format(lamb))
sys.exit(0)
lamb_rest=lamb
if calibration == 0:
vitesse = 0 # km/s
if calibration == 1:
if interactive:
object_name = input("\n Please, input the name of your object (e.g. NGC 7331): ")
print(" Your input is: {} ".format(object_name))
vitesse = float(input("\n Please, input the radial velocity of the galaxy (in km/s): "))
print(" Your input is: {} km/s".format(vitesse))
else:
object_name = input("\n Please, input the name of your object (e.g. NGC 7331): ")
print(" Your input is: {} ".format(object_name))
vitesse = float(input("\n Please, input the radial velocity of the galaxy (in km/s): "))
print(" Your input is: {} km/s".format(vitesse))
#object_name = "Cartwheel"
#vitesse = 9050 # km/s
"""
INITIAL PARAMETER COMPUTATION
"""
def ISL(ll,pp):
#fsr_lamb = lamb/p_order
isl_ll = ll/pp*(1+1/(pp*pp))
return isl_ll
def P_ORDER(ee,ll):
porder = 2. * ee * 1E+4 / ll
return porder
lamb = (vitesse / celerite + 1) * lamb_rest
p_order = P_ORDER(epais,lamb)
p_order_halpha = P_ORDER(epais,lamb_halpha)
p_order_Ne = P_ORDER(epais,lamb_Ne)
p_order0 = int(p_order)
e_fsr = epais /p_order
fsr_lamb = ISL(lamb,p_order)
fsr_lamb_Ne = ISL(lamb_Ne,p_order_Ne)
fsr_lamb_Ha = ISL(lamb_halpha,p_order_halpha)
fsr_kms = celerite * fsr_lamb / lamb
Fichier.write("# General parameters:\n")
Fichier.write("# - You requested to use the following FP: {} \n".format(dico0[1,pftf]))
if calibration == 0 :
Fichier.write("# - You requested to do a CALIBRATION (and not an observation on the sky)\n")
if calibration == 1 :
Fichier.write("# - You requested to do a OBSERVATION (and not a calibration)\n")
Fichier.write("# - The name of the object : {}\n".format(object_name))
Fichier.write("# - The wavelength (at rest) you gave is = {:g} angstroms\n".format(lamb_rest))
if calibration == 1 :
Fichier.write("# - The radial velocity is = {:g} km/s\n".format(vitesse))
if calibration == 1 :
Fichier.write("# - The wavelength (redshifted) = {:g} angstroms\n".format(lamb))
Fichier.write("# Interference order:\n")
Fichier.write("# - The interference order @ {:g} = {:g} \n".format(lamb_halpha,p_order_halpha))
Fichier.write("# - The interference order @ {:g} = {:g} \n".format(lamb_Ne,p_order_Ne))
Fichier.write("# - The interference order @ {:g} = {:g} \n".format(lamb,p_order))
Fichier.write("# Free Spectral Range :\n")
Fichier.write("# - The FSR @ {:g} in wavelength = {:g} Angstrom\n".format(lamb_Ne,fsr_lamb_Ne))
Fichier.write("# - The FSR @ {:g} in wavelength = {:g} Angstrom\n".format(lamb_halpha,fsr_lamb_Ha))
Fichier.write("# - The FSR @ {:g} in thickness = {:g} microns \n".format(lamb,e_fsr))
Fichier.write("# - The FSR @ {:g} in wavelength = {:g} Angstrom\n".format(lamb,fsr_lamb))
Fichier.write("# - The FSR @ {:g} in km/s = {:g} km/s\n".format(lamb,fsr_kms))
"""
QUEENSGATE CONSTANT
"""
if interactive:
print("\n (1) If you know it, you can use the Queensgate Constant already measured with the SAME CS100 AND the the SAME FP.")
print(" (2) If you do not know, you must put the total plate gap in BCV corresponding to one FSR at the wavelength")
print(" '1' means you DO want to give a the Queensgate Constant.")
print(" '2' means you DO NOT want to give a the Queensgate Constant but a number of BCV corresponding to one FSR")
QGC_or_not = int(input(" your input ('1' or '2'): "))
else:
QGC_or_not = 2
if QGC_or_not > 2 or QGC_or_not < 1:
print(" Sorry, you input {} which is a value not allowed please choose '1' or '2' ".format(QG_or_not))
sys.exit(0)
if QGC_or_not == 1:
if interactive:
QGC = float(input("\n Please, input the Queensgate Constant (in Angstrom, could be a float, e.g. 9.30): "))
#QGC = 9.15
print(" Your input is: {} Angstroms.".format(QGC))
else:
QGC = 9.15 # undersampling
QGC = 9.40 # oversampling
QGC = 9.30 # close to be perfect
#dico0[1,1] = "Thickness = 44 microns; tunable gap = 2 microns; p=134 @ Halpha"
#dico0[1,2] = "Thickness = 200 microns; tunable gap = 2 microns; p=609 @ Halpha"
""" 4096 BCV values are available with the CS100, ranging from -2047 to +2048, thus for both interferometer which have a tunable gap of 2 microns, 1 BCV should be equal to 2 microns/4096 = 0.49 nm = 4.9 A.
On the other hand, by definition, QCG = 2 * 1 BCV = 9.8 A/BCV
Obviously fsr_bcv_lamb_QGC = lambda / Q
= 6563 / 9.8
= 670 BCV
but we in fact measure half of it = 335 BCV, this could be due to a bit which is not working any more and so, one BCV is indeed 2 BCV...
"""
if QGC_or_not == 2:
if interactive:
print("\n You first must choose the wavelength at which the gap in BCV will be given.")
print(" NOTE: this value is not necessary the scanning wavelength.")
# lamb_QGC = float(input(" Give this wavelength (could be a float e.g. Ne 6598.9529): "))
lamb_QGC = 6598.9529
print("\n Please, input the total plate gap in BCV corresponding to one FSR at the wavelength {} of reference".format(lamb_QGC))
print(" The BCV range between 0 and {}".format(bcv_max))
fsr_bcv_lamb_QGC = float(input(" your input (could be a float, e.g. 705): "))
#fsr_bcv_lamb_QGC = 352.75
print(" Your input is: {}".format(fsr_bcv_lamb_QGC))
else:
fsr_bcv_lamb_QGC = 357.72
fsr_bcv_lamb_QGC = 340
lamb_QGC = lamb_Ne
QGC = lamb_QGC / fsr_bcv_lamb_QGC
print(" A queensgate has been computed : {}".format(QGC))
fsr_bcv_lamb = lamb / QGC
fsr_bcv_lamb_Ha = lamb_halpha / QGC
fsr_bcv_lamb_Ne = lamb_Ne / QGC
"""
NUMBER OF CHANNELS TO SCAN
"""
if interactive:
print("\n Taking into account the Finesse and the sampling, the number of channel to scan could be computed automatically.")
print(" Alternatively you can define yourself the number of channels to scan.")
print(" (1) You DO WISH to compute automatically the number of channels to scan")
print(" (2) You DO NOT WISH to give manually the number of channels to scan")
nchan_manuel = int(input(" Please give you choose (1 or 2): "))
else:
nchan_manuel = 1
if nchan_manuel == 1:
if interactive:
finesse = float(input("\n Please, input the Finesse (finesse must be a float): "))
else:
finesse = 17.75
finesse = 30.46
# finesse = 20.8
if finesse <= 1:
print(" Sorry, you input a value not allowed because {:g} should be greater than 1, please restart ".format(finesse))
sys.exit(0)
if interactive:
sampling = float(input("\n Please, input the sampling, Shannon indicates that the sampling could be 2 (could be a float): "))
else:
sampling = 2.0
if (sampling) <= 1:
print(" Sorry, you input a value not allowed because {:g} should be greater or equal to one, please restart ".format(sampling))
sys.exit(0)
""" Integer value + 1 to avoid undersampling """
nchan = sampling*finesse
if nchan_manuel == 2:
if interactive:
nchan = int(input("\n Please input the number of channel to scan one FSR (must be an integer): "))
else:
nchan = 38
bcv_step = fsr_bcv_lamb / nchan
if (bcv_step) < 2:
print("\n Sorry, your scanning step in BCV ={:g} is too small, it should not be lower than 2.".format(bcv_step))
        if nchan_manuel == 1:
            print(" This could be due to the finesse (={:g}) and/or the sampling (={:g}) being too high.".format(finesse,sampling))
        if nchan_manuel == 2:
            print(" This could be due to the number of channels (={:g}) being too high.".format(nchan))
print(" Please RESTART from the beginning.")
sys.exit(0)
Fichier.write("# - The queensgate constant QGC = {:g} Angstrom\n".format(QGC))
Fichier.write("# - The FSR in BCV @ {:g}A = {:g}\n".format(lamb,fsr_bcv_lamb))
Fichier.write("# - The FSR in BCV @ {:g}A = {:g}\n".format(lamb_halpha,fsr_bcv_lamb_Ha))
Fichier.write("# - The FSR in BCV @ {:g}A = {:g}\n".format(lamb_Ne,fsr_bcv_lamb_Ne))
Fichier.write("# Finesse & Scanning:\n")
if nchan_manuel == 1:
Fichier.write("# - You gave a real finesse = {:g}\n".format(finesse))
Fichier.write("# - Shannon sampling of the finesse = {:g}\n".format(sampling))
Fichier.write("# - Considering F={:g} and the sampling ={:g}, the float nb of ch to scan for one FSR = {:g}\n".format(finesse,sampling,nchan))
Fichier.write("# - Considering F={:g} and FSR={:g}, the spectral sampling = {:g} Angstroms\n".format(finesse,fsr_lamb,fsr_lamb/finesse))
Fichier.write("# - The spectral Resolution @ {:g} Angstroms = {:g}\n".format(lamb,int(lamb*finesse/fsr_lamb)))
else:
Fichier.write("# - The number of channels to scan for one FSR = {:g}\n".format(nchan))
Fichier.write("# - The average number of BCV for one FSR = {:g}\n".format(bcv_step))
""" For technical reasons I added the parameter delta_iBCV_max (29/09/2016), it seems indeed that the f. CS100 does not respect the order when we resquest to jump a large BCV range at once, thus I introduced a pause of 1 second (sleep 1) each time it moves delta_iBCV_max BCV """
delta_iBCV_max=3
Fichier.write("# - The maximum number of BCV that the CS100 can jump at once = {:g}\n".format(delta_iBCV_max))
"""
SCAN MORE THAN ONE FSR ?
"""
if interactive:
print("\n You can scan more than one FSR.")
print(" NOTE: The number of channel to scan for more than one FSR will be larger and computed automatically.")
overlap = float(input(" Please, input the number of FSR you want to scan (could be a float, \"1\" means you will scan one FSR): "))
else:
overlap = 1.1
if overlap < 0:
print(" Sorry, you input a value not allowed because {:g} should be greater than 0, please restart ".format(overlap))
sys.exit(0)
if (fsr_bcv_lamb*overlap) > bcv_max:
print(" \nSorry, you input a value not allowed because {:g} X {:g} = {:g} is greater than {:g}.".format(int(fsr_bcv_lamb,overlap),int(fsr_bcv_lamb*overlap),bcv_max))
print(" Please RESTART from the beginning.")
sys.exit(0)
else:
fsr_bcv_lamb = fsr_bcv_lamb * overlap
nchan = int(nchan * overlap)+1
Fichier.write("# Overscanning:\n")
Fichier.write("# - You wanted to scan = {:g} FSR \n".format(overlap))
Fichier.write("# - The BCV gap that will be scanned @ {:g} Angstro = {:g}\n".format(lamb,fsr_bcv_lamb))
Fichier.write("# - The total number of channels that will be scanned = {:g}\n".format(nchan))
""" TO SCAN IN DECREASING THE RADIUS OF THE RINGS """
#nfiniz0 = int(input(" Please, input the zero Z value (nfiniz0 must be an integer): "))
#input(" Please, input the initial Z value (nfiniz must be an integer): "))
#nfiniz0 = 0
# DIVIDED BY 4 BECAUSE OF THE UNAVAILABLE BCV RANGE
#nfiniz0 = int(bcv_max/4)
#nfiniz = nfiniz0 - int(fsr_bcv_lamb/4.)
""" TO SCAN IN INCREASING THE RADIUS OF THE RINGS """
#nfiniz0 = int(bcv_max/4)
#nfiniz = nfiniz0 + int(fsr_bcv_lamb/4.)
nfiniz = 768
nfiniz = 1024
nfiniz = 1022
nfiniz = 1010
nfiniz = 1019
nfiniz = 1900
nfiniz = 2600
# nfiniz = 750
# nfiniz = 500
nfiniz_end = nfiniz - (nchan - 1) * bcv_step
""" Checking using the basic formula """
base = lamb / QGC
step = base / nchan
# print("lamb= ",lamb," QGC =",QGC," nchan =",nchan," base (BCV)= ",base," step (BCV)= ",step)
#Fichier.write("# - The zero BCV value (nfiniz0) = {:g}\n".format(nfiniz0))
Fichier.write("# - The initial BCV value (nfiniz) = {:g}\n".format(nfiniz))
Fichier.write("# - The final BCV value should be around (nfiniz_end) = {:g}\n".format(nfiniz_end))
uneminute = 60. # second
if (calibration == 0):
basename = "p609_cal"
if (calibration == 1):
basename = "p609_obs"
if interactive:
#nsweeps = int(input(" Please, input how many \"sweeps\" will be done on this scan (nsweeps must be an integer): "))
nsweeps = 1
#nsteps = int(input(" Please, input how many Z steps each sweep will have (nsteps must be an integer): "))
nsteps = 1
#nframe = int(input(" Please, input how many images we will take in each step (each Z value, nframe must be an integer): "))
nframe = 1
basename = input("\n Please, set the basename of your fits image (basename must be a string, e.g. fp_sami): ")
#basename = "fp_sami"
print(" Your basename is : ",basename)
#binxy = input("\n Please, set the binning of the CCD image (binxy must be an integer, e.g. 4 for a 4x4 binning): ")
binxy = 4
exptim = float(input("\n Please, set the image exposure time per channel in seconds (exptim could be a float): "))
#exptim = 5
else:
nsweeps = 1
binxy = 4
nsteps = 1
nframe = 1
#basename = "fp_sami"
exptim_min = 5
exptim = exptim_min * uneminute
exptim = float(input("\n Please, set the image exposure time per channel in seconds (exptim could be a float): "))
#exptim = 120.
readout_time = 3. # 3 seconds = readout time @ binxy = 4 x 4 ???
exptim_total = (nchan * (exptim + readout_time)) / uneminute
if (exptim) < 0:
print(" Sorry, you input a value not allowed because {:g} should be greater than 0, please restart ".format(exptim))
sys.exit(0)
Fichier.write("# SAMI:\n")
Fichier.write("# - You gave nsweeps = {}\n".format(nsweeps))
Fichier.write("# - You gave nsteps = {}\n".format(nsteps))
Fichier.write("# - You gave nframe = {}\n".format(nframe))
Fichier.write("# - You gave exptim per channel = {:g} seconds\n".format(exptim))
Fichier.write("# - Readout time per exposure = {:g} seconds \n".format(readout_time))
Fichier.write("# - Total exposure time (whole observation) = {:g} minutes\n".format(exptim_total))
Fichier.write("# - Total exposure time (whole observation) = {:g} hours\n".format(exptim_total/uneminute))
Fichier.write("# - You gave binxy = {} \n".format(binxy))
Fichier.write("# - You gave the basename = {}\n\n".format(basename))
Fichier.write("set dat = `date +%Y-%m-%dT%H:%M:%S`\n")
Fichier.write("set scid = \"SCAN_$dat\"\n")
Fichier.write("echo \"SCAN $scid\"\n")
Fichier.write("set sweepkey = \"FAPERSWP\"\n")
Fichier.write("set stepkey = \"FAPERSST\"\n")
Fichier.write("set scankey = \"FAPERSID\"\n")
Fichier.write("set nsweeps = {}\n".format(nsweeps))
Fichier.write("set nsteps = {}\n".format(nsteps))
Fichier.write("set nframe = {}\n".format(nframe))
Fichier.write("set nfiniz = {}\n".format(nfiniz))
Fichier.write("set exptim = {}\n".format(exptim))
Fichier.write("set binxy = {}\n".format(binxy))
#Fichier.write("set basename = \"fp_sami\"\n")
Fichier.write("set basename = \"{}\"\n".format(basename))
Fichier.write("set cmd = `sami dhe set image.dir {}`\n".format(sdir))
Fichier.write("set cmd = `sami dhe dbs set $scankey $scid`\n")
Fichier.write("set cmd = `sami dhe dbs set $stepkey custom`\n")
Fichier.write("echo \"setting number of images, exposure time and basename\"\n")
Fichier.write("sami dhe set binning $binxy $binxy\n")
Fichier.write("sami dhe set obs.nimages $nframe\n")
Fichier.write("sami dhe set obs.exptime $exptim\n")
Fichier.write("sami dhe set image.basename $basename\n")
Fichier.write("echo\n")
Fichier.write("echo \"image $basename, exptime $exptim\"\n")
Fichier.write("echo \"binning $binxy\"\n")
dico = {'channel':[], 'step':[], 'BCV':[]}
iBCV = 0
delta_iBCV = 0
ip = 1
ipm = 1
if ip <= ipm:
for cnt in range(1,nchan+1,ip):
ip=ip+1
iBCV0=iBCV
#BCV = nfiniz + (cnt-1) * bcv_step
BCV = nfiniz - (cnt-1) * bcv_step
if BCV >= 0:
if (int(BCV + 0.5) > int(BCV)):
iBCV = int(BCV)+1
else:
iBCV = int(BCV)
else:
if (int(BCV - 0.5) < int(BCV)):
iBCV = int(BCV)-1
else:
iBCV = int(BCV)
#print("ip=",ip," cnt=",cnt," BCV=",BCV," iBCV=",iBCV)
if cnt == 1 :
delta_iBCV = 0
else:
delta_iBCV = iBCV-iBCV0
delta_iBCV_temp=delta_iBCV
icompt = 0
while abs(delta_iBCV_temp) > delta_iBCV_max:
#print("je suis dans la boucle",cnt)
icompt = icompt + 1
#print("delta_iBCV_temp=",delta_iBCV_temp," delta_iBCV_max=",delta_iBCV_max)
Fichier.write("echo\n")
itemp = iBCV0-icompt*delta_iBCV_max
#print("iBCV=",iBCV)
Fichier.write("echo \"moving FP to BCV {} \"\n".format(itemp))
Fichier.write("sami FP moveabs {}\n".format(itemp))
Fichier.write("sleep 1\n")
delta_iBCV_temp=delta_iBCV_temp+delta_iBCV_max
Fichier.write("echo\n")
Fichier.write("echo \"moving FP to channel {}: BCV={}\"\n".format(cnt,iBCV))
Fichier.write("sami FP moveabs {}\n".format(iBCV))
Fichier.write("set sweepid = C%03d\n"%cnt)
Fichier.write("set cmd = `sami dhe dbs set $sweepkey $sweepid`\n")
Fichier.write("sami dhe set image.basename $basename\"_\"$sweepid\n")
Fichier.write("echo \"SWEEP $sweepid\"\n")
Fichier.write("echo \"taking data...(sweep $sweepid step {})\"\n".format(cnt))
Fichier.write("sami dhe expose\n")
dico['channel'].append(cnt)
dico['step'].append(delta_iBCV)
dico['BCV'].append(iBCV)
Fichier.write("# Channel: +Step ==> BCV\n")
Fichier.write("# {}\n".format(dico['channel']))
Fichier.write("# {}\n".format(dico['step']))
Fichier.write("# {}\n".format(dico['BCV']))
Fichier.close()
print("\n The name of the script you have to run on SAMI computer is : ",ttsh)
print(" Copy the following script to SAMI computer in the following directory : ",sdir + '/')
print(" NOTE: You have to pass by BTFIDR computer to have access to SAMI computer")
print(" To copy the script from your computer to BTFI computer,")
print(" run the script \"scpbtfidr.sh\" which have been created now.")
print("\n END OF THE PROGRAM")
print("{}".format("-"*100))
if __name__ == '__main__':
main()
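# Worked example of the two formulas used above, for the p=609 plate
# (a numerical sanity check only; not used by the script):
#   p = 2 * e[um] * 1e4 / lambda[A]   -> 2 * 200 * 1e4 / 6562.78 ~ 609.5
#   FSR = (lambda/p) * (1 + 1/p**2)   -> ~10.8 A at Halpha
def _fp_order_and_fsr(epais_um=200.0, lamb_A=6562.78):
    p = 2.0 * epais_um * 1e4 / lamb_A
    fsr = (lamb_A / p) * (1.0 + 1.0 / (p * p))
    return p, fsr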
| b1quint/samfp | scripts/FP_sami6.4.py | Python | bsd-3-clause | 26,474 | ["Galaxy"] | a922111efd6f599d48d1b756d0c291690d03741c69787c5ecd3c6548553fefa9 |
from optparse import OptionParser
import logging
from collada import *
import vtk
def main():
logging.basicConfig(level=logging.DEBUG)
logging.debug('This is a debug message')
logging.warning('This is a warning message')
logging.error('This is an error message')
logging.critical('This is a critical message')
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", help="scene FILE", metavar="FILE")
(options, args) = parser.parse_args()
logging.debug(options)
logging.info("Opening " + options.filename)
f = open(options.filename, 'rb')
mesh = Collada(f)
logging.info(mesh)
geom = mesh.geometries
print(geom)
for particle in geom:
print(particle)
for triangle in particle.primitives[0]:
print(triangle)
boundgeoms = list(mesh.scene.objects('geometry'))
print(boundgeoms)
for g in boundgeoms:
print(g)
for primSet in g.primitives():
for prim in primSet:
print(prim)
def vtkView():
colors = vtk.vtkNamedColors()
# Create a triangle
points = vtk.vtkPoints()
points.InsertNextPoint(1.0, 0.0, 0.0)
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(0.0, 1.0, 0.0)
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, 0)
triangle.GetPointIds().SetId(1, 1)
triangle.GetPointIds().SetId(2, 2)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# Create a polydata object
trianglePolyData = vtk.vtkPolyData()
# Add the geometry and topology to the polydata
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
# Create mapper and actor
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(trianglePolyData)
actor = vtk.vtkActor()
actor.GetProperty().SetColor(colors.GetColor3d("Cyan"))
actor.SetMapper(mapper)
# Create a renderer, render window, and an interactor
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Triangle")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Add the actors to the scene
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("DarkGreen"))
# Render and interact
renderWindow.Render()
renderWindowInteractor.Start()
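# Optional sketch (assumes the standard VTK Python API): the polydata built in
# vtkView() could also be written to disk for later inspection.
def savePolyData(polydata, filename="triangle.vtp"):
    writer = vtk.vtkXMLPolyDataWriter()
    writer.SetFileName(filename)
    writer.SetInputData(polydata)
    writer.Write()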
if __name__ == "__main__":
main()
#vtkView()
| KonstantinosKr/delta | setup/setup.py | Python | mit | 2,550 | ["VTK"] | 112bdde586ab147477a2ba3abf91a58628226be807b31876f3c7140b4915ceea |
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
import scipy.ndimage.filters as filters
from sklearn.mixture import GaussianMixture
from sklearn.cluster import *
from functools import reduce
from multiprocessing import Pool
import h5py as hd
from scipy.stats import zscore
from sklearn.manifold import TSNE, SpectralEmbedding
from skimage import filters
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import KFold
import xgboost as xgb
def xgb_decodage(Xr, Yr, Xt, n_class):
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
params = {'objective': "multi:softprob",
'eval_metric': "mlogloss", #loglikelihood loss
'seed': np.random.randint(1, 10000), #for reproducibility
'silent': 1,
'learning_rate': 0.01,
'min_child_weight': 2,
'n_estimators': 100,
# 'subsample': 0.5,
'max_depth': 5,
'gamma': 0.5,
'num_class':n_class}
num_round = 100
bst = xgb.train(params, dtrain, num_round)
ymat = bst.predict(dtest)
pclas = np.argmax(ymat, 1)
return pclas
def fit_cv(X, Y, n_cv=10, verbose=1, shuffle = False):
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
cv_kf = KFold(n_splits=n_cv, shuffle=True, random_state=42)
skf = cv_kf.split(X)
Y_hat=np.zeros(len(Y))*np.nan
n_class = len(np.unique(Y))
for idx_r, idx_t in skf:
Xr = np.copy(X[idx_r, :])
Yr = np.copy(Y[idx_r])
Xt = np.copy(X[idx_t, :])
Yt = np.copy(Y[idx_t])
if shuffle: np.random.shuffle(Yr)
Yt_hat = xgb_decodage(Xr, Yr, Xt, n_class)
Y_hat[idx_t] = Yt_hat
return Y_hat
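# Minimal usage sketch for fit_cv (illustration only; assumes xgboost is
# installed): decoding random labels from random features should sit at
# chance level (~0.5).
def _fit_cv_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    Y = rng.randint(0, 2, 200)
    Y_hat = fit_cv(X, Y, n_cv=5, verbose=0)
    return np.mean(Y_hat == Y)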
# store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_LONG_SMALLBINS.h5")
firing_rate = pd.HDFStore("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")['firing_rate']
fr_index = firing_rate.index.values[((firing_rate > 1.0).sum(1) == 3).values]
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
#########################################################################################
# COMPARING LENGTH OF AUTOCORR FOR XGB DECODING (HD vs non-HD)
#########################################################################################
# cuttime = np.arange(10,5000,10)
cuttime = np.unique(np.geomspace(2, 5000, num = 40, dtype = np.int))
n_repeat = 1000
score = pd.DataFrame(index = cuttime, columns = np.arange(n_repeat))
shuff = pd.DataFrame(index = cuttime, columns = np.arange(n_repeat))
ct = 0
for c in cuttime:
print(ct)
ct+=1
autocorr_wak = store_autocorr['wak'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 1.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 1.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 1.0)
neurons = np.intersect1d(np.intersect1d(autocorr_wak.columns, autocorr_rem.columns), autocorr_sws.columns)
neurons = np.intersect1d(neurons, fr_index)
autocorr = pd.concat([autocorr_sws[2:c][neurons],autocorr_rem[2:c][neurons],autocorr_wak[2:c][neurons]], ignore_index = False)
if autocorr.isnull().any().any(): autocorr = autocorr.dropna(axis = 1, how = 'any')
neurons = autocorr.columns
hd = mappings.loc[neurons, 'hd'].values.astype('int')
data = autocorr.values.T
for j in range(n_repeat):
test = fit_cv(data, hd, 10, verbose = 0)
rand = fit_cv(data, hd, 10, verbose = 0, shuffle = True)
score.loc[c,j] = np.sum(test == hd)/np.size(hd)
shuff.loc[c,j] = np.sum(rand == hd)/np.size(hd)
# clf = LogisticRegressionCV(cv = 8, random_state = 0, n_jobs = 8).fit(data, hd)
# # test
# # idx = np.hstack((np.where(hd)[0],np.random.choice(np.where(~hd)[0], np.sum(hd), replace=False)))
# idx = np.where(hd)[0]
# score.loc[c] = clf.score(data[idx], hd[idx])
# # score.loc[c] = clf.predict_proba(data[idx])[:,1].mean()
store = pd.HDFStore("../figures/figures_articles/figure1/score_XGB_HDNOHD.h5", 'w')
store.put('score', score)
store.put('shuff', shuff)
store.close()
a = (score.mean(1)-shuff.mean(1))/(1.0 - shuff.mean(1))
semilogx(a.index.values, a.values)
show()
sys.exit()
# score.to_hdf("../figures/figures_articles/figure1/score_logreg.h5", 'count')
figure()
for i, c in enumerate(np.arange(10, 250, 10)):
autocorr_wak = store_autocorr['wak']
autocorr_rem = store_autocorr['rem']
autocorr_sws = store_autocorr['sws']
autocorr_wak = store_autocorr['wak'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
neurons = np.intersect1d(np.intersect1d(autocorr_wak.columns, autocorr_rem.columns), autocorr_sws.columns)
neurons = np.intersect1d(neurons, fr_index)
autocorr = pd.concat([autocorr_sws[2:c][neurons],autocorr_rem[2:c][neurons],autocorr_wak[2:c][neurons]], ignore_index = False)
if autocorr.isnull().any().any(): autocorr = autocorr.dropna(axis = 1, how = 'any')
neurons = autocorr.columns
hd_index = mappings.index.values[np.where(mappings['hd'])]
hd_index = np.intersect1d(hd_index, neurons)
data = autocorr.values.T
TSNE, divergence = makeAllTSNE(data, 1)
tsne = pd.DataFrame(index = neurons, data = TSNE[0].T)
# km = KMeans(n_clusters=2).fit(data)
km = AgglomerativeClustering(n_clusters=2).fit(data)
subplot(4,6,i+1)
scatter(tsne[0], tsne[1], s = 10, c = km.labels_)
scatter(tsne.loc[hd_index,0], tsne.loc[hd_index,1], s = 3)
show()
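# Note on the summary statistic computed above: the decoding accuracy is
# normalized against the shuffle control as a = (score - shuff)/(1 - shuff),
# i.e. the fraction of the above-chance range recovered by the decoder.
# Scalar sketch of the same computation (illustration only):
def _normalized_score(score, shuff):
    return (score - shuff) / (1.0 - shuff)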
| gviejo/ThalamusPhysio | python/main_make_autocorr_glm.py | Python | gpl-3.0 | 6,229 | ["Gaussian"] | 83abc12d9a41460542e98801240b0ee66c10494b9c2c6acbf3a9737ca1392670 |
# Copyright (C) 2014 Sereina Riniker
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Torsion Fingerprints (Deviation) (TFD)
According to a paper from Schulz-Gasch et al., JCIM, 52, 1499-1512 (2012).
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import Geometry
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import math, os
def _doMatch(inv, atoms):
""" Helper function to check if all atoms in the list are the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] != inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doNotMatch(inv, atoms):
""" Helper function to check if all atoms in the list are NOT the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] == inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doMatchExcept1(inv, atoms):
""" Helper function to check if two atoms in the list are the same,
and one not
Note: Works only for three atoms
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: atom that is different
"""
if len(atoms) != 3:
raise ValueError("Number of atoms must be three")
a1 = atoms[0].GetIdx()
a2 = atoms[1].GetIdx()
a3 = atoms[2].GetIdx()
if (inv[a1] == inv[a2] and inv[a1] != inv[a3] and inv[a2] != inv[a3]):
return atoms[2]
elif (inv[a1] != inv[a2] and inv[a1] == inv[a3] and inv[a2] != inv[a3]):
return atoms[1]
elif (inv[a1] != inv[a2] and inv[a1] != inv[a3] and inv[a2] == inv[a3]):
return atoms[0]
return None
def _getAtomInvariantsWithRadius(mol, radius):
""" Helper function to calculate the atom invariants for each atom
with a given radius
Arguments:
- mol: the molecule of interest
- radius: the radius for the Morgan fingerprint
Return: list of atom invariants
"""
inv = []
for i in range(mol.GetNumAtoms()):
info = {}
fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info)
for k in info.keys():
if info[k][0][1] == radius:
inv.append(k)
return inv
def _getHeavyAtomNeighbors(atom1, aid2=-1):
""" Helper function to calculate the number of heavy atom neighbors.
Arguments:
- atom1: the atom of interest
- aid2: atom index that should be excluded from neighbors (default: none)
Return: a list of heavy atom neighbors of the given atom
"""
if aid2 < 0:
return [n for n in atom1.GetNeighbors() if n.GetSymbol()!='H']
else:
return [n for n in atom1.GetNeighbors() if (n.GetSymbol()!='H' and n.GetIdx()!=aid2)]
def _getIndexforTorsion(neighbors, inv):
""" Helper function to calculate the index of the reference atom for
a given atom
Arguments:
- neighbors: list of the neighbors of the atom
- inv: atom invariants
Return: list of atom indices as reference for torsion
"""
if len(neighbors) == 1: # atom has only one neighbor
return [neighbors[0]]
elif _doMatch(inv, neighbors): # atom has all symmetric neighbors
return neighbors
elif _doNotMatch(inv, neighbors): # atom has all different neighbors
# simply use the first neighbor
return [neighbors[0]]
at = _doMatchExcept1(inv, neighbors) # two neighbors the same, one different
if at is None:
raise ValueError("Atom neighbors are either all the same or all different")
return [at]
def _getBondsForTorsions(mol, ignoreColinearBonds):
""" Determine the bonds (or pair of atoms treated like a bond) for which
torsions should be calculated.
Arguments:
      - mol: the molecule of interest
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
"""
# flag the atoms that cannot be part of the centre atoms of a torsion
# patterns: triple bonds and allenes
patts = [Chem.MolFromSmarts(x) for x in ['*#*', '[$([C](=*)=*)]']]
atomFlags = [0]*mol.GetNumAtoms()
for p in patts:
if mol.HasSubstructMatch(p):
matches = mol.GetSubstructMatches(p)
for match in matches:
for a in match:
atomFlags[a] = 1
bonds = []
doneBonds = [0]*mol.GetNumBonds()
for b in mol.GetBonds():
if b.IsInRing(): continue
a1 = b.GetBeginAtomIdx()
a2 = b.GetEndAtomIdx()
nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2)
nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1)
if not doneBonds[b.GetIdx()] and (nb1 and nb2): # no terminal bonds
doneBonds[b.GetIdx()] = 1;
# check if atoms cannot be middle atoms
if atomFlags[a1] or atomFlags[a2]:
if not ignoreColinearBonds: # search for alternative not-covalently bound atoms
while len(nb1)==1 and atomFlags[a1]:
a1old = a1
a1 = nb1[0].GetIdx()
b = mol.GetBondBetweenAtoms(a1old, a1)
if b.GetEndAtom().GetIdx() == a1old:
nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a1old)
else:
nb1 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1old)
doneBonds[b.GetIdx()] = 1;
while len(nb2)==1 and atomFlags[a2]:
doneBonds[b.GetIdx()] = 1;
a2old = a2
a2 = nb2[0].GetIdx()
b = mol.GetBondBetweenAtoms(a2old, a2)
if b.GetBeginAtom().GetIdx() == a2old:
nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a2old)
else:
nb2 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2old)
doneBonds[b.GetIdx()] = 1;
if nb1 and nb2:
bonds.append((a1, a2, nb1, nb2))
else:
bonds.append((a1, a2, nb1, nb2))
return bonds
def CalculateTorsionLists(mol, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
""" Calculate a list of torsions for a given molecule. For each torsion
the four atom indices are determined and stored in a set.
Arguments:
- mol: the molecule of interest
- maxDev: maximal deviation used for normalization
'equal': all torsions are normalized using 180.0 (default)
'spec': each torsion is normalized using its specific
maximal deviation as given in the paper
- symmRadius: radius used for calculating the atom invariants
(default: 2)
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
Return: two lists of torsions: non-ring and ring torsions
"""
if maxDev not in ['equal', 'spec']:
raise ValueError("maxDev must be either equal or spec")
# get non-terminal, non-cyclic bonds
bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
# get atom invariants
if symmRadius > 0:
inv = _getAtomInvariantsWithRadius(mol, symmRadius)
else:
inv = rdMolDescriptors.GetConnectivityInvariants(mol)
# get the torsions
tors_list = [] # to store the atom indices of the torsions
for a1, a2, nb1, nb2 in bonds:
d1 = _getIndexforTorsion(nb1, inv)
d2 = _getIndexforTorsion(nb2, inv)
if len(d1) == 1 and len(d2) == 1: # case 1, 2, 4, 5, 7, 10, 16, 12, 17, 19
tors_list.append(([(d1[0].GetIdx(), a1, a2, d2[0].GetIdx())], 180.0))
elif len(d1) == 1: # case 3, 6, 8, 13, 20
if len(nb2) == 2: # two neighbors
tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 90.0))
else: # three neighbors
tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 60.0))
elif len(d2) == 1: # case 3, 6, 8, 13, 20
if len(nb1) == 2:
tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 90.0))
else: # three neighbors
tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 60.0))
else: # both symmetric
tmp = []
for n1 in d1:
for n2 in d2:
tmp.append((n1.GetIdx(), a1, a2, n2.GetIdx()))
if len(nb1) == 2 and len(nb2) == 2: # case 9
tors_list.append((tmp, 90.0))
elif len(nb1) == 3 and len(nb2) == 3: # case 21
tors_list.append((tmp, 60.0))
else: # case 15
tors_list.append((tmp, 30.0))
# maximal possible deviation for non-cyclic bonds
if maxDev == 'equal':
tors_list = [(t,180.0) for t,d in tors_list]
# rings
rings = Chem.GetSymmSSSR(mol)
tors_list_rings = []
for r in rings:
# get the torsions
tmp = []
num = len(r)
maxdev = 180.0 * math.exp(-0.025*(num-14)*(num-14))
for i in range(len(r)):
tmp.append((r[i], r[(i+1)%num], r[(i+2)%num], r[(i+3)%num]))
tors_list_rings.append((tmp,maxdev))
return tors_list, tors_list_rings
def _getTorsionAtomPositions(atoms, conf):
""" Helper function to retrieve the coordinates of the four atoms
in a torsion
Arguments:
- atoms: list with the four atoms
- conf: conformation of the molecule
Return: Point3D objects of the four atoms
"""
if len(atoms) != 4:
raise ValueError("List must contain exactly four atoms")
p1 = conf.GetAtomPosition(atoms[0])
p2 = conf.GetAtomPosition(atoms[1])
p3 = conf.GetAtomPosition(atoms[2])
p4 = conf.GetAtomPosition(atoms[3])
return p1, p2, p3, p4
def CalculateTorsionAngles(mol, tors_list, tors_list_rings, confId=-1):
""" Calculate the torsion angles for a list of non-ring and
a list of ring torsions.
Arguments:
- mol: the molecule of interest
- tors_list: list of non-ring torsions
- tors_list_rings: list of ring torsions
- confId: index of the conformation (default: first conformer)
Return: list of torsion angles
"""
torsions = []
conf = mol.GetConformer(confId)
for t,maxdev in tors_list:
if len(t) == 1:
t = t[0]
p1, p2, p3, p4 = _getTorsionAtomPositions(t, conf)
tors = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
if tors < 0: tors += 360.0 # angle between 0 and 360
else:
# loop over torsions and take minimum
tors = 360.0
for t2 in t:
p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
tmp = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
if tmp < 0: tmp += 360.0 # angle between 0 and 360
if tmp < tors: tors = tmp
torsions.append((tors, maxdev))
# rings
for t,maxdev in tors_list_rings:
num = len(t)
# loop over torsions and sum them up
tors = 0
for t2 in t:
p1, p2, p3, p4 = _getTorsionAtomPositions(t2, conf)
tmp = abs((Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0)
tors += tmp
tors /= num
torsions.append((tors, maxdev))
return torsions
def _findCentralBond(mol, distmat):
""" Helper function to identify the atoms of the most central bond.
Arguments:
- mol: the molecule of interest
- distmat: distance matrix of the molecule
Return: atom indices of the two most central atoms (in order)
"""
from numpy import std
# get the most central atom = atom with the least STD of shortest distances
stds = []
for i in range(mol.GetNumAtoms()):
# only consider non-terminal atoms
if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2: continue
tmp = [d for d in distmat[i]]
tmp.pop(i)
stds.append((std(tmp), i))
stds.sort()
aid1 = stds[0][1]
    # find the second most central atom that is bonded to aid1
i = 1
while 1:
if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
i += 1
else:
aid2 = stds[i][1]
break
return aid1, aid2 # most central atom comes first
def _calculateBeta(mol, distmat, aid1):
""" Helper function to calculate the beta for torsion weights
according to the formula in the paper.
w(dmax/2) = 0.1
Arguments:
- mol: the molecule of interest
- distmat: distance matrix of the molecule
- aid1: atom index of the most central atom
Return: value of beta (float)
"""
# get all non-terminal bonds
bonds = []
for b in mol.GetBonds():
nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
        if len(nb1) > 1 and len(nb2) > 1:
bonds.append(b)
# get shortest distance
dmax = 0
for b in bonds:
bid1 = b.GetBeginAtom().GetIdx()
bid2 = b.GetEndAtom().GetIdx()
d = max([distmat[aid1][bid1], distmat[aid1][bid2]])
if (d > dmax): dmax = d
dmax2 = dmax/2.0
beta = -math.log(0.1)/(dmax2*dmax2)
return beta
def CalculateTorsionWeights(mol, aid1=-1, aid2=-1, ignoreColinearBonds=True):
""" Calculate the weights for the torsions in a molecule.
By default, the highest weight is given to the bond
connecting the two most central atoms.
If desired, two alternate atoms can be specified (must
be connected by a bond).
Arguments:
- mol: the molecule of interest
- aid1: index of the first atom (default: most central)
- aid2: index of the second atom (default: second most central)
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
Return: list of torsion weights (both non-ring and ring)
"""
# get distance matrix
distmat = Chem.GetDistanceMatrix(mol)
if aid1 < 0 and aid2 < 0:
aid1, aid2 = _findCentralBond(mol, distmat)
else:
b = mol.GetBondBetweenAtoms(aid1, aid2)
if b is None:
raise ValueError("Specified atoms must be connected by a bond.")
# calculate beta according to the formula in the paper
beta = _calculateBeta(mol, distmat, aid1)
# get non-terminal, non-cyclic bonds
bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
# get shortest paths and calculate weights
weights = []
for bid1, bid2, nb1, nb2 in bonds:
if ((bid1, bid2) == (aid1, aid2)
or (bid2, bid1) == (aid1, aid2)): # if it's the most central bond itself
d = 0
else:
# get shortest distance between the 4 atoms and add 1 to get bond distance
d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
w = math.exp(-beta*(d*d))
weights.append(w)
## RINGS
rings = mol.GetRingInfo()
for r in rings.BondRings():
# get shortest distances
tmp = []
num = len(r)
for bidx in r:
b = mol.GetBondWithIdx(bidx)
bid1 = b.GetBeginAtomIdx()
bid2 = b.GetEndAtomIdx()
# get shortest distance between the 4 atoms and add 1 to get bond distance
d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
tmp.append(d)
# calculate weights and append to list
# Note: the description in the paper is not very clear, the following
# formula was found to give the same weights as shown in Fig. 1
# For a ring of size N: w = N/2 * exp(-beta*(sum(d of each bond in ring)/N)^2)
w = sum(tmp)/float(num)
w = math.exp(-beta*(w*w))
weights.append(w*(num/2.0))
return weights
def CalculateTFD(torsions1, torsions2, weights=None):
""" Calculate the torsion deviation fingerprint (TFD) given two lists of
torsion angles.
Arguments:
- torsions1: torsion angles of conformation 1
- torsions2: torsion angles of conformation 2
- weights: list of torsion weights (default: None)
Return: TFD value (float)
"""
if len(torsions1) != len(torsions2):
raise ValueError("List of torsions angles must have the same size.")
# calculate deviations and normalize (divide by max. possible deviation)
deviations = []
for t1, t2 in zip(torsions1, torsions2):
diff = abs(t1[0]-t2[0])
if (360.0-diff) < diff: # we do not care about direction
diff = 360.0 - diff
deviations.append(diff/t1[1])
# do we use weights?
if weights is not None:
if len(weights) != len(torsions1):
raise ValueError("List of torsions angles and weights must have the same size.")
deviations = [d*w for d,w in zip(deviations, weights)]
sum_weights = sum(weights)
else:
sum_weights = len(deviations)
tfd = sum(deviations)
if sum_weights != 0: # avoid division by zero
tfd /= sum_weights
return tfd
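# Illustrative usage sketch (not part of the original module): computing the
# TFD between two conformers by hand with the functions above. Assumes an
# RDKit build where AllChem.EmbedMultipleConfs accepts these keyword args.
#
#   from rdkit.Chem import AllChem
#   mol = Chem.AddHs(Chem.MolFromSmiles('CCCCO'))
#   AllChem.EmbedMultipleConfs(mol, numConfs=2, randomSeed=42)
#   tl, tlr = CalculateTorsionLists(mol)
#   angles0 = CalculateTorsionAngles(mol, tl, tlr, confId=0)
#   angles1 = CalculateTorsionAngles(mol, tl, tlr, confId=1)
#   weights = CalculateTorsionWeights(mol)
#   tfd = CalculateTFD(angles0, angles1, weights=weights)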
# some wrapper functions
def GetTFDBetweenConformers(mol, confIds1, confIds2, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
""" Wrapper to calculate the TFD between two list of conformers
of a molecule
Arguments:
- mol: the molecule of interest
- confIds1: first list of conformer indices
- confIds2: second list of conformer indices
- useWeights: flag for using torsion weights in the TFD calculation
- maxDev: maximal deviation used for normalization
'equal': all torsions are normalized using 180.0 (default)
'spec': each torsion is normalized using its specific
maximal deviation as given in the paper
- symmRadius: radius used for calculating the atom invariants
(default: 2)
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
Return: list of TFD values
"""
tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius, ignoreColinearBonds=ignoreColinearBonds)
torsions1 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds1]
torsions2 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds2]
tfd = []
if useWeights:
weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
for t1 in torsions1:
for t2 in torsions2:
tfd.append(CalculateTFD(t1, t2, weights=weights))
else:
for t1 in torsions1:
for t2 in torsions2:
tfd.append(CalculateTFD(t1, t2))
return tfd
def GetTFDBetweenMolecules(mol1, mol2, confIds1=-1, confIds2=-1, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
""" Wrapper to calculate the TFD between two list of conformers
of two molecules.
Important: The two molecules must be instances of the same molecule
Arguments:
- mol1: first instance of the molecule of interest
- mol2: second instance of the molecule of interest
- confIds1: list of conformer indices from mol1 (default: first conformer)
- confIds2: list of conformer indices from mol2 (default: first conformer)
- useWeights: flag for using torsion weights in the TFD calculation
- maxDev: maximal deviation used for normalization
'equal': all torsions are normalized using 180.0 (default)
'spec': each torsion is normalized using its specific
maximal deviation as given in the paper
- symmRadius: radius used for calculating the atom invariants
(default: 2)
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
Return: list of TFD values
"""
if (Chem.MolToSmiles(mol1) != Chem.MolToSmiles(mol2)):
raise ValueError("The two molecules must be instances of the same molecule!")
tl, tlr = CalculateTorsionLists(mol1, maxDev=maxDev, symmRadius=symmRadius, ignoreColinearBonds=ignoreColinearBonds)
# first molecule
if confIds1 < 0:
torsions1 = [CalculateTorsionAngles(mol1, tl, tlr)]
else:
torsions1 = [CalculateTorsionAngles(mol1, tl, tlr, confId=cid) for cid in confIds1]
# second molecule
if confIds2 < 0:
torsions2 = [CalculateTorsionAngles(mol2, tl, tlr)]
else:
torsions2 = [CalculateTorsionAngles(mol2, tl, tlr, confId=cid) for cid in confIds2]
tfd = []
if useWeights:
weights = CalculateTorsionWeights(mol1, ignoreColinearBonds=ignoreColinearBonds)
for t1 in torsions1:
for t2 in torsions2:
tfd.append(CalculateTFD(t1, t2, weights=weights))
else:
for t1 in torsions1:
for t2 in torsions2:
tfd.append(CalculateTFD(t1, t2))
return tfd
def GetTFDMatrix(mol, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
""" Wrapper to calculate the matrix of TFD values for the
conformers of a molecule.
Arguments:
- mol: the molecule of interest
- useWeights: flag for using torsion weights in the TFD calculation
- maxDev: maximal deviation used for normalization
'equal': all torsions are normalized using 180.0 (default)
'spec': each torsion is normalized using its specific
maximal deviation as given in the paper
- symmRadius: radius used for calculating the atom invariants
(default: 2)
- ignoreColinearBonds: if True (default), single bonds adjacent to
triple bonds are ignored
if False, alternative not-covalently bound
atoms are used to define the torsion
Return: matrix of TFD values
Note: the matrix is symmetric, so only its strictly lower
triangle is returned as a flat list, e.g. for 5 conformers:
matrix = [ a,
b, c,
d, e, f,
g, h, i, j]
"""
tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius, ignoreColinearBonds=ignoreColinearBonds)
numconf = mol.GetNumConformers()
torsions = [CalculateTorsionAngles(mol, tl, tlr, confId=conf.GetId()) for conf in mol.GetConformers()]
tfdmat = []
if useWeights:
weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
for i in range(0, numconf):
for j in range(0, i):
tfdmat.append(CalculateTFD(torsions[i], torsions[j], weights=weights))
else:
for i in range(0, numconf):
for j in range(0, i):
tfdmat.append(CalculateTFD(torsions[i], torsions[j]))
return tfdmat
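# Illustrative sketch (not part of the original module): unpacking the flat
# lower-triangle list returned by GetTFDMatrix into a full symmetric square
# matrix. Assumes `mol` already has its conformers embedded.
#
#   tfd = GetTFDMatrix(mol)
#   n = mol.GetNumConformers()
#   mat = [[0.0] * n for _ in range(n)]
#   it = iter(tfd)
#   for i in range(n):
#       for j in range(i):
#           mat[i][j] = mat[j][i] = next(it)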
|
strets123/rdkit
|
rdkit/Chem/TorsionFingerprints.py
|
Python
|
bsd-3-clause
| 23,314
|
[
"RDKit"
] |
a28da2190439dec02bbfbf1544534d0931ce024d1154763c54834cac99b59cf3
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 23 15:54:36 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import matplotlib.pyplot as plt
from pynlo.media.crystals.XTAL_PPLN import PPLN
from scipy import integrate
plt.close('all')
npoints = 2**6
crystallength = 40*1e-3
crystal = PPLN(45, length = crystallength)
pump_wl = 1064.
crystal.set_pp(crystal.calculate_poling_period(pump_wl, 1540, None))
sgnl_stop_wl = 1700
NPTS = 1000
mix_bw = crystal.calculate_mix_phasematching_bw(1064, np.linspace(1300, sgnl_stop_wl,NPTS))
idler = 1.0/(1.0/1064 - 1.0/np.linspace(1300, sgnl_stop_wl,NPTS))
print(crystal.invert_dfg_qpm_to_signal_wl(1064, 24e-6))
# ODE for finding 'ideal' QPM structure
# dLambda/dz = 1/phasematching BW
# scale = 4.65e-9 # for propto BW
#scale = 1.3e5 # for propto 1/BW
scale = 7e-6 / (1e3*crystallength) # for linear chirp 10 um / crystal length
def dLdz(L, z):
# signal and bw feed only the commented-out alternative chirp laws below
signal = crystal.invert_dfg_qpm_to_signal_wl(pump_wl, L)
bw = crystal.calculate_mix_phasematching_bw(pump_wl, signal)
#return 1.0/(scale*bw) # chirp proportional to 1/BW
#return (scale*bw) # chirp proportional to BW
return scale # constant dL/dz -> linear chirp
z = 0
L = 32e-6 # period to start at
period_len = L
print("Begin APPLN design")
design = [ [z, L] ]
z = period_len/2.0
while z < 5e-3:
signal = ( np.random.rand() * (5200-3000) + 3000 )
bw_invm_m = crystal.calculate_mix_phasematching_bw(pump_wl, signal)
print ("Signal: %f nm"%(signal))
L = crystal.calculate_poling_period(pump_wl, signal, None)[0]
z += L
design.append([z+L,L])
design = np.array(design)
print(design)
grating_zs = design[:, 0] * 1e3
grating_ps = design[:, 1]
plt.plot(grating_zs, grating_ps)
plt.show()
np.savetxt('h:\\ppln_wg_apod.dat', np.vstack((grating_zs, grating_ps)).T)
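# Illustrative note (not part of the original script): the saved design can be
# read back with numpy, e.g.
#   zs, ps = np.loadtxt('h:\\ppln_wg_apod.dat', unpack=True)
# where zs are the grating positions (mm) and ps the local poling periods (m).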
|
ycasg/PyNLO
|
src/validation/Old and Partial Tests/ppln_generate_random_design.py
|
Python
|
gpl-3.0
| 2,357
|
[
"CRYSTAL"
] |
4d9275c9b8967cf313dafaae5758aa1bc2b9d9c44a285e40fadc9e24eb3f6576
|
# -*- coding: utf-8 -*-
import os
import time
import shutil
import bisect
import tempfile
from math import sqrt
import numpy as np
import netCDF4 as nc4
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
from pysgrid import load_grid
from pysgrid.read_netcdf import NetCDFDataset as SGrid
from pysgrid.processing_2d import avg_to_cell_center, rotate_vectors
import pandas as pd
from rtree import index
from django.core.cache import caches
from wms import mpl_handler
from wms import gfi_handler
from wms import data_handler
from wms import gmd_handler
from wms.models import Dataset, Layer, VirtualLayer, NetCDFDataset
from wms.utils import DotDict, calc_lon_lat_padding, calc_safety_factor, find_appropriate_time
from wms import logger
class SGridDataset(Dataset, NetCDFDataset):
@classmethod
def is_valid(cls, uri):
try:
with EnhancedDataset(uri) as ds:
try:
SGrid(ds)
return True
except ValueError:
if 'sgrid' in ds.Conventions.lower():
return True
else:
return False
except RuntimeError:
try:
with EnhancedMFDataset(uri, aggdim='time') as ds:
try:
SGrid(ds)
return True
except ValueError:
if 'sgrid' in ds.Conventions.lower():
return True
else:
return False
except (OSError, IndexError, AttributeError, RuntimeError, ValueError):
return False
except (OSError, FileNotFoundError, AttributeError):
return False
def has_grid_cache(self):
return all([
os.path.exists(self.topology_file),
os.path.exists(self.face_tree_data_file),
os.path.exists(self.face_tree_index_file)
])
def has_time_cache(self):
return caches['time'].get(self.time_cache_file) is not None
def clear_cache(self):
super().clear_cache()
return caches['time'].delete(self.time_cache_file)
def make_rtree(self):
with self.dataset() as nc:
sg = load_grid(nc)
def rtree_generator_function():
c = 0
centers = np.dstack((sg.center_lon, sg.center_lat))
for i, axis in enumerate(centers):
for j, (x, y) in enumerate(axis):
c += 1
yield (c, (x, y, x, y), (i, j))
logger.info("Building Faces (centers) Rtree Topology Cache for {0}".format(self.name))
_, temp_file = tempfile.mkstemp(suffix='.face')
start = time.time()
p = index.Property()
p.filename = str(temp_file)
p.overwrite = True
p.storage = index.RT_Disk
p.dimension = 2
idx = index.Index(p.filename,
rtree_generator_function(),
properties=p,
overwrite=True,
interleaved=True)
idx.close()
logger.info("Built Faces (centers) Rtree Topology Cache in {0} seconds.".format(time.time() - start))
shutil.move('{}.dat'.format(temp_file), self.face_tree_data_file)
shutil.move('{}.idx'.format(temp_file), self.face_tree_index_file)
def update_time_cache(self):
with self.dataset() as nc:
if nc is None:
logger.error("Failed update_time_cache, could not load dataset "
"as a netCDF4 object")
return
time_cache = {}
layer_cache = {}
time_vars = nc.get_variables_by_attributes(standard_name='time')
for time_var in time_vars:
time_cache[time_var.name] = nc4.num2date(
time_var[:],
time_var.units,
getattr(time_var, 'calendar', 'standard')
)
for ly in self.all_layers():
try:
layer_cache[ly.access_name] = find_appropriate_time(nc.variables[ly.access_name], time_vars)
except ValueError:
layer_cache[ly.access_name] = None
full_cache = {'times': time_cache, 'layers': layer_cache}
logger.info("Built time cache for {0}".format(self.name))
caches['time'].set(self.time_cache_file, full_cache, None)
return full_cache
def update_grid_cache(self, force=False):
with self.dataset() as nc:
if nc is None:
logger.error("Failed update_grid_cache, could not load dataset "
"as a netCDF4 object")
return
sg = load_grid(nc)
# Atomic write
tmphandle, tmpsave = tempfile.mkstemp()
try:
sg.save_as_netcdf(tmpsave)
finally:
os.close(tmphandle)
if os.path.isfile(tmpsave):
shutil.move(tmpsave, self.topology_file)
else:
logger.error("Failed to create topology_file cache for Dataset '{}'".format(self.dataset.name))
return
# Now do the RTree index
self.make_rtree()
def minmax(self, layer, request):
time_index, time_value = self.nearest_time(layer, request.GET['time'])
wgs84_bbox = request.GET['wgs84_bbox']
with self.dataset() as nc:
cached_sg = load_grid(self.topology_file)
lon_name, lat_name = cached_sg.face_coordinates
lon_obj = getattr(cached_sg, lon_name)
lat_obj = getattr(cached_sg, lat_name)
lon = cached_sg.center_lon[lon_obj.center_slicing]
lat = cached_sg.center_lat[lat_obj.center_slicing]
spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
lonmin=wgs84_bbox.minx,
latmin=wgs84_bbox.miny,
lonmax=wgs84_bbox.maxx,
latmax=wgs84_bbox.maxy)
subset_lon = np.unique(spatial_idx[0])
subset_lat = np.unique(spatial_idx[1])
grid_variables = cached_sg.grid_variables
vmin = None
vmax = None
raw_data = None
if isinstance(layer, Layer):
data_obj = getattr(cached_sg, layer.access_name)
raw_var = nc.variables[layer.access_name]
if len(raw_var.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
elif len(raw_var.shape) == 3:
raw_data = raw_var[time_index, subset_lon, subset_lat]
elif len(raw_var.shape) == 2:
raw_data = raw_var[subset_lon, subset_lat]
else:
raise ValueError('Unable to trim variable {0} data.'.format(layer.access_name))
# handle grid variables
if set([layer.access_name]).issubset(grid_variables):
raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)
vmin = np.nanmin(raw_data).item()
vmax = np.nanmax(raw_data).item()
elif isinstance(layer, VirtualLayer):
x_var = None
y_var = None
raw_vars = []
for l in layer.layers:
data_obj = getattr(cached_sg, l.access_name)
raw_var = nc.variables[l.access_name]
raw_vars.append(raw_var)
if len(raw_var.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
raw_data = raw_var[time_index, z_index, subset_lon, subset_lat]
elif len(raw_var.shape) == 3:
raw_data = raw_var[time_index, subset_lon, subset_lat]
elif len(raw_var.shape) == 2:
raw_data = raw_var[subset_lon, subset_lat]
else:
raise ValueError('Unable to trim variable {0} data.'.format(l.access_name))
if x_var is None:
if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
x_var = raw_data
elif data_obj.center_axis == 1:
x_var = raw_data
if y_var is None:
if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
y_var = raw_data
elif data_obj.center_axis == 0:
y_var = raw_data
if ',' in layer.var_name and raw_data is not None:
# Vectors, so return magnitude
data = [
sqrt((u * u) + (v * v)) for (u, v,) in
zip(x_var.flatten(), y_var.flatten())
if not np.isnan(u) and not np.isnan(v)  # `u != np.nan` is always True
]
vmin = min(data)
vmax = max(data)
return gmd_handler.from_dict(dict(min=vmin, max=vmax))
def getmap(self, layer, request):
time_index, time_value = self.nearest_time(layer, request.GET['time'])
wgs84_bbox = request.GET['wgs84_bbox']
with self.dataset() as nc:
cached_sg = load_grid(self.topology_file)
lon_name, lat_name = cached_sg.face_coordinates
lon_obj = getattr(cached_sg, lon_name)
lat_obj = getattr(cached_sg, lat_name)
lon = cached_sg.center_lon[lon_obj.center_slicing]
lat = cached_sg.center_lat[lat_obj.center_slicing]
if isinstance(layer, Layer):
data_obj = getattr(cached_sg, layer.access_name)
raw_var = nc.variables[layer.access_name]
if len(raw_var.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
raw_data = raw_var[time_index, z_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
elif len(raw_var.shape) == 3:
raw_data = raw_var[time_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
elif len(raw_var.shape) == 2:
raw_data = raw_var[data_obj.center_slicing]
else:
raise ValueError('Unable to trim variable {0} data.'.format(layer.access_name))
# handle edge variables
if data_obj.location is not None and 'edge' in data_obj.location:
raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)
if request.GET['image_type'] == 'pcolor':
return mpl_handler.pcolormesh_response(lon, lat, data=raw_data, request=request)
elif request.GET['image_type'] in ['filledhatches', 'hatches', 'filledcontours', 'contours']:
return mpl_handler.contouring_response(lon, lat, data=raw_data, request=request)
else:
raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
elif isinstance(layer, VirtualLayer):
x_var = None
y_var = None
raw_vars = []
for l in layer.layers:
data_obj = getattr(cached_sg, l.access_name)
raw_var = nc.variables[l.access_name]
raw_vars.append(raw_var)
if len(raw_var.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
raw_data = raw_var[time_index, z_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
elif len(raw_var.shape) == 3:
raw_data = raw_var[time_index, data_obj.center_slicing[-2], data_obj.center_slicing[-1]]
elif len(raw_var.shape) == 2:
raw_data = raw_var[data_obj.center_slicing]
else:
raise ValueError('Unable to trim variable {0} data.'.format(l.access_name))
raw_data = avg_to_cell_center(raw_data, data_obj.center_axis)
if x_var is None:
if data_obj.vector_axis and data_obj.vector_axis.lower() == 'x':
x_var = raw_data
elif data_obj.center_axis == 1:
x_var = raw_data
if y_var is None:
if data_obj.vector_axis and data_obj.vector_axis.lower() == 'y':
y_var = raw_data
elif data_obj.center_axis == 0:
y_var = raw_data
if x_var is None or y_var is None:
raise ValueError('Unable to determine x and y variables.')
dim_lengths = [ len(v.dimensions) for v in raw_vars ]
if len(list(set(dim_lengths))) != 1:
raise AttributeError('One or both of the specified variables has incorrect dimensions.')
if request.GET['image_type'] == 'vectors':
angles = cached_sg.angles[lon_obj.center_slicing]
vectorstep = request.GET['vectorstep']
# don't do this if the vectorstep is 1; let's save a microsecond or two
# it's identical to getting all the data
if vectorstep > 1:
data_dim = len(lon.shape)
step_slice = (np.s_[::vectorstep],) * data_dim # make sure the vector step is used for all applicable dimensions
lon = lon[step_slice]
lat = lat[step_slice]
x_var = x_var[step_slice]
y_var = y_var[step_slice]
angles = angles[step_slice]
vectorscale = request.GET['vectorscale']
padding_factor = calc_safety_factor(vectorscale)
# figure out the average distance between lat/lon points
# do the math after taking into the vectorstep if specified
spatial_idx_padding = calc_lon_lat_padding(lon, lat, padding_factor)
spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
lonmin=wgs84_bbox.minx,
latmin=wgs84_bbox.miny,
lonmax=wgs84_bbox.maxx,
latmax=wgs84_bbox.maxy,
padding=spatial_idx_padding
)
subset_lon = self._spatial_data_subset(lon, spatial_idx)
subset_lat = self._spatial_data_subset(lat, spatial_idx)
# rotate vectors
x_rot, y_rot = rotate_vectors(x_var, y_var, angles)
spatial_subset_x_rot = self._spatial_data_subset(x_rot, spatial_idx)
spatial_subset_y_rot = self._spatial_data_subset(y_rot, spatial_idx)
return mpl_handler.quiver_response(subset_lon,
subset_lat,
spatial_subset_x_rot,
spatial_subset_y_rot,
request,
vectorscale
)
else:
raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
def getfeatureinfo(self, layer, request):
with self.dataset() as nc:
data_obj = nc.variables[layer.access_name]
geo_index, closest_x, closest_y, start_time_index, end_time_index, return_dates = self.setup_getfeatureinfo(layer, request)
return_arrays = []
z_value = None
if isinstance(layer, Layer):
if len(data_obj.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[start_time_index:end_time_index, z_index, geo_index[0], geo_index[1]]
elif len(data_obj.shape) == 3:
data = data_obj[start_time_index:end_time_index, geo_index[0], geo_index[1]]
elif len(data_obj.shape) == 2:
data = data_obj[geo_index[0], geo_index[1]]
else:
raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
return_arrays.append((layer.var_name, data))
elif isinstance(layer, VirtualLayer):
# Data needs to be [var1,var2] where var are 1D (nodes only, elevation and time already handled)
for l in layer.layers:
if len(data_obj.shape) == 4:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[start_time_index:end_time_index, z_index, geo_index[0], geo_index[1]]
elif len(data_obj.shape) == 3:
data = data_obj[start_time_index:end_time_index, geo_index[0], geo_index[1]]
elif len(data_obj.shape) == 2:
data = data_obj[geo_index[0], geo_index[1]]
else:
raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
return_arrays.append((l.var_name, data))
# Data is now in the return_arrays list, as a list of numpy arrays. We need
# to add time and depth to them to create a single Pandas DataFrame
if len(data_obj.shape) == 4:
df = pd.DataFrame({'time': return_dates,
'x': closest_x,
'y': closest_y,
'z': z_value})
elif len(data_obj.shape) == 3:
df = pd.DataFrame({'time': return_dates,
'x': closest_x,
'y': closest_y})
elif len(data_obj.shape) == 2:
df = pd.DataFrame({'x': closest_x,
'y': closest_y})
else:
df = pd.DataFrame()
# Now add a column for each member of the return_arrays list
for (var_name, np_array) in return_arrays:
df.loc[:, var_name] = pd.Series(np_array, index=df.index)
return gfi_handler.from_dataframe(request, df)
def wgs84_bounds(self, layer):
try:
cached_sg = load_grid(self.topology_file)
except BaseException:
pass
else:
lon_name, lat_name = cached_sg.face_coordinates
lon_var_obj = getattr(cached_sg, lon_name)
lat_var_obj = getattr(cached_sg, lat_name)
lon_trimmed = cached_sg.center_lon[lon_var_obj.center_slicing]
lat_trimmed = cached_sg.center_lat[lat_var_obj.center_slicing]
lon_max = lon_trimmed.max()
lon_min = lon_trimmed.min()
lat_max = lat_trimmed.max()
lat_min = lat_trimmed.min()
return DotDict(minx=lon_min,
miny=lat_min,
maxx=lon_max,
maxy=lat_max,
bbox=(lon_min, lat_min, lon_max, lat_max)
)
def nearest_z(self, layer, z):
"""
Return the z index and z value that is closest
"""
depths = self.depths(layer)
depth_idx = bisect.bisect_right(depths, z)
try:
depths[depth_idx]
except IndexError:
depth_idx -= 1
return depth_idx, depths[depth_idx]
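# Illustrative example (not part of the original module): with
# depths = [0, 10, 25, 50], nearest_z(..., z=12) selects index 2 (depth 25),
# the first depth greater than z; a z beyond the last depth clamps to index 3.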
def times(self, layer):
time_cache = caches['time'].get(self.time_cache_file, {'times': {}, 'layers': {}})
if layer.access_name not in time_cache['layers']:
logger.error("No layer ({}) in time cache, returning nothing".format(layer.access_name))
return []
ltv = time_cache['layers'].get(layer.access_name)
if ltv is None:
# This may legitimately be a layer without a time dimension, so return an empty list (no error message)
return []
if ltv in time_cache['times']:
return time_cache['times'][ltv]
else:
logger.error("No time ({}) in time cache, returning nothing".format(ltv))
return []
def depth_variable(self, layer):
with self.dataset() as nc:
try:
layer_var = nc.variables[layer.access_name]
for cv in layer_var.coordinates.strip().split():
try:
coord_var = nc.variables[cv]
if hasattr(coord_var, 'axis') and coord_var.axis.lower().strip() == 'z':
return coord_var
elif hasattr(coord_var, 'positive') and coord_var.positive.lower().strip() in ['up', 'down']:
return coord_var
except BaseException:
pass
except AttributeError:
pass
def _spatial_data_subset(self, data, spatial_index):
rows = spatial_index[0, :]
columns = spatial_index[1, :]
data_subset = data[rows, columns]
return data_subset
# same as ugrid
def depth_direction(self, layer):
d = self.depth_variable(layer)
if d is not None:
if hasattr(d, 'positive'):
return d.positive
return 'unknown'
def depths(self, layer):
""" sci-wms only deals in depth indexes at this time (no sigma) """
d = self.depth_variable(layer)
if d is not None:
return list(range(0, d.shape[0]))
return []
def humanize(self):
return "SGRID"
|
sci-wms/sci-wms
|
wms/models/datasets/sgrid.py
|
Python
|
gpl-3.0
| 23,182
|
[
"NetCDF"
] |
b5d6a76550c8d44d0a15aa1761728ca1b1099c9e1e047ff4f71906a3521578d7
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import string
import collections
import moldesign.molecules.atomcollections
try:
import pybel as pb
import openbabel as ob
# WARNING: this is the real library, not our interface - this works because of absolute
# imports. We should probably rename this interface
except ImportError:
force_remote = True
else: # this should be configurable
force_remote = False # debugging
import moldesign as mdt
from moldesign.compute.runsremotely import runsremotely
import moldesign.molecules.atoms
from moldesign import units as u
def exports(o):
__all__.append(o.__name__)
return o
__all__ = []
def read_file(filename, name=None, format=None):
""" Read a molecule from a file
Note:
Currently only reads the first conformation in a file
Args:
filename (str): path to file
name (str): name to assign to molecule
format (str): File format: pdb, sdf, mol2, bbll, etc.
Returns:
moldesign.Molecule: parsed result
"""
# TODO: check for openbabel molecule name?
if format is None:
format = filename.split('.')[-1]
if force_remote:
with open(filename, 'r') as infile:
mol = read_string(infile.read(), format, name=name)
return mol
else:
pbmol = pb.readfile(format=format, filename=filename).next()
if name is None: name = filename
mol = pybel_to_mol(pbmol, name=os.path.basename(name))
mol.filename = filename
return mol
def read_stream(filelike, format, name=None):
""" Read a molecule from a file-like object
Note:
Currently only reads the first conformation in a file
Args:
filelike: a file-like object to read a file from
format (str): File format: pdb, sdf, mol2, bbll, etc.
name (str): name to assign to molecule
Returns:
moldesign.Molecule: parsed result
"""
molstring = filelike.read()
return read_string(molstring, format, name=name)
@runsremotely(enable=force_remote)
def read_string(molstring, format, name=None):
""" Read a molecule from a file-like object
Note:
Currently only reads the first conformation in a file
Args:
molstring (str): string containing file contents
format (str): File format: pdb, sdf, mol2, bbll, etc.
name (str): name to assign to molecule
Returns:
moldesign.Molecule: parsed result
"""
pbmol = pb.readstring(format, molstring)
mol = pybel_to_mol(pbmol, name=name)
return mol
@runsremotely(enable=force_remote)
def write_string(mol, format):
""" Create a file from the passed molecule
Args:
mol (moldesign.Molecule): molecule to write
format (str): File format: pdb, sdf, mol2, bbll, etc.
Returns:
str: contents of the file
"""
pbmol = mol_to_pybel(mol)
outstr = pbmol.write(format=format)
return outstr
def write_file(mol, filename=None, mode='w', format=None):
""" Write molecule to a file
Args:
mol (moldesign.Molecule): molecule to write
filename (str): File to write to
mode (str): Writing mode (e.g. 'w' to overwrite, the default, or 'a' to append)
format (str): File format: pdb, sdf, mol2, bbll, etc.
"""
if format is None:
format = filename.split('.')[-1]
outstr = write_string(mol, format)
if filename is None:
return outstr
else:
with open(filename, mode) as wrf:
print >> wrf, outstr
@runsremotely(enable=force_remote)
def guess_bond_orders(mol):
"""Use OpenBabel to guess bond orders using geometry and functional group templates.
Args:
mol (moldesign.Molecule): Molecule to perceive the bonds of
Returns:
moldesign.Molecule: New molecule with assigned bonds
"""
# TODO: pH, formal charges
pbmol = mol_to_pybel(mol)
pbmol.OBMol.PerceiveBondOrders()
newmol = pybel_to_mol(pbmol)
return newmol
@runsremotely(enable=force_remote)
def add_hydrogen(mol, ph=None):
"""Add hydrogens to saturate atomic valences.
Args:
mol (moldesign.Molecule): Molecule to saturate
ph (float): Assign formal charges and protonation using pH model; if None (the default),
neutral protonation will be assigned where possible.
Returns:
moldesign.Molecule: New molecule with all valences saturated
"""
pbmol = mol_to_pybel(mol)
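# Note: only the correct-for-pH flag is forwarded below; the numeric ph value
# itself is not passed to OpenBabel, so its internal default pH is used.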
pbmol.OBMol.AddHydrogens(False,
ph is not None,)
newmol = pybel_to_mol(pbmol, reorder_atoms_by_residue=True)
mdt.helpers.assign_unique_hydrogen_names(newmol)
return newmol
@exports
def mol_to_pybel(mdtmol):
""" Translate a moldesign molecule object into a pybel molecule object.
Note:
The focus is on translating topology and biomolecular structure -
we don't translate any metadata.
Args:
mdtmol (moldesign.Molecule): molecule to translate
Returns:
pybel.Molecule: translated molecule
"""
obmol = ob.OBMol()
obmol.BeginModify()
atommap = {}
resmap = {}
for atom in mdtmol.atoms:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(atom.atnum)
atommap[atom] = obatom
pos = atom.position.value_in(u.angstrom)
obatom.SetVector(*pos)
if atom.residue and atom.residue not in resmap:
obres = obmol.NewResidue()
resmap[atom.residue] = obres
obres.SetChain(bytes(
mdt.utils.if_not_none(atom.chain.name, 'Z')[0] ))
obres.SetName(bytes(
mdt.utils.if_not_none(atom.residue.pdbname, 'UNL') ))
obres.SetNum(mdt.utils.if_not_none(atom.residue.pdbindex, '0'))
else:
obres = resmap[atom.residue]
obres.AddAtom(obatom)
obres.SetHetAtom(obatom, not atom.residue.is_standard_residue)
obres.SetAtomID(obatom, bytes(atom.name))
obres.SetSerialNum(obatom,
mdt.utils.if_not_none(atom.pdbindex, atom.index+1))
for atom in mdtmol.bond_graph:
a1 = atommap[atom]
for nbr, order in mdtmol.bond_graph[atom].iteritems():
a2 = atommap[nbr]
if a1.GetIdx() > a2.GetIdx():
obmol.AddBond(a1.GetIdx(), a2.GetIdx(), order)
obmol.EndModify()
pbmol = pb.Molecule(obmol)
for atom in atommap:
idx = atommap[atom].GetIdx()
obatom = obmol.GetAtom(idx)
obatom.SetFormalCharge(int(atom.formal_charge.value_in(u.q_e)))
return pbmol
@exports
def pybel_to_mol(pbmol,
atom_names=True,
reorder_atoms_by_residue=False,
primary_structure=True,
**kwargs):
""" Translate a pybel molecule object into a moldesign object.
Note:
The focus is on translating topology and biomolecular structure - we don't translate any metadata.
Args:
pbmol (pybel.Molecule): molecule to translate
atom_names (bool): use pybel's atom names (default True)
reorder_atoms_by_residue (bool): change atom order so that all atoms in a residue are stored
contiguously
primary_structure (bool): translate primary structure data as well as atomic data
**kwargs (dict): keyword arguments to moldesign.Molecule __init__ method
Returns:
moldesign.Molecule: translated molecule
"""
newatom_map = {}
newresidues = {}
newchains = {}
newatoms = moldesign.molecules.atomcollections.AtomList([])
backup_chain_names = list(string.ascii_uppercase)
for pybatom in pbmol.atoms:
obres = pybatom.OBAtom.GetResidue()
if atom_names:
name = obres.GetAtomID(pybatom.OBAtom).strip()
else:
name = None
if pybatom.atomicnum == 67:
print ("WARNING: openbabel parsed atom serial %d (name:%s) as Holmium; "
"correcting to hydrogen. ") % (pybatom.OBAtom.GetIdx(), name)
atnum = 1
elif pybatom.atomicnum == 0:
print "WARNING: openbabel failed to parse atom serial %d (name:%s); guessing %s. " % (
pybatom.OBAtom.GetIdx(), name, name[0])
atnum = moldesign.data.ATOMIC_NUMBERS[name[0]]
else:
atnum = pybatom.atomicnum
mdtatom = moldesign.molecules.atoms.Atom(atnum=atnum, name=name,
formal_charge=pybatom.formalcharge * u.q_e,
pdbname=name, pdbindex=pybatom.OBAtom.GetIdx())
newatom_map[pybatom.OBAtom.GetIdx()] = mdtatom
mdtatom.position = pybatom.coords * u.angstrom
if primary_structure:
obres = pybatom.OBAtom.GetResidue()
resname = obres.GetName()
residx = obres.GetIdx()
chain_id = obres.GetChain()
chain_id_num = obres.GetChainNum()
if chain_id_num not in newchains:
# create new chain
if not mdt.utils.is_printable(chain_id.strip()) or not chain_id.strip():
chain_id = backup_chain_names.pop()
print 'WARNING: assigned name %s to unnamed chain object @ %s' % (
chain_id, hex(chain_id_num))
chn = mdt.Chain(pdbname=str(chain_id))
newchains[chain_id_num] = chn
else:
chn = newchains[chain_id_num]
if residx not in newresidues:
# Create new residue
pdb_idx = obres.GetNum()
res = mdt.Residue(pdbname=resname,
pdbindex=pdb_idx)
newresidues[residx] = res
chn.add(res)
res.chain = chn
else:
res = newresidues[residx]
# Assign the atom
if mdtatom.name in res:
mdtatom.name = '%s%d' % (mdtatom.name, pybatom.idx) # prevent name clashes
res.add(mdtatom)
newatoms.append(mdtatom)
newtopo = {}
for ibond in xrange(pbmol.OBMol.NumBonds()):
obbond = pbmol.OBMol.GetBond(ibond)
a1 = newatom_map[obbond.GetBeginAtomIdx()]
a2 = newatom_map[obbond.GetEndAtomIdx()]
order = obbond.GetBondOrder()
if a1 not in newtopo:
newtopo[a1] = {}
if a2 not in newtopo:
newtopo[a2] = {}
newtopo[a1][a2] = order
newtopo[a2][a1] = order
if reorder_atoms_by_residue and primary_structure:
resorder = {}
for atom in newatoms:
resorder.setdefault(atom.residue, len(resorder))
newatoms.sort(key=lambda a: resorder[a.residue])
return mdt.Molecule(newatoms,
bond_graph=newtopo,
**kwargs)
@runsremotely(enable=force_remote)
def from_smiles(smi, name=None):
""" Translate a smiles string to a 3D structure.
This method uses OpenBabel to generate a plausible 3D conformation of the 2D SMILES topology.
We only use the first result from the conformation generator.
Args:
smi (str): smiles string
name (str): name to assign to molecule (default - the smiles string)
Returns:
moldesign.Molecule: the translated molecule
"""
if name is None: name = smi
pbmol = pb.readstring('smi', smi)
pbmol.addh()
pbmol.make3D()
mol = pybel_to_mol(pbmol,
name=name,
atom_names=False,
primary_structure=False)
for atom in mol.atoms:
atom.name = atom.elem + str(atom.index)
return mol
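# Illustrative sketch (not part of the original module): round-tripping a
# molecule through this interface using the functions defined above.
#
#   mol = from_smiles('CCO')             # 3D structure from SMILES
#   pdb_text = write_string(mol, 'pdb')  # serialize via OpenBabel
#   mol2 = read_string(pdb_text, 'pdb')  # parse it back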
|
tkzeng/molecular-design-toolkit
|
moldesign/interfaces/openbabel.py
|
Python
|
apache-2.0
| 12,337
|
[
"Pybel"
] |
8596c5421b7982641790aeb73ae5a3a24d753f789ccd388dff8220a9b701c751
|
# $Id$
#
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors as rdMD, Descriptors
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit import RDConfig
from rdkit.Geometry import rdGeometry as rdG
import unittest
def feq(v1, v2, tol=1.e-4) :
return abs(v1-v2) < tol
class TestCase(unittest.TestCase) :
def setUp(self):
pass
def testAtomPairTypes(self):
params = rdMD.AtomPairsParameters
mol = Chem.MolFromSmiles("C=C");
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(0))==\
rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(1)))
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(0))==\
1 | (1 | 1<<params.numPiBits)<<params.numBranchBits)
mol = Chem.MolFromSmiles("C#CO");
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(0))!=\
rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(1)))
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(0))==\
1 | (2 | 1<<params.numPiBits)<<params.numBranchBits)
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(1))==\
2 | (2 | 1<<params.numPiBits)<<params.numBranchBits)
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(2))==\
1 | (0 | 3<<params.numPiBits)<<params.numBranchBits)
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(1),1)==\
1 | (2 | 1<<params.numPiBits)<<params.numBranchBits)
self.assertTrue(rdMD.GetAtomPairAtomCode(mol.GetAtomWithIdx(1),2)==\
0 | (2 | 1<<params.numPiBits)<<params.numBranchBits)
def testAtomPairs(self):
m = Chem.MolFromSmiles('CCC')
fp1 = rdMD.GetAtomPairFingerprint(m)
fp2 = rdMD.GetAtomPairFingerprint(m,minLength=1,maxLength=2)
nz1 = fp1.GetNonzeroElements()
self.assertEqual(len(nz1),2)
nz2 = fp2.GetNonzeroElements()
self.assertEqual(len(nz2),2)
fp2 = rdMD.GetAtomPairFingerprint(m,minLength=1,maxLength=1)
nz2 = fp2.GetNonzeroElements()
self.assertEqual(len(nz2),1)
def testHashedAtomPairs(self):
m = Chem.MolFromSmiles('c1ccccc1')
fp1 = rdMD.GetHashedAtomPairFingerprint(m,2048)
fp2 = rdMD.GetHashedAtomPairFingerprint(m,2048,1,3)
self.assertTrue(fp1==fp2)
fp2 = rdMD.GetHashedAtomPairFingerprint(m,2048,1,2)
sim= DataStructs.DiceSimilarity(fp1,fp2)
self.assertTrue(sim>0.0 and sim<1.0)
m = Chem.MolFromSmiles('c1ccccn1')
fp2 = rdMD.GetHashedAtomPairFingerprint(m,2048)
sim= DataStructs.DiceSimilarity(fp1,fp2)
self.assertTrue(sim>0.0 and sim<1.0)
m = Chem.MolFromSmiles('c1ccccc1')
fp1 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m,2048)
m = Chem.MolFromSmiles('c1ccccn1')
fp2 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m,2048)
sim= DataStructs.DiceSimilarity(fp1,fp2)
self.assertTrue(sim>0.0 and sim<1.0)
def testRootedAtomPairs(self):
m = Chem.MolFromSmiles('Oc1ccccc1')
fp1 = rdMD.GetAtomPairFingerprint(m)
fp2 = rdMD.GetAtomPairFingerprint(m,fromAtoms=(0,))
nz1 = fp1.GetNonzeroElements()
nz2 = fp2.GetNonzeroElements()
for k,v in nz2.items():
self.assertTrue(v<=nz1[k])
def testTopologicalTorsions(self):
mol = Chem.MolFromSmiles("CC");
fp = rdMD.GetTopologicalTorsionFingerprint(mol)
self.assertTrue(fp.GetTotalVal()==0)
mol = Chem.MolFromSmiles("CCCC");
fp = rdMD.GetTopologicalTorsionFingerprint(mol)
self.assertTrue(fp.GetTotalVal()==1)
fp = rdMD.GetTopologicalTorsionFingerprint(mol,3)
self.assertTrue(fp.GetTotalVal()==2)
mol = Chem.MolFromSmiles("CCCO");
fp = rdMD.GetTopologicalTorsionFingerprint(mol)
self.assertTrue(fp.GetTotalVal()==1)
fp = rdMD.GetTopologicalTorsionFingerprint(mol,3)
self.assertTrue(fp.GetTotalVal()==2)
mol = Chem.MolFromSmiles("CCCCCCCCCCC");
fp = rdMD.GetTopologicalTorsionFingerprint(mol,7)
self.assertRaises(ValueError,lambda : rdMD.GetTopologicalTorsionFingerprint(mol,8))
def testHashedTopologicalTorsions(self):
mol = Chem.MolFromSmiles("c1ncccc1");
fp1 = rdMD.GetHashedTopologicalTorsionFingerprint(mol)
mol = Chem.MolFromSmiles("n1ccccc1");
fp2 = rdMD.GetHashedTopologicalTorsionFingerprint(mol)
self.assertEqual(DataStructs.DiceSimilarity(fp1,fp2),1.0)
def testRootedTorsions(self):
m = Chem.MolFromSmiles('Oc1ccccc1')
fp1 = rdMD.GetTopologicalTorsionFingerprint(m)
fp2 = rdMD.GetTopologicalTorsionFingerprint(m,fromAtoms=(0,))
nz1 = fp1.GetNonzeroElements()
nz2 = fp2.GetNonzeroElements()
for k,v in nz2.items():
self.assertTrue(v<=nz1[k])
m = Chem.MolFromSmiles('COCC')
fp1 = rdMD.GetTopologicalTorsionFingerprint(m)
self.assertEqual(len(fp1.GetNonzeroElements()),1)
fp1 = rdMD.GetTopologicalTorsionFingerprint(m,fromAtoms=(0,))
self.assertEqual(len(fp1.GetNonzeroElements()),1)
fp1 = rdMD.GetTopologicalTorsionFingerprint(m,fromAtoms=(1,))
self.assertEqual(len(fp1.GetNonzeroElements()),0)
def testMorganFingerprints(self):
mol = Chem.MolFromSmiles('CC(F)(Cl)C(F)(Cl)C')
fp = rdMD.GetMorganFingerprint(mol,0)
self.assertTrue(len(fp.GetNonzeroElements())==4)
mol = Chem.MolFromSmiles('CC')
fp = rdMD.GetMorganFingerprint(mol,0)
self.assertTrue(len(fp.GetNonzeroElements())==1)
self.assertTrue(list(fp.GetNonzeroElements().values())[0]==2)
fp = rdMD.GetMorganFingerprint(mol,0,useCounts=False)
self.assertTrue(len(fp.GetNonzeroElements())==1)
self.assertTrue(list(fp.GetNonzeroElements().values())[0]==1)
mol = Chem.MolFromSmiles('CC(F)(Cl)C(F)(Cl)C')
fp = rdMD.GetHashedMorganFingerprint(mol,0)
self.assertTrue(len(fp.GetNonzeroElements())==4)
fp = rdMD.GetMorganFingerprint(mol,1)
self.assertTrue(len(fp.GetNonzeroElements())==8)
fp = rdMD.GetHashedMorganFingerprint(mol,1)
self.assertTrue(len(fp.GetNonzeroElements())==8)
fp = rdMD.GetMorganFingerprint(mol,2)
self.assertTrue(len(fp.GetNonzeroElements())==9)
mol = Chem.MolFromSmiles('CC(F)(Cl)[C@](F)(Cl)C')
fp = rdMD.GetMorganFingerprint(mol,0)
self.assertTrue(len(fp.GetNonzeroElements())==4)
fp = rdMD.GetMorganFingerprint(mol,1)
self.assertTrue(len(fp.GetNonzeroElements())==8)
fp = rdMD.GetMorganFingerprint(mol,2)
self.assertTrue(len(fp.GetNonzeroElements())==9)
fp = rdMD.GetMorganFingerprint(mol,0,useChirality=True)
self.assertTrue(len(fp.GetNonzeroElements())==4)
fp = rdMD.GetMorganFingerprint(mol,1,useChirality=True)
self.assertTrue(len(fp.GetNonzeroElements())==9)
fp = rdMD.GetMorganFingerprint(mol,2,useChirality=True)
self.assertTrue(len(fp.GetNonzeroElements())==10)
mol = Chem.MolFromSmiles('CCCCC')
fp = rdMD.GetMorganFingerprint(mol,0,fromAtoms=(0,))
self.assertTrue(len(fp.GetNonzeroElements())==1)
mol = Chem.MolFromSmiles('CC1CC1')
vs1 = rdMD.GetConnectivityInvariants(mol)
self.assertEqual(len(vs1),mol.GetNumAtoms())
fp1 = rdMD.GetMorganFingerprint(mol,2,invariants=vs1)
fp2 = rdMD.GetMorganFingerprint(mol,2)
self.assertEqual(fp1,fp2)
vs2 = rdMD.GetConnectivityInvariants(mol,False)
self.assertEqual(len(vs2),mol.GetNumAtoms())
self.assertNotEqual(vs1,vs2)
fp1 = rdMD.GetMorganFingerprint(mol,2,invariants=vs2)
self.assertNotEqual(fp1,fp2)
mol = Chem.MolFromSmiles('Cc1ccccc1')
vs1 = rdMD.GetFeatureInvariants(mol)
self.assertEqual(len(vs1),mol.GetNumAtoms())
self.assertEqual(vs1[0],0)
self.assertNotEqual(vs1[1],0)
self.assertEqual(vs1[1],vs1[2])
self.assertEqual(vs1[1],vs1[3])
self.assertEqual(vs1[1],vs1[4])
mol = Chem.MolFromSmiles('FCCCl')
vs1 = rdMD.GetFeatureInvariants(mol)
self.assertEqual(len(vs1),mol.GetNumAtoms())
self.assertEqual(vs1[1],0)
self.assertEqual(vs1[2],0)
self.assertNotEqual(vs1[0],0)
self.assertEqual(vs1[0],vs1[3])
fp1 = rdMD.GetMorganFingerprint(mol,0,invariants=vs1)
fp2 = rdMD.GetMorganFingerprint(mol,0,useFeatures=True)
self.assertEqual(fp1,fp2)
def testCrippen(self):
mol = Chem.MolFromSmiles("n1ccccc1CO");
contribs = rdMD._CalcCrippenContribs(mol)
self.assertEqual(len(contribs),mol.GetNumAtoms());
ts = [0]*mol.GetNumAtoms()
contribs = rdMD._CalcCrippenContribs(mol,force=True,atomTypes=ts)
self.assertEqual(ts,[59, 25, 25, 25, 25, 28, 17, 69])
ls = ['']*mol.GetNumAtoms()
contribs = rdMD._CalcCrippenContribs(mol,force=True,atomTypeLabels=ls)
self.assertEqual(ls,['N11', 'C18', 'C18', 'C18', 'C18', 'C21', 'C10', 'O2'])
def testMolWt(self):
mol = Chem.MolFromSmiles("C");
amw = rdMD._CalcMolWt(mol);
self.assertTrue(feq(amw,16.043,.001));
amw = rdMD._CalcMolWt(mol,True);
self.assertTrue(feq(amw,12.011,.001));
mol2 = Chem.AddHs(mol);
amw = rdMD._CalcMolWt(mol2);
self.assertTrue(feq(amw,16.043,.001));
amw = rdMD._CalcMolWt(mol2,True);
self.assertTrue(feq(amw,12.011,.001));
mol = Chem.MolFromSmiles("C");
amw = rdMD.CalcExactMolWt(mol);
self.assertTrue(feq(amw,16.031,.001));
def testPairValues(self):
import base64
testD=(('CCCO',b'AQAAAAQAAAAAAIAABgAAACGECAABAAAAIoQIAAEAAABBhAgAAQAAACNEGAABAAAAQUQYAAEAAABC\nRBgAAQAAAA==\n'),
('CNc1ccco1',b'AQAAAAQAAAAAAIAAEAAAACOECgABAAAAJIQKAAIAAABBhQoAAgAAAEKFCgABAAAAIsQKAAEAAABB\nxQoAAQAAAELFCgACAAAAIYQQAAEAAABChRAAAQAAAEOFEAACAAAAYYUQAAEAAAAjhBoAAQAAAEGF\nGgABAAAAQoUaAAIAAABhhRoAAQAAAEKIGgABAAAA\n'),
)
for smi,txt in testD:
pkl = base64.b64decode(txt)
fp = rdMD.GetAtomPairFingerprint(Chem.MolFromSmiles(smi))
fp2 = DataStructs.IntSparseIntVect(pkl)
self.assertEqual(DataStructs.DiceSimilarity(fp,fp2),1.0)
self.assertEqual(fp,fp2)
def testTorsionValues(self):
import base64
testD=(('CCCO',b'AQAAAAgAAAD/////DwAAAAEAAAAAAAAAIECAAAMAAAABAAAA\n'),
('CNc1ccco1',b'AQAAAAgAAAD/////DwAAAAkAAAAAAAAAIICkSAEAAAABAAAAKVKgSQEAAAABAAAAKVCgUAEAAAAB\nAAAAKVCgUQEAAAABAAAAKVCkCAIAAAABAAAAKdCkCAIAAAABAAAAKVCgSAMAAAABAAAAKVCkSAMA\nAAABAAAAIICkSAMAAAABAAAA\n'),
)
for smi,txt in testD:
pkl = base64.b64decode(txt)
fp = rdMD.GetTopologicalTorsionFingerprint(Chem.MolFromSmiles(smi))
fp2 = DataStructs.LongSparseIntVect(pkl)
self.assertEqual(DataStructs.DiceSimilarity(fp,fp2),1.0)
self.assertEqual(fp,fp2)
def testAtomPairOptions(self):
m1 = Chem.MolFromSmiles('c1ccccc1')
m2 = Chem.MolFromSmiles('c1ccccn1')
fp1 = rdMD.GetAtomPairFingerprint(m1)
fp2 = rdMD.GetAtomPairFingerprint(m2)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetAtomPairFingerprint(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetAtomPairFingerprint(m2,atomInvariants=[1]*6)
self.assertEqual(fp1,fp2)
fp1 = rdMD.GetAtomPairFingerprint(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetAtomPairFingerprint(m2,atomInvariants=[2]*6)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m1)
fp2 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m2)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m2,atomInvariants=[1]*6)
self.assertEqual(fp1,fp2)
fp1 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetHashedAtomPairFingerprintAsBitVect(m2,atomInvariants=[2]*6)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetTopologicalTorsionFingerprint(m1)
fp2 = rdMD.GetTopologicalTorsionFingerprint(m2)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetTopologicalTorsionFingerprint(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetTopologicalTorsionFingerprint(m2,atomInvariants=[1]*6)
self.assertEqual(fp1,fp2)
fp1 = rdMD.GetTopologicalTorsionFingerprint(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetTopologicalTorsionFingerprint(m2,atomInvariants=[2]*6)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m1)
fp2 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m2)
self.assertNotEqual(fp1,fp2)
fp1 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m2,atomInvariants=[1]*6)
self.assertEqual(fp1,fp2)
fp1 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m1,atomInvariants=[1]*6)
fp2 = rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m2,atomInvariants=[2]*6)
self.assertNotEqual(fp1,fp2)
def testMolFormula(self):
m = Chem.MolFromSmiles("[2H]C([3H])O")
formula = rdMD.CalcMolFormula(m)
self.assertEqual(formula,'CH4O')
formula = rdMD.CalcMolFormula(m,separateIsotopes=True)
self.assertEqual(formula,'CH2DTO')
formula = rdMD.CalcMolFormula(m,separateIsotopes=True,abbreviateHIsotopes=False)
self.assertEqual(formula,'CH2[2H][3H]O')
m = Chem.MolFromSmiles("[2H][13CH2]CO")
formula = rdMD.CalcMolFormula(m)
self.assertEqual(formula,'C2H6O')
formula = rdMD.CalcMolFormula(m,separateIsotopes=True)
self.assertEqual(formula,'C[13C]H5DO')
def testSpiroAndBridgeheads(self):
m = Chem.MolFromSmiles("C1CC2CCC1CC2")
self.assertEqual(rdMD.CalcNumSpiroAtoms(m),0)
sa = []
self.assertEqual(rdMD.CalcNumSpiroAtoms(m,atoms=sa),0)
self.assertEqual(len(sa),0)
self.assertEqual(rdMD.CalcNumBridgeheadAtoms(m),2)
sa = []
self.assertEqual(rdMD.CalcNumBridgeheadAtoms(m,atoms=sa),2)
self.assertEqual(len(sa),2)
self.assertEqual(sorted(sa),[2,5])
m = Chem.MolFromSmiles("C1CCC2(C1)CC1CCC2CC1")
self.assertEqual(rdMD.CalcNumSpiroAtoms(m),1)
sa = []
self.assertEqual(rdMD.CalcNumSpiroAtoms(m,atoms=sa),1)
self.assertEqual(len(sa),1)
self.assertEqual(sorted(sa),[3])
self.assertEqual(rdMD.CalcNumBridgeheadAtoms(m),2)
sa = []
self.assertEqual(rdMD.CalcNumBridgeheadAtoms(m,atoms=sa),2)
self.assertEqual(len(sa),2)
self.assertEqual(sorted(sa),[6,9])
def testNumRotatableBonds(self):
for s in ["C1CC1CC",
"CCNC(=O)NCC",
'Cc1cccc(C)c1c1c(C)cccc1C',
'CCc1cccc(C)c1c1c(C)cccc1CC',
'Cc1cccc(C)c1c1c(C)nccc1C',
'Cc1cccc(C)c1c1c(C)cccc1',
'CCO',
]:
m = Chem.MolFromSmiles(s)
v1 = rdMD.CalcNumRotatableBonds(m)
v2 = rdMD.CalcNumRotatableBonds(m, False)
v3 = rdMD.CalcNumRotatableBonds(m, True)
v4 = rdMD.CalcNumRotatableBonds(m, rdMD.NumRotatableBondsOptions.Default)
v5 = rdMD.CalcNumRotatableBonds(m, rdMD.NumRotatableBondsOptions.NonStrict)
v6 = rdMD.CalcNumRotatableBonds(m, rdMD.NumRotatableBondsOptions.Strict)
v7 = rdMD.CalcNumRotatableBonds(m, rdMD.NumRotatableBondsOptions.StrictLinkages)
self.assertEqual(v1, v4)
self.assertEqual(v2, v5)
self.assertEqual(v3, v6)
def testProperties(self):
props = rdMD.Properties()
names = list(props.GetAvailableProperties())
self.assertEqual(names, list(props.GetPropertyNames()))
m = Chem.MolFromSmiles("C1CC1CC")
results = props.ComputeProperties(m)
for i,name in enumerate(names):
props = rdMD.Properties([name])
res = props.ComputeProperties(m)
self.assertEqual(len(res), 1)
self.assertEqual(res[0], results[i])
self.assertEqual(props.GetPropertyNames()[0], names[i])
self.assertEqual(len(props.GetPropertyNames()), 1)
try:
props = rdMD.Properties([1,2,3])
self.assertEquals("should not get here", "but did")
except TypeError:
pass
try:
props = rdMD.Properties(["property that doesn't exist"])
self.assertEquals("should not get here", "but did")
except RuntimeError:
pass
def testPythonDescriptorFunctor(self):
class NumAtoms(Descriptors.PropertyFunctor):
def __init__(self):
Descriptors.PropertyFunctor.__init__(self, "NumAtoms", "1.0.0")
def __call__(self, mol):
return mol.GetNumAtoms()
numAtoms = NumAtoms()
rdMD.Properties.RegisterProperty(numAtoms)
props = rdMD.Properties(["NumAtoms"])
self.assertEqual(1, props.ComputeProperties(Chem.MolFromSmiles("C"))[0])
self.assertTrue("NumAtoms" in rdMD.Properties.GetAvailableProperties())
# check memory
del numAtoms
self.assertEqual(1, props.ComputeProperties(Chem.MolFromSmiles("C"))[0])
self.assertTrue("NumAtoms" in rdMD.Properties.GetAvailableProperties())
m = Chem.MolFromSmiles("c1ccccc1")
properties = rdMD.Properties()
for name, value in zip(properties.GetPropertyNames(), properties.ComputeProperties(m)):
print(name, value)
properties = rdMD.Properties(['exactmw', 'lipinskiHBA'])
for name, value in zip(properties.GetPropertyNames(), properties.ComputeProperties(m)):
print(name, value)
def testPropertyRanges(self):
query = rdMD.MakePropertyRangeQuery("exactmw", 0, 1000)
self.assertTrue(query.Match(Chem.MolFromSmiles("C")))
query = rdMD.MakePropertyRangeQuery("exactmw", 1000, 10000)
self.assertFalse(query.Match(Chem.MolFromSmiles("C")))
if __name__ == '__main__':
unittest.main()
|
adalke/rdkit
|
Code/GraphMol/Descriptors/Wrap/testMolDescriptors.py
|
Python
|
bsd-3-clause
| 17,455
|
[
"RDKit"
] |
5d90b7f06dc18d7dc02d422ff47df436197ad3df4d10d7df59995de8389a37d3
|
#!/usr/bin/env python
import sys
# WE RELY ON THESE BEING SET !!!
# set default verbose level
# verbose = os.environ.get('RADICAL_PILOT_VERBOSE', 'REPORT')
# os.environ['RADICAL_PILOT_VERBOSE'] = verbose
# set default URL to IMP Mongo DB
# path_to_db = os.environ.get(
# 'RADICAL_PILOT_DBURL', "mongodb://ensembletk.imp.fu-berlin.de:27017/rp")
# assume we run a local
# path_to_db = os.environ.get(
# 'RADICAL_PILOT_DBURL', "mongodb://localhost:27017/rp")
#
# os.environ['RADICAL_PILOT_DBURL'] = path_to_db
# import adaptive components
from adaptivemd import Project
from adaptivemd import AllegroCluster
from adaptivemd import OpenMMEngine4CUDA
from adaptivemd import PyEMMAAnalysis
from adaptivemd import File
if __name__ == '__main__':
project = Project('testcase')
# --------------------------------------------------------------------------
# CREATE THE RESOURCE
# the instance to know about the place where we run simulations
# --------------------------------------------------------------------------
resource_id = 'fub.allegro'
project.initialize(AllegroCluster())
# --------------------------------------------------------------------------
# CREATE THE ENGINE
# the instance to create trajectories
# --------------------------------------------------------------------------
pdb_file = File('file://../files/alanine/alanine.pdb').named('initial_pdb')
engine = OpenMMEngine4CUDA(
pdb_file=pdb_file,
system_file=File('file://../files/alanine/system.xml'),
integrator_file=File('file://../files/alanine/integrator.xml'),
args='-r --report-interval 10 --store-interval 10'
).named('openmm')
# --------------------------------------------------------------------------
# CREATE AN ANALYZER
# the instance that knows how to compute a msm from the trajectories
# --------------------------------------------------------------------------
modeller = PyEMMAAnalysis(
pdb_file=pdb_file
).named('pyemma')
project.generators.add(engine)
project.generators.add(modeller)
# --------------------------------------------------------------------------
# CREATE THE CLUSTER
# the instance that runs the simulations on the resource
# --------------------------------------------------------------------------
scheduler = project.get_scheduler('gpu', cores=1, runtime=4*24*60)
# create 4 blocks a 4 trajectories
trajectories = [project.new_trajectory(engine['pdb_file'], 100, 4) for _ in range(4)]
tasks = map(engine.run, trajectories)
print trajectories
# submit
scheduler(tasks)
scheduler.wait()
# now start adaptive loop
for f in project.trajectories:
print f.url
for loop in range(2):
trajectories = [project.new_ml_trajectory(length=100, number=4) for _ in range(4)]
print trajectories
tasks = map(engine.run, trajectories)
finals = scheduler(tasks)
scheduler.wait()
for f in project.trajectories:
print f.url
task = scheduler(modeller.execute(list(project.trajectories)))
scheduler.wait()
# print
#
# CURSOR_UP_ONE = '\x1b[1A'
# ERASE_LINE = '\x1b[2K'
#
# while ev:
# sys.stdout.write(CURSOR_UP_ONE + ERASE_LINE)
# sys.stdout.write('# of trajectories : %8d / # of models : %8d \n' % (
# len(project.trajectories),
# len(project.models)
# ))
# sys.stdout.flush()
# time.sleep(5.0)
scheduler.exit()
project.close()
|
jrossyra/adaptivemd
|
examples/tutorial/test_adaptive.py
|
Python
|
lgpl-2.1
| 3,628
|
[
"OpenMM"
] |
53016ecc975e6604658bfe2bd6993ae66cedab1b802a63a40ebaa126c458e393
|
"""
Logger class
"""
import fcntl
import os
from collections import OrderedDict as od
def _create_path(system, style='folder'):
"""
Create a path from the system.
Parameters
----------
system : lammps System
The system to create path from
style : {'folder', 'hash'}
Style to use. Folder creates a path that mimics a folder
structure similar to that of the system parameters. Hash creates
a hash integer from the system dictionary.
Returns
-------
path : string
Path relative to root_path in which we will insert the data
"""
prefix = od([('lambda', 'l'), ('N', 'N'), ('expansion', 'exp'),
('potential', ''), ('x', 'x'), ('d', 'd'), ('T', 'T')])
if style == 'folder':
path = ''
for key in prefix.keys():
if key in system.keys():
path = path + ''.join((prefix[key], str(system[key]))) + '/'
for key, val in zip(system.keys(), system.values()):
if key not in prefix.keys():
path = path + ''.join(map(str, (key, val))) + '/'
elif style == 'hash':
path = str(hash(frozenset(system)))
else:
raise ValueError("The style {0} wasn't found".format(style))
return path
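# Illustrative example (not part of the original module): for a hypothetical
# system mapping like {'N': 100, 'T': 2.0, 'cutoff': 5.0}, the 'folder' style
# yields 'N100/T2.0/cutoff5.0/': prefixed keys come first, in the prefix
# order, and any remaining keys are appended afterwards.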
class Logger(object):
"""
Main logger class. Can either plot or write the data on a text file
"""
def __init__(self, system, root_path='./data', style='folder'):
"""
Constructor.
Parameters
----------
system : System
The molecular dynamics system. From it we will extract the
global parameters that define the simulation
root_path : str, optional
Root path in which we will store all the information
style : {'folder', 'hash'}
Style to use. Folder creates a path that mimics a folder
structure similar to that of the system parameters. Hash creates
a hash integer from the system dictionary.
"""
identifier = [' '.join(map(str, (i, system[i]))) for i in system]
path = _create_path(system, style)
self.path = '{0}/{1}'.format(root_path, path)
os.makedirs(self.path)
identifier.append('id {0}'.format(path))
success = False
while not success:
try:
fdb = open('{0}/db.dat'.format(root_path), 'a')
fcntl.flock(fdb, fcntl.LOCK_EX)
print>>fdb, ', '.join(identifier)
fcntl.flock(fdb, fcntl.LOCK_UN)
fdb.close()
success = True
except IOError:
pass
fkey = open('{0}/key.dat'.format(self.path), 'w')
print>>fkey, ', '.join(identifier)
fkey.close()
system.lmp.command('log {0}/log.lammps'.format(self.path))
def dump(self, system, style='text'):
"""
Dump the system information.
Parameters
----------
system : System
The molecular dynamics system. It needs to have a dump method
in it. It can be eventually changed so it can be ported
outside lammps.
style : {'text', 'image'}
Style to use when dumping
"""
system.dump('{0}'.format(self.path), style)
def log(self, analyzer):
"""
Write the logging of the analyzer.
Parameters
----------
analyzer : Analyzer
The analyzer to be logged
"""
analyzer.log('{0}'.format(self.path))
def plot(self, analyzer):
"""
Plot the result of the analyzer.
Parameters
----------
analyzer : Analyzer
The analyzer to be logged
"""
analyzer.plot('{0}'.format(self.path))
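# Minimal usage sketch (illustrative only; assumes a dict-like `system` that
# carries the global simulation parameters plus the `lmp` handle and `dump`
# method required above, and an `analyzer` exposing `log` and `plot` methods;
# neither object is defined in this module):
#
# logger = Logger(system, root_path='./data', style='folder')
# logger.dump(system, style='text') # snapshot written under logger.path
# logger.log(analyzer) # analyzer text output in the same directory
# logger.plot(analyzer) # analyzer figures in the same directory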
|
pabloalcain/lammps-python
|
pylammps/Logger.py
|
Python
|
gpl-3.0
| 3,430
|
[
"LAMMPS"
] |
721ebed5f7ce02c620ed543da4e4dfde003b44bc235ad1231778a973aff9a574
|
# (C) British Crown Copyright 2013 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf.Saver` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
import netCDF4 as nc
import numpy as np
import iris
from iris.coord_systems import GeogCS, TransverseMercator, RotatedGeogCS
from iris.coords import DimCoord
from iris.cube import Cube
from iris.fileformats.netcdf import Saver
import iris.tests.stock as stock
class Test_write(tests.IrisTest):
def _transverse_mercator_cube(self, ellipsoid=None):
data = np.arange(12).reshape(3, 4)
cube = Cube(data, 'air_pressure_anomaly')
trans_merc = TransverseMercator(49.0, -2.0, -400000.0, 100000.0,
0.9996012717, ellipsoid)
coord = DimCoord(range(3), 'projection_y_coordinate', units='m',
coord_system=trans_merc)
cube.add_dim_coord(coord, 0)
coord = DimCoord(range(4), 'projection_x_coordinate', units='m',
coord_system=trans_merc)
cube.add_dim_coord(coord, 1)
return cube
def test_transverse_mercator(self):
# Create a Cube with a transverse Mercator coordinate system.
ellipsoid = GeogCS(6377563.396, 6356256.909)
cube = self._transverse_mercator_cube(ellipsoid)
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube)
self.assertCDL(nc_path)
def test_transverse_mercator_no_ellipsoid(self):
# Create a Cube with a transverse Mercator coordinate system.
cube = self._transverse_mercator_cube()
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube)
self.assertCDL(nc_path)
def _simple_cube(self, dtype):
data = np.arange(12, dtype=dtype).reshape(3, 4)
points = np.arange(3, dtype=dtype)
bounds = np.arange(6, dtype=dtype).reshape(3, 2)
cube = Cube(data, 'air_pressure_anomaly')
coord = DimCoord(points, bounds=bounds)
cube.add_dim_coord(coord, 0)
return cube
def test_little_endian(self):
# Create a Cube with little-endian data.
cube = self._simple_cube('<f4')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube)
self.assertCDL(nc_path, basename='endian', flags='')
def test_big_endian(self):
# Create a Cube with big-endian data.
cube = self._simple_cube('>f4')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube)
self.assertCDL(nc_path, basename='endian', flags='')
def test_zlib(self):
cube = self._simple_cube('>f4')
with mock.patch('iris.fileformats.netcdf.netCDF4') as api:
with Saver('/dummy/path', 'NETCDF4') as saver:
saver.write(cube, zlib=True)
dataset = api.Dataset.return_value
create_var_calls = mock.call.createVariable(
'air_pressure_anomaly', np.dtype('float32'), ['dim0', 'dim1'],
fill_value=None, shuffle=True, least_significant_digit=None,
contiguous=False, zlib=True, fletcher32=False,
endian='native', complevel=4, chunksizes=None).call_list()
dataset.assert_has_calls(create_var_calls)
def test_least_significant_digit(self):
cube = Cube(np.array([1.23, 4.56, 7.89]),
standard_name='surface_temperature', long_name=None,
var_name='temp', units='K')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube, least_significant_digit=1)
cube_saved = iris.load_cube(nc_path)
            self.assertEqual(
cube_saved.attributes['least_significant_digit'], 1)
self.assertFalse(np.all(cube.data == cube_saved.data))
self.assertArrayAllClose(cube.data, cube_saved.data, 0.1)
def test_default_unlimited_dimensions(self):
cube = self._simple_cube('>f4')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube)
ds = nc.Dataset(nc_path)
self.assertTrue(ds.dimensions['dim0'].isunlimited())
self.assertFalse(ds.dimensions['dim1'].isunlimited())
ds.close()
def test_no_unlimited_dimensions(self):
cube = self._simple_cube('>f4')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube, unlimited_dimensions=[])
ds = nc.Dataset(nc_path)
for dim in ds.dimensions.itervalues():
self.assertFalse(dim.isunlimited())
ds.close()
def test_invalid_unlimited_dimensions(self):
cube = self._simple_cube('>f4')
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
# should not raise an exception
saver.write(cube, unlimited_dimensions=['not_found'])
def test_custom_unlimited_dimensions(self):
cube = self._transverse_mercator_cube()
unlimited_dimensions = ['projection_y_coordinate',
'projection_x_coordinate']
# test coordinates by name
with self.temp_filename('.nc') as nc_path:
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube, unlimited_dimensions=unlimited_dimensions)
ds = nc.Dataset(nc_path)
for dim in unlimited_dimensions:
self.assertTrue(ds.dimensions[dim].isunlimited())
ds.close()
# test coordinate arguments
with self.temp_filename('.nc') as nc_path:
coords = [cube.coord(dim) for dim in unlimited_dimensions]
with Saver(nc_path, 'NETCDF4') as saver:
saver.write(cube, unlimited_dimensions=coords)
ds = nc.Dataset(nc_path)
for dim in unlimited_dimensions:
self.assertTrue(ds.dimensions[dim].isunlimited())
ds.close()
class TestCoordSystems(tests.IrisTest):
def cube_with_cs(self, coord_system,
names=['grid_longitude', 'grid_latitude']):
cube = stock.lat_lon_cube()
x, y = cube.coord('longitude'), cube.coord('latitude')
x.coord_system = y.coord_system = coord_system
for coord, name in zip([x, y], names):
coord.rename(name)
return cube
def construct_cf_grid_mapping_variable(self, cube):
# Calls the actual NetCDF saver with appropriate mocking, returning
# the grid variable that gets created.
grid_variable = mock.Mock(name='NetCDFVariable')
create_var_fn = mock.Mock(side_effect=[grid_variable])
dataset = mock.Mock(variables=[],
createVariable=create_var_fn)
saver = mock.Mock(spec=Saver, _coord_systems=[],
_dataset=dataset)
variable = mock.Mock()
Saver._create_cf_grid_mapping(saver, cube, variable)
self.assertEqual(create_var_fn.call_count, 1)
self.assertEqual(variable.grid_mapping,
grid_variable.grid_mapping_name)
return grid_variable
def variable_attributes(self, mocked_variable):
"""Get the attributes dictionary from a mocked NetCDF variable."""
# Get the attributes defined on the mock object.
attributes = filter(lambda name: not name.startswith('_'),
sorted(mocked_variable.__dict__.keys()))
attributes.remove('method_calls')
return {key: getattr(mocked_variable, key) for key in attributes}
def test_rotated_geog_cs(self):
coord_system = RotatedGeogCS(37.5, 177.5, ellipsoid=GeogCS(6371229.0))
cube = self.cube_with_cs(coord_system)
expected = {'grid_mapping_name': 'rotated_latitude_longitude',
'north_pole_grid_longitude': 0.0,
'grid_north_pole_longitude': 177.5,
'grid_north_pole_latitude': 37.5,
'longitude_of_prime_meridian': 0.0,
'earth_radius': 6371229.0,
}
grid_variable = self.construct_cf_grid_mapping_variable(cube)
actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that the keys are the same.
self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
# Now check that the values are equivalent.
self.assertEqual(actual, expected)
def test_spherical_geog_cs(self):
coord_system = GeogCS(6371229.0)
cube = self.cube_with_cs(coord_system)
expected = {'grid_mapping_name': 'latitude_longitude',
'longitude_of_prime_meridian': 0.0,
'earth_radius': 6371229.0
}
grid_variable = self.construct_cf_grid_mapping_variable(cube)
actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that the keys are the same.
self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
# Now check that the values are equivalent.
self.assertEqual(actual, expected)
def test_elliptic_geog_cs(self):
coord_system = GeogCS(637, 600)
cube = self.cube_with_cs(coord_system)
expected = {'grid_mapping_name': 'latitude_longitude',
'longitude_of_prime_meridian': 0.0,
'semi_minor_axis': 600.0,
'semi_major_axis': 637.0,
}
grid_variable = self.construct_cf_grid_mapping_variable(cube)
actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that the keys are the same.
self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
# Now check that the values are equivalent.
self.assertEqual(actual, expected)
if __name__ == "__main__":
tests.main()
|
scollis/iris
|
lib/iris/tests/unit/fileformats/netcdf/test_Saver.py
|
Python
|
gpl-3.0
| 11,072
|
[
"NetCDF"
] |
3cabfa0996c8c88d6ab144c543cc5bf9f08b3dab30a4e21b0794b54a259513df
|
#!/usr/bin/env python
"""Automatically install required tools and data to run bcbio-nextgen pipelines.
This automates the steps required for installation and setup to make it
easier to get started with bcbio-nextgen. The defaults provide data files
for human variant calling.
Requires: git, wget, bzip2, and Python 3.x, Python 2.7, or argparse + Python 2.6 and earlier
"""
from __future__ import print_function
import collections
import contextlib
import datetime
import os
import platform
import shutil
import subprocess
import sys
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
REMOTES = {
"requirements": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/requirements-conda.txt",
"gitrepo": "https://github.com/bcbio/bcbio-nextgen.git",
"system_config": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/config/bcbio_system.yaml",
"anaconda": "https://repo.continuum.io/miniconda/Miniconda3-latest-%s-x86_64.sh"}
TARGETPY = "python=3.6"
def main(args, sys_argv):
check_arguments(args)
check_dependencies()
with bcbio_tmpdir():
setup_data_dir(args)
print("Installing isolated base python installation")
anaconda = install_anaconda_python(args)
print("Installing mamba")
anaconda = install_mamba(anaconda, args)
print("Installing conda-build")
install_conda_build(anaconda, args)
print("Installing bcbio-nextgen")
bcbio = install_conda_pkgs(anaconda, args)
bootstrap_bcbionextgen(anaconda, args)
print("Installing data and third party dependencies")
system_config = write_system_config(REMOTES["system_config"], args.datadir,
args.tooldir)
setup_manifest(args.datadir)
subprocess.check_call([bcbio, "upgrade"] + _clean_args(sys_argv, args))
print("Finished: bcbio-nextgen, tools and data installed")
print(" Genome data installed in:\n %s" % args.datadir)
if args.tooldir:
print(" Tools installed in:\n %s" % args.tooldir)
print(" Ready to use system configuration at:\n %s" % system_config)
print(" Edit configuration file as needed to match your machine or cluster")
def _clean_args(sys_argv, args):
"""Remove data directory from arguments to pass to upgrade function.
"""
base = [x for x in sys_argv if
x.startswith("-") or not args.datadir == os.path.abspath(os.path.expanduser(x))]
# Remove installer only options we don't pass on
base = [x for x in base if x not in set(["--minimize-disk"])]
if "--nodata" in base:
base.remove("--nodata")
else:
base.append("--data")
return base
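# Illustrative example (hypothetical argv): with
#   sys_argv = ['/data/bcbio', '--tooldir', '/usr/local', '--nodata']
# and args.datadir == '/data/bcbio', the datadir positional is dropped and
# '--nodata' is stripped (so '--data' is not appended), leaving
#   ['--tooldir', '/usr/local']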
def bootstrap_bcbionextgen(anaconda, args):
if args.upgrade == "development":
git_tag = "@%s" % args.revision if args.revision != "master" else ""
subprocess.check_call([anaconda["pip"], "install", "--upgrade", "--no-deps",
"git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)])
def _get_conda_channels(conda_bin):
"""Retrieve default conda channels, checking if they are pre-specified in config.
This allows users to override defaults with specific mirrors in their .condarc
"""
channels = ["bioconda", "conda-forge"]
out = []
try:
import yaml
config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"]))
except ImportError:
config = {}
for c in channels:
present = False
for orig_c in config.get("channels") or []:
if orig_c.endswith((c, "%s/" % c)):
present = True
break
if not present:
out += ["-c", c]
return out
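# Illustrative example: with no channels pre-configured in .condarc, this
# returns ['-c', 'bioconda', '-c', 'conda-forge']; if a user's config already
# provides one of the channels (e.g. via a mirror URL ending in 'bioconda'),
# only the missing '-c' flags are emitted.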
def install_mamba(anaconda, args):
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
mamba = os.path.join(bindir, "mamba")
subprocess.check_call(
[anaconda["conda"], "install", "--yes"] +
_get_conda_channels(anaconda["conda"]) + ["mamba"])
anaconda["mamba"] = mamba
return anaconda
def install_conda_build(anaconda, args):
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
mamba = os.path.join(bindir, "mamba")
subprocess.check_call(
[anaconda["mamba"], "install", "--yes"] +
_get_conda_channels(anaconda["conda"]) + ["conda-build"])
def install_conda_pkgs(anaconda, args):
env = dict(os.environ)
# Try to avoid user specific pkgs and envs directories
# https://github.com/conda/conda/issues/6748
env["CONDA_PKGS_DIRS"] = os.path.join(anaconda["dir"], "pkgs")
env["CONDA_ENVS_DIRS"] = os.path.join(anaconda["dir"], "envs")
conda_bin = anaconda["conda"]
if "mamba" in anaconda.keys():
mamba_bin = anaconda["mamba"]
else:
mamba_bin = anaconda["conda"]
if not os.path.exists(os.path.basename(REMOTES["requirements"])):
subprocess.check_call(["wget", "--no-check-certificate", REMOTES["requirements"]])
if args.minimize_disk:
subprocess.check_call([mamba_bin, "install", "--yes", "nomkl"], env=env)
channels = _get_conda_channels(conda_bin)
subprocess.check_call([mamba_bin, "install", "--yes"] + channels +
["--only-deps", "bcbio-nextgen", TARGETPY], env=env)
subprocess.check_call([conda_bin, "install", "--yes"] + channels +
["--file", os.path.basename(REMOTES["requirements"]), TARGETPY], env=env)
return os.path.join(anaconda["dir"], "bin", "bcbio_nextgen.py")
def _guess_distribution():
"""Simple approach to identify if we are on a MacOSX or Linux system for Anaconda.
"""
if platform.mac_ver()[0]:
return "macosx"
else:
return "linux"
def install_anaconda_python(args):
"""Provide isolated installation of Anaconda python for running bcbio-nextgen.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
dist = args.distribution if args.distribution else _guess_distribution()
url = REMOTES["anaconda"] % ("MacOSX" if dist.lower() == "macosx" else "Linux")
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", "--progress=dot:mega", "--no-check-certificate", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"dir": anaconda_dir}
def setup_manifest(datadir):
"""Create barebones manifest to be filled in during update
"""
manifest_dir = os.path.join(datadir, "manifest")
if not os.path.exists(manifest_dir):
os.makedirs(manifest_dir)
def write_system_config(base_url, datadir, tooldir):
"""Write a bcbio_system.yaml configuration file with tool information.
"""
out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if os.path.exists(out_file):
# if no tool directory and exists, do not overwrite
if tooldir is None:
return out_file
else:
            bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%m%d_%H%M"))
shutil.copy(out_file, bak_file)
if tooldir:
java_basedir = os.path.join(tooldir, "share", "java")
rewrite_ignore = ("log",)
with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
with open(out_file, "w") as out_handle:
in_resources = False
in_prog = None
for line in (l.decode("utf-8") for l in in_handle):
if line[0] != " ":
in_resources = line.startswith("resources")
in_prog = None
elif (in_resources and line[:2] == " " and line[2] != " "
and not line.strip().startswith(rewrite_ignore)):
in_prog = line.split(":")[0].strip()
# Update java directories to point to install directory, avoid special cases
elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
final_dir = os.path.basename(line.split()[-1])
if tooldir:
line = "%s: %s\n" % (line.split(":")[0],
os.path.join(java_basedir, final_dir))
in_prog = None
elif line.startswith("galaxy"):
line = "# %s" % line
out_handle.write(line)
return out_file
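# Illustrative rewrite (hypothetical config lines): within the 'resources'
# section, a program entry such as
#   dir: /anywhere/share/gatk
# becomes, when --tooldir /usr/local is given,
#   dir: /usr/local/share/java/gatk
# while lines starting with 'galaxy' are commented out.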
def setup_data_dir(args):
if not os.path.exists(args.datadir):
cmd = ["mkdir", "-p", args.datadir]
subprocess.check_call(cmd)
@contextlib.contextmanager
def bcbio_tmpdir():
orig_dir = os.getcwd()
work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
yield work_dir
os.chdir(orig_dir)
shutil.rmtree(work_dir)
def check_arguments(args):
"""Ensure argruments are consistent and correct.
"""
if args.toolplus and not args.tooldir:
raise argparse.ArgumentTypeError("Cannot specify --toolplus without --tooldir")
def check_dependencies():
"""Ensure required tools for installation are present.
"""
print("Checking required dependencies")
for dep, msg in [(["git", "--version"], "Git (http://git-scm.com/)"),
(["wget", "--version"], "wget"),
(["bzip2", "-h"], "bzip2")]:
try:
p = subprocess.Popen(dep, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
            out, _ = p.communicate()
            code = p.returncode
except OSError:
out = "Executable not found"
code = 127
if code == 127:
raise OSError("bcbio-nextgen installer requires %s\n%s" % (msg, out))
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
import argparse
Tool = collections.namedtuple("Tool", ["name", "fname"])
std_choices = set(["data", "dbnsfp", "ericscript"])
if x in std_choices:
return Tool(x, None)
elif "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
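# Illustrative examples (hypothetical path): '--toolplus gatk=/opt/gatk.jar'
# parses to Tool(name='gatk', fname='/opt/gatk.jar') after path normalization,
# assuming the file exists; a bare '--toolplus data' yields Tool('data', None);
# anything else raises argparse.ArgumentTypeError.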
if __name__ == "__main__":
try:
import argparse
except ImportError:
raise ImportError("bcbio-nextgen installer requires `argparse`, included in Python 2.7.\n"
"Install for earlier versions with `pip install argparse` or "
"`easy_install argparse`.")
parser = argparse.ArgumentParser(
description="Automatic installation for bcbio-nextgen pipelines")
parser.add_argument("datadir", help="Directory to install genome data",
type=lambda x: (os.path.abspath(os.path.expanduser(x))))
parser.add_argument("--cores", default=1,
help="Number of cores to use if local indexing is necessary.")
parser.add_argument("--tooldir",
help="Directory to install 3rd party software tools. Leave unspecified for no tools",
type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
parser.add_argument("--toolplus", help="Specify additional tool categories to install",
action="append", default=[], type=_check_toolplus)
parser.add_argument("--datatarget", help="Data to install. Allows customization or install of extra data.",
action="append", default=[],
choices=["variation", "rnaseq", "smallrna", "gemini", "vep", "dbnsfp",
"battenberg", "kraken", "ericscript", "gnomad"])
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[],
choices=["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9", "rn6", "rn5",
"canFam3", "dm3", "galGal4", "phix", "pseudomonas_aeruginosa_ucbpp_pa14",
"sacCer3", "TAIR10", "WBcel235", "xenTro3", "GRCz10", "GRCz11",
"Sscrofa11.1", "BDGP6"])
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices=["bbmap", "bowtie", "bowtie2", "bwa", "minimap2", "novoalign", "rtg", "snap",
"star", "ucsc", "hisat2"])
parser.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
dest="isolate", action="store_true", default=False)
parser.add_argument("--minimize-disk", help="Try to minimize disk usage (no MKL extensions)",
dest="minimize_disk", action="store_true", default=False)
parser.add_argument("-u", "--upgrade", help="Code version to install",
choices=["stable", "development"], default="stable")
parser.add_argument("--revision", help="Specify a git commit hash or tag to install", default="master")
parser.add_argument("--distribution", help="Operating system distribution",
default="",
choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
if len(sys.argv) == 1:
parser.print_help()
else:
main(parser.parse_args(), sys.argv[1:])
|
a113n/bcbio-nextgen
|
scripts/bcbio_nextgen_install.py
|
Python
|
mit
| 14,511
|
[
"BWA",
"Bioconda",
"Bowtie",
"Galaxy"
] |
3420dca6e5c89eb3f18581c4e877644bb55b45116274d77a37399774a5da41f7
|
#!/usr/local/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
repex
=====
Replica-exchange simulation algorithms and specific variants.
DESCRIPTION
This module provides a general facility for running replica-exchange simulations, as well as
derived classes for special cases such as parallel tempering (in which the states differ only
in temperature) and Hamiltonian exchange (in which the state differ only by potential function).
Provided classes include:
* ReplicaExchange - Base class for general replica-exchange simulations among specified ThermodynamicState objects
* ParallelTempering - Convenience subclass of ReplicaExchange for parallel tempering simulations (one System object, many temperatures/pressures)
* HamiltonianExchange - Convenience subclass of ReplicaExchange for Hamiltonian exchange simulations (many System objects, same temperature/pressure)
TODO
* Add analysis facility accessible by user.
* Give up on Context caching and revert to serial Context creation/destruction if we run out of GPU memory (issuing an alert).
* Store replica self-energies and/or -ln q(x) for simulation (for analyzing correlation times).
* Add analysis facility.
* Allow user to call initialize() externally and get the NetCDF file handle to add additional data?
* Store / restore parameters and System objects from NetCDF file for resuming and later analysis.
* Sampling support:
* Short-term: Add support for user to specify a callback to create the Integrator to use ('integrator_factory' or 'integrator_callback').
* Longer-term: Allow a more complex MCMC sampling scheme consisting of one or more moves to be specified through mcmc.py facility.
* Allow different choices of temperature handling during exchange attempts:
* scale velocities (exchanging only on potential energies) - make this the default?
* randomize velocities (exchanging only on potential energies)
* exchange on total energies, preserving velocities (requires more replicas)
* Add control over number of times swaps are attempted when mixing replicas, or compute best guess automatically
* Add another layer of abstraction so that the base class uses generic log probabilities, rather than reduced potentials?
* Use interface-based checking of arguments so that different implementations of the OpenMM API (such as pyopenmm) can be used.
* Eliminate file closures in favor of syncs to avoid closing temporary files in the middle of a run.
COPYRIGHT
Written by John D. Chodera <jchodera@gmail.com> while at the University of California Berkeley.
LICENSE
This code is licensed under the latest available version of the GNU General Public License.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
from simtk import openmm
from simtk import unit
import os, os.path
import math
import copy
import time
import datetime
import logging
logger = logging.getLogger(__name__)
import numpy as np
import mdtraj as md
import netCDF4 as netcdf
from utils import is_terminal_verbose, delayed_termination
#=============================================================================================
# MODULE CONSTANTS
#=============================================================================================
kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA # Boltzmann constant
# TODO: Fix MAX_SEED when we determine what maximum allowed seed is.
#MAX_SEED = 4294967 # maximum seed for OpenMM setRandomNumberSeed
MAX_SEED = 2**31 - 1 # maximum seed for OpenMM setRandomNumberSeed
#=============================================================================================
# Exceptions
#=============================================================================================
class NotImplementedException(Exception):
"""
Exception denoting that the requested feature has not yet been implemented.
"""
pass
class ParameterException(Exception):
"""
Exception denoting that an incorrect argument has been specified.
"""
pass
#=============================================================================================
# Thermodynamic state description
#=============================================================================================
class ThermodynamicState(object):
"""
Data specifying a thermodynamic state obeying Boltzmann statistics.
Examples
--------
Specify an NVT state for a water box at 298 K.
>>> from simtk import unit
>>> from openmmtools import testsystems
>>> waterbox = testsystems.WaterBox()
>>> [system, positions] = [waterbox.system, waterbox.positions]
>>> state = ThermodynamicState(system=system, temperature=298.0*unit.kelvin)
Specify an NPT state at 298 K and 1 atm pressure.
>>> state = ThermodynamicState(system=system, temperature=298.0*unit.kelvin, pressure=1.0*unit.atmospheres)
Note that the pressure is only relevant for periodic systems.
Notes
-----
    This state object cannot describe states obeying non-Boltzmann statistics, such as Tsallis statistics.
TODO
----
* Implement a more fundamental ProbabilityState as a base class?
* Implement pH.
"""
def __init__(self, system=None, temperature=None, pressure=None):
"""
Initialize the thermodynamic state.
Parameters
----------
system : simtk.openmm.System, optional, default=None
A System object describing the potential energy function for the system
Note: Only a shallow copy is made.
temperature : simtk.unit.Quantity compatible with 'kelvin', optional, default=None
The temperature for a system with constant temperature
pressure : simtk.unit.Quantity compatible with 'atmospheres', optional, default=None
The pressure for constant-pressure systems (default: None)
"""
# Initialize.
self.system = None # the System object governing the potential energy computation
self.temperature = None # the temperature
self.pressure = None # the pressure, or None if not isobaric
# Store provided values.
if system is not None:
self.system = system
if temperature is not None:
self.temperature = temperature
if pressure is not None:
self.pressure = pressure
return
@property
def kT(self):
"""
Return the thermal energy (kT) of this state.
"""
return (kB * self.temperature)
def reduced_potential(self, positions, box_vectors=None, platform=None, context=None):
"""
Compute the reduced potential for the given positions in this thermodynamic state.
Parameters
----------
positions : simtk.unit.Quantity of Nx3 numpy.array
Positions[n,k] is kth coordinate of particle n
box_vectors : tuple of Vec3 or ???, optional, default=None
Periodic box vectors, if present.
platform : simtk.openmm.Platform, optional, default=None
Platform to use, or None if default.
context : simtk.openmm.Context, optional, default=none
If specified, use this Context.
Returns
-------
u : float
The unitless reduced potential (which can be considered to have units of kT)
Examples
--------
Compute the reduced potential of a Lennard-Jones cluster at 100 K.
>>> from simtk import unit
>>> from openmmtools import testsystems
>>> testsystem = testsystems.LennardJonesCluster()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin)
>>> potential = state.reduced_potential(positions)
Compute the reduced potential of a Lennard-Jones fluid at 100 K and 1 atm.
>>> testsystem = testsystems.LennardJonesFluid()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin, pressure=1.0*unit.atmosphere)
>>> box_vectors = system.getDefaultPeriodicBoxVectors()
>>> potential = state.reduced_potential(positions, box_vectors)
Compute the reduced potential of a water box at 298 K and 1 atm.
>>> testsystem = testsystems.WaterBox()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> state = ThermodynamicState(system=system, temperature=298.0*unit.kelvin, pressure=1.0*unit.atmosphere)
>>> box_vectors = system.getDefaultPeriodicBoxVectors()
>>> potential = state.reduced_potential(positions, box_vectors)
Notes
-----
The reduced potential is defined as in Ref. [1]
u = \beta [U(x) + p V(x) + \mu N(x)]
where the thermodynamic parameters are
        \beta = 1/(kB T) is the inverse temperature
U(x) is the potential energy
p is the pressure
\mu is the chemical potential
and the configurational properties are
x the atomic positions
V(x) is the instantaneous box volume
N(x) the numbers of various particle species (e.g. protons of titratible groups)
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of equilibrium states. J Chem Phys 129:124105, 2008.
TODO
----
* Instead of requiring configuration and box_vectors be passed separately, develop a Configuration or Snapshot class.
"""
# If pressure is specified, ensure box vectors have been provided.
if (self.pressure is not None) and (box_vectors is None):
            raise ParameterException("box_vectors must be specified for a constant-pressure ensemble.")
# Compute potential energy.
potential_energy = self._compute_potential_energy(positions, box_vectors=box_vectors, platform=platform, context=context)
# Compute inverse temperature.
beta = 1.0 / (kB * self.temperature)
# Compute reduced potential.
reduced_potential = beta * potential_energy
if self.pressure is not None:
reduced_potential += beta * self.pressure * self._volume(box_vectors) * unit.AVOGADRO_CONSTANT_NA
return reduced_potential
def _compute_potential_energy(self, positions, box_vectors=None, platform=None, context=None):
"""
Compute the potential energy for the given positions in this thermodynamic state.
Parameters
----------
positions : simtk.unit.Quantity of Nx3 numpy.array
Positions[n,k] is kth coordinate of particle n
box_vectors : tuple of Vec3 or ???, optional, default=None
Periodic box vectors, if present.
platform : simtk.openmm.Platform, optional, default=None
Platform to use, or None if default.
context : simtk.openmm.Context, optional, default=none
If specified, use this Context.
Returns
-------
potential_energy : simtk.unit.Quantity with units compatible with kilojoules_per_mole
The unit-bearing potential energy.
"""
# Create OpenMM context to compute potential energy.
cleanup_context = False
        if context is None:
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
if platform:
context = openmm.Context(self.system, integrator, platform)
else:
context = openmm.Context(self.system, integrator)
cleanup_context = True
# Must set box vectors first.
        if box_vectors is not None:
            context.setPeriodicBoxVectors(*box_vectors)
# Set positions.
context.setPositions(positions)
# WARNING: We don't set the alchemical state here, which may cause trouble.
# Retrieve potential energy.
potential_energy = context.getState(getEnergy=True).getPotentialEnergy()
# Clean up if we created a Context and Integrator.
if cleanup_context:
del context, integrator
return potential_energy
def is_compatible_with(self, state):
"""
Determine whether another state is in the same thermodynamic ensemble (e.g. NVT, NPT).
ARGUMENTS
state (ThermodynamicState) - thermodynamic state whose compatibility is to be determined
RETURNS
is_compatible (boolean) - True if 'state' is of the same ensemble (e.g. both NVT, both NPT), False otherwise
EXAMPLES
Create NVT and NPT states.
>>> from simtk import unit
>>> from openmmtools import testsystems
>>> testsystem = testsystems.LennardJonesCluster()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> nvt_state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin)
>>> npt_state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin, pressure=1.0*unit.atmospheres)
Test compatibility.
>>> test1 = nvt_state.is_compatible_with(nvt_state)
>>> test2 = nvt_state.is_compatible_with(npt_state)
>>> test3 = npt_state.is_compatible_with(nvt_state)
>>> test4 = npt_state.is_compatible_with(npt_state)
"""
is_compatible = True
# Make sure systems have the same number of atoms.
        if (self.system is not None) and (state.system is not None):
if (self.system.getNumParticles() != state.system.getNumParticles()):
is_compatible = False
# Make sure other terms are defined for both states.
# TODO: Use introspection to get list of parameters?
for parameter in ['temperature', 'pressure']:
if (parameter in dir(self)) is not (parameter in dir(state)):
# parameter is not shared by both states
is_compatible = False
return is_compatible
def __repr__(self):
"""
Returns a string representation of a state.
Examples
--------
Create an NVT state.
>>> from simtk import unit
>>> from openmmtools import testsystems
>>> testsystem = testsystems.LennardJonesCluster()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin)
Return a representation of the state.
>>> state_string = repr(state)
"""
r = "<ThermodynamicState object"
if self.temperature is not None:
r += ", temperature = %s" % str(self.temperature)
if self.pressure is not None:
r += ", pressure = %s" % str(self.pressure)
r += ">"
return r
def __str__(self):
r = "<ThermodynamicState object"
if self.temperature is not None:
r += ", temperature = %s" % str(self.temperature)
if self.pressure is not None:
r += ", pressure = %s" % str(self.pressure)
r += ">"
return r
def _volume(self, box_vectors):
"""
Return the volume of the current configuration.
Returns
-------
volume : simtk.unit.Quantity
The volume of the system (in units of length^3), or None if no box positions are defined
Examples
--------
Compute the volume of a Lennard-Jones fluid at 100 K and 1 atm.
TODO
----
* Replace with OpenMM State.getPeriodicBoxVolume()
>>> from openmmtools import testsystems
>>> testsystem = testsystems.LennardJonesFluid()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> state = ThermodynamicState(system=system, temperature=100.0*unit.kelvin, pressure=1.0*unit.atmosphere)
>>> box_vectors = system.getDefaultPeriodicBoxVectors()
>>> volume = state._volume(box_vectors)
"""
# Compute volume of parallelepiped.
[a,b,c] = box_vectors
A = np.array([a/a.unit, b/a.unit, c/a.unit])
volume = np.linalg.det(A) * a.unit**3
return volume
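    # Illustrative check: for an orthorhombic box with vectors (2, 0, 0),
    # (0, 3, 0) and (0, 0, 4) nanometers, the determinant gives
    # 2 * 3 * 4 = 24 nm^3, i.e. the parallelepiped volume V = det([a, b, c]).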
#=============================================================================================
# Replica-exchange simulation
#=============================================================================================
class ReplicaExchange(object):
"""
Replica-exchange simulation facility.
This base class provides a general replica-exchange simulation facility, allowing any set of thermodynamic states
to be specified, along with a set of initial positions to be assigned to the replicas in a round-robin fashion.
No distinction is made between one-dimensional and multidimensional replica layout; by default, the replica mixing
scheme attempts to mix *all* replicas to minimize slow diffusion normally found in multidimensional replica exchange
simulations. (Modification of the 'replica_mixing_scheme' setting will allow the tranditional 'neighbor swaps only'
scheme to be used.)
While this base class is fully functional, it does not make use of the special structure of parallel tempering or
Hamiltonian exchange variants of replica exchange. The ParallelTempering and HamiltonianExchange classes should
therefore be used for these algorithms, since they are more efficient and provide more convenient ways to initialize
the simulation classes.
Stored configurations, energies, swaps, and restart information are all written to a single output file using
    the platform-portable, robust, and efficient NetCDF4 library. HDF5 support is planned.
Attributes
----------
The following parameters (attributes) can be set after the object has been created, but before it has been
initialized by a call to run():
collision_rate : simtk.unit.Quantity (units: 1/time)
        The collision rate used for Langevin dynamics (default: 5 ps^-1)
constraint_tolerance : float
Relative constraint tolerance (default: 1e-6)
timestep : simtk.unit.Quantity (units: time)
        Timestep for Langevin dynamics (default: 2 fs)
nsteps_per_iteration : int
Number of timesteps per iteration (default: 500)
number_of_iterations : int
        Number of replica-exchange iterations to simulate (default: 1)
number_of_equilibration_iterations : int
        Number of equilibration iterations before beginning exchanges (default: 1)
equilibration_timestep : simtk.unit.Quantity (units: time)
        Timestep for use in equilibration (default: 1 fs)
title : str
Title for the simulation.
minimize : bool
Minimize configurations before running the simulation (default: True)
minimize_tolerance : simtk.unit.Quantity (units: energy/mole/length)
Set minimization tolerance (default: 1.0 * unit.kilojoules_per_mole / unit.nanometers).
minimize_max_iterations : int
Maximum number of iterations for minimization.
replica_mixing_scheme : str
Specify how to mix replicas. Supported schemes are 'swap-neighbors' and
'swap-all' (default: 'swap-all').
online_analysis : bool
If True, analysis will occur each iteration (default: False).
online_analysis_min_iterations : int
Minimum number of iterations needed to begin online analysis (default: 20).
show_energies : bool
If True, will print energies at each iteration (default: True).
show_mixing_statistics : bool
If True, will show mixing statistics at each iteration (default: True).
TODO
----
* Replace hard-coded Langevin dynamics with general MCMC moves.
* Allow parallel resource to be used, if available (likely via Parallel Python).
* Add support for and autodetection of other NetCDF4 interfaces.
* Add HDF5 support.
Examples
--------
Parallel tempering simulation of alanine dipeptide in implicit solvent (replica exchange among temperatures)
(This is just an illustrative example; use ParallelTempering class for actual production parallel tempering simulations.)
>>> # Create test system.
>>> from openmmtools import testsystems
>>> testsystem = testsystems.AlanineDipeptideImplicit()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> # Create thermodynamic states for parallel tempering with exponentially-spaced schedule.
>>> from simtk import unit
>>> import math
>>> nreplicas = 3 # number of temperature replicas
>>> T_min = 298.0 * unit.kelvin # minimum temperature
>>> T_max = 600.0 * unit.kelvin # maximum temperature
>>> T_i = [ T_min + (T_max - T_min) * (math.exp(float(i) / float(nreplicas-1)) - 1.0) / (math.e - 1.0) for i in range(nreplicas) ]
>>> states = [ ThermodynamicState(system=system, temperature=T_i[i]) for i in range(nreplicas) ]
>>> import tempfile
>>> store_filename = tempfile.NamedTemporaryFile(delete=False).name + '.nc'
>>> # Create simulation.
>>> simulation = ReplicaExchange(store_filename)
>>> simulation.create(states, positions) # initialize the replica-exchange simulation
>>> simulation.minimize = False
>>> simulation.number_of_iterations = 2 # set the simulation to only run 2 iterations
>>> simulation.timestep = 2.0 * unit.femtoseconds # set the timestep for integration
>>> simulation.nsteps_per_iteration = 50 # run 50 timesteps per iteration
>>> simulation.run() # run the simulation
>>> del simulation # clean up
Extend the simulation
>>> simulation = ReplicaExchange(store_filename)
>>> simulation.resume()
>>> simulation.number_of_iterations = 4 # extend
>>> simulation.run()
Clean up.
>>> os.remove(store_filename)
"""
default_parameters = {'collision_rate': 5.0 / unit.picosecond,
'constraint_tolerance': 1.0e-6,
'timestep': 2.0 * unit.femtosecond,
'nsteps_per_iteration': 500,
'number_of_iterations': 1,
'equilibration_timestep': 1.0 * unit.femtosecond,
'number_of_equilibration_iterations': 1,
'title': 'Replica-exchange simulation created using ReplicaExchange class of repex.py on %s' % time.asctime(time.localtime()),
'minimize': True,
'minimize_tolerance': 1.0 * unit.kilojoules_per_mole / unit.nanometers,
'minimize_max_iterations': 0,
'replica_mixing_scheme': 'swap-all',
'online_analysis': False,
'online_analysis_min_iterations': 20,
'show_energies': True,
'show_mixing_statistics': True
}
# Options to store.
options_to_store = ['collision_rate', 'constraint_tolerance', 'timestep', 'nsteps_per_iteration', 'number_of_iterations', 'equilibration_timestep', 'number_of_equilibration_iterations', 'title', 'minimize', 'replica_mixing_scheme', 'online_analysis', 'show_mixing_statistics']
def __init__(self, store_filename, mpicomm=None, mm=None, **kwargs):
"""
Initialize replica-exchange simulation facility.
Parameters
----------
store_filename : string
Name of file to bind simulation to use as storage for checkpointing and storage of results.
mm : implementation of simtk.openmm, optional, default=simtk.openmm
OpenMM API implementation to use
mpicomm : mpi4py communicator, optional, default=None
MPI communicator, if parallel execution is desired
Other Parameters
----------------
**kwargs
            Parameters in ReplicaExchange.default_parameters corresponding to public attributes.
"""
# To allow for parameters to be modified after object creation, class is not initialized until a call to self._initialize().
self._initialized = False
# Select default OpenMM implementation if not specified.
self.mm = mm
if mm is None: self.mm = openmm
# Set MPI communicator (or None if not used).
self.mpicomm = mpicomm
# Set default options.
# These can be changed externally until object is initialized.
self.platform = None
self.platform_name = None
self.integrator = None # OpenMM integrator to use for propagating dynamics
# Initialize keywords parameters and check for unknown keywords parameters
for par, default in self.default_parameters.items():
setattr(self, par, kwargs.pop(par, default))
if kwargs:
            raise TypeError('got unexpected keyword arguments: {}'.format(
', '.join(kwargs.keys())))
# Record store file filename
self.store_filename = store_filename
# Check if netcdf file exists, assuming we want to resume if one exists.
self._resume = os.path.exists(self.store_filename) and (os.path.getsize(self.store_filename) > 0)
if self.mpicomm:
logger.debug('Node {}/{}: MPI bcast - sharing self._resume'.format(
self.mpicomm.rank, self.mpicomm.size))
self._resume = self.mpicomm.bcast(self._resume, root=0) # use whatever root node decides
return
def create(self, states, positions, options=None, metadata=None):
"""
Create new replica-exchange simulation.
Parameters
----------
states : list of ThermodynamicState
Thermodynamic states to simulate, where one replica is allocated per state.
Each state must have a system with the same number of atoms, and the same
thermodynamic ensemble (combination of temperature, pressure, pH, etc.) must
be defined for each.
positions : Coordinate object or iterable container of Coordinate objects)
One or more sets of initial positions
to be initially assigned to replicas in a round-robin fashion, provided simulation is not resumed from store file.
Currently, positions must be specified as a list of simtk.unit.Quantity-wrapped np arrays.
options : dict, optional, default=None
Optional dict to use for specifying simulation options. Provided keywords will be matched to object variables to replace defaults.
metadata : dict, optional, default=None
metadata to store in a 'metadata' group in store file
"""
# Check if netcdf file exists.
file_exists = os.path.exists(self.store_filename) and (os.path.getsize(self.store_filename) > 0)
if self.mpicomm:
logger.debug('Node {}/{}: MPI bcast - sharing file_exists'.format(
self.mpicomm.rank, self.mpicomm.size))
file_exists = self.mpicomm.bcast(file_exists, root=0) # use whatever root node decides
if file_exists:
raise RuntimeError("NetCDF file %s already exists; cowardly refusing to overwrite." % self.store_filename)
self._resume = False
# TODO: Make a deep copy of specified states once this is fixed in OpenMM.
# self.states = copy.deepcopy(states)
self.states = states
# Determine number of replicas from the number of specified thermodynamic states.
self.nreplicas = len(self.states)
# Check to make sure all states have the same number of atoms and are in the same thermodynamic ensemble.
for state in self.states:
if not state.is_compatible_with(self.states[0]):
                raise ParameterException("Provided ThermodynamicState states must all be from the same thermodynamic ensemble.")
# Distribute coordinate information to replicas in a round-robin fashion.
        # We have to explicitly check whether positions is a list or a set here because it turns out that np 2D arrays are iterable as well.
# TODO: Handle case where positions are passed in as a list of tuples, or list of lists, or list of Vec3s, etc.
if type(positions) in [type(list()), type(set())]:
self.provided_positions = [ unit.Quantity(np.array(coordinate_set / coordinate_set.unit), coordinate_set.unit) for coordinate_set in positions ]
else:
self.provided_positions = [ unit.Quantity(np.array(positions / positions.unit), positions.unit) ]
# Handle provided 'options' dict, replacing any options provided by caller in dictionary.
if options is not None:
for key in options.keys(): # for each provided key
if key in vars(self).keys(): # if this is also a simulation parameter
value = options[key]
logger.debug("from options: %s -> %s" % (key, str(value)))
vars(self)[key] = value # replace default simulation parameter with provided parameter
# Store metadata to store in store file.
self.metadata = metadata
# Initialize NetCDF file.
self._initialize_create()
return
def resume(self, options=None):
"""
Parameters
----------
options : dict, optional, default=None
will override any options restored from the store file.
"""
self._resume = True
# Check if netcdf file exists.
file_exists = os.path.exists(self.store_filename) and (os.path.getsize(self.store_filename) > 0)
if self.mpicomm:
logger.debug('Node {}/{}: MPI bcast - sharing file_exists'.format(
self.mpicomm.rank, self.mpicomm.size))
file_exists = self.mpicomm.bcast(file_exists, root=0) # use whatever root node decides
if not file_exists:
raise Exception("NetCDF file %s does not exist; cannot resume." % self.store_filename)
# Try to restore thermodynamic states and run options from the NetCDF file.
ncfile = netcdf.Dataset(self.store_filename, 'r')
self._restore_thermodynamic_states(ncfile)
self._restore_options(ncfile)
self._restore_metadata(ncfile)
ncfile.close()
# Determine number of replicas from the number of specified thermodynamic states.
self.nreplicas = len(self.states)
# Check to make sure all states have the same number of atoms and are in the same thermodynamic ensemble.
for state in self.states:
if not state.is_compatible_with(self.states[0]):
                raise ParameterException("Provided ThermodynamicState states must all be from the same thermodynamic ensemble.")
# Handle provided 'options' dict, replacing any options provided by caller in dictionary.
# TODO: Check to make sure that only allowed overrides are specified.
if options:
for key in options.keys(): # for each provided key
if key in vars(self).keys(): # if this is also a simulation parameter
value = options[key]
logger.debug("from options: %s -> %s" % (key, str(value)))
vars(self)[key] = value # replace default simulation parameter with provided parameter
return
def __repr__(self):
"""
Return a 'formal' representation that can be used to reconstruct the class, if possible.
"""
# TODO: Can we make this a more useful expression?
return "<instance of ReplicaExchange>"
def __str__(self):
"""
Show an 'informal' human-readable representation of the replica-exchange simulation.
"""
r = ""
r += "Replica-exchange simulation\n"
r += "\n"
r += "%d replicas\n" % str(self.nreplicas)
r += "%d coordinate sets provided\n" % len(self.provided_positions)
r += "file store: %s\n" % str(self.store_filename)
r += "initialized: %s\n" % str(self._initialized)
r += "\n"
r += "PARAMETERS\n"
r += "collision rate: %s\n" % str(self.collision_rate)
r += "relative constraint tolerance: %s\n" % str(self.constraint_tolerance)
r += "timestep: %s\n" % str(self.timestep)
r += "number of steps/iteration: %s\n" % str(self.nsteps_per_iteration)
r += "number of iterations: %s\n" % str(self.number_of_iterations)
r += "equilibration timestep: %s\n" % str(self.equilibration_timestep)
r += "number of equilibration iterations: %s\n" % str(self.number_of_equilibration_iterations)
r += "\n"
return r
@classmethod
def _status_from_ncfile(cls, ncfile):
"""
Return status dict of current calculation.
Returns
-------
status : dict
Returns a dict of useful information about current simulation progress.
"""
status = dict()
status['number_of_iterations'] = ncfile.variables['positions'].shape[0]
status['nstates'] = ncfile.variables['positions'].shape[1]
status['natoms'] = ncfile.variables['positions'].shape[2]
return status
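    # Illustrative return value: for a store file holding 100 written
    # iterations of 8 states over 1234 atoms (a 'positions' variable of shape
    # [100, 8, 1234, 3]), this yields
    #   {'number_of_iterations': 100, 'nstates': 8, 'natoms': 1234}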
@classmethod
def status_from_store(cls, store_filename):
"""
Return status dict of calculation on disk.
Parameters
----------
store_filename : str
The name of the NetCDF storage filename.
Returns
-------
status : dict
Returns a dict of useful information about current simulation progress.
"""
ncfile = netcdf.Dataset(store_filename, 'r')
status = ReplicaExchange._status_from_ncfile(ncfile)
ncfile.close()
return status
def status(self):
"""
Return status dict of current calculation.
Returns
-------
status : dict
Returns a dict of useful information about current simulation progress.
"""
ncfile = netcdf.Dataset(self.store_filename, 'r')
        status = ReplicaExchange._status_from_ncfile(ncfile)
ncfile.close()
return status
def run(self, niterations_to_run=None):
"""
Run the replica-exchange simulation.
Any parameter changes (via object attributes) that were made between object creation and calling this method become locked in
at this point, and the object will create and bind to the store file. If the store file already exists, the run will be resumed
if possible; otherwise, an exception will be raised.
Parameters
----------
niterations_to_run : int, optional, default=None
            If specified, at most this number of iterations will be run.
"""
if not self._initialized:
self._initialize_resume()
# Main loop
run_start_time = time.time()
run_start_iteration = self.iteration
iteration_limit = self.number_of_iterations
if niterations_to_run:
iteration_limit = min(self.iteration + niterations_to_run, iteration_limit)
while (self.iteration < iteration_limit):
logger.debug("\nIteration %d / %d" % (self.iteration+1, self.number_of_iterations))
initial_time = time.time()
            # Attempt replica swaps to sample from equilibrium permutation of states associated with replicas.
self._mix_replicas()
# Propagate replicas.
self._propagate_replicas()
# Compute energies of all replicas at all states.
self._compute_energies()
# Show energies.
if self.show_energies:
self._show_energies()
# Write iteration to storage file.
self._write_iteration_netcdf()
# Increment iteration counter.
self.iteration += 1
# Show mixing statistics.
if self.show_mixing_statistics:
self._show_mixing_statistics()
# Perform online analysis.
if self.online_analysis:
self._analysis()
# Show timing statistics if debug level is activated
if logger.isEnabledFor(logging.DEBUG):
final_time = time.time()
elapsed_time = final_time - initial_time
estimated_time_remaining = (final_time - run_start_time) / (self.iteration - run_start_iteration) * (self.number_of_iterations - self.iteration)
estimated_total_time = (final_time - run_start_time) / (self.iteration - run_start_iteration) * (self.number_of_iterations)
estimated_finish_time = final_time + estimated_time_remaining
logger.debug("Iteration took %.3f s." % elapsed_time)
logger.debug("Estimated completion in %s, at %s (consuming total wall clock time %s)." % (str(datetime.timedelta(seconds=estimated_time_remaining)), time.ctime(estimated_finish_time), str(datetime.timedelta(seconds=estimated_total_time))))
# Perform sanity checks to see if we should terminate here.
self._run_sanity_checks()
# Clean up and close storage files.
self._finalize()
return
def _determine_fastest_platform(self, system):
"""
Determine fastest OpenMM platform for given system.
Parameters
----------
system : simtk.openmm.System
The system for which the fastest OpenMM Platform object is to be determined.
Returns
-------
platform : simtk.openmm.Platform
The fastest OpenMM Platform for the specified system.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = openmm.Context(system, integrator)
platform = context.getPlatform()
del context, integrator
return platform
def _initialize_create(self):
"""
Initialize the simulation and create a storage file, closing it after completion.
"""
if self._initialized:
            raise RuntimeError("Simulation has already been initialized.")
# Extract a representative system.
representative_system = self.states[0].system
# Turn off verbosity if not master node.
if self.mpicomm:
# Have each node report that it is initialized.
# TODO this doesn't work on worker nodes since they report only warning entries and higher
logger.debug("Initialized node %d / %d" % (self.mpicomm.rank, self.mpicomm.size))
# Display papers to be cited.
if is_terminal_verbose():
self._display_citations()
        # Determine number of thermodynamic states.
self.nstates = len(self.states)
# Determine number of atoms in systems.
self.natoms = representative_system.getNumParticles()
# Allocate storage.
self.replica_positions = list() # replica_positions[i] is the configuration currently held in replica i
self.replica_box_vectors = list() # replica_box_vectors[i] is the set of box vectors currently held in replica i
self.replica_states = np.zeros([self.nstates], np.int64) # replica_states[i] is the state that replica i is currently at
self.u_kl = np.zeros([self.nstates, self.nstates], np.float64)
self.swap_Pij_accepted = np.zeros([self.nstates, self.nstates], np.float64)
self.Nij_proposed = np.zeros([self.nstates,self.nstates], np.int64) # Nij_proposed[i][j] is the number of swaps proposed between states i and j
self.Nij_accepted = np.zeros([self.nstates,self.nstates], np.int64) # Nij_accepted[i][j] is the number of swaps accepted between states i and j
# Distribute coordinate information to replicas in a round-robin fashion, making a deep copy.
if not self._resume:
self.replica_positions = [ copy.deepcopy(self.provided_positions[replica_index % len(self.provided_positions)]) for replica_index in range(self.nstates) ]
# Assign default box vectors.
self.replica_box_vectors = list()
for state in self.states:
[a,b,c] = state.system.getDefaultPeriodicBoxVectors()
box_vectors = unit.Quantity(np.zeros([3,3], np.float32), unit.nanometers)
box_vectors[0,:] = a
box_vectors[1,:] = b
box_vectors[2,:] = c
self.replica_box_vectors.append(box_vectors)
# Assign initial replica states.
for replica_index in range(self.nstates):
self.replica_states[replica_index] = replica_index
# Initialize current iteration counter.
self.iteration = 0
# Initialize NetCDF file.
self._initialize_netcdf()
# Store initial state.
self._write_iteration_netcdf()
# Close NetCDF file.
self.ncfile.close()
self.ncfile = None
return
def _initialize_resume(self):
"""
Initialize the simulation, and bind to a storage file.
"""
if self._initialized:
raise Exception("Simulation has already been initialized.")
# Extract a representative system.
representative_system = self.states[0].system
# Report initialization status from each MPI node.
if self.mpicomm:
# Have each node report that it is initialized.
# TODO this doesn't work on worker nodes since they report only warning entries and higher
logger.debug("Initialized node %d / %d" % (self.mpicomm.rank, self.mpicomm.size))
# Display papers to be cited.
if is_terminal_verbose():
self._display_citations()
# Determine number of alchemical states.
self.nstates = len(self.states)
# Determine number of atoms in systems.
self.natoms = representative_system.getNumParticles()
# If no platform has been specified, select one: use the requested platform name if given, otherwise pick the fastest available platform.
if self.platform is None:
# Handle OpenMM platform selection.
# TODO: Can we handle this more gracefully, or push this off to ReplicaExchange?
if self.platform_name:
self.platform = openmm.Platform.getPlatformByName(self.platform_name)
else:
self.platform = self._determine_fastest_platform(representative_system)
# Use only a single CPU thread if we are using the CPU platform.
# TODO: Since there is an environment variable that can control this, we may want to avoid doing this.
if (self.platform.getName() == 'CPU') and self.mpicomm:
logger.debug("Setting 'CpuThreads' to 1 because MPI is active.")
self.platform.setPropertyDefaultValue('CpuThreads', '1')
# Allocate storage.
self.replica_positions = list() # replica_positions[i] is the configuration currently held in replica i
self.replica_box_vectors = list() # replica_box_vectors[i] is the set of box vectors currently held in replica i
self.replica_states = np.zeros([self.nstates], np.int64) # replica_states[i] is the state that replica i is currently at
self.u_kl = np.zeros([self.nstates, self.nstates], np.float64)
self.swap_Pij_accepted = np.zeros([self.nstates, self.nstates], np.float64)
self.Nij_proposed = np.zeros([self.nstates,self.nstates], np.int64) # Nij_proposed[i][j] is the number of swaps proposed between states i and j
self.Nij_accepted = np.zeros([self.nstates,self.nstates], np.int64) # Nij_accepted[i][j] is the number of swaps accepted between states i and j
# Distribute coordinate information to replicas in a round-robin fashion, making a deep copy.
if not self._resume:
self.replica_positions = [ copy.deepcopy(self.provided_positions[replica_index % len(self.provided_positions)]) for replica_index in range(self.nstates) ]
# Assign default box vectors.
self.replica_box_vectors = list()
for state in self.states:
[a,b,c] = state.system.getDefaultPeriodicBoxVectors()
box_vectors = unit.Quantity(np.zeros([3,3], np.float32), unit.nanometers)
box_vectors[0,:] = a
box_vectors[1,:] = b
box_vectors[2,:] = c
self.replica_box_vectors.append(box_vectors)
# Assign initial replica states.
for replica_index in range(self.nstates):
self.replica_states[replica_index] = replica_index
# Check to make sure NetCDF file exists.
if not os.path.exists(self.store_filename):
raise Exception("Store file %s does not exist." % self.store_filename)
# Open NetCDF file for reading
logger.debug("Reading NetCDF file '%s'..." % self.store_filename)
ncfile = netcdf.Dataset(self.store_filename, 'r')
# Resume from NetCDF file.
self._resume_from_netcdf(ncfile)
# Close NetCDF file.
ncfile.close()
if (self.mpicomm is None) or (self.mpicomm.rank == 0):
# Reopen NetCDF file for appending, and maintain handle.
self.ncfile = netcdf.Dataset(self.store_filename, 'a')
else:
self.ncfile = None
# On first iteration, we need to do some initialization.
if self.iteration == 0:
# Perform sanity checks to see if we should terminate here.
self._run_sanity_checks()
# Minimize and equilibrate all replicas.
self._minimize_and_equilibrate()
# Compute energies of all alchemical replicas
self._compute_energies()
# Show energies.
if self.show_energies:
self._show_energies()
# Re-store initial state.
# TODO: Sort this logic out.
#self.ncfile = ncfile
#self._write_iteration_netcdf()
#self.ncfile = None
# Run sanity checks.
# TODO: Refine this.
self._run_sanity_checks()
#self._compute_energies() # recompute energies?
#self._run_sanity_checks()
# We will work on the next iteration.
self.iteration += 1
# Show energies.
if self.show_energies:
self._show_energies()
# Analysis object starts off empty.
self.analysis = None
# Signal that the class has been initialized.
self._initialized = True
return
def _finalize(self):
"""
Do anything necessary to finish run except close files.
"""
if self.mpicomm:
# Only the root node needs to clean up.
if self.mpicomm.rank != 0: return
if hasattr(self, 'ncfile') and self.ncfile:
self.ncfile.sync()
return
def __del__(self):
"""
Clean up, closing files.
"""
self._finalize()
if self.mpicomm:
# Only the root node needs to clean up.
if self.mpicomm.rank != 0: return
if hasattr(self, 'ncfile'):
if self.ncfile is not None:
self.ncfile.close()
self.ncfile = None
return
def _display_citations(self):
"""
Display papers to be cited.
TODO:
* Add original citations for various replica-exchange schemes.
* Show subset of OpenMM citations based on what features are being used.
"""
openmm_citations = """\
Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w"""
gibbs_citations = """\
Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs sampling: Simple improvements for enhanced mixing. J. Chem. Phys., in press. arXiv: 1105.5749"""
mbar_citations = """\
Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states. J. Chem. Phys. 129:124105, 2008. DOI: 10.1063/1.2978177"""
print "Please cite the following:"
print ""
print openmm_citations
if self.replica_mixing_scheme == 'swap-all':
print gibbs_citations
if self.online_analysis:
print mbar_citations
return
def _propagate_replica(self, replica_index):
"""
Propagate the replica corresponding to the specified replica index.
A new Context is created for each call and discarded afterwards.
Parameters
----------
replica_index : int
The replica to propagate.
Returns
-------
elapsed_time : float
Time (in seconds) to propagate the replica.
"""
start_time = time.time()
# Retrieve state.
state_index = self.replica_states[replica_index] # index of thermodynamic state that current replica is assigned to
state = self.states[state_index] # thermodynamic state
# If temperature and pressure are specified, make sure MonteCarloBarostat is attached.
if state.temperature and state.pressure:
forces = { state.system.getForce(index).__class__.__name__ : state.system.getForce(index) for index in range(state.system.getNumForces()) }
if 'MonteCarloAnisotropicBarostat' in forces:
raise Exception('MonteCarloAnisotropicBarostat is unsupported.')
if 'MonteCarloBarostat' in forces:
barostat = forces['MonteCarloBarostat']
# Set temperature and pressure.
try:
barostat.setDefaultTemperature(state.temperature)
except AttributeError: # older OpenMM versions lack setDefaultTemperature
barostat.setTemperature(state.temperature)
barostat.setDefaultPressure(state.pressure)
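# Draw a fresh random seed so barostat trial moves are decorrelated
# across replicas and iterations.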
barostat.setRandomNumberSeed(int(np.random.randint(0, MAX_SEED)))
else:
# Create barostat and add it to the system if it doesn't have one already.
barostat = openmm.MonteCarloBarostat(state.pressure, state.temperature)
barostat.setRandomNumberSeed(int(np.random.randint(0, MAX_SEED)))
state.system.addForce(barostat)
# Create Context and integrator.
integrator = openmm.LangevinIntegrator(state.temperature, self.collision_rate, self.timestep)
integrator.setRandomNumberSeed(int(np.random.randint(0, MAX_SEED)))
if self.platform:
context = openmm.Context(state.system, integrator, self.platform)
else:
context = openmm.Context(state.system, integrator)
# Set box vectors.
box_vectors = self.replica_box_vectors[replica_index]
context.setPeriodicBoxVectors(box_vectors[0,:], box_vectors[1,:], box_vectors[2,:])
# Set positions.
positions = self.replica_positions[replica_index]
context.setPositions(positions)
setpositions_end_time = time.time()
# Assign Maxwell-Boltzmann velocities.
context.setVelocitiesToTemperature(state.temperature, int(np.random.randint(0, MAX_SEED)))
setvelocities_end_time = time.time()
# Run dynamics.
integrator.step(self.nsteps_per_iteration)
integrator_end_time = time.time()
# Store final positions
getstate_start_time = time.time()
openmm_state = context.getState(getPositions=True, enforcePeriodicBox=state.system.usesPeriodicBoundaryConditions())
getstate_end_time = time.time()
self.replica_positions[replica_index] = openmm_state.getPositions(asNumpy=True)
# Store box vectors.
self.replica_box_vectors[replica_index] = openmm_state.getPeriodicBoxVectors(asNumpy=True)
# Clean up.
del context, integrator
# Compute timing.
end_time = time.time()
elapsed_time = end_time - start_time
positions_elapsed_time = setpositions_end_time - start_time
velocities_elapsed_time = setvelocities_end_time - setpositions_end_time
integrator_elapsed_time = integrator_end_time - setvelocities_end_time
getstate_elapsed_time = getstate_end_time - integrator_end_time
logger.debug("Replica %d/%d: integrator elapsed time %.3f s (positions %.3f s | velocities %.3f s | integrate+getstate %.3f s)." % (replica_index, self.nreplicas, elapsed_time, positions_elapsed_time, velocities_elapsed_time, integrator_elapsed_time+getstate_elapsed_time))
return elapsed_time
def _propagate_replicas_mpi(self):
"""
Propagate all replicas using MPI communicator.
It is presumed all nodes have the correct configurations in the correct replica slots, but that state indices may be unsynchronized.
TODO
* Move synchronization of state information to mix_replicas?
* Broadcast from root node only?
"""
# Propagate all replicas.
logger.debug("Propagating all replicas for %.3f ps..." % (self.nsteps_per_iteration * self.timestep / unit.picoseconds))
# Run just this node's share of states.
logger.debug("Running trajectories...")
start_time = time.time()
# replica_lookup = { self.replica_states[replica_index] : replica_index for replica_index in range(self.nstates) } # replica_lookup[state_index] is the replica index currently at state 'state_index' # requires Python 2.7 features
replica_lookup = dict( (self.replica_states[replica_index], replica_index) for replica_index in range(self.nstates) ) # replica_lookup[state_index] is the replica index currently at state 'state_index' # Python 2.6 compatible
replica_indices = [ replica_lookup[state_index] for state_index in range(self.mpicomm.rank, self.nstates, self.mpicomm.size) ] # list of replica indices for this node to propagate
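# Example of the round-robin split: with nstates = 4 and mpicomm.size = 2,
# rank 0 propagates the replicas currently occupying states 0 and 2, while
# rank 1 propagates those occupying states 1 and 3.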
for replica_index in replica_indices:
logger.debug("Node %3d/%3d propagating replica %3d state %3d..." % (self.mpicomm.rank, self.mpicomm.size, replica_index, self.replica_states[replica_index]))
self._propagate_replica(replica_index)
end_time = time.time()
elapsed_time = end_time - start_time
# Collect elapsed time.
node_elapsed_times = self.mpicomm.gather(elapsed_time, root=0) # barrier
if self.mpicomm.rank == 0 and logger.isEnabledFor(logging.DEBUG):
node_elapsed_times = np.array(node_elapsed_times)
end_time = time.time()
elapsed_time = end_time - start_time
barrier_wait_times = elapsed_time - node_elapsed_times
logger.debug("Running trajectories: elapsed time %.3f s (barrier time min %.3f s | max %.3f s | avg %.3f s)" % (elapsed_time, barrier_wait_times.min(), barrier_wait_times.max(), barrier_wait_times.mean()))
logger.debug("Total time spent waiting for GPU: %.3f s" % (node_elapsed_times.sum()))
# Send final configurations and box vectors back to all nodes.
logger.debug("Synchronizing trajectories...")
start_time = time.time()
replica_indices_gather = self.mpicomm.allgather(replica_indices)
replica_positions_gather = self.mpicomm.allgather([ self.replica_positions[replica_index] for replica_index in replica_indices ])
replica_box_vectors_gather = self.mpicomm.allgather([ self.replica_box_vectors[replica_index] for replica_index in replica_indices ])
for (source, replica_indices) in enumerate(replica_indices_gather):
for (index, replica_index) in enumerate(replica_indices):
self.replica_positions[replica_index] = replica_positions_gather[source][index]
self.replica_box_vectors[replica_index] = replica_box_vectors_gather[source][index]
end_time = time.time()
logger.debug("Synchronizing configurations and box vectors: elapsed time %.3f s" % (end_time - start_time))
return
def _propagate_replicas_serial(self):
"""
Propagate all replicas using serial execution.
"""
# Propagate all replicas.
logger.debug("Propagating all replicas for %.3f ps..." % (self.nsteps_per_iteration * self.timestep / unit.picoseconds))
for replica_index in range(self.nstates):
self._propagate_replica(replica_index)
return
def _propagate_replicas(self):
"""
Propagate all replicas.
TODO
* Report on efficiency of dynamics (fraction of time wasted to overhead).
"""
start_time = time.time()
if self.mpicomm:
self._propagate_replicas_mpi()
else:
self._propagate_replicas_serial()
end_time = time.time()
elapsed_time = end_time - start_time
time_per_replica = elapsed_time / float(self.nstates)
ns_per_day = self.timestep * self.nsteps_per_iteration / time_per_replica * 24*60*60 / unit.nanoseconds
logger.debug("Time to propagate all replicas: %.3f s (%.3f per replica, %.3f ns/day)." % (elapsed_time, time_per_replica, ns_per_day))
return
def _minimize_replica(self, replica_index):
"""
Minimize the specified replica.
"""
# Retrieve thermodynamic state.
state_index = self.replica_states[replica_index] # index of thermodynamic state that current replica is assigned to
state = self.states[state_index] # thermodynamic state
# Create integrator and context.
integrator = self.mm.VerletIntegrator(1.0 * unit.femtoseconds)
context = self.mm.Context(state.system, integrator, self.platform)
# Set box vectors.
box_vectors = self.replica_box_vectors[replica_index]
context.setPeriodicBoxVectors(box_vectors[0,:], box_vectors[1,:], box_vectors[2,:])
# Set positions.
positions = self.replica_positions[replica_index]
context.setPositions(positions)
# Minimize energy.
# Minimize energy (LocalEnergyMinimizer modifies the Context in place and returns nothing).
self.mm.LocalEnergyMinimizer.minimize(context, self.minimize_tolerance, self.minimize_max_iterations)
# Store final positions
self.replica_positions[replica_index] = context.getState(getPositions=True, enforcePeriodicBox=state.system.usesPeriodicBoundaryConditions()).getPositions(asNumpy=True)
# Clean up.
del integrator, context
return
def _minimize_and_equilibrate(self):
"""
Minimize and equilibrate all replicas.
"""
# Minimize
if self.minimize:
logger.debug("Minimizing all replicas...")
if self.mpicomm:
# MPI implementation.
logger.debug("MPI implementation.")
# Minimize this node's share of replicas.
start_time = time.time()
for replica_index in range(self.mpicomm.rank, self.nstates, self.mpicomm.size):
logger.debug("node %d / %d : minimizing replica %d / %d" % (self.mpicomm.rank, self.mpicomm.size, replica_index, self.nstates))
self._minimize_replica(replica_index)
end_time = time.time()
debug_msg = 'Node {}/{}: MPI barrier'.format(self.mpicomm.rank, self.mpicomm.size)
logger.debug(debug_msg + ' - waiting for the minimization to be completed.')
self.mpicomm.barrier()
logger.debug("Running trajectories: elapsed time %.3f s" % (end_time - start_time))
# Send final configurations and box vectors back to all nodes.
logger.debug("Synchronizing trajectories...")
replica_positions_gather = self.mpicomm.allgather(self.replica_positions[self.mpicomm.rank:self.nstates:self.mpicomm.size])
replica_box_vectors_gather = self.mpicomm.allgather(self.replica_box_vectors[self.mpicomm.rank:self.nstates:self.mpicomm.size])
for replica_index in range(self.nstates):
source = replica_index % self.mpicomm.size # node with trajectory data
index = replica_index // self.mpicomm.size # index within trajectory batch
self.replica_positions[replica_index] = replica_positions_gather[source][index]
self.replica_box_vectors[replica_index] = replica_box_vectors_gather[source][index]
logger.debug("Synchronizing configurations and box vectors: elapsed time %.3f s" % (end_time - start_time))
else:
# Serial implementation.
logger.debug("Serial implementation.")
for replica_index in range(self.nstates):
logger.debug("minimizing replica %d / %d" % (replica_index, self.nstates))
self._minimize_replica(replica_index)
# Equilibrate
production_timestep = self.timestep
for iteration in range(self.number_of_equilibration_iterations):
logger.debug("equilibration iteration %d / %d" % (iteration, self.number_of_equilibration_iterations))
self._propagate_replicas()
self.timestep = production_timestep
return
def _compute_energies(self):
"""
Compute energies of all replicas at all states.
TODO
* We have to re-order Context initialization if we have variable box volume
* Parallel implementation
"""
start_time = time.time()
logger.debug("Computing energies...")
if self.mpicomm:
# MPI version.
# Compute energies for this node's share of states.
for state_index in range(self.mpicomm.rank, self.nstates, self.mpicomm.size):
for replica_index in range(self.nstates):
self.u_kl[replica_index,state_index] = self.states[state_index].reduced_potential(self.replica_positions[replica_index], box_vectors=self.replica_box_vectors[replica_index], platform=self.platform)
# Send final energies to all nodes.
energies_gather = self.mpicomm.allgather(self.u_kl[:,self.mpicomm.rank:self.nstates:self.mpicomm.size])
for state_index in range(self.nstates):
source = state_index % self.mpicomm.size # node with trajectory data
index = state_index // self.mpicomm.size # index within trajectory batch
self.u_kl[:,state_index] = energies_gather[source][:,index]
else:
# Serial version.
for state_index in range(self.nstates):
for replica_index in range(self.nstates):
self.u_kl[replica_index,state_index] = self.states[state_index].reduced_potential(self.replica_positions[replica_index], box_vectors=self.replica_box_vectors[replica_index], platform=self.platform)
end_time = time.time()
elapsed_time = end_time - start_time
time_per_energy = elapsed_time / float(self.nstates)**2
logger.debug("Time to compute all energies %.3f s (%.3f per energy calculation)." % (elapsed_time, time_per_energy))
return
def _mix_all_replicas(self):
"""
Attempt exchanges between all replicas to enhance mixing.
TODO
* Adjust nswap_attempts based on how many we can afford to do and not have mixing take a substantial fraction of iteration time.
"""
# Determine number of swaps to attempt to ensure thorough mixing.
# TODO: Replace this with analytical result computed to guarantee sufficient mixing.
nswap_attempts = self.nstates**5 # number of swaps to attempt (ideal, but too slow!)
nswap_attempts = self.nstates**3 # best compromise for pure Python?
logger.debug("Will attempt to swap all pairs of replicas, using a total of %d attempts." % nswap_attempts)
# Attempt swaps to mix replicas.
for swap_attempt in range(nswap_attempts):
# Choose replicas to attempt to swap.
i = np.random.randint(self.nstates) # Choose replica i uniformly from set of replicas.
j = np.random.randint(self.nstates) # Choose replica j uniformly from set of replicas.
# Determine which states these replicas correspond to.
istate = self.replica_states[i] # state in replica slot i
jstate = self.replica_states[j] # state in replica slot j
# Reject swap attempt if any energies are nan.
if (np.isnan(self.u_kl[i,jstate]) or np.isnan(self.u_kl[j,istate]) or np.isnan(self.u_kl[i,istate]) or np.isnan(self.u_kl[j,jstate])):
continue
# Compute log probability of swap.
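# For a proposed exchange of configurations between replicas i and j
# (occupying states istate and jstate), the Metropolis criterion gives
# P_accept = min(1, exp(-[u_istate(x_j) + u_jstate(x_i)] + [u_istate(x_i) + u_jstate(x_j)]))
# where u_k(x) is the reduced potential of configuration x in state k.
# Only previously computed entries of u_kl are needed, so no new energy
# evaluations are required for any number of swap attempts.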
log_P_accept = - (self.u_kl[i,jstate] + self.u_kl[j,istate]) + (self.u_kl[i,istate] + self.u_kl[j,jstate])
#print "replica (%3d,%3d) states (%3d,%3d) energies (%8.1f,%8.1f) %8.1f -> (%8.1f,%8.1f) %8.1f : log_P_accept %8.1f" % (i,j,istate,jstate,self.u_kl[i,istate],self.u_kl[j,jstate],self.u_kl[i,istate]+self.u_kl[j,jstate],self.u_kl[i,jstate],self.u_kl[j,istate],self.u_kl[i,jstate]+self.u_kl[j,istate],log_P_accept)
# Record that this move has been proposed.
self.Nij_proposed[istate,jstate] += 1
self.Nij_proposed[jstate,istate] += 1
# Accept or reject.
if (log_P_accept >= 0.0 or (np.random.rand() < math.exp(log_P_accept))):
# Swap states in replica slots i and j.
(self.replica_states[i], self.replica_states[j]) = (self.replica_states[j], self.replica_states[i])
# Accumulate statistics
self.Nij_accepted[istate,jstate] += 1
self.Nij_accepted[jstate,istate] += 1
return
def _mix_all_replicas_cython(self):
"""
Attempt to exchange all replicas to enhance mixing, calling code written in Cython.
"""
from . import mixing
from mixing._mix_replicas import _mix_replicas_cython
replica_states = md.utils.ensure_type(self.replica_states, np.int64, 1, "Replica States")
u_kl = md.utils.ensure_type(self.u_kl, np.float64, 2, "Reduced Potentials")
Nij_proposed = md.utils.ensure_type(self.Nij_proposed, np.int64, 2, "Nij Proposed")
Nij_accepted = md.utils.ensure_type(self.Nij_accepted, np.int64, 2, "Nij accepted")
_mix_replicas_cython(self.nstates**4, self.nstates, replica_states, u_kl, Nij_proposed, Nij_accepted)
#replica_states = np.array(self.replica_states, np.int64)
#u_kl = np.array(self.u_kl, np.float64)
#Nij_proposed = np.array(self.Nij_proposed, np.int64)
#Nij_accepted = np.array(self.Nij_accepted, np.int64)
#_mix_replicas._mix_replicas_cython(self.nstates**4, self.nstates, replica_states, u_kl, Nij_proposed, Nij_accepted)
self.replica_states = replica_states
self.Nij_proposed = Nij_proposed
self.Nij_accepted = Nij_accepted
def _mix_neighboring_replicas(self):
"""
Attempt exchanges between neighboring replicas only.
"""
logger.debug("Will attempt to swap only neighboring replicas.")
# Attempt swaps of pairs of replicas using traditional scheme (e.g. [0,1], [2,3], ...)
offset = np.random.randint(2) # offset is 0 or 1
for istate in range(offset, self.nstates-1, 2):
jstate = istate + 1 # second state to attempt to swap with i
# Determine which replicas these states correspond to.
i = None
j = None
for index in range(self.nstates):
if self.replica_states[index] == istate: i = index
if self.replica_states[index] == jstate: j = index
# Reject swap attempt if any energies are nan.
if (np.isnan(self.u_kl[i,jstate]) or np.isnan(self.u_kl[j,istate]) or np.isnan(self.u_kl[i,istate]) or np.isnan(self.u_kl[j,jstate])):
continue
# Compute log probability of swap.
log_P_accept = - (self.u_kl[i,jstate] + self.u_kl[j,istate]) + (self.u_kl[i,istate] + self.u_kl[j,jstate])
#print "replica (%3d,%3d) states (%3d,%3d) energies (%8.1f,%8.1f) %8.1f -> (%8.1f,%8.1f) %8.1f : log_P_accept %8.1f" % (i,j,istate,jstate,self.u_kl[i,istate],self.u_kl[j,jstate],self.u_kl[i,istate]+self.u_kl[j,jstate],self.u_kl[i,jstate],self.u_kl[j,istate],self.u_kl[i,jstate]+self.u_kl[j,istate],log_P_accept)
# Record that this move has been proposed.
self.Nij_proposed[istate,jstate] += 1
self.Nij_proposed[jstate,istate] += 1
# Accept or reject.
if (log_P_accept >= 0.0 or (np.random.rand() < math.exp(log_P_accept))):
# Swap states in replica slots i and j.
(self.replica_states[i], self.replica_states[j]) = (self.replica_states[j], self.replica_states[i])
# Accumulate statistics
self.Nij_accepted[istate,jstate] += 1
self.Nij_accepted[jstate,istate] += 1
return
def _mix_replicas(self):
"""
Attempt to swap replicas according to user-specified scheme.
"""
if (self.mpicomm) and (self.mpicomm.rank != 0):
# Non-root nodes receive state information.
logger.debug('Node {}/{}: MPI bcast - sharing replica_states'.format(
self.mpicomm.rank, self.mpicomm.size))
self.replica_states = self.mpicomm.bcast(self.replica_states, root=0)
return
logger.debug("Mixing replicas...")
# Reset storage to keep track of swap attempts this iteration.
self.Nij_proposed[:,:] = 0
self.Nij_accepted[:,:] = 0
# Perform swap attempts according to requested scheme.
start_time = time.time()
if self.replica_mixing_scheme == 'swap-neighbors':
self._mix_neighboring_replicas()
elif self.replica_mixing_scheme == 'swap-all':
# Try to use Cython-accelerated mixing code if possible, otherwise fall back to the pure-Python implementation.
try:
self._mix_all_replicas_cython()
except ValueError as e:
logger.warning(e.message)
self._mix_all_replicas()
elif self.replica_mixing_scheme == 'none':
# Don't mix replicas.
pass
else:
raise ParameterException("Replica mixing scheme '%s' unknown. Choose valid 'replica_mixing_scheme' parameter." % self.replica_mixing_scheme)
end_time = time.time()
# Determine fraction of swaps accepted this iteration.
nswaps_attempted = self.Nij_proposed.sum()
nswaps_accepted = self.Nij_accepted.sum()
swap_fraction_accepted = 0.0
if (nswaps_attempted > 0): swap_fraction_accepted = float(nswaps_accepted) / float(nswaps_attempted)
logger.debug("Accepted %d / %d attempted swaps (%.1f %%)" % (nswaps_accepted, nswaps_attempted, swap_fraction_accepted * 100.0))
# Estimate cumulative transition probabilities between all states.
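# The empirical transition matrix is built from cumulative counts:
# swap_Pij_accepted[i][j] = (accepted i<->j swaps) / (total swaps proposed from i),
# with the diagonal holding the leftover self-transition probability so each row sums to one.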
Nij_accepted = self.ncfile.variables['accepted'][:,:,:].sum(0) + self.Nij_accepted
Nij_proposed = self.ncfile.variables['proposed'][:,:,:].sum(0) + self.Nij_proposed
swap_Pij_accepted = np.zeros([self.nstates,self.nstates], np.float64)
for istate in range(self.nstates):
Ni = Nij_proposed[istate,:].sum()
if (Ni == 0):
swap_Pij_accepted[istate,istate] = 1.0
else:
swap_Pij_accepted[istate,istate] = 1.0 - float(Nij_accepted[istate,:].sum() - Nij_accepted[istate,istate]) / float(Ni)
for jstate in range(self.nstates):
if istate != jstate:
swap_Pij_accepted[istate,jstate] = float(Nij_accepted[istate,jstate]) / float(Ni)
if self.mpicomm:
# Root node will share state information with all replicas.
logger.debug('Node {}/{}: MPI bcast - sharing replica_states'.format(
self.mpicomm.rank, self.mpicomm.size))
self.replica_states = self.mpicomm.bcast(self.replica_states, root=0)
# Report on mixing.
logger.debug("Mixing of replicas took %.3f s" % (end_time - start_time))
return
def _accumulate_mixing_statistics(self):
"""Return the mixing transition matrix Tij."""
try:
return self._accumulate_mixing_statistics_update()
except AttributeError:
pass
except ValueError:
logger.info("Inconsistent transition count matrix detected, recalculating from scratch.")
return self._accumulate_mixing_statistics_full()
def _accumulate_mixing_statistics_full(self):
"""Compute statistics of transitions iterating over all iterations of repex."""
states = self.ncfile.variables['states']
self._Nij = np.zeros([self.nstates, self.nstates], np.float64)
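# Each observed transition contributes 0.5 to both Nij[i,j] and Nij[j,i],
# symmetrizing the count matrix (appropriate for a chain at equilibrium,
# where detailed balance makes i->j and j->i transitions equally probable).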
for iteration in range(states.shape[0]-1):
for ireplica in range(self.nstates):
istate = states[iteration, ireplica]
jstate = states[iteration + 1, ireplica]
self._Nij[istate, jstate] += 0.5
self._Nij[jstate, istate] += 0.5
Tij = np.zeros([self.nstates, self.nstates], np.float64)
for istate in range(self.nstates):
Tij[istate] = self._Nij[istate] / self._Nij[istate].sum()
return Tij
def _accumulate_mixing_statistics_update(self):
"""Compute statistics of transitions updating Nij of last iteration of repex."""
states = self.ncfile.variables['states']
if self._Nij.sum() != (states.shape[0] - 2) * self.nstates: # n_iter - 2 = (n_iter - 1) - 1. Meaning that you have exactly one new iteration to process.
raise ValueError("Inconsistent transition count matrix detected. Perhaps you tried updating twice in a row?")
for ireplica in range(self.nstates):
istate = states[self.iteration-2, ireplica]
jstate = states[self.iteration-1, ireplica]
self._Nij[istate, jstate] += 0.5
self._Nij[jstate, istate] += 0.5
Tij = np.zeros([self.nstates, self.nstates], np.float64)
for istate in range(self.nstates):
Tij[istate] = self._Nij[istate] / self._Nij[istate].sum()
return Tij
def _show_mixing_statistics(self):
if self.iteration < 2:
return
if self.mpicomm and self.mpicomm.rank != 0:
return # only the root node has access to the ncfile
if not logger.isEnabledFor(logging.DEBUG):
return
Tij = self._accumulate_mixing_statistics()
# Print observed transition probabilities.
PRINT_CUTOFF = 0.001 # Cutoff for displaying fraction of accepted swaps.
logger.debug("Cumulative symmetrized state mixing transition matrix:")
str_row = "%6s" % ""
for jstate in range(self.nstates):
str_row += "%6d" % jstate
logger.debug(str_row)
for istate in range(self.nstates):
str_row = "%-6d" % istate
for jstate in range(self.nstates):
P = Tij[istate,jstate]
if (P >= PRINT_CUTOFF):
str_row += "%6.3f" % P
else:
str_row += "%6s" % ""
logger.debug(str_row)
# Estimate second eigenvalue and equilibration time.
mu = np.linalg.eigvals(Tij)
mu = -np.sort(-mu) # sort in descending order
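# For an ergodic chain the largest eigenvalue of the transition matrix is 1;
# the subdominant eigenvalue mu[1] sets the slowest relaxation timescale of
# state mixing, roughly 1/(1 - mu[1]) iterations.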
if (mu[1] >= 1):
logger.debug("Perron eigenvalue is unity; Markov chain is decomposable.")
else:
logger.debug("Perron eigenvalue is %9.5f; state equilibration timescale is ~ %.1f iterations" % (mu[1], 1.0 / (1.0 - mu[1])))
def _initialize_netcdf(self):
"""
Initialize NetCDF file for storage.
"""
# Only root node should set up NetCDF file.
if self.mpicomm:
if self.mpicomm.rank != 0: return
# Open NetCDF 4 file for writing.
ncfile = netcdf.Dataset(self.store_filename, 'w', version='NETCDF4')
# Create dimensions.
ncfile.createDimension('iteration', 0) # unlimited number of iterations
ncfile.createDimension('replica', self.nreplicas) # number of replicas
ncfile.createDimension('atom', self.natoms) # number of atoms in system
ncfile.createDimension('spatial', 3) # number of spatial dimensions
# Set global attributes.
setattr(ncfile, 'title', self.title)
setattr(ncfile, 'application', 'YANK')
setattr(ncfile, 'program', 'yank.py')
setattr(ncfile, 'programVersion', 'unknown') # TODO: Include actual version.
setattr(ncfile, 'Conventions', 'YANK')
setattr(ncfile, 'ConventionVersion', '0.1')
# Create variables.
ncvar_positions = ncfile.createVariable('positions', 'f4', ('iteration','replica','atom','spatial'), zlib=True, chunksizes=(1,self.nreplicas,self.natoms,3))
ncvar_states = ncfile.createVariable('states', 'i4', ('iteration','replica'), zlib=False, chunksizes=(1,self.nreplicas))
ncvar_energies = ncfile.createVariable('energies', 'f8', ('iteration','replica','replica'), zlib=False, chunksizes=(1,self.nreplicas,self.nreplicas))
ncvar_proposed = ncfile.createVariable('proposed', 'i4', ('iteration','replica','replica'), zlib=False, chunksizes=(1,self.nreplicas,self.nreplicas))
ncvar_accepted = ncfile.createVariable('accepted', 'i4', ('iteration','replica','replica'), zlib=False, chunksizes=(1,self.nreplicas,self.nreplicas))
ncvar_box_vectors = ncfile.createVariable('box_vectors', 'f4', ('iteration','replica','spatial','spatial'), zlib=False, chunksizes=(1,self.nreplicas,3,3))
ncvar_volumes = ncfile.createVariable('volumes', 'f8', ('iteration','replica'), zlib=False, chunksizes=(1,self.nreplicas))
# Define units for variables.
setattr(ncvar_positions, 'units', 'nm')
setattr(ncvar_states, 'units', 'none')
setattr(ncvar_energies, 'units', 'kT')
setattr(ncvar_proposed, 'units', 'none')
setattr(ncvar_accepted, 'units', 'none')
setattr(ncvar_box_vectors, 'units', 'nm')
setattr(ncvar_volumes, 'units', 'nm**3')
# Define long (human-readable) names for variables.
setattr(ncvar_positions, "long_name", "positions[iteration][replica][atom][spatial] is position of coordinate 'spatial' of atom 'atom' from replica 'replica' for iteration 'iteration'.")
setattr(ncvar_states, "long_name", "states[iteration][replica] is the state index (0..nstates-1) of replica 'replica' of iteration 'iteration'.")
setattr(ncvar_energies, "long_name", "energies[iteration][replica][state] is the reduced (unitless) energy of replica 'replica' from iteration 'iteration' evaluated at state 'state'.")
setattr(ncvar_proposed, "long_name", "proposed[iteration][i][j] is the number of proposed transitions between states i and j from iteration 'iteration-1'.")
setattr(ncvar_accepted, "long_name", "accepted[iteration][i][j] is the number of proposed transitions between states i and j from iteration 'iteration-1'.")
setattr(ncvar_box_vectors, "long_name", "box_vectors[iteration][replica][i][j] is dimension j of box vector i for replica 'replica' from iteration 'iteration-1'.")
setattr(ncvar_volumes, "long_name", "volume[iteration][replica] is the box volume for replica 'replica' from iteration 'iteration-1'.")
# Create timestamp variable.
ncvar_timestamp = ncfile.createVariable('timestamp', str, ('iteration',), zlib=False, chunksizes=(1,))
# Create group for performance statistics.
ncgrp_timings = ncfile.createGroup('timings')
ncvar_iteration_time = ncgrp_timings.createVariable('iteration', 'f', ('iteration',), zlib=False, chunksizes=(1,)) # total iteration time (seconds)
ncvar_iteration_time = ncgrp_timings.createVariable('mixing', 'f', ('iteration',), zlib=False, chunksizes=(1,)) # time for mixing
ncvar_iteration_time = ncgrp_timings.createVariable('propagate', 'f', ('iteration','replica'), zlib=False, chunksizes=(1,self.nreplicas)) # total time to propagate each replica
# Store thermodynamic states.
self._store_thermodynamic_states(ncfile)
# Store run options
self._store_options(ncfile)
# Store metadata.
if self.metadata:
self._store_metadata(ncfile)
# Force sync to disk to avoid data loss.
ncfile.sync()
# Store netcdf file handle.
self.ncfile = ncfile
return
@delayed_termination
def _write_iteration_netcdf(self):
"""
Write positions, states, and energies of current iteration to NetCDF file.
"""
if self.mpicomm:
# Only the root node will write data.
if self.mpicomm.rank != 0: return
initial_time = time.time()
# Store replica positions.
for replica_index in range(self.nstates):
positions = self.replica_positions[replica_index]
x = positions / unit.nanometers
self.ncfile.variables['positions'][self.iteration,replica_index,:,:] = x[:,:]
# Store box vectors and volume.
for replica_index in range(self.nstates):
state_index = self.replica_states[replica_index]
state = self.states[state_index]
box_vectors = self.replica_box_vectors[replica_index]
for i in range(3):
self.ncfile.variables['box_vectors'][self.iteration,replica_index,i,:] = (box_vectors[i] / unit.nanometers)
volume = state._volume(box_vectors)
self.ncfile.variables['volumes'][self.iteration,replica_index] = volume / (unit.nanometers**3)
# Store state information.
self.ncfile.variables['states'][self.iteration,:] = self.replica_states[:]
# Store energies.
self.ncfile.variables['energies'][self.iteration,:,:] = self.u_kl[:,:]
# Store mixing statistics.
# TODO: Write mixing statistics for this iteration?
self.ncfile.variables['proposed'][self.iteration,:,:] = self.Nij_proposed[:,:]
self.ncfile.variables['accepted'][self.iteration,:,:] = self.Nij_accepted[:,:]
# Store timestamp this iteration was written.
self.ncfile.variables['timestamp'][self.iteration] = time.ctime()
# Force sync to disk to avoid data loss.
presync_time = time.time()
self.ncfile.sync()
# Print statistics.
final_time = time.time()
sync_time = final_time - presync_time
elapsed_time = final_time - initial_time
logger.debug("Writing data to NetCDF file took %.3f s (%.3f s for sync)" % (elapsed_time, sync_time))
return
def _run_sanity_checks(self):
"""
Run some checks on current state information to see if something has gone wrong that precludes continuation.
"""
abort = False
# Check positions.
for replica_index in range(self.nreplicas):
positions = self.replica_positions[replica_index]
x = positions / unit.nanometers
if np.any(np.isnan(x)):
logger.warning("nan encountered in replica %d positions." % replica_index)
abort = True
# Check energies.
for replica_index in range(self.nreplicas):
if np.any(np.isnan(self.u_kl[replica_index,:])):
logger.warning("nan encountered in u_kl state energies for replica %d" % replica_index)
abort = True
if abort:
if self.mpicomm:
self.mpicomm.Abort()
else:
raise Exception("Aborting.")
return
def _store_thermodynamic_states(self, ncfile):
"""
Store the thermodynamic states in a NetCDF file.
"""
logger.debug("Storing thermodynamic states in NetCDF file...")
initial_time = time.time()
# Create a group to store state information.
ncgrp_stateinfo = ncfile.createGroup('thermodynamic_states')
# Get number of states.
ncvar_nstates = ncgrp_stateinfo.createVariable('nstates', int)
ncvar_nstates.assignValue(self.nstates)
# Temperatures.
ncvar_temperatures = ncgrp_stateinfo.createVariable('temperatures', 'f', ('replica',))
setattr(ncvar_temperatures, 'units', 'K')
setattr(ncvar_temperatures, 'long_name', "temperatures[state] is the temperature of thermodynamic state 'state'")
for state_index in range(self.nstates):
ncvar_temperatures[state_index] = self.states[state_index].temperature / unit.kelvin
# Pressures.
if self.states[0].pressure is not None:
ncvar_temperatures = ncgrp_stateinfo.createVariable('pressures', 'f', ('replica',))
setattr(ncvar_temperatures, 'units', 'atm')
setattr(ncvar_temperatures, 'long_name', "pressures[state] is the external pressure of thermodynamic state 'state'")
for state_index in range(self.nstates):
ncvar_temperatures[state_index] = self.states[state_index].pressure / unit.atmospheres
# TODO: Store other thermodynamic variables store in ThermodynamicState? Generalize?
# Systems.
ncvar_serialized_states = ncgrp_stateinfo.createVariable('systems', str, ('replica',), zlib=True)
setattr(ncvar_serialized_states, 'long_name', "systems[state] is the serialized OpenMM System corresponding to the thermodynamic state 'state'")
for state_index in range(self.nstates):
logger.debug("Serializing state %d..." % state_index)
serialized = self.states[state_index].system.__getstate__()
logger.debug("Serialized state is %d B | %.3f KB | %.3f MB" % (len(serialized), len(serialized) / 1024.0, len(serialized) / 1024.0 / 1024.0))
ncvar_serialized_states[state_index] = serialized
final_time = time.time()
elapsed_time = final_time - initial_time
logger.debug("Serializing thermodynamic states took %.3f s." % elapsed_time)
return
def _restore_thermodynamic_states(self, ncfile):
"""
Restore the thermodynamic states from a NetCDF file.
"""
logger.debug("Restoring thermodynamic states from NetCDF file...")
initial_time = time.time()
# Make sure this NetCDF file contains thermodynamic state information.
if not 'thermodynamic_states' in ncfile.groups:
raise Exception("Could not restore thermodynamic states from %s" % self.store_filename)
# Create a group to store state information.
ncgrp_stateinfo = ncfile.groups['thermodynamic_states']
# Get number of states.
self.nstates = ncgrp_stateinfo.variables['nstates'].getValue()
# Read state information.
self.states = list()
for state_index in range(self.nstates):
# Populate a new ThermodynamicState object.
state = ThermodynamicState()
# Read temperature.
state.temperature = float(ncgrp_stateinfo.variables['temperatures'][state_index]) * unit.kelvin
# Read pressure, if present.
if 'pressures' in ncgrp_stateinfo.variables:
state.pressure = float(ncgrp_stateinfo.variables['pressures'][state_index]) * unit.atmospheres
# Reconstitute System object.
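# OpenMM System objects support pickling: __getstate__ returns the XML
# serialization of the System, and __setstate__ restores from that XML,
# which is why serialized strings round-trip through the NetCDF file.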
state.system = self.mm.System()
state.system.__setstate__(str(ncgrp_stateinfo.variables['systems'][state_index]))
# Store state.
self.states.append(state)
final_time = time.time()
elapsed_time = final_time - initial_time
logger.debug("Restoring thermodynamic states from NetCDF file took %.3f s." % elapsed_time)
return True
def _store_dict_in_netcdf(self, ncgrp, options):
"""
Store the contents of a dict in a NetCDF file.
Parameters
----------
ncgrp : netcdf.Dataset group
The group in which to store options.
options : dict
The dict to store.
"""
from utils import typename
import collections
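# Encoding scheme used for each dict entry: the value becomes one NetCDF
# variable named after the key, with the Python type name recorded in a
# 'type' attribute (used by _restore_dict_from_netcdf to cast on read).
# Quantities are split into a bare value plus a 'units' attribute, bools are
# stored as ints, None becomes an int 0 tagged 'NoneType', and iterables get
# a dimension of their own with one entry per element.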
for option_name in options.keys():
# Get option value.
option_value = options[option_name]
# If Quantity, strip off units first.
option_unit = None
if type(option_value) == unit.Quantity:
option_unit = option_value.unit
option_value = option_value / option_unit
# Store the Python type.
option_type = type(option_value)
option_type_name = typename(option_type)
# Handle booleans
if type(option_value) == bool:
option_value = int(option_value)
# Store the variable.
if type(option_value) == str:
ncvar = ncgrp.createVariable(option_name, type(option_value), 'scalar')
packed_data = np.empty(1, 'O')
packed_data[0] = option_value
ncvar[:] = packed_data
setattr(ncvar, 'type', option_type_name)
elif isinstance(option_value, collections.Iterable):
nelements = len(option_value)
element_type = type(option_value[0])
element_type_name = typename(element_type)
ncgrp.createDimension(option_name, nelements) # one dimension entry per element
ncvar = ncgrp.createVariable(option_name, element_type, (option_name,))
for (i, element) in enumerate(option_value):
ncvar[i] = element
setattr(ncvar, 'type', element_type_name)
elif option_value is None:
ncvar = ncgrp.createVariable(option_name, int)
ncvar.assignValue(0)
setattr(ncvar, 'type', option_type_name)
else:
ncvar = ncgrp.createVariable(option_name, type(option_value))
ncvar.assignValue(option_value)
setattr(ncvar, 'type', option_type_name)
# Log value (truncate if too long but save length)
if hasattr(option_value, '__len__'):
logger.debug("Storing option: {} -> {} (type: {}, length {})".format(
option_name, str(option_value)[:500], option_type_name, len(option_value)))
else:
logger.debug("Storing option: {} -> {} (type: {})".format(
option_name, option_value, option_type_name))
if option_unit: setattr(ncvar, 'units', str(option_unit))
return
def _restore_dict_from_netcdf(self, ncgrp):
"""
Restore dict from NetCDF.
Parameters
----------
ncgrp : netcdf.Dataset group
The NetCDF group to restore from.
Returns
-------
options : dict
The restored options as a dict.
"""
options = dict()
import numpy # needed so the eval() calls below can resolve type names like 'numpy.float64'
for option_name in ncgrp.variables.keys():
# Get NetCDF variable.
option_ncvar = ncgrp.variables[option_name]
type_name = getattr(option_ncvar, 'type')
# Get option value.
if type_name == 'NoneType':
option_value = None
elif option_ncvar.shape == ():
option_value = option_ncvar.getValue()
# Cast to python types.
if type_name == 'bool':
option_value = bool(option_value)
elif type_name == 'int':
option_value = int(option_value)
elif type_name == 'float':
option_value = float(option_value)
elif type_name == 'str':
option_value = str(option_value)
elif (option_ncvar.shape[0] >= 0):
option_value = np.array(option_ncvar[:], eval(type_name))
# TODO: Deal with values that are actually scalar constants.
# TODO: Cast to appropriate type
else:
option_value = option_ncvar[0]
option_value = eval(type_name + '(' + repr(option_value) + ')')
# Log value (truncate if too long but save length)
if hasattr(option_value, '__len__'):
logger.debug("Restoring option: {} -> {} (type: {}, length {})".format(
option_name, str(option_value)[:500], type(option_value), len(option_value)))
else:
logger.debug("Retoring option: {} -> {} (type: {})".format(
option_name, option_value, type(option_value)))
# If Quantity, assign unit.
if hasattr(option_ncvar, 'units'):
option_unit_name = getattr(option_ncvar, 'units')
if option_unit_name[0] == '/':
option_value = eval(str(option_value) + option_unit_name, unit.__dict__)
else:
option_value = eval(str(option_value) + '*' + option_unit_name, unit.__dict__)
# Store option.
options[option_name] = option_value
return options
def _store_options(self, ncfile):
"""
Store run parameters in NetCDF file.
"""
logger.debug("Storing run parameters in NetCDF file...")
# Create scalar dimension if not already present.
if 'scalar' not in ncfile.dimensions:
ncfile.createDimension('scalar', 1) # scalar dimension
# Create a group to store state information.
ncgrp_options = ncfile.createGroup('options')
# Build dict of options to store.
options = dict()
for option_name in self.options_to_store:
option_value = getattr(self, option_name)
options[option_name] = option_value
# Store options.
self._store_dict_in_netcdf(ncgrp_options, options)
return
def _restore_options(self, ncfile):
"""
Restore run parameters from NetCDF file.
"""
logger.debug("Attempting to restore options from NetCDF file...")
# Make sure this NetCDF file contains option information
if not 'options' in ncfile.groups:
raise Exception("options not found in NetCDF file.")
# Find the group.
ncgrp_options = ncfile.groups['options']
# Restore options as dict.
options = self._restore_dict_from_netcdf(ncgrp_options)
# Set these as attributes.
for option_name in options.keys():
setattr(self, option_name, options[option_name])
# Signal success.
return True
def _store_metadata(self, ncfile):
"""
Store metadata in NetCDF file.
Parameters
----------
ncfile : netcdf.Dataset
The NetCDF file in which metadata is to be stored.
"""
ncgrp = ncfile.createGroup('metadata')
self._store_dict_in_netcdf(ncgrp, self.metadata)
return
def _restore_metadata(self, ncfile):
"""
Restore metadata from NetCDF file.
Parameters
----------
ncfile : netcdf.Dataset
The NetCDF file from which metadata is to be restored.
"""
self.metadata = None
if 'metadata' in ncfile.groups:
ncgrp = ncfile.groups['metadata']
self.metadata = self._restore_dict_from_netcdf(ncgrp)
def _resume_from_netcdf(self, ncfile):
"""
Resume execution by reading current positions and energies from a NetCDF file.
Parameters
----------
ncfile : netcdf.Dataset
The NetCDF file from which the simulation state is to be restored.
"""
# TODO: Perform sanity check on file before resuming
# Get current dimensions.
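# The 'positions' variable has shape (iteration, replica, atom, spatial);
# the last complete frame written is therefore index shape[0] - 1.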
self.iteration = ncfile.variables['positions'].shape[0] - 1
self.nstates = ncfile.variables['positions'].shape[1]
self.natoms = ncfile.variables['positions'].shape[2]
self.nreplicas = self.nstates
logger.debug("iteration = %d, nstates = %d, natoms = %d" % (self.iteration, self.nstates, self.natoms))
# Restore positions.
self.replica_positions = list()
for replica_index in range(self.nstates):
x = ncfile.variables['positions'][self.iteration,replica_index,:,:].astype(np.float64).copy()
positions = unit.Quantity(x, unit.nanometers)
self.replica_positions.append(positions)
# Restore box vectors.
self.replica_box_vectors = list()
for replica_index in range(self.nstates):
x = ncfile.variables['box_vectors'][self.iteration,replica_index,:,:].astype(np.float64).copy()
box_vectors = unit.Quantity(x, unit.nanometers)
self.replica_box_vectors.append(box_vectors)
# Restore state information.
self.replica_states = ncfile.variables['states'][self.iteration,:].copy()
# Restore energies.
self.u_kl = ncfile.variables['energies'][self.iteration,:,:].copy()
def _show_energies(self):
"""
Show energies (in units of kT) for all replicas at all states.
"""
if not logger.isEnabledFor(logging.DEBUG):
return
# print header
str_row = "%-24s %16s" % ("reduced potential (kT)", "current state")
for state_index in range(self.nstates):
str_row += " state %3d" % state_index
logger.debug(str_row)
# print energies in kT
for replica_index in range(self.nstates):
str_row = "replica %-16d %16d" % (replica_index, self.replica_states[replica_index])
for state_index in range(self.nstates):
u = self.u_kl[replica_index,state_index]
if (u > 1e6):
str_row += "%10.3e" % u
else:
str_row += "%10.1f" % u
logger.debug(str_row)
return
def _compute_trace(self):
"""
Compute trace for replica ensemble minus log probability.
Extract the timeseries u_n = - log q(X_n) from the store file,
where q(X_n) = \prod_{k=1}^K \exp[-u_{s_{nk}}(x_{nk})], so that
u_n = \sum_{k=1}^K u_{s_{nk}}(x_{nk}),
with X_n = [x_{n1}, ..., x_{nK}] the collection of replica configurations at iteration n,
s_{nk} the state of replica k at iteration n, and
u_k(x) the kth reduced potential.
Returns
-------
u_n : numpy array of numpy.float64
u_n[n] is -log q(X_n)
TODO
----
* Later, we should have this quantity computed and stored on the fly in the store file.
But we may want to do this without breaking backward compatibility.
"""
# Get current dimensions.
niterations = self.ncfile.variables['energies'].shape[0]
nstates = self.ncfile.variables['energies'].shape[1]
# Extract energies (the 'energies' variable has shape [niterations, nstates, nstates]; there is no atom dimension here).
energies = self.ncfile.variables['energies']
u_kln_replica = np.zeros([nstates, nstates, niterations], np.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
# Deconvolute replicas
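# u_kln_replica is indexed by replica slot; reindex by the state each slot
# occupied at every iteration so that u_kln[k,l,n] is the energy of the
# configuration sampled from state k at iteration n, evaluated at state l.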
u_kln = np.zeros([nstates, nstates, niterations], np.float64)
for iteration in range(niterations):
state_indices = self.ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
# Compute total negative log probability over all iterations.
u_n = np.zeros([niterations], np.float64)
for iteration in range(niterations):
u_n[iteration] = np.sum(np.diagonal(u_kln[:,:,iteration]))
return u_n
def _analysis(self):
"""
Perform online analysis each iteration.
Every iteration, this will update the estimate of the state relative free energy differences and statistical uncertainties.
We can additionally request further analysis.
"""
# Only root node can perform analysis.
if self.mpicomm and (self.mpicomm.rank != 0): return
# Determine how many iterations there are data available for.
replica_states = self.ncfile.variables['states'][:,:]
u_nkl_replica = self.ncfile.variables['energies'][:,:,:]
# Determine number of iterations completed.
number_of_iterations_completed = replica_states.shape[0]
nstates = replica_states.shape[1]
# Online analysis can only be performed after a sufficient quantity of data has been collected.
if (number_of_iterations_completed < self.online_analysis_min_iterations):
logger.debug("Online analysis will be performed after %d iterations have elapsed." % self.online_analysis_min_iterations)
self.analysis = None
return
# Deconvolute replicas and compute the total negative log probability timeseries u_n.
u_kln = np.zeros([nstates, nstates, number_of_iterations_completed], np.float32)
u_n = np.zeros([number_of_iterations_completed], np.float64)
for iteration in range(number_of_iterations_completed):
state_indices = replica_states[iteration,:]
u_n[iteration] = 0.0
for replica_index in range(nstates):
state_index = state_indices[replica_index]
u_n[iteration] += u_nkl_replica[iteration,replica_index,state_index]
u_kln[state_index,:,iteration] = u_nkl_replica[iteration,replica_index,:]
# Determine optimal equilibration time, statistical inefficiency, and effectively uncorrelated sample indices.
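# detectEquilibration scans candidate equilibration times t0 and picks the one
# maximizing the number of effectively uncorrelated samples; the production
# region u_n[t0:] is then thinned by the statistical inefficiency g so that
# MBAR sees approximately independent samples.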
from pymbar import timeseries
[t0, g, Neff_max] = timeseries.detectEquilibration(u_n)
indices = t0 + timeseries.subsampleCorrelatedData(u_n[t0:], g=g)
N_k = indices.size * np.ones([nstates], np.int32)
# Next, analyze with pymbar, initializing with last estimate of free energies.
from pymbar import MBAR
if hasattr(self, 'f_k'):
mbar = MBAR(u_kln[:,:,indices], N_k, initial_f_k=self.f_k)
else:
mbar = MBAR(u_kln[:,:,indices], N_k)
# Cache current free energy estimate to save time in future MBAR solutions.
self.f_k = mbar.f_k
# Compute entropy and enthalpy.
[Delta_f_ij, dDelta_f_ij, Delta_u_ij, dDelta_u_ij, Delta_s_ij, dDelta_s_ij] = mbar.computeEntropyAndEnthalpy()
# Store analysis summary.
# TODO: Convert this to an object?
analysis = dict()
analysis['equilibration_end'] = t0
analysis['g'] = g
analysis['indices'] = indices
analysis['Delta_f_ij'] = Delta_f_ij
analysis['dDelta_f_ij'] = dDelta_f_ij
analysis['Delta_u_ij'] = Delta_u_ij
analysis['dDelta_u_ij'] = dDelta_u_ij
analysis['Delta_s_ij'] = Delta_s_ij
analysis['dDelta_s_ij'] = dDelta_s_ij
def matrix2str(x):
"""
Return a print-ready string version of a matrix of numbers.
Parameters
----------
x : numpy.array of nrows x ncols matrix
Matrix of numbers to print.
TODO
----
* Automatically determine optimal spacing
"""
[nrows, ncols] = x.shape
str_row = ""
for i in range(nrows):
for j in range(ncols):
str_row += "%8.3f" % x[i, j]
str_row += "\n"
return str_row
# Print estimate
if logger.isEnabledFor(logging.DEBUG):
logger.debug("================================================================================")
logger.debug("Online analysis estimate of free energies:")
logger.debug(" equilibration end: %d iterations" % t0)
logger.debug(" statistical inefficiency: %.1f iterations" % g)
logger.debug(" effective number of uncorrelated samples: %.1f" % Neff_max)
logger.debug("Reduced free energy (f), enthalpy (u), and entropy (s) differences among thermodynamic states:")
logger.debug("Delta_f_ij")
logger.debug(matrix2str(Delta_f_ij))
logger.debug("dDelta_f_ij")
logger.debug(matrix2str(dDelta_f_ij))
logger.debug("Delta_u_ij")
logger.debug(matrix2str(Delta_u_ij))
logger.debug("dDelta_u_ij")
logger.debug(matrix2str(dDelta_u_ij))
logger.debug("Delta_s_ij")
logger.debug(matrix2str(Delta_s_ij))
logger.debug("dDelta_s_ij")
logger.debug(matrix2str(dDelta_s_ij))
logger.debug("================================================================================")
self.analysis = analysis
return
def analyze(self):
"""
Analyze the current simulation and return estimated free energies.
Returns
-------
analysis : dict
Analysis object containing end of equilibrated region, statistical inefficiency, and free energy differences:
Keys
----
equilibration_end : int
The last iteration in the discarded equilibrated region
g : float
Estimated statistical inefficiency of production region
indices : list of int
Equilibrated, effectively uncorrelated iteration indices used in analysis
Delta_f_ij : numpy array of nstates x nstates
Delta_f_ij[i,j] is the free energy difference f_j - f_i in units of kT
dDelta_f_ij : numpy array of nstates x nstates
dDelta_f_ij[i,j] is estimated standard error of Delta_f_ij[i,j]
Delta_u_ij
Delta_u_ij[i,j] is the reduced enthalpy difference u_j - u_i in units of kT
dDelta_u_ij
dDelta_u_ij[i,j] is estimated standard error of Delta_u_ij[i,j]
Delta_s_ij
Delta_s_ij[i,j] is the reduced entropic contribution to the free energy difference s_j - s_i in units of kT
dDelta_s_ij
dDelta_s_ij[i,j] is estimated standard error of Delta_s_ij[i,j]
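Examples
--------
A minimal sketch (assumes a simulation object that has already been created
and run, as in the ParallelTempering class docstring):
>>> analysis = simulation.analyze()                 # doctest: +SKIP
>>> Delta_f_ij = analysis['Delta_f_ij']             # free energy differences in kT
>>> dDelta_f_ij = analysis['dDelta_f_ij']           # their statistical uncertainties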
"""
if not self._initialized:
self._initialize_resume()
# Update analysis on root node.
self._analysis()
if self.mpicomm: self.analysis = self.mpicomm.bcast(self.analysis, root=0) # broadcast analysis from root node
# Return analysis object
return self.analysis
#=============================================================================================
# Parallel tempering
#=============================================================================================
class ParallelTempering(ReplicaExchange):
"""
Parallel tempering simulation facility.
DESCRIPTION
This class provides a facility for parallel tempering simulations. It is a subclass of ReplicaExchange, but adds
convenience methods and efficiency improvements specific to parallel tempering, so it should be preferred for
this type of simulation. In particular, the System need only be specified once, while the temperatures (or a
temperature range) are used to automatically build a set of ThermodynamicState objects for replica exchange.
The efficiency improvements exploit the fact that the reduced potentials are linear in inverse temperature.
EXAMPLES
Parallel tempering of alanine dipeptide in implicit solvent.
>>> # Create alanine dipeptide test system.
>>> from openmmtools import testsystems
>>> testsystem = testsystems.AlanineDipeptideImplicit()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> # Create temporary file for storing output.
>>> import tempfile
>>> file = tempfile.NamedTemporaryFile() # temporary file for testing
>>> store_filename = file.name
>>> # Initialize parallel tempering on an exponentially-spaced scale
>>> Tmin = 298.0 * unit.kelvin
>>> Tmax = 600.0 * unit.kelvin
>>> nreplicas = 3
>>> simulation = ParallelTempering(store_filename)
>>> simulation.create(system, positions, Tmin=Tmin, Tmax=Tmax, ntemps=nreplicas)
>>> simulation.number_of_iterations = 2 # set the simulation to only run 2 iterations
>>> simulation.timestep = 2.0 * unit.femtoseconds # set the timestep for integration
>>> simulation.minimize = False
>>> simulation.nsteps_per_iteration = 50 # run 50 timesteps per iteration
>>> # Run simulation.
>>> simulation.run() # run the simulation
Parallel tempering of alanine dipeptide in explicit solvent at 1 atm.
>>> # Create alanine dipeptide system
>>> from openmmtools import testsystems
>>> testsystem = testsystems.AlanineDipeptideExplicit()
>>> [system, positions] = [testsystem.system, testsystem.positions]
>>> # Add Monte Carlo barostat to system (must be same pressure as simulation).
>>> import simtk.openmm as openmm
>>> pressure = 1.0 * unit.atmosphere
>>> # Create temporary file for storing output.
>>> import tempfile
>>> file = tempfile.NamedTemporaryFile() # temporary file for testing
>>> store_filename = file.name
>>> # Initialize parallel tempering on an exponentially-spaced scale
>>> Tmin = 298.0 * unit.kelvin
>>> Tmax = 600.0 * unit.kelvin
>>> nreplicas = 3
>>> simulation = ParallelTempering(store_filename)
>>> simulation.create(system, positions, Tmin=Tmin, Tmax=Tmax, pressure=pressure, ntemps=nreplicas)
>>> simulation.number_of_iterations = 2 # set the simulation to only run 2 iterations
>>> simulation.timestep = 2.0 * unit.femtoseconds # set the timestep for integration
>>> simulation.nsteps_per_iteration = 50 # run 50 timesteps per iteration
>>> simulation.minimize = False # don't minimize first
>>> # Run simulation.
>>> simulation.run() # run the simulation
"""
def create(self, system, positions, options=None, Tmin=None, Tmax=None, ntemps=None, temperatures=None, pressure=None, metadata=None):
"""
Initialize a parallel tempering simulation object.
Parameters
----------
system : simtk.openmm.System
the system to simulate
positions : simtk.unit.Quantity of np natoms x 3 array of units length, or list
coordinate set(s) for one or more replicas, assigned in a round-robin fashion
Tmin : simtk.unit.Quantity with units compatible with kelvin, optional, default=None
min temperature
Tmax : simtk.unit.Quantity with units compatible with kelvin, optional, default=None
max temperature
ntemps : int, optional, default=None
number of exponentially-spaced temperatures between Tmin and Tmax
temperatures : list of simtk.unit.Quantity with units compatible with kelvin, optional, default=None
if specified, this list of temperatures will be used instead of (Tmin, Tmax, ntemps)
pressure : simtk.unit.Quantity with units compatible with atmospheres, optional, default=None
if specified, a MonteCarloBarostat will be added (or modified) to perform NPT simulations
options : dict, optional, default=None
Options to use for specifying simulation protocol. Provided keywords will be matched to object variables to replace defaults.
Notes
-----
Either (Tmin, Tmax, ntemps) must all be specified or the list of 'temperatures' must be specified.
"""
# Create thermodynamic states from temperatures.
if temperatures is not None:
logger.info("Using provided temperatures")
self.temperatures = temperatures
elif (Tmin is not None) and (Tmax is not None) and (ntemps is not None):
self.temperatures = [ Tmin + (Tmax - Tmin) * (math.exp(float(i) / float(ntemps-1)) - 1.0) / (math.e - 1.0) for i in range(ntemps) ]
else:
raise ValueError("Either 'temperatures' or 'Tmin', 'Tmax', and 'ntemps' must be provided.")
states = [ ThermodynamicState(system=system, temperature=temperature, pressure=pressure) for temperature in self.temperatures ] # iterate over self.temperatures, since ntemps is None when 'temperatures' was given
# Initialize replica-exchange simulation.
ReplicaExchange.create(self, states, positions, options=options, metadata=metadata)
# Override title.
self.title = 'Parallel tempering simulation created using ParallelTempering class of repex.py on %s' % time.asctime(time.localtime())
return
def _compute_energies(self):
"""
Compute reduced potentials of all replicas at all states (temperatures).
NOTES
Because only the temperatures differ among replicas, we replace the generic O(N^2) replica-exchange implementation with an O(N) implementation.
"""
start_time = time.time()
logger.debug("Computing energies...")
if self.mpicomm:
# MPI implementation
# Create an integrator and context.
state = self.states[0]
integrator = self.mm.VerletIntegrator(self.timestep)
context = self.mm.Context(state.system, integrator, self.platform)
for replica_index in range(self.mpicomm.rank, self.nstates, self.mpicomm.size):
# Set positions.
context.setPositions(self.replica_positions[replica_index])
# Compute potential energy.
openmm_state = context.getState(getEnergy=True)
potential_energy = openmm_state.getPotentialEnergy()
# Compute energies at this state for all replicas.
for state_index in range(self.nstates):
# Compute reduced potential
beta = 1.0 / (kB * self.states[state_index].temperature)
self.u_kl[replica_index,state_index] = beta * potential_energy
# Gather energies.
energies_gather = self.mpicomm.allgather(self.u_kl[self.mpicomm.rank:self.nstates:self.mpicomm.size,:])
for replica_index in range(self.nstates):
source = replica_index % self.mpicomm.size # node with trajectory data
index = replica_index // self.mpicomm.size # index within trajectory batch
self.u_kl[replica_index,:] = energies_gather[source][index]
# Clean up.
del context, integrator
else:
# Serial implementation.
# Create an integrator and context.
state = self.states[0]
integrator = self.mm.VerletIntegrator(self.timestep)
context = self.mm.Context(state.system, integrator, self.platform)
# Compute reduced potentials for all configurations in all states.
for replica_index in range(self.nstates):
# Set positions.
context.setPositions(self.replica_positions[replica_index])
# Compute potential energy.
openmm_state = context.getState(getEnergy=True)
potential_energy = openmm_state.getPotentialEnergy()
# Compute energies at this state for all replicas.
for state_index in range(self.nstates):
# Compute reduced potential
beta = 1.0 / (kB * self.states[state_index].temperature)
self.u_kl[replica_index,state_index] = beta * potential_energy
# Clean up.
del context, integrator
end_time = time.time()
elapsed_time = end_time - start_time
time_per_energy = elapsed_time / float(self.nstates)
logger.debug("Time to compute all energies %.3f s (%.3f per energy calculation).\n" % (elapsed_time, time_per_energy))
return
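# Standalone sketch (illustrative, not part of the repex API) of the
# exponentially-spaced temperature ladder built in ParallelTempering.create
# above; the spacing grows with temperature, which roughly equalizes exchange
# acceptance rates between neighboring replicas.
def _sketch_exponential_ladder(tmin, tmax, ntemps):
    import math
    return [tmin + (tmax - tmin) * (math.exp(float(i) / float(ntemps - 1)) - 1.0) / (math.e - 1.0)
            for i in range(ntemps)]
# Example: _sketch_exponential_ladder(298.0, 600.0, 3) -> [298.0, ~412.0, 600.0]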
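# Minimal standalone sketch (names are illustrative) of the O(N) energy trick
# used by ParallelTempering._compute_energies above: the reduced potential is
# u = beta * U, so each replica's potential energy is evaluated once and then
# rescaled for every temperature instead of being recomputed per state.
def _sketch_reduced_potentials(potential_energies, temperatures, kB=0.0083144621):
    """Return u[k, l] = U_k / (kB * T_l); energies in kJ/mol, temperatures in K."""
    import numpy as np
    U = np.asarray(potential_energies, dtype=float)             # shape (K,)
    beta = 1.0 / (kB * np.asarray(temperatures, dtype=float))   # shape (L,)
    return np.outer(U, beta)                                    # shape (K, L)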
#=============================================================================================
# Hamiltonian exchange
#=============================================================================================
class HamiltonianExchange(ReplicaExchange):
"""
Hamiltonian exchange simulation facility.
DESCRIPTION
This class provides an implementation of a Hamiltonian exchange simulation based on the ReplicaExchange facility.
It provides several convenience methods and efficiency improvements, and should be preferred over ReplicaExchange for
Hamiltonian exchange simulations when possible.
EXAMPLES
>>> # Create reference system
>>> from openmmtools import testsystems
>>> testsystem = testsystems.AlanineDipeptideImplicit()
>>> [reference_system, positions] = [testsystem.system, testsystem.positions]
>>> # Use the same reference system for every replica (identical Hamiltonians).
>>> systems = [reference_system for index in range(10)]
>>> # Create temporary file for storing output.
>>> import tempfile
>>> file = tempfile.NamedTemporaryFile() # temporary file for testing
>>> store_filename = file.name
>>> # Create reference state.
>>> reference_state = ThermodynamicState(reference_system, temperature=298.0*unit.kelvin)
>>> # Create simulation.
>>> simulation = HamiltonianExchange(store_filename)
>>> simulation.create(reference_state, systems, positions)
>>> simulation.number_of_iterations = 2 # set the simulation to only run 2 iterations
>>> simulation.timestep = 2.0 * unit.femtoseconds # set the timestep for integration
>>> simulation.nsteps_per_iteration = 50 # run 50 timesteps per iteration
>>> simulation.minimize = False
>>> # Run simulation.
>>> simulation.run() #doctest: +ELLIPSIS
...
"""
def create(self, reference_state, systems, positions, options=None, metadata=None):
"""
Initialize a Hamiltonian exchange simulation object.
Parameters
----------
reference_state : ThermodynamicState
reference state containing all thermodynamic parameters except the system, which will be replaced by 'systems'
systems : list of simtk.openmm.System
list of systems to simulate (one per replica)
positions : simtk.unit.Quantity of np natoms x 3 with units compatible with nanometers
positions (or a list of positions objects) for initial assignment of replicas (will be used in round-robin assignment)
options : dict, optional, default=None
Optional dict to use for specifying simulation protocol. Provided keywords will be matched to object variables to replace defaults.
metadata : dict, optional, default=None
metadata to store in a 'metadata' group in store file
"""
if systems is None:
states = None
else:
# Create thermodynamic states from systems.
states = [ ThermodynamicState(system=system, temperature=reference_state.temperature, pressure=reference_state.pressure) for system in systems ]
# Initialize replica-exchange simulation.
ReplicaExchange.create(self, states, positions, options=options, metadata=metadata)
# Override title.
self.title = 'Hamiltonian exchange simulation created using HamiltonianExchange class of repex.py on %s' % time.asctime(time.localtime())
return
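# Illustrative sketch (hypothetical helper) of preparing the 'systems' argument
# for HamiltonianExchange.create: one System object per replica. In a real
# Hamiltonian exchange each copy would be perturbed (e.g. modified nonbonded
# parameters) before being passed in; here the copies are left identical, and
# we assume the System object supports copy.deepcopy (true for OpenMM Systems).
def _sketch_replica_systems(reference_system, nreplicas):
    import copy
    return [copy.deepcopy(reference_system) for _ in range(nreplicas)]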
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == "__main__":
import doctest
doctest.testmod()
|
jchodera/yank
|
Yank/repex.py
|
Python
|
lgpl-3.0
| 121,075
|
[
"MDTraj",
"NetCDF",
"OpenMM"
] |
f851482ac12c620059cf86fbd403d13b9ff3ea9495f13f857793c998c33ffa1d
|
import os
import sys
import time
import pkg_resources
from django.template import Context, Template
from selenium.webdriver.support.ui import WebDriverWait
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
from .resources import ResourceLoader
class SeleniumBaseTest(SeleniumTest):
module_name = None
default_css_selector = None
relative_scenario_path = 'xml'
timeout = 10 # seconds
@property
def _module_name(self):
if self.module_name is None:
raise NotImplementedError("Overwrite cls.module_name in your derived class.")
return self.module_name
@property
def _default_css_selector(self):
if self.default_css_selector is None:
raise NotImplementedError("Overwrite cls.default_css_selector in your derived class.")
return self.default_css_selector
def setUp(self):
super(SeleniumBaseTest, self).setUp()
# Use test scenarios
self.browser.get(self.live_server_url) # Needed to load tests once
scenarios.SCENARIOS.clear()
loader = ResourceLoader(self._module_name)
scenarios_list = loader.load_scenarios_from_path(self.relative_scenario_path, include_identifier=True)
for identifier, title, xml in scenarios_list:
scenarios.add_xml_scenario(identifier, title, xml)
self.addCleanup(scenarios.remove_scenario, identifier)
# Suzy opens the browser to visit the workbench
self.browser.get(self.live_server_url)
# She knows it's the site by the header
header1 = self.browser.find_element_by_css_selector('h1')
self.assertEqual(header1.text, 'XBlock scenarios')
def wait_until_hidden(self, elem):
wait = WebDriverWait(elem, self.timeout)
wait.until(lambda e: not e.is_displayed(), u"{} should be hidden".format(elem.text))
def wait_until_disabled(self, elem):
wait = WebDriverWait(elem, self.timeout)
wait.until(lambda e: not e.is_enabled(), u"{} should be disabled".format(elem.text))
def wait_until_clickable(self, elem):
wait = WebDriverWait(elem, self.timeout)
wait.until(lambda e: e.is_displayed() and e.is_enabled(), u"{} should be clickable".format(elem.text))
def wait_until_text_in(self, text, elem):
wait = WebDriverWait(elem, self.timeout)
wait.until(lambda e: text in e.text, u"{} should be in {}".format(text, elem.text))
def wait_until_exists(self, selector):
wait = WebDriverWait(self.browser, self.timeout)
wait.until(lambda driver: driver.find_element_by_css_selector(selector), u"Selector '{}' should exist.".format(selector))
def go_to_page(self, page_name, css_selector=None, view_name=None):
"""
Navigate to the page `page_name`, as listed on the workbench home
Returns the DOM element on the visited page located by the `css_selector`
"""
if css_selector is None:
css_selector = self._default_css_selector
self.browser.get(self.live_server_url)
target_url = self.browser.find_element_by_link_text(page_name).get_attribute('href')
if view_name:
target_url += '{}/'.format(view_name)
self.browser.get(target_url)
time.sleep(1)  # give the page's JavaScript a moment to initialize
block = self.browser.find_element_by_css_selector(css_selector)
return block
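# Illustrative sketch (module and selector names are hypothetical) of a
# concrete test built on SeleniumBaseTest: the subclass supplies module_name
# and default_css_selector, then drives a scenario page with the wait helpers.
class _SketchBlockTest(SeleniumBaseTest):
    __test__ = False  # keep test runners from collecting this illustrative sketch
    module_name = 'my_xblock'
    default_css_selector = '.my-block'

    def test_block_loads(self):
        block = self.go_to_page('My Scenario Title')
        button = block.find_element_by_css_selector('button.submit')
        self.wait_until_clickable(button)
        button.click()
        self.wait_until_text_in('Thank you', block)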
|
hosamshahin/xblock-utils-old
|
xblockutils/base_test.py
|
Python
|
agpl-3.0
| 3,410
|
[
"VisIt"
] |
c959bced51e6b9afb0d11b2b17cb45f07a52102d7f4d74b01bbebfa5197b5040
|
import copy
import datetime
import json
import os
from email.utils import formataddr
from django.conf import settings
from django.core import mail
from django.urls import reverse
from unittest import mock
import pytest
from waffle.testutils import override_switch
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.activity.models import MAX_TOKEN_USE_COUNT, ActivityLog, ActivityLogToken
from olympia.activity.utils import (
ACTIVITY_MAIL_GROUP,
ADDON_REVIEWER_NAME,
ActivityEmailEncodingError,
ActivityEmailError,
ActivityEmailParser,
ActivityEmailTokenError,
ActivityEmailUUIDError,
add_email_to_activity_log,
add_email_to_activity_log_wrapper,
log_and_notify,
notify_about_activity_log,
NOTIFICATIONS_FROM_EMAIL,
send_activity_mail,
)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, addon_factory, SQUOTE_ESCAPED, user_factory
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
sample_message_file = os.path.join(TESTS_DIR, 'emails', 'message.json')
with open(sample_message_file) as file_object:
sample_message_content = json.loads(file_object.read())
class TestEmailParser(TestCase):
def test_basic_email(self):
parser = ActivityEmailParser(sample_message_content['Message'])
assert parser.get_uuid() == '5a0b8a83d501412589cc5d562334b46b'
assert parser.reply == ("This is a developer reply to an AMO. It's nice.")
def test_with_invalid_msg(self):
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser('youtube?v=dQw4w9WgXcQ')
def test_with_empty_to(self):
message = copy.deepcopy(sample_message_content['Message'])
message['To'] = None
parser = ActivityEmailParser(message)
with self.assertRaises(ActivityEmailUUIDError):
# It should fail, but not with a "'NoneType' is not iterable" TypeError;
# instead we handle that gracefully and raise an exception that
# we control and catch later.
parser.get_uuid()
def test_empty_text_body(self):
"""We receive requests that either have no `TextBody` or it's None
https://github.com/mozilla/addons-server/issues/8848
"""
message = copy.deepcopy(sample_message_content['Message'])
message['TextBody'] = None
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser(message)
message = copy.deepcopy(sample_message_content['Message'])
message.pop('TextBody', None)
with self.assertRaises(ActivityEmailEncodingError):
ActivityEmailParser(message)
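# Minimal sketch (hypothetical helper, not addons-server API) of the reply
# address scheme exercised by these tests: the activity token UUID sits
# between 'reviewreply+' and '@<INBOUND_EMAIL_DOMAIN>' in the TO: header, so
# parsing the address recovers the token.
def _sketch_extract_uuid(to_address, domain='inbound.example.com'):
    prefix, suffix = 'reviewreply+', '@' + domain
    if to_address.startswith(prefix) and to_address.endswith(suffix):
        return to_address[len(prefix):-len(suffix)]
    return None
# _sketch_extract_uuid('reviewreply+5a0b8a83d501412589cc5d562334b46b@inbound.example.com')
# returns '5a0b8a83d501412589cc5d562334b46b'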
@override_switch('activity-email-bouncing', active=True)
class TestEmailBouncing(TestCase):
BOUNCE_REPLY = (
'Hello,\n\nAn email was received, apparently from you. Unfortunately '
"we couldn't process it because of:\n%s\n\nPlease visit %s to leave "
'a reply instead.\n--\nMozilla Add-ons\n%s\n'
)
def setUp(self):
self.bounce_reply = self.BOUNCE_REPLY % (
'%s',
settings.SITE_URL,
settings.SITE_URL,
)
self.email_text = sample_message_content['Message']
@mock.patch('olympia.activity.utils.ActivityLog.create')
def test_no_note_logged(self, log_mock):
# First set everything up so it's working
addon = addon_factory()
version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
user = user_factory()
self.grant_permission(user, '*:*')
ActivityLogToken.objects.create(
user=user, version=version, uuid='5a0b8a83d501412589cc5d562334b46b'
)
# Make log_mock return false for some reason.
log_mock.return_value = False
# No exceptions thrown, but no log means something went wrong.
assert not add_email_to_activity_log_wrapper(self.email_text, 0)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (self.bounce_reply % 'Undefined Error.')
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_because_invalid_token(self):
# Fails because the token doesn't exist in ActivityToken.objects
assert not add_email_to_activity_log_wrapper(self.email_text, 0)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (
self.bounce_reply
% 'UUID found in email address TO: header but is not a valid token '
'(5a0b8a83d501412589cc5d562334b46b).'
)
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_because_invalid_email(self):
# Fails because the token doesn't exist in ActivityToken.objects
email_text = copy.deepcopy(self.email_text)
email_text['To'] = [
{
'EmailAddress': 'foobar@addons.mozilla.org',
'FriendlyName': 'not a valid activity mail reply',
}
]
assert not add_email_to_activity_log_wrapper(email_text, 0)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.body == (
self.bounce_reply % 'TO: address does not contain activity email uuid ('
'foobar@addons.mozilla.org).'
)
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
def test_exception_parser_because_malformed_message(self):
assert not add_email_to_activity_log_wrapper('blah de blah', 0)
# No From or Reply means no bounce, alas.
assert len(mail.outbox) == 0
def _test_exception_in_parser_but_can_send_email(self, message):
assert not add_email_to_activity_log_wrapper(message, 0)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == (
self.bounce_reply % 'Invalid or malformed json message object.'
)
assert mail.outbox[0].subject == 'Re: your email to us'
assert mail.outbox[0].to == ['bob@dole.org']
def test_exception_in_parser_but_from_defined(self):
"""Unlikely scenario of an email missing a body but having a From."""
self._test_exception_in_parser_but_can_send_email(
{'From': {'EmailAddress': 'bob@dole.org'}}
)
def test_exception_in_parser_but_reply_to_defined(self):
"""Even more unlikely scenario of an email missing a body but having a
ReplyTo."""
self._test_exception_in_parser_but_can_send_email(
{'ReplyTo': {'EmailAddress': 'bob@dole.org'}}
)
def test_exception_to_notifications_alias(self):
email_text = copy.deepcopy(self.email_text)
email_text['To'] = [
{
'EmailAddress': 'notifications@%s' % settings.INBOUND_EMAIL_DOMAIN,
'FriendlyName': 'not a valid activity mail reply',
}
]
assert not add_email_to_activity_log_wrapper(email_text, 0)
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert (
'This email address is not meant to receive emails directly.'
) in out.body
assert out.subject == 'Re: This is the subject of a test message.'
assert out.to == ['sender@example.com']
@override_switch('activity-email-bouncing', active=False)
def test_exception_but_bouncing_waffle_off(self):
# Fails because the token doesn't exist in ActivityToken.objects
assert not add_email_to_activity_log_wrapper(self.email_text, 0)
# But no bounce.
assert len(mail.outbox) == 0
def test_exception_but_spammy(self):
# Fails because the token doesn't exist in ActivityToken.objects
assert not add_email_to_activity_log_wrapper(self.email_text, 10.0)
assert not add_email_to_activity_log_wrapper(self.email_text, 10)
assert not add_email_to_activity_log_wrapper(self.email_text, '10')
assert not add_email_to_activity_log_wrapper(self.email_text, 11.0)
# But no bounce.
assert len(mail.outbox) == 0
# But it should be bounced if below the threshold.
assert not add_email_to_activity_log_wrapper(self.email_text, 9.9)
assert len(mail.outbox) == 1
class TestAddEmailToActivityLog(TestCase):
def setUp(self):
self.addon = addon_factory(name='Badger', status=amo.STATUS_NOMINATED)
version = self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
self.profile = user_factory()
self.token = ActivityLogToken.objects.create(version=version, user=self.profile)
self.token.update(uuid='5a0b8a83d501412589cc5d562334b46b')
self.parser = ActivityEmailParser(sample_message_content['Message'])
def test_developer_comment(self):
self.profile.addonuser_set.create(addon=self.addon)
note = add_email_to_activity_log(self.parser)
assert note.log == amo.LOG.DEVELOPER_REPLY_VERSION
self.token.refresh_from_db()
assert self.token.use_count == 1
def test_reviewer_comment(self):
self.grant_permission(self.profile, 'Addons:Review')
note = add_email_to_activity_log(self.parser)
assert note.log == amo.LOG.REVIEWER_REPLY_VERSION
self.token.refresh_from_db()
assert self.token.use_count == 1
def test_with_max_count_token(self):
"""Test with an invalid token."""
self.token.update(use_count=MAX_TOKEN_USE_COUNT + 1)
with self.assertRaises(ActivityEmailTokenError):
assert not add_email_to_activity_log(self.parser)
self.token.refresh_from_db()
assert self.token.use_count == MAX_TOKEN_USE_COUNT + 1
def test_with_unpermitted_token(self):
"""Test when the token user doesn't have a permission to add a note."""
with self.assertRaises(ActivityEmailTokenError):
assert not add_email_to_activity_log(self.parser)
self.token.refresh_from_db()
assert self.token.use_count == 0
def test_non_existent_token(self):
self.token.update(uuid='12345678901234567890123456789012')
with self.assertRaises(ActivityEmailUUIDError):
assert not add_email_to_activity_log(self.parser)
def test_broken_token(self):
parser = ActivityEmailParser(copy.deepcopy(sample_message_content['Message']))
parser.email['To'][0]['EmailAddress'] = 'reviewreply+1234@foo.bar'
with self.assertRaises(ActivityEmailUUIDError):
assert not add_email_to_activity_log(parser)
def test_banned_user(self):
self.profile.addonuser_set.create(addon=self.addon)
self.profile.update(banned=datetime.datetime.now())
with self.assertRaises(ActivityEmailError):
assert not add_email_to_activity_log(self.parser)
class TestLogAndNotify(TestCase):
def setUp(self):
self.developer = user_factory()
self.developer2 = user_factory()
self.reviewer = user_factory()
self.grant_permission(self.reviewer, 'Addons:Review', 'Addon Reviewers')
self.addon = addon_factory()
self.version = self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED
)
self.addon.addonuser_set.create(user=self.developer)
self.addon.addonuser_set.create(user=self.developer2)
self.task_user = user_factory(id=settings.TASK_USER_ID)
def _create(self, action, author=None):
author = author or self.reviewer
details = {
'comments': 'I spy, with my líttle €ye...',
'version': self.version.version,
}
activity = ActivityLog.create(
action, self.addon, self.version, user=author, details=details
)
activity.update(created=self.days_ago(1))
return activity
def _recipients(self, email_mock):
recipients = []
for call in email_mock.call_args_list:
recipients += call[1]['recipient_list']
[reply_to] = call[1]['reply_to']
assert reply_to.startswith('reviewreply+')
assert reply_to.endswith(settings.INBOUND_EMAIL_DOMAIN)
return recipients
def _check_email(
self,
call,
url,
reason_text,
*,
author,
is_from_developer=False,
is_to_developer=False,
):
subject = call[0][0]
body = call[0][1]
assert subject == 'Mozilla Add-ons: {} {}'.format(
self.addon.name,
self.version.version,
)
assert ('visit %s' % url) in body
assert ('receiving this email because %s' % reason_text) in body
assert 'If we do not hear from you within' not in body
assert self.reviewer.name not in body
if is_to_developer and not is_from_developer:
assert ('%s wrote:' % ADDON_REVIEWER_NAME) in body
else:
assert ('%s wrote:' % author.name) in body
@mock.patch('olympia.activity.utils.send_mail')
def test_developer_reply(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == 'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
sender = formataddr((self.developer.name, NOTIFICATIONS_FROM_EMAIL))
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.developer,
is_from_developer=True,
is_to_developer=True,
)
review_url = absolutify(
reverse(
'reviewers.review',
kwargs={'addon_id': self.version.addon.pk, 'channel': 'listed'},
add_prefix=False,
)
)
self._check_email(
send_mail_mock.call_args_list[1],
review_url,
'you reviewed this add-on.',
author=self.developer,
is_from_developer=True,
is_to_developer=True,
)
@mock.patch('olympia.activity.utils.send_mail')
def test_reviewer_reply(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer.
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.REVIEWER_REPLY_VERSION
comments = 'Thîs ïs a revïewer replyîng'
log_and_notify(action, comments, self.reviewer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
assert logs[0].details['comments'] == 'Thîs ïs a revïewer replyîng'
assert send_mail_mock.call_count == 2 # Both authors.
sender = formataddr((ADDON_REVIEWER_NAME, NOTIFICATIONS_FROM_EMAIL))
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.reviewer,
is_from_developer=False,
is_to_developer=True,
)
self._check_email(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.reviewer,
is_from_developer=False,
is_to_developer=True,
)
@mock.patch('olympia.activity.utils.send_mail')
def test_log_with_no_comment(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
action = amo.LOG.APPROVAL_NOTES_CHANGED
log_and_notify(
action=action,
comments=None,
note_creator=self.developer,
version=self.version,
)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
assert not logs[0].details # No details json because no comment.
assert send_mail_mock.call_count == 2 # One author, one reviewer.
sender = formataddr((self.developer.name, NOTIFICATIONS_FROM_EMAIL))
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
assert 'Approval notes changed' in (send_mail_mock.call_args_list[0][0][1])
assert 'Approval notes changed' in (send_mail_mock.call_args_list[1][0][1])
def test_staff_cc_group_is_empty_no_failure(self):
Group.objects.create(name=ACTIVITY_MAIL_GROUP, rules='None:None')
log_and_notify(amo.LOG.REJECT_VERSION, 'á', self.reviewer, self.version)
@mock.patch('olympia.activity.utils.send_mail')
def test_staff_cc_group_get_mail(self, send_mail_mock):
self.grant_permission(self.reviewer, 'None:None', ACTIVITY_MAIL_GROUP)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
sender = formataddr((self.developer.name, NOTIFICATIONS_FROM_EMAIL))
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
assert len(recipients) == 2
# self.reviewer wasn't on the thread, but gets an email anyway.
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
review_url = absolutify(
reverse(
'reviewers.review',
kwargs={'addon_id': self.version.addon.pk, 'channel': 'listed'},
add_prefix=False,
)
)
self._check_email(
send_mail_mock.call_args_list[1],
review_url,
'you are member of the activity email cc group.',
author=self.developer,
)
@mock.patch('olympia.activity.utils.send_mail')
def test_task_user_doesnt_get_mail(self, send_mail_mock):
"""The task user account is used to auto-sign unlisted addons, amongst
other things, but we don't want that user account to get mail."""
self._create(amo.LOG.APPROVE_VERSION, self.task_user)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 1
assert self.developer2.email in recipients
assert self.task_user.email not in recipients
@mock.patch('olympia.activity.utils.send_mail')
def test_ex_reviewer_doesnt_get_mail(self, send_mail_mock):
"""If a reviewer has now left the team don't email them."""
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# Remove the reviewer from the group (they have left the team).
GroupUser.objects.get(
group=Group.objects.get(name='Addon Reviewers'), user=self.reviewer
).delete()
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 1
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 1
assert self.developer2.email in recipients
assert self.reviewer.email not in recipients
@mock.patch('olympia.activity.utils.send_mail')
def test_review_url_listed(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == 'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.developer,
)
review_url = absolutify(
reverse(
'reviewers.review',
add_prefix=False,
kwargs={'channel': 'listed', 'addon_id': self.addon.pk},
)
)
self._check_email(
send_mail_mock.call_args_list[1],
review_url,
'you reviewed this add-on.',
author=self.developer,
)
@mock.patch('olympia.activity.utils.send_mail')
def test_review_url_unlisted(self, send_mail_mock):
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted', 'Addon Reviewers')
# One from the reviewer.
self._create(amo.LOG.COMMENT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
logs = ActivityLog.objects.filter(action=action.id)
assert len(logs) == 2 # We added one above.
assert logs[0].details['comments'] == 'Thïs is á reply'
assert send_mail_mock.call_count == 2 # One author, one reviewer.
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.reviewer.email in recipients
assert self.developer2.email in recipients
# The developer who sent it doesn't get their email back.
assert self.developer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.developer,
)
review_url = absolutify(
reverse(
'reviewers.review',
add_prefix=False,
kwargs={'channel': 'unlisted', 'addon_id': self.addon.pk},
)
)
self._check_email(
send_mail_mock.call_args_list[1],
review_url,
'you reviewed this add-on.',
author=self.developer,
)
@mock.patch('olympia.activity.utils.send_mail')
def test_from_name_escape(self, send_mail_mock):
self.developer.update(display_name='mr "quote" escape')
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
# One from the developer. So the developer is on the 'thread'
self._create(amo.LOG.DEVELOPER_REPLY_VERSION, self.developer)
action = amo.LOG.DEVELOPER_REPLY_VERSION
comments = 'Thïs is á reply'
log_and_notify(action, comments, self.developer, self.version)
sender = r'"mr \"quote\" escape" <notifications@%s>' % (
settings.INBOUND_EMAIL_DOMAIN
)
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
@mock.patch('olympia.activity.utils.send_mail')
def test_comment_entity_decode(self, send_mail_mock):
# One from the reviewer.
self._create(amo.LOG.REJECT_VERSION, self.reviewer)
action = amo.LOG.REVIEWER_REPLY_VERSION
comments = f'This email{SQUOTE_ESCAPED}s entities should be decoded'
log_and_notify(action, comments, self.reviewer, self.version)
body = send_mail_mock.call_args_list[1][0][1]
assert "email's entities should be decoded" in body
assert '&' not in body
@mock.patch('olympia.activity.utils.send_mail')
def test_notify_about_previous_activity(self, send_mail_mock):
# Create an activity to use when notifying.
activity = self._create(amo.LOG.REVIEWER_REPLY_VERSION, self.reviewer)
notify_about_activity_log(self.addon, self.version, activity)
assert ActivityLog.objects.count() == 1 # No new activity created.
assert send_mail_mock.call_count == 2 # Both authors.
sender = formataddr((ADDON_REVIEWER_NAME, NOTIFICATIONS_FROM_EMAIL))
assert sender == send_mail_mock.call_args_list[0][1]['from_email']
recipients = self._recipients(send_mail_mock)
assert len(recipients) == 2
assert self.developer.email in recipients
assert self.developer2.email in recipients
# The reviewer who sent it doesn't get their email back.
assert self.reviewer.email not in recipients
self._check_email(
send_mail_mock.call_args_list[0],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.reviewer,
is_from_developer=False,
is_to_developer=True,
)
self._check_email(
send_mail_mock.call_args_list[1],
absolutify(self.addon.get_dev_url('versions')),
'you are listed as an author of this add-on.',
author=self.reviewer,
is_from_developer=False,
is_to_developer=True,
)
@pytest.mark.django_db
def test_send_activity_mail():
subject = 'This ïs ã subject'
message = 'And... this ïs a messãge!'
addon = addon_factory()
latest_version = addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)
user = user_factory()
recipients = [
user,
]
from_email = 'bob@bob.bob'
action = ActivityLog.create(amo.LOG.DEVELOPER_REPLY_VERSION, user=user)
send_activity_mail(
subject, message, latest_version, recipients, from_email, action.id
)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == message
assert mail.outbox[0].subject == subject
uuid = latest_version.token.get(user=user).uuid.hex
reference_header = '<{addon}/{version}@{site}>'.format(
addon=latest_version.addon.id,
version=latest_version.id,
site=settings.INBOUND_EMAIL_DOMAIN,
)
message_id = '<{addon}/{version}/{action}@{site}>'.format(
addon=latest_version.addon.id,
version=latest_version.id,
action=action.id,
site=settings.INBOUND_EMAIL_DOMAIN,
)
assert mail.outbox[0].extra_headers['In-Reply-To'] == reference_header
assert mail.outbox[0].extra_headers['References'] == reference_header
assert mail.outbox[0].extra_headers['Message-ID'] == message_id
reply_email = f'reviewreply+{uuid}@{settings.INBOUND_EMAIL_DOMAIN}'
assert mail.outbox[0].reply_to == [reply_email]
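# Sketch (illustrative; the format mirrors the assertions above) of the
# threading headers: all notes on an add-on version share one References
# header, while each action gets its own Message-ID, so mail clients thread
# the conversation per version.
def _sketch_threading_headers(addon_id, version_id, action_id, site):
    reference = '<{}/{}@{}>'.format(addon_id, version_id, site)
    message_id = '<{}/{}/{}@{}>'.format(addon_id, version_id, action_id, site)
    return {'In-Reply-To': reference, 'References': reference, 'Message-ID': message_id}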
|
mozilla/addons-server
|
src/olympia/activity/tests/test_utils.py
|
Python
|
bsd-3-clause
| 29,093
|
[
"VisIt"
] |
257343f945b54ae582ce8ff4fea4c205a09e7d587d5dae2f7dd368c04e5215c8
|
from __future__ import print_function, division
import os, sys, pwd, json
import pkg_resources
from cosmos.api import Cosmos, Dependency, default_get_submit_args
from yaps2.utils import to_json, merge_params, natural_key, ensure_directory
class Config(object):
def __init__(self, job_db,
input_json_sample_bams, project_name, email, workspace, drm_job_group):
self.email = email
self.db = job_db
self.project_name = project_name
self.rootdir = workspace
self.input_json_sample_bams = input_json_sample_bams
self.drm_job_group = drm_job_group
self.ensure_rootdir()
if self.email is None:
self.email = self.setup_email()
if self.db is None:
self.db = os.path.join(
os.path.abspath(self.rootdir),
'.job_queue.db'
)
self.sample_data = self.collect_sample_data()
def ensure_rootdir(self):
if not os.path.exists(self.rootdir):
os.makedirs(self.rootdir)
def setup_email(self):
user_id = pwd.getpwuid( os.getuid() ).pw_name
return '{}@genome.wustl.edu'.format(user_id)
def collect_sample_data(self):
# expecting the JSON to have this structure
# {
# "<organism-sample-id>": {
# "administration-project": "CCDG Testing - Gold Whole Genomes with TruSeq",
# "bams": [
# "/path/to/gerald/bam1.bam",
# "/path/to/geral/bam2.bam"
# ],
# "meta": {
# "ethnicity": null,
# "gender": "male",
# "nomenclature": "CCDG",
# "original-name": "H_XYZ-sample-1234"
# }
# },
# }
#
# NOTE: the 'administration-project' key can be substituted by 'analysis-project'
with open(self.input_json_sample_bams, 'r') as json_data:
d = json.load(json_data)
return d
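# Illustrative example (hypothetical IDs and paths) of the structure documented
# above; useful for smoke-testing the pipeline without real LIMS output.
EXAMPLE_SAMPLE_DATA = {
    "SAMPLE-0001": {
        "administration-project": "Example Project",
        "bams": [
            "/tmp/example/bam1.bam",
            "/tmp/example/bam2.bam",
        ],
        "meta": {
            "ethnicity": None,
            "gender": "male",
            "nomenclature": "CCDG",
            "original-name": "H_XYZ-sample-1234",
        },
    },
}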
class Pipeline(object):
def __init__(self, config, drm, restart):
self.config = config
self.cosmos = Cosmos(
database_url='sqlite:///{}'.format(self.config.db),
get_submit_args=default_get_submit_args,
default_drm=drm
)
self.cosmos.initdb()
primary_logfile = os.path.join(
self.config.rootdir,
'{}.log'.format(self.config.project_name),
)
self.workflow = self.cosmos.start(
self.config.project_name,
primary_log_path=primary_logfile,
restart=restart,
)
self.setup_pipeline()
def setup_pipeline(self):
self.construct_pipeline()
self.workflow.make_output_dirs()
def run(self):
# pass set_successful=False if you intend to add more tasks to the
# pipeline later
custom_log_dir = lambda task : os.path.join(self.config.rootdir, 'logs', task.stage.name, task.uid)
self.workflow.run(set_successful=False, log_out_dir_func=custom_log_dir)
def construct_pipeline(self):
speedseq_tasks = self.create_speedseq_realign_tasks()
def create_speedseq_realign_tasks(self):
tasks = []
stage = '1-exec-speedseq-realign'
basedir = os.path.join(self.config.rootdir, stage)
email = self.config.email
lsf_job_group = self.config.drm_job_group
sample_data = self.config.sample_data
for sample_id in sample_data.keys():
bam_paths = sample_data[sample_id]['bams']
sample_name = sample_data[sample_id]['meta']['original-name']
output_prefix = os.path.join(basedir, sample_id, "{}.b38.realign".format(sample_id))
tmpdir = os.path.join(basedir, sample_id, 'tmpdir')
input_bams = ' '.join(bam_paths)
task = {
'func' : exec_speedseq,
'params' : {
'output_prefix' : output_prefix,
'tmpdir' : tmpdir,
'input_bams' : input_bams,
},
'stage_name' : stage,
'uid' : sample_id,
'drm_params' :
to_json(exec_speedseq_lsf_params(email, lsf_job_group)),
}
tasks.append( self.workflow.add_task(**task) )
return tasks
def exec_speedseq(output_prefix, tmpdir, input_bams, **kwargs):
args = locals()
default = {
'script' : pkg_resources.resource_filename('yaps2', 'resources/b38/speedseq-realign.sh'),
# build 38
'reference' : os.path.join(
'/gscmnt/gc2802/halllab/ccdg_resources/genomes',
'human/GRCh38DH/bwa/0_7_12/all_sequences.fa'
),
}
cmd_args = merge_params(default, args)
cmd = ("{script} {output_prefix} {tmpdir} {reference} {input_bams}").format(**cmd_args)
return cmd
def exec_speedseq_lsf_params(email, job_group):
return {
'g' : job_group,
'u' : email,
'N' : None,
'q' : "long",
'M' : 50000000, # 50_000_000 (50 GB)
'R' : 'select[mem>45000] rusage[mem=48000] span[hosts=1]',
'n' : 8
}
|
indraniel/yaps2
|
yaps2/pipelines/b38.py
|
Python
|
bsd-2-clause
| 5,222
|
[
"BWA"
] |
1823ba7e195259ff1f47b47db33a57d095e2b237ea4479a879f53536cec9e5d9
|
# -*- coding: utf-8 -*-
# coding: utf-8
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open, str
import warnings
from ....pipeline import engine as pe
from ....interfaces import utility as niu
from ....interfaces import fsl as fsl
def create_dmri_preprocessing(name='dMRI_preprocessing', use_fieldmap=True, fieldmap_registration=False):
"""
Creates a workflow that chains the necessary pipelines to
correct for motion, eddy currents, and, if selected, susceptibility
artifacts in EPI dMRI sequences.
.. deprecated:: 0.9.3
Use :func:`nipype.workflows.dmri.preprocess.epi.all_fmb_pipeline` or
:func:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline` instead.
.. warning:: This workflow rotates the b-vectors, so please be
advised that not all the dicom converters ensure the consistency between the resulting
nifti orientation and the b matrix table (e.g. dcm2nii checks it).
Example
-------
>>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep')
>>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0
>>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46
>>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False
>>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True
>>> nipype_dmri_preprocess.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The diffusion data
inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation
inputnode.ref_num - The reference volume (a b=0 volume in dMRI)
inputnode.fieldmap_mag - The magnitude of the fieldmap
inputnode.fieldmap_pha - The phase difference of the fieldmap
inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners)
inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.)
inputnode.epi_rev_encoding - True if reverse encoding was used (generally False)
inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor)
inputnode.vsm_sigma - Sigma (in mm.) of the gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm)
Outputs::
outputnode.dmri_corrected
outputnode.bvec_rotated
Optional arguments::
use_fieldmap - True if there are fieldmap files that should be used (default True)
fieldmap_registration - True if registration to fieldmap should be performed (default False)
"""
warnings.warn(('This workflow is deprecated as of v1.0.0; use one of the available '
'nipype.workflows.dmri.preprocess.epi.all_* pipelines instead'), DeprecationWarning)
pipeline = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file', 'in_bvec', 'ref_num', 'fieldmap_mag',
'fieldmap_pha', 'te_diff', 'epi_echospacing',
'epi_rev_encoding', 'pi_accel_factor', 'vsm_sigma']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['dmri_corrected', 'bvec_rotated']),
name='outputnode')
motion = create_motion_correct_pipeline()
eddy = create_eddy_correct_pipeline()
if use_fieldmap: # we have a fieldmap, so lets use it (yay!)
susceptibility = create_epidewarp_pipeline(
fieldmap_registration=fieldmap_registration)
pipeline.connect([
(inputnode, motion, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('ref_num', 'inputnode.ref_num')]),
(inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
(motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
(eddy, susceptibility, [('outputnode.eddy_corrected', 'inputnode.in_file')]),
(inputnode, susceptibility, [('ref_num', 'inputnode.ref_num'),
('fieldmap_mag', 'inputnode.fieldmap_mag'),
('fieldmap_pha', 'inputnode.fieldmap_pha'),
('te_diff', 'inputnode.te_diff'),
('epi_echospacing', 'inputnode.epi_echospacing'),
('epi_rev_encoding', 'inputnode.epi_rev_encoding'),
('pi_accel_factor', 'inputnode.pi_accel_factor'),
('vsm_sigma', 'inputnode.vsm_sigma')]),
(motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
(susceptibility, outputnode, [('outputnode.epi_corrected', 'dmri_corrected')])
])
else: # we don't have a fieldmap, so we just carry on without it :(
pipeline.connect([
(inputnode, motion, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('ref_num', 'inputnode.ref_num')]),
(inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
(motion, eddy, [('outputnode.motion_corrected', 'inputnode.in_file')]),
(motion, outputnode, [('outputnode.out_bvec', 'bvec_rotated')]),
(eddy, outputnode, [('outputnode.eddy_corrected', 'dmri_corrected')])
])
return pipeline
def create_motion_correct_pipeline(name='motion_correct'):
"""Creates a pipeline that corrects for motion artifact in dMRI sequences.
It takes a series of diffusion weighted images and rigidly co-registers
them to one reference image. Finally, the b-matrix is rotated accordingly
(Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973),
making use of the rotation matrix obtained by FLIRT.
.. deprecated:: 0.9.3
Use :func:`nipype.workflows.dmri.preprocess.epi.hmc_pipeline` instead.
.. warning:: This workflow rotates the b-vectors, so please be advised
that not all the dicom converters ensure the consistency between the resulting
nifti orientation and the b matrix table (e.g. dcm2nii checks it).
Example
-------
>>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect')
>>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> nipype_motioncorrect.inputs.inputnode.ref_num = 0
>>> nipype_motioncorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file
inputnode.ref_num
inputnode.in_bvec
Outputs::
outputnode.motion_corrected
outputnode.out_bvec
"""
warnings.warn(('This workflow is deprecated from v.1.0.0, use '
'nipype.workflows.dmri.preprocess.epi.hmc_pipeline instead'),
DeprecationWarning)
inputnode = pe.Node(
niu.IdentityInterface(
fields=['in_file', 'ref_num', 'in_bvec']),
name='inputnode')
pipeline = pe.Workflow(name=name)
split = pe.Node(fsl.Split(dimension='t'), name='split')
pick_ref = pe.Node(niu.Select(), name='pick_ref')
coregistration = pe.MapNode(fsl.FLIRT(no_search=True, interp='spline',
padding_size=1, dof=6), name='coregistration', iterfield=['in_file'])
rotate_bvecs = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'], output_names=[
'out_file'], function=_rotate_bvecs), name='rotate_b_matrix')
merge = pe.Node(fsl.Merge(dimension='t'), name='merge')
outputnode = pe.Node(
niu.IdentityInterface(
fields=['motion_corrected', 'out_bvec']),
name='outputnode')
pipeline.connect([(inputnode, split, [('in_file', 'in_file')]),
(split, pick_ref, [('out_files', 'inlist')]),
(inputnode, pick_ref, [('ref_num', 'index')]),
(split, coregistration, [('out_files', 'in_file')]),
(inputnode, rotate_bvecs, [('in_bvec', 'in_bvec')]),
(coregistration, rotate_bvecs, [('out_matrix_file', 'in_matrix')]),
(pick_ref, coregistration, [('out', 'reference')]),
(coregistration, merge, [('out_file', 'in_files')]),
(merge, outputnode, [('merged_file', 'motion_corrected')]),
(rotate_bvecs, outputnode, [('out_file', 'out_bvec')])
])
return pipeline
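# Minimal standalone sketch (hypothetical helper, not nipype's private
# _rotate_bvecs) of the b-vector rotation referenced above (Leemans et al.
# 2009): each gradient direction is rotated by the rotation component of the
# rigid transform that FLIRT estimated for the corresponding volume.
def _sketch_rotate_bvec(bvec, affine):
    import numpy as np
    R = np.asarray(affine, dtype=float)[:3, :3]
    u, _, vt = np.linalg.svd(R)   # polar decomposition: the nearest pure rotation is u @ vt
    v = u.dot(vt).dot(np.asarray(bvec, dtype=float))
    norm = np.linalg.norm(v)
    return v / norm if norm > 0 else v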
def create_eddy_correct_pipeline(name='eddy_correct'):
"""
.. deprecated:: 0.9.3
Use :func:`nipype.workflows.dmri.preprocess.epi.ecc_pipeline` instead.
Creates a pipeline that replaces eddy_correct script in FSL. It takes a
series of diffusion weighted images and linearly co-registers them to one
reference image. No rotation of the B-matrix is performed, so this pipeline
should be executed after the motion correction pipeline.
Example
-------
>>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect')
>>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_eddycorrect.inputs.inputnode.ref_num = 0
>>> nipype_eddycorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file
inputnode.ref_num
Outputs::
outputnode.eddy_corrected
"""
warnings.warn(('This workflow is deprecated from v.1.0.0, use '
'nipype.workflows.dmri.preprocess.epi.ecc_pipeline instead'),
DeprecationWarning)
inputnode = pe.Node(
niu.IdentityInterface(fields=['in_file', 'ref_num']),
name='inputnode')
pipeline = pe.Workflow(name=name)
split = pe.Node(fsl.Split(dimension='t'), name='split')
pick_ref = pe.Node(niu.Select(), name='pick_ref')
coregistration = pe.MapNode(fsl.FLIRT(no_search=True, padding_size=1,
interp='trilinear'), name='coregistration', iterfield=['in_file'])
merge = pe.Node(fsl.Merge(dimension='t'), name='merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['eddy_corrected']),
name='outputnode')
pipeline.connect([
(inputnode, split, [('in_file', 'in_file')]),
(split, pick_ref, [('out_files', 'inlist')]),
(inputnode, pick_ref, [('ref_num', 'index')]),
(split, coregistration, [('out_files', 'in_file')]),
(pick_ref, coregistration, [('out', 'reference')]),
(coregistration, merge, [('out_file', 'in_files')]),
(merge, outputnode, [('merged_file', 'eddy_corrected')])
])
return pipeline
def fieldmap_correction(name='fieldmap_correction', nocheck=False):
"""
.. deprecated:: 0.9.3
Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
Fieldmap-based retrospective correction of EPI images for the susceptibility distortion
artifact (Jezzard et al., 1995). Fieldmap images are assumed to be already registered
to EPI data, and a brain mask is required.
Replaces the former workflow, still available as create_epidewarp_pipeline(). The difference
with respect to the epidewarp pipeline is that this workflow uses the new fsl_prepare_fieldmap
available as of FSL 5.0.
Example
-------
>>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp')
>>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
>>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y'
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The volume acquired with EPI sequence
inputnode.in_mask - A brain mask
inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file
inputnode.fieldmap_mag - The magnitude maps (usually 4D, one magnitude per GRE scan)
from the fieldmapping, registered to in_file
inputnode.te_diff - Time difference (in msec) between the echo times of the fieldmapping acquisition (usually a GRE sequence).
inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If
EPI was acquired with parallel imaging, then the effective echo spacing is
eff_es = es / acc_factor.
inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y)
inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)
Outputs::
outputnode.epi_corrected
outputnode.out_vsm
"""
warnings.warn(('This workflow is deprecated from v.1.0.0, use '
'nipype.workflows.dmri.preprocess.epi.sdc_fmb instead'),
DeprecationWarning)
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file',
'in_mask',
'fieldmap_pha',
'fieldmap_mag',
'te_diff',
'epi_echospacing',
'vsm_sigma',
'encoding_direction'
]), name='inputnode'
)
pipeline = pe.Workflow(name=name)
# Keep first frame from magnitude
select_mag = pe.Node(fsl.utils.ExtractROI(
t_size=1, t_min=0), name='select_magnitude')
# Mask magnitude (it is required by PrepareFieldmap)
mask_mag = pe.Node(fsl.maths.ApplyMask(), name='mask_magnitude')
# Run fsl_prepare_fieldmap
fslprep = pe.Node(fsl.PrepareFieldmap(), name='prepare_fieldmap')
if nocheck:
fslprep.inputs.nocheck = True
# Use FUGUE to generate the voxel shift map (vsm)
vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
# VSM demeaning is no longer present in the epi_reg script
# vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
# 'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
# fugue_epi
dwi_split = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')
# 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
dwi_applyxfm = pe.MapNode(fsl.FUGUE(
icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')
# Merge back all volumes
dwi_merge = pe.Node(fsl.utils.Merge(
dimension='t'), name='dwi_merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['epi_corrected', 'out_vsm']),
name='outputnode')
pipeline.connect([
(inputnode, select_mag, [('fieldmap_mag', 'in_file')]),
(inputnode, fslprep, [('fieldmap_pha', 'in_phase'), ('te_diff', 'delta_TE')]),
(inputnode, mask_mag, [('in_mask', 'mask_file')]),
(select_mag, mask_mag, [('roi_file', 'in_file')]),
(mask_mag, fslprep, [('out_file', 'in_magnitude')]),
(fslprep, vsm, [('out_fieldmap', 'phasemap_in_file')]),
(inputnode, vsm, [('fieldmap_mag', 'in_file'),
('encoding_direction', 'unwarp_direction'),
(('te_diff', _ms2sec), 'asym_se_time'),
('vsm_sigma', 'smooth2d'),
(('epi_echospacing', _ms2sec), 'dwell_time')]),
(mask_mag, vsm, [('out_file', 'mask_file')]),
(inputnode, dwi_split, [('in_file', 'in_file')]),
(dwi_split, dwi_applyxfm, [('out_files', 'in_file')]),
(mask_mag, dwi_applyxfm, [('out_file', 'mask_file')]),
(vsm, dwi_applyxfm, [('shift_out_file', 'shift_in_file')]),
(inputnode, dwi_applyxfm, [('encoding_direction', 'unwarp_direction')]),
(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')]),
(dwi_merge, outputnode, [('merged_file', 'epi_corrected')]),
(vsm, outputnode, [('shift_out_file', 'out_vsm')])
])
return pipeline
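# Tiny helper sketch (illustrative) for the note in the docstring above: with
# parallel imaging, the effective echo spacing is the nominal echo spacing
# divided by the acceleration factor (eff_es = es / acc_factor).
def _sketch_effective_echo_spacing(echo_spacing_ms, acceleration_factor=1.0):
    return echo_spacing_ms / float(acceleration_factor)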
def topup_correction(name='topup_correction'):
"""
.. deprecated:: 0.9.3
Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_peb` instead.
Corrects for susceptibility distortion of EPI images when one reverse-encoded dataset has
been acquired
Example
-------
>>> nipype_epicorrect = topup_correction('nipype_topup')
>>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii'
>>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii'
>>> nipype_epicorrect.inputs.inputnode.encoding_direction = ['y', 'y-']
>>> nipype_epicorrect.inputs.inputnode.ref_num = 0
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding
inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding
inputnode.encoding_direction - Direction encoding of in_file_dir
inputnode.ref_num - Identifier of the reference volumes (usually B0 volume)
Outputs::
outputnode.epi_corrected
"""
warnings.warn(('This workflow is deprecated from v.1.0.0, use '
'nipype.workflows.dmri.preprocess.epi.sdc_peb instead'),
DeprecationWarning)
pipeline = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_file_dir',
'in_file_rev',
'encoding_direction',
'readout_times',
'ref_num'
]), name='inputnode'
)
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_fieldcoef',
'out_movpar',
'out_enc_file',
'epi_corrected'
]), name='outputnode'
)
b0_dir = pe.Node(fsl.ExtractROI(t_size=1), name='b0_1')
b0_rev = pe.Node(fsl.ExtractROI(t_size=1), name='b0_2')
combin = pe.Node(niu.Merge(2), name='merge')
combin2 = pe.Node(niu.Merge(2), name='merge2')
merged = pe.Node(fsl.Merge(dimension='t'), name='b0_comb')
topup = pe.Node(fsl.TOPUP(), name='topup')
applytopup = pe.Node(fsl.ApplyTOPUP(in_index=[1, 2]), name='applytopup')
pipeline.connect([
(inputnode, b0_dir, [('in_file_dir', 'in_file'), ('ref_num', 't_min')]),
(inputnode, b0_rev, [('in_file_rev', 'in_file'), ('ref_num', 't_min')]),
(inputnode, combin2, [('in_file_dir', 'in1'), ('in_file_rev', 'in2')]),
(b0_dir, combin, [('roi_file', 'in1')]),
(b0_rev, combin, [('roi_file', 'in2')]),
(combin, merged, [('out', 'in_files')]),
(merged, topup, [('merged_file', 'in_file')]),
(inputnode, topup, [('encoding_direction', 'encoding_direction'), ('readout_times', 'readout_times')]),
(topup, applytopup, [('out_fieldcoef', 'in_topup_fieldcoef'), ('out_movpar', 'in_topup_movpar'),
('out_enc_file', 'encoding_file')]),
(combin2, applytopup, [('out', 'in_files')]),
(topup, outputnode, [('out_fieldcoef', 'out_fieldcoef'), ('out_movpar', 'out_movpar'),
('out_enc_file', 'out_enc_file')]),
(applytopup, outputnode, [('out_corrected', 'epi_corrected')])
])
return pipeline
def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False):
"""
Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl)
for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap
information (Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap
(strictly following the original script) is available using fieldmap_registration=True.
    .. warning:: This workflow makes use of ``epidewarp.fsl``, an FSL script that was
      deprecated long ago. The use of this workflow is not recommended; use
:func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
Example
-------
>>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False)
>>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
>>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
>>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
>>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
>>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False
>>> nipype_epicorrect.inputs.inputnode.ref_num = 0
>>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0
>>> nipype_epicorrect.run() # doctest: +SKIP
Inputs::
inputnode.in_file - The volume acquired with EPI sequence
inputnode.fieldmap_mag - The magnitude of the fieldmap
inputnode.fieldmap_pha - The phase difference of the fieldmap
inputnode.te_diff - Time difference between TE in ms.
inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence
inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y)
inputnode.epi_rev_encoding - True if it is acquired with reverse encoding
inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA)
inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map)
inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI)
Outputs::
outputnode.epi_corrected
Optional arguments::
fieldmap_registration - True if registration to fieldmap should be done (default False)
"""
    warnings.warn('This workflow reproduces a deprecated FSL script.',
                  DeprecationWarning)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file',
'fieldmap_mag',
'fieldmap_pha',
'te_diff',
'epi_echospacing',
'epi_ph_encoding_dir',
'epi_rev_encoding',
'pi_accel_factor',
'vsm_sigma',
'ref_num',
'unwarp_direction'
]), name='inputnode')
pipeline = pe.Workflow(name=name)
# Keep first frame from magnitude
select_mag = pe.Node(fsl.utils.ExtractROI(
t_size=1, t_min=0), name='select_magnitude')
# mask_brain
mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude')
mask_mag_dil = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_file'], function=_dilate_mask), name='mask_dilate')
# Compute dwell time
dwell_time = pe.Node(niu.Function(input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], output_names=[
'dwell_time'], function=_compute_dwelltime), name='dwell_time')
# Normalize phase diff to be [-pi, pi)
norm_pha = pe.Node(niu.Function(input_names=['in_file'], output_names=[
'out_file'], function=_prepare_phasediff), name='normalize_phasediff')
# Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s
prelude = pe.Node(fsl.PRELUDE(
process3d=True), name='phase_unwrap')
fill_phase = pe.Node(niu.Function(input_names=['in_file'], output_names=[
'out_file'], function=_fill_phase), name='fill_phasediff')
    # To ensure that the vsm has the same dimensions as mag. The input only affects
    # the output dimension; its content has no effect on the vsm. The de-warped mag
    # volume is meaningless and will be thrown away.
# fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s %
# ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name)
vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
# fugue_epi
dwi_split = pe.Node(niu.Function(input_names=[
'in_file'], output_names=['out_files'], function=_split_dwi), name='dwi_split')
# 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
dwi_applyxfm = pe.MapNode(fsl.FUGUE(
icorr=True, save_shift=False), iterfield=['in_file'], name='dwi_fugue')
# Merge back all volumes
dwi_merge = pe.Node(fsl.utils.Merge(
dimension='t'), name='dwi_merge')
outputnode = pe.Node(
niu.IdentityInterface(fields=['epi_corrected']),
name='outputnode')
pipeline.connect([
(inputnode, dwell_time, [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), ('epi_rev_encoding', 'is_reverse_encoding')]),
(inputnode, select_mag, [('fieldmap_mag', 'in_file')]),
(inputnode, norm_pha, [('fieldmap_pha', 'in_file')]),
(select_mag, mask_mag, [('roi_file', 'in_file')]),
(mask_mag, mask_mag_dil, [('mask_file', 'in_file')]),
(select_mag, prelude, [('roi_file', 'magnitude_file')]),
(norm_pha, prelude, [('out_file', 'phase_file')]),
(mask_mag_dil, prelude, [('out_file', 'mask_file')]),
(prelude, fill_phase, [('unwrapped_phase_file', 'in_file')]),
(inputnode, vsm, [('fieldmap_mag', 'in_file')]),
(fill_phase, vsm, [('out_file', 'phasemap_in_file')]),
(inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), ('vsm_sigma', 'smooth2d')]),
(dwell_time, vsm, [(('dwell_time', _ms2sec), 'dwell_time')]),
(mask_mag_dil, vsm, [('out_file', 'mask_file')]),
(mask_mag_dil, vsm_mean, [('out_file', 'mask_file')]),
(vsm, vsm_mean, [('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file')]),
(inputnode, dwi_split, [('in_file', 'in_file')]),
(dwi_split, dwi_applyxfm, [('out_files', 'in_file')]),
(dwi_applyxfm, dwi_merge, [('unwarped_file', 'in_files')]),
(dwi_merge, outputnode, [('merged_file', 'epi_corrected')])
])
if fieldmap_registration:
""" Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag
Pre-condition: forward warp the mag in order to reg with func. What does mask do here?
"""
# Select reference volume from EPI (B0 in dMRI and a middle frame in
# fMRI)
select_epi = pe.Node(fsl.utils.ExtractROI(
t_size=1), name='select_epi')
# fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name,
# vsmmag_name, mask_name ), log ) # Forward Map
vsm_fwd = pe.Node(fsl.FUGUE(
forward_warping=True), name='vsm_fwd')
vsm_reg = pe.Node(fsl.FLIRT(bins=256, cost='corratio', dof=6, interp='spline', searchr_x=[
-10, 10], searchr_y=[-10, 10], searchr_z=[-10, 10]), name='vsm_registration')
# 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out )
vsm_applyxfm = pe.Node(fsl.ApplyXfm(
interp='spline'), name='vsm_apply_xfm')
# 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out )
msk_applyxfm = pe.Node(fsl.ApplyXfm(
interp='nearestneighbour'), name='msk_apply_xfm')
pipeline.connect([
(inputnode, select_epi, [('in_file', 'in_file'), ('ref_num', 't_min')]),
(select_epi, vsm_reg, [('roi_file', 'reference')]),
(vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')]),
(mask_mag_dil, vsm_fwd, [('out_file', 'mask_file')]),
(inputnode, vsm_fwd, [('fieldmap_mag', 'in_file')]),
(vsm_fwd, vsm_reg, [('warped_file', 'in_file')]),
(vsm_reg, msk_applyxfm, [('out_matrix_file', 'in_matrix_file')]),
(select_epi, msk_applyxfm, [('roi_file', 'reference')]),
(mask_mag_dil, msk_applyxfm, [('out_file', 'in_file')]),
(vsm_reg, vsm_applyxfm, [('out_matrix_file', 'in_matrix_file')]),
(select_epi, vsm_applyxfm, [('roi_file', 'reference')]),
(vsm_mean, vsm_applyxfm, [('out_file', 'in_file')]),
(msk_applyxfm, dwi_applyxfm, [('out_file', 'mask_file')]),
(vsm_applyxfm, dwi_applyxfm, [('out_file', 'shift_in_file')])
])
else:
pipeline.connect([
(mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')]),
(vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')])
])
return pipeline
def _rotate_bvecs(in_bvec, in_matrix):
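    """Rotate each b-vector by the 3x3 rotation block of its per-volume
    transform matrix and write the result to a new .bvec file."""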
import os
import numpy as np
name, fext = os.path.splitext(os.path.basename(in_bvec))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_rotated.bvec' % name)
bvecs = np.loadtxt(in_bvec)
new_bvecs = np.zeros(shape=bvecs.T.shape) # pre-initialise array, 3 col format
for i, vol_matrix in enumerate(in_matrix[0::]): # start index at 0
bvec = np.matrix(bvecs[:, i])
rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3])
new_bvecs[i] = (np.array(rot * bvec.T).T)[0] # fill each volume with x,y,z as we go along
np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f')
return out_file
def _cat_logs(in_files):
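    """Concatenate the given log files into one log, writing a numbered
    header line before each."""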
import shutil
import os
name, fext = os.path.splitext(os.path.basename(in_files[0]))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_ecclog.log' % name)
    with open(out_file, 'w') as totallog:
for i, fname in enumerate(in_files):
totallog.write('\n\npreprocessing %d\n' % i)
with open(fname) as inlog:
for line in inlog:
totallog.write(line)
return out_file
def _compute_dwelltime(dwell_time=0.68, pi_factor=1.0, is_reverse_encoding=False):
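    """Scale the dwell time by the inverse of the parallel-imaging factor and
    negate it if the data were acquired with reverse phase encoding."""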
dwell_time *= (1.0 / pi_factor)
if is_reverse_encoding:
dwell_time *= -1.0
return dwell_time
def _effective_echospacing(dwell_time, pi_factor=1.0):
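    """Convert the dwell time from ms to s and scale it by the inverse of the
    parallel-imaging factor."""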
dwelltime = 1.0e-3 * dwell_time * (1.0 / pi_factor)
return dwelltime
def _prepare_phasediff(in_file):
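    """Linearly rescale a phase-difference image to the range [-pi, pi].

    With A = 2*pi / (max - min) and B = pi - A * max, the mapping
    x -> A * x + B sends the maximum value to pi and the minimum to -pi.
    """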
import nibabel as nib
import os
import numpy as np
img = nib.load(in_file)
max_diff = np.max(img.get_data().reshape(-1))
min_diff = np.min(img.get_data().reshape(-1))
A = (2.0 * np.pi) / (max_diff - min_diff)
B = np.pi - (A * max_diff)
diff_norm = img.get_data() * A + B
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_2pi.nii.gz' % name)
nib.save(nib.Nifti1Image(diff_norm, img.affine, img.header), out_file)
return out_file
def _dilate_mask(in_file, iterations=4):
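    """Binary-dilate a mask image for the given number of iterations."""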
import nibabel as nib
import scipy.ndimage as ndimage
import os
img = nib.load(in_file)
img._data = ndimage.binary_dilation(img.get_data(), iterations=iterations)
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_dil.nii.gz' % name)
nib.save(img, out_file)
return out_file
def _fill_phase(in_file):
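    """Concatenate the phase image with a zero-filled volume of the same
    shape, producing a two-volume 4D image."""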
import nibabel as nib
import os
import numpy as np
img = nib.load(in_file)
dumb_img = nib.Nifti1Image(np.zeros(img.shape), img.affine, img.header)
out_nii = nib.funcs.concat_images((img, dumb_img))
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_fill.nii.gz' % name)
nib.save(out_nii, out_file)
return out_file
def _vsm_remove_mean(in_file, mask_file, in_unwarped):
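    """Subtract the within-mask mean from the voxel shift map (vsm)."""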
import nibabel as nib
import os
import numpy as np
import numpy.ma as ma
img = nib.load(in_file)
msk = nib.load(mask_file).get_data()
img_data = img.get_data()
img_data[msk == 0] = 0
vsmmag_masked = ma.masked_values(img_data.reshape(-1), 0.0)
vsmmag_masked = vsmmag_masked - vsmmag_masked.mean()
img._data = vsmmag_masked.reshape(img.shape)
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
out_file = os.path.abspath('./%s_demeaned.nii.gz' % name)
nib.save(img, out_file)
return out_file
def _ms2sec(val):
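    """Convert a value from milliseconds to seconds."""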
return val * 1e-3
def _split_dwi(in_file):
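    """Split a 4D NIfTI image into one 3D file per volume and return the list
    of output file names."""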
import nibabel as nib
import os
out_files = []
frames = nib.funcs.four_to_three(nib.load(in_file))
name, fext = os.path.splitext(os.path.basename(in_file))
if fext == '.gz':
name, _ = os.path.splitext(name)
for i, frame in enumerate(frames):
out_file = os.path.abspath('./%s_%03d.nii.gz' % (name, i))
nib.save(frame, out_file)
out_files.append(out_file)
return out_files
|
carolFrohlich/nipype
|
nipype/workflows/dmri/fsl/epi.py
|
Python
|
bsd-3-clause
| 35,131
|
[
"Gaussian"
] |
e9d2306d39069411e2b3e998143a58f2d01a342e8289c74f43378602b3ab2cbb
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sys
from GPy.models.sparse_gp_regression import SparseGPRegression
class SparseGPLVM(SparseGPRegression):
"""
Sparse Gaussian Process Latent Variable Model
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
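
    Example (illustrative sketch, not from the original source; assumes GPy is
    installed and ``Y`` is an (N, D) numpy array)::

        >>> import numpy as np
        >>> Y = np.random.randn(100, 12)
        >>> m = SparseGPLVM(Y, input_dim=2, num_inducing=10)  # doctest: +SKIP
        >>> m.optimize()  # doctest: +SKIP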
"""
def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
if X is None:
from ..util.initialization import initialize_latent
X, fracs = initialize_latent(init, input_dim, Y)
SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
def parameters_changed(self):
super(SparseGPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X_diag(self.grad_dict['dL_dKdiag'], self.X)
self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
|
gusmaogabriels/GPy
|
GPy/models/sparse_gplvm.py
|
Python
|
bsd-3-clause
| 1,818
|
[
"Gaussian"
] |
c1f88bea0b6a03c5df2155f55622f91014d858c0129043fe6ba962fe90fbe130
|
# Copyright 2015 Kevin B Jacobs
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Match a genome to a database of alleles."""
import csv
import sys
from os.path import expanduser
from operator import attrgetter
from pysam import VariantFile, Fastafile
from vgraph.norm import NormalizedLocus
from vgraph.intervals import union
from vgraph.iterstuff import sort_almost_sorted
from vgraph.match import records_by_chromosome, get_superlocus_bounds, find_allele
def annotate_info(locus, allele, info_meta, suffix, times):
"""Annotate INFO in sample with fields from database."""
for name in info_meta:
if name in allele.record.info:
sname = name + suffix
orig_value = locus.record.info.get(sname, ())
new_value = allele.record.info[name]
if not isinstance(new_value, tuple):
new_value = (new_value,)
locus.record.info[sname] = orig_value + new_value * times
def annotate_format(locus, allele, format_meta, suffix, times):
"""Annotate FORMAT in sample with fields from database."""
sample = locus.record.samples[0]
for name in format_meta:
if name in allele.record.format:
sname = name + suffix
orig_value = sample.get(sname, ())
new_value = allele.record.samples[0][name]
if not isinstance(new_value, tuple):
new_value = (new_value,)
sample[sname] = orig_value + new_value * times
def generate_superlocus_matches(chrom, superlocus, ref, alleles, mode, debug=False):
"""Generate allele matches for a superlocus."""
for allele in alleles:
super_allele = [locus for locus in superlocus if locus.extremes_intersect(allele)]
# Remove all reference calls from the superlocus.
# This is primarily done to remove long leading and trailing reference regions.
# Interstitial reference regions will be added back, based on how gaps are handled.
super_non_ref = [locus for locus in super_allele if not locus.is_ref()]
if debug:
super_start, super_stop = get_superlocus_bounds([[allele], super_non_ref])
print('-' * 80, file=sys.stderr)
print(f'{chrom}:[{super_start:d}-{super_stop:d}):', file=sys.stderr)
print(file=sys.stderr)
print(' ALLELE: {} {}:[{}-{}) ref={} alt={}'.format(
allele.record.id,
allele.contig,
allele.start,
allele.stop,
allele.ref or '-',
','.join(alt or '-' for alt in allele.alts)
), file=sys.stderr)
print(file=sys.stderr)
for i, locus in enumerate(super_non_ref, 1):
lref = locus.ref or '-'
indices = locus.allele_indices
if indices.count(None) == len(indices):
geno = 'nocall'
elif indices.count(0) == len(indices):
geno = 'refcall'
else:
sep = '|' if locus.phased else '/'
geno = sep.join(locus.alleles[a] or '-' if a is not None else '.' for a in indices)
print(f' VAR{i:d}: {locus.contig}[{locus.start:5d}-{locus.stop:5d}) ref={lref} geno={geno}', file=sys.stderr)
match = find_allele(ref, allele, super_non_ref, mode=mode, debug=debug)
if debug:
print(file=sys.stderr)
print(f' MATCH={match}', file=sys.stderr)
print(file=sys.stderr)
yield super_allele, allele, match
def generate_matches(refs, sample, db, args):
"""Generate allele matches over all chromosomes."""
for chrom, ref, loci in records_by_chromosome(refs, [sample, db], [args.name, None], args):
# Create superloci by taking the union of overlapping loci across all of the locus streams
loci = [sort_almost_sorted(l, key=NormalizedLocus.extreme_order_key) for l in loci]
superloci = union(loci, interval_func=attrgetter('min_start', 'max_stop'))
for _, _, (superlocus, alleles) in superloci:
alleles.sort(key=NormalizedLocus.natural_order_key)
superlocus.sort(key=NormalizedLocus.natural_order_key)
yield superlocus, generate_superlocus_matches(chrom, superlocus, ref, alleles, args.mode, args.debug)
def build_new_metadata(db, sample):
"""Build new metadata definitions for sample matches."""
format_meta = []
for fmt, meta in db.header.formats.items():
if fmt not in sample.header.formats:
format_meta.append(meta.name)
sample.header.formats.add(meta.name + '_FOUND', number='.', type=meta.type,
description='Allele(s) found: ' + meta.description)
sample.header.formats.add(meta.name + '_NOTFOUND', number='.', type=meta.type,
description='Allele(s) not found: ' + meta.description)
sample.header.formats.add(meta.name + '_NOCALL', number='.', type=meta.type,
                                  description='Allele(s) with uncertain presence: ' + meta.description)
info_meta = []
for info, meta in db.header.info.items():
if info not in sample.header.info:
info_meta.append(meta.name)
sample.header.info.add(meta.name + '_FOUND', number='.', type=meta.type,
description='Allele(s) found: ' + meta.description)
sample.header.info.add(meta.name + '_NOTFOUND', number='.', type=meta.type,
description='Allele(s) not found: ' + meta.description)
sample.header.info.add(meta.name + '_NOCALL', number='.', type=meta.type,
                               description='Allele(s) with uncertain presence: ' + meta.description)
return format_meta, info_meta
def translate_match(match):
"""Translate match to STATUS and TIMES."""
status, times = 'NOTFOUND', 1
if match is None:
status = 'NOCALL'
elif match.allele_ploidy:
status = 'FOUND'
times = match.allele_ploidy
return status, times
def match_database(args):
"""Match a genome to a database of alleles."""
refs = Fastafile(expanduser(args.reference))
db = VariantFile(expanduser(args.database))
sample = VariantFile(expanduser(args.sample))
format_meta, info_meta = build_new_metadata(db, sample)
with VariantFile(args.output, 'w', header=sample.header) as out:
for superlocus, matches in generate_matches(refs, sample, db, args):
for allele_locus, allele, match in matches:
# Annotate results of search
status, times = translate_match(match)
suffix = '_' + status
for locus in allele_locus:
annotate_info(locus, allele, info_meta, suffix, times)
annotate_format(locus, allele, format_meta, suffix, times)
for locus in sorted(superlocus, key=NormalizedLocus.record_order_key):
out.write(locus.record)
def update_info_header(header):
"""Add match INFO fields VCF header for dbmatch2."""
info_header = header.info
if 'FOUND' not in info_header:
info_header.add('FOUND', number='.', type='String', description='Allele(s) found')
if 'NOTFOUND' not in info_header:
info_header.add('NOTFOUND', number='.', type='String', description='Allele(s) not found')
if 'NOCALL' not in info_header:
info_header.add('NOCALL', number='.', type='String', description='Allele(s) not called due to uncertainty')
def clear_info_fields(loci):
"""Clear INFO fields, if present, prior to setting them."""
for locus in loci:
info = locus.record.info
for status in ('FOUND', 'NOTFOUND', 'NOCALL'):
if status in info:
del info[status]
def write_table_header(out):
"""Write a match header for the tabular output of dbmatch2."""
out.writerow([
'SAMPLE_ID',
'ALLELE_ID',
'STATUS',
'VARIANT_QUALITY',
'CALL_QUALITY',
'ALLELE_PLOIDY',
'REFERENCE_PLOIDY',
'OTHER_PLOIDY',
'ALLELE_READS',
'REFERENCE_READS',
'OTHER_READS',
])
def write_table_row(out, sample_name, var_id, superlocus, status, match):
"""Write a match row for the tabular output of dbmatch2."""
if not out:
return
qual = min(locus.record.qual for locus in superlocus if locus.record.qual is not None) if superlocus else None
gts = [locus.record.samples[sample_name] for locus in superlocus]
gq = min(gt.get('GQ', 0) for gt in gts) if gts else ''
row = [
sample_name,
var_id,
status,
f'{qual:.2f}' if qual is not None else '',
gq,
]
if match:
row += [
match.allele_ploidy, match.ref_ploidy, match.other_ploidy,
match.allele_depth if match.allele_depth is not None else 'NOT_CALLED',
match.ref_depth, # ref allele depth should always be reported if records are present
match.other_depth if match.other_depth is not None else 'NOT_CALLED',
]
else:
row += ['NO_CALL'] * 3 + ['NOT_CALLED'] * 3
out.writerow(row)
def match_database2(args):
"""Match a genome to a database of alleles."""
refs = Fastafile(expanduser(args.reference))
db = VariantFile(expanduser(args.database))
sample = VariantFile(expanduser(args.sample))
try:
sample_name = sample.header.samples[args.name]
except TypeError:
sample_name = args.name
if db.index is None:
raise ValueError('database file must be indexed')
if sample.index is None:
raise ValueError('sample file must be indexed')
    # Open tabular output file, if requested
table = None
if args.table:
tablefile = open(args.table, 'w') if args.table != '-' else sys.stdout
table = csv.writer(tablefile, delimiter='\t', lineterminator='\n')
write_table_header(table)
update_info_header(sample.header)
with VariantFile(args.output, 'w', header=sample.header) as out:
for superlocus, matches in generate_matches(refs, sample, db, args):
clear_info_fields(superlocus)
for allele_locus, allele, match in matches:
dbvar = allele.record
var_id = dbvar.id or f'{dbvar.chrom}_{dbvar.start+1}_{dbvar.stop}_{dbvar.alts[0]}'
status, times = translate_match(match)
for locus in allele_locus:
info = locus.record.info
info[status] = info.get(status, ()) + (var_id, ) * times
write_table_row(table, sample_name, var_id, allele_locus, status, match)
for locus in sorted(superlocus, key=NormalizedLocus.record_order_key):
out.write(locus.record)
|
bioinformed/vgraph
|
vgraph/dbmatch.py
|
Python
|
apache-2.0
| 11,556
|
[
"pysam"
] |
fc10c01547111b8178e1a7696529c27108cecd6ca204ff4ebc907551fc17f491
|
#!/usr/bin/env python
__author__ = 'Benjamin Bolduc'
import sys
import os
import optparse
import math
import csv
import datetime
from collections import OrderedDict
from pprint import pprint
p = optparse.OptionParser(description="""A simple script that creates a graph (*.graphml format) from a BLAST file and
applies a community detection algorithm to identify groups of highly-related sequences. Annotation information can also
be added, allowing high-order organization and graphics in graphml-compatible graph visualization software.
A number of options exist to filter the BLAST HSPs going into the network, thresholds for the number of minimum members
within a group to be retained in the graph, as well as read-based analytics (using *.ace formatted files). Limited data
use visualization is also possible.""")
g = optparse.OptionGroup(p, "General Options")
g.add_option("--input_fn", dest="input_fn", metavar="FILENAME",
help="""BLAST file in pairwise format (out_fmt 0 option).""")
g.add_option("--annotation_fn", dest="annotation_fn", metavar="FILENAME",
help="""Tab-delimited file with header columns as arguments for annotation and the first row as the
sequence/contig/node's name. This name MUST MATCH the sequence name in whatever fasta file was used to
create the network from. EX:
Name Length Group Time Spacer Virus Type Fraction
Contig5 2554 8 Feb Yes Archaeal 17
Contig2 3221 6 Dec No Bacterial 19""")
g.add_option("--output_fn", dest="output_fn", metavar="FILENAME", default="Output", help="""Output prefix.""")
p.add_option_group(g)
g = optparse.OptionGroup(p, "BLAST Options")
g.add_option("--evalue", dest="evalue", type="float", metavar="FLOAT", default=1E-30,
help="""E-value cutoff for sequence selection (optional; default: %default).""")
g.add_option("--hsp_to_query_ratio", dest="hsp_to_query_ratio", type="float", metavar="FLOAT", default=0.0,
help="""Percent of query that belongs to the HSP. If alignment/HSP is 200 bp and query length is 400,
the ratio is 0.5 (optional; default: %default).""")
g.add_option("--hsp_identity", dest="hsp_identity", type="float", metavar="FLOAT", default=0.75,
help="""Percent identity cutoff. This allows a minimum % identity that an alignment must be to be included.
(optional; default: %default).""")
g.add_option("--hsp_length", dest="hsp_length", type="int", metavar="INTEGER", default=200,
help="""HSP length cutoff. This allows a minimum length that an alignment must be to be included.
(optional; default: %default).""")
p.add_option_group(g)
g = optparse.OptionGroup(p, "Network Options")
g.add_option("--num_inputs", dest="num_inputs", type="int", metavar="INTEGER",
help="""The number of sequences in the original BLAST. This is useful for when the BLAST file is too large
to be easily parsed.""")
g.add_option("--partitioner", dest = "partitioner", metavar = "PYTHON CODE", default = "%",
help = """Python code to further define a separating characteristic (i.e. site, date, etc) that can be used
during graph visualization to color-code nodes. For example, "%.split('|')[1].split('_')[1]" will identify
0808 for contig01129|NL10_0808_vDNA_MSU_WGA; '%' will be replaced by the sequence name. An alternative is to
             use the annotation file, whose attributes will be passed to the created nodes (optional; default: %default).""")
g.add_option("--use_reads", dest="use_reads", action="store_true", default=False,
help="""Enables the use of read counting using 454 ACE files.""")
g.add_option("--read_fn", dest="read_fn", metavar="FILENAME",
help="""Ace file with read-to-contig mapping information.""")
g.add_option("--read_community_cutoff", dest="read_co_cutoff", type="int", metavar="INTEGER", default=1000,
help="""Sets a cutoff for the minimum number of reads comprising the contigs of a community must have to be retained.""")
g.add_option("--contig_community_cutoff", dest="contig_co_cutoff", type="int", metavar="INTEGER", default=50,
help="""Sets a cutoff for the minimum number of members a community must have to be retained.""")
g.add_option("--remove_tmp", dest="tmp_remove", action="store_true", default=False,
help="""Removes initial network created prior to any manipulation. This is useful for when memory is
constrained and analysis crashes. This REMOVES that file at the end of a successful run.""")
p.add_option_group(g)
g = optparse.OptionGroup(p, "Post-Visualization Options")
p.add_option("--membership_per_community", dest="mpc_fig", action="store_true", default=False,
help="Will count the members in each community and generate a figure.")
p.add_option("--data_represented", dest="dr_fig", action="store_true", default=False,
help="Will calculate the data represented and generate a figure.")
(p, a) = p.parse_args()
def error(msg):
print >> sys.stderr, "ERROR: {}".format(msg)
sys.exit(1)
if (p.input_fn == None):
error("An input sequence filename is required.")
try:
with open(p.input_fn): pass
except IOError:
error("Input file {} not found.".format(p.input_fn))
try:
from blastorage import Storage
except ImportError:
error("The BlaSTorage library is required and was not found. Available @ http://biowiki.crs4.it/biowiki/MassimilianoOrsini")
if p.mpc_fig or p.dr_fig:
try:
import matplotlib
except ImportError:
error("The matplotlib library is required and was not found. Available @ http://matplotlib.org/")
try:
import numpy as np
except ImportError:
error("The numpy library is required and was not found. Available @ ")
try:
import networkx as nx
except ImportError:
error("The NetworkX library is required and was not found. Available @ https://networkx.github.io/")
try:
import community
except ImportError:
error("The NetworkX-compatible Louvain community detection algorithm is required and was not found."
"It is available at https://bitbucket.org/taynaud/python-louvain")
if (p.use_reads):
if (p.read_fn == None):
error("An read filename is required.")
try:
with open(p.read_fn):
pass
except IOError:
error("Read file {} not found.".format(p.read_fn))
def logger(logfile_handle, text):
logfile_handle.write("{} {}\n".format(datetime.datetime.now(), text))
logfile_handle.flush()
print text
logfile_fn = "{}.log".format(p.output_fn)
logfile_fh = open(logfile_fn, 'a', 0) # Set unbuffered, so info written to disk immediately
diG = nx.DiGraph()
node_annotation_dict = {}
if p.annotation_fn:
with open(p.annotation_fn, 'rU') as annotation_fh:
logger(logfile_fh, "Reading Annotation File.")
line_count = 0
header = []
for lines_of_data in annotation_fh:
if line_count == 0: # Hack to separate 1st line of file (annotation names) from rest of file
header = lines_of_data.strip().split('\t')
line_count += 1
else:
data = lines_of_data.strip().split('\t')
node_annotation_dict[data[0]] = dict(zip(header[1:], data[1:])) # With header columns as keys, rows as values
# Header should stay the same, with lengths, groups, etc always being keys and the data (changing) as values
db = p.input_fn.rsplit('.', 1)[0]+'.db'
infile = p.input_fn
graph_tmp_out = "{0}.tmp.graphml".format(p.output_fn)
uGraph = ''
input_queries = 0
non_singleton_graph_node_count = 0
NoMatches = 0
NoMatches_fh = open(p.output_fn + '-NoMatches.list', 'a', 0)
if os.path.isfile("{}.graphml".format(p.output_fn)):
print "End file {}.graphml already exists. It is safe to delete this file.".format(p.output_fn)
else:
if os.path.isfile(graph_tmp_out):
logger(logfile_fh, "Graph file found! Jumpstarting Analysis.")
uGraph = nx.read_graphml(graph_tmp_out)
logger(logfile_fh, "Graph file loaded.")
non_singleton_graph_node_count = uGraph.number_of_nodes() # Update nodes if reloading graph file
if not os.path.isfile(graph_tmp_out):
logger(logfile_fh, "Processing main BLAST file.")
st = Storage(db, infile)
st.store()
api = st()
logger(logfile_fh, "BLAST file conversion finished. Parsing for network.")
blast_queries = api.getQueriesNumber()
        input_queries = blast_queries  # Number of reference sequences should/could be subtracted
logger(logfile_fh, "There are {} blast queries to parse.".format(blast_queries))
n = 0
for blast in api: # One Blast result for each query
query = str(blast.getQueryTitle()).strip()
query_len = int(blast.getQueryLength())
query_dict = {
"id": query,
"length": query_len,
"size": 1,
}
if query in node_annotation_dict:
query_dict.update(node_annotation_dict[query])
            # Round result; multiple objects for psiblast... apparently only 1 (still needs to be iterated through, though)
for iteration in blast:
if not iteration.found: # Another way of saying "if cond is False"
NoMatches += 1
NoMatches_fh.write(query + '\n')
NoMatches_fh.flush()
else:
# All sequences will get 'a chance.' If good match, then edge will connect.
# If no good match (i.e. fails >= 1 conditions), will be removed during singleton analysis
diG.add_node(query, query_dict) # Pass dict to networkx
for alignment in iteration: # Alignment, one for each subject
target = str(alignment.getSubjectTitle()).strip()
target_len = int(alignment.getSubjectLength()) # True target length, i.e. the contig size
target_dict = {
"id": target,
"length": query_len,
"size": 1,
}
if target in node_annotation_dict:
target_dict.update(node_annotation_dict[target])
for hsp in alignment:
hsp.setGradesDict()
evalue = hsp.getEvalueAsFloat()
identity = hsp.getIdentitiesAsPercentage()
identities = hsp.getIdentitiesAsAbsoluteValue() # Int
hsp_length = int(hsp.getIdentitiesExpression().split(' ')[0].split('/')[1]) # Hack to get hsp length, Int
hsp_identity = float(identities) / hsp_length # Percent identity over HSP length
                            total_alignment_length = hsp_length / (target_len + .0)  # Alignment length over target
hsp_to_query_ratio = hsp_length / (query_len + .0) # Can be greater than 1
if query == target:
continue # At the very least, it's a self-match, at worst, there's already a node in the network
if (evalue <= p.evalue) and (hsp_to_query_ratio >= p.hsp_to_query_ratio) \
and (hsp_identity >= p.hsp_identity) and (hsp_length >= p.hsp_length):
# Going to add a node anyway, might as well add it once
diG.add_node(target, target_dict)
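                                # Edge weight is -ln(evalue); evalue == 0 has no finite
                                # log, so a fixed weight of 500 is used as a stand-in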
if evalue == 0:
diG.add_edge(query, target, weight=500.0, length=hsp_length, identity=hsp_identity, label='BLAST')
if evalue != 0:
expect = math.log(evalue) * -1
diG.add_edge(query, target, weight=expect, length=hsp_length, identity=hsp_identity, label='BLAST')
n += 1
sys.stdout.write("\r{0:.4f}".format((float(n) / blast_queries) * 100))
sys.stdout.flush()
logger(logfile_fh, "\nThere were {} queries without a match.".format(NoMatches))
logger(logfile_fh, "Finished processing BLAST file.")
# Remove nodes with fewer than 1 edge in a directed graph
# Following speeds things up by removing nodes that are not relevant
# Any nodes with matches but not matches of sufficient strength
singles = [node for node, degree in diG.degree_iter() if degree == 0]
logger(logfile_fh, "There were {} queries that were singletons.".format(len(singles)))
diG.remove_nodes_from(singles)
Insufficient_Match_fh = open(p.output_fn + '-PoorMatches.list', 'w')
formatted_singles = "\n".join(singles)
print >> Insufficient_Match_fh, formatted_singles
Insufficient_Match_fh.close()
logger(logfile_fh, "Removed singletons.")
logger(logfile_fh, "Converting directed graph to undirected.")
# Convert graph to undirected - can't run community analysis without
uGraph = diG.to_undirected(diG)
non_singleton_graph_node_count = uGraph.number_of_nodes()
logger(logfile_fh, "Writing raw graphml file to disk.")
# In case downstream analysis fails... wont have to repeat
nx.write_graphml(uGraph, "{0}.tmp.graphml".format(p.output_fn))
logger(logfile_fh, "Attempting to close and remove BlaSTorage DB. If it fails, kill job and restart. It will resume."
" This is likely to happen if your BLAST file is >= 2 GB.")
st.close()
os.remove(db)
logger(logfile_fh, "Closing finished.")
# Sadly needed if the BlaSTorage DB fails to close, halting the program. There is no other way to obtain the input queries unless
# the blast file is reloaded... so could either force user to supply the number, the fasta file used for the blast, or
# just grep -c '>' on the original fasta file and be done with it
if (p.num_inputs):
input_queries = int(p.num_inputs)
else:
if input_queries == 0:
error("There are no input queries or the number of input queries has not been specified. This can happen if"
" the BlaSTorage library failed previously.")
def find_partition(graph):
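    """Run Louvain community detection on the graph and store each node's
    community id in the 'orig_partition' node attribute."""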
g = graph
partition = community.best_partition(g)
nx.set_node_attributes(g, 'orig_partition', partition)
return g, partition
logger(logfile_fh, "Identifying partitions...")
# Find communities
coGraph, parts = find_partition(uGraph)
new_parts = {}
cluster_membership = {}
out_part_mem_fh = open(p.output_fn + '-Partition-Membership.tab', 'w')
for sequences, partitions in parts.iteritems():
print >> out_part_mem_fh, '{0}\t{1}'.format(sequences, partitions)
if not partitions in new_parts.keys(): # The keys are partition numbers
new_parts[partitions] = 1
else:
new_parts[partitions] += 1
if not partitions in cluster_membership:
cluster_membership[partitions] = [sequences]
else:
cluster_membership[partitions].append(sequences)
out_part_mem_fh.close()
if p.partitioner != "%":
logger(logfile_fh, "Partitioner enabled due to non-default options.")
logger(logfile_fh, "Identifying fraction information...")
part_site_comp_fh = csv.writer(open(p.output_fn + '-Partition-Site-Composition.csv', 'w'))
partition_sites = {}
for contigs, partitions in parts.iteritems():
partition_num = str(partitions)
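        # The partitioner template compiles to a function; e.g. with
        # --partitioner "%.split('|')[1].split('_')[1]", the contig name
        # 'contig01129|NL10_0808_vDNA_MSU_WGA' yields '0808'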
get_characteristic = eval("lambda x: " + p.partitioner.replace('%', 'x').replace("\\x", '%'))
characteristic = get_characteristic(contigs)
if not partition_num in partition_sites:
partition_sites[partition_num] = {}
if not characteristic in partition_sites[partition_num]:
partition_sites[partition_num][characteristic] = 0
partition_sites[partition_num][characteristic] += 1
characteristics = {}
for cluster_id in partition_sites:
for characteristic in partition_sites[cluster_id]:
characteristics[characteristic] = None
characteristics = sorted(characteristics)
part_site_comp_fh.writerow(["Cluster"] + characteristics + ["Total Contigs"])
for cluster_id in sorted(partition_sites):
row = [cluster_id]
total_contigs = sum(partition_sites[cluster_id].values())
for characteristic in characteristics:
row.append(partition_sites[cluster_id].get(characteristic, 0))
# If reference sequence present, remove count from total
if 'REF' in characteristic:
total_contigs -= partition_sites[cluster_id].get(characteristic, 0)
row.append(total_contigs)
part_site_comp_fh.writerow(row)
cutoff = []
if (p.use_reads):
print "Working on ace file {}".format(p.read_fn)
contig_read_dict = {}
contig_read_len_dict = {}
from Bio.Sequencing import Ace
with open(p.read_fn, 'rU') as ace_fh:
for contig in Ace.parse(ace_fh):
"""rd (reads) - read with name, sequence, etc
qa (read qual) - which parts used as consensus
ds - file name of read's chromatogram file
af - loc of read within contig
bs (base segment) - which read chosen at consensus at each pos
rt (transient read tags) - generated by crossmatch and phrap
ct (consensus tag)
wa (whole assembly tag) - hosts assembly program name, version, etc
wr
reads - info about read supporting ace contig
contig - holds info about contig from ace record"""
contig_name = "{}".format(contig.name) # contig00001
if not contig_name in contig_read_dict:
contig_read_dict[contig_name] = []
if not contig_name in contig_read_len_dict:
contig_read_len_dict[contig_name] = []
for read_id, read in enumerate(contig.reads):
                # Using enumerate to get an int index while iterating over the contig's read list. The list isn't
                # necessarily in order, meaning a read's position in one block won't correspond to its location
                # (in the list) in another block
read_name = read.rd.name.split('.')[0] # Functionally equivalent to contig.af[read_id].name
read_length = len(read.rd.sequence)
if not read_name in contig_read_dict[contig_name]:
contig_read_dict[contig_name].append(read_name)
contig_read_len_dict[contig_name].append(read_length)
cluster_membership = OrderedDict(sorted(cluster_membership.items(), key=lambda t: len(t[1]), reverse=True)) # Sorted by value
cluster_read_abundances = OrderedDict()
cluster_read_length_abundances = OrderedDict()
for cluster, contigs in cluster_membership.iteritems(): # Ordered above, but need another dict to keep it that way
        # Might as well filter the list here and now. Later the reads would have been filtered, but that's pointless since
# N reads will never be sufficient to generate N contigs in order to be included in network
for contig in contigs:
try:
num_reads = len(contig_read_dict[contig]) # Get length of contig's read list
read_lengths = sum(contig_read_len_dict[contig]) # Get sum of all read lengths
except KeyError: # References will not be in the metagenome's ace files, obviously
print "Skipping {}".format(contig)
break
if not cluster in cluster_read_abundances:
cluster_read_abundances[cluster] = 0
if not cluster in cluster_read_length_abundances:
cluster_read_length_abundances[cluster] = 0
cluster_read_abundances[cluster] += num_reads
cluster_read_length_abundances[cluster] += read_lengths
out_part_size_fh = open(p.output_fn + '-Partition-Contig-Read-Sizes.tab', 'w')
for keys, values in new_parts.iteritems(): # partition: count
try:
cluster_reads = cluster_read_abundances[keys]
print >> out_part_size_fh, '{0}\t{1}\t{2}'.format(keys, values, cluster_reads) # print partition \t count
            if cluster_reads >= p.read_co_cutoff:
cutoff.append(keys)
except KeyError: # All clusters are in new_parts, not all clusters have sufficient
print "Unable to find {}. It has {} contigs".format(keys, values)
out_part_size_fh.close()
if p.partitioner != "%":
part_site_read_abund_fh = csv.writer(open(p.output_fn + '-Partition-Site-Read-Abundances.csv', 'w'))
partition_read_abundance_sites = {}
for contigs, partitions in parts.iteritems():
partition_num = str(partitions)
try:
num_reads = len(contig_read_dict[contigs])
#print contigs, partitions, num_reads
except KeyError: # References will not be in the metagenome's ace files, obviously
print "Skipping {}".format(contigs)
num_reads = 0
get_characteristic = eval("lambda x: " + p.partitioner.replace('%', 'x').replace("\\x", '%'))
characteristic = get_characteristic(contigs)
if not partition_num in partition_read_abundance_sites:
partition_read_abundance_sites[partition_num] = {}
if not characteristic in partition_read_abundance_sites[partition_num]:
partition_read_abundance_sites[partition_num][characteristic] = 0
partition_read_abundance_sites[partition_num][characteristic] += num_reads
characteristics = {}
for cluster_id in partition_read_abundance_sites:
for characteristic in partition_read_abundance_sites[cluster_id]:
characteristics[characteristic] = None
characteristics = sorted(characteristics)
part_site_read_abund_fh.writerow(["Cluster"] + characteristics + ["Total Reads", "Total Contigs"])
read_composition_array = []
for cluster_id in sorted(partition_read_abundance_sites):
row = [cluster_id]
total_reads = sum(partition_read_abundance_sites[cluster_id].values())
total_contigs = new_parts[int(cluster_id)]
list_to_append = []
for characteristic in characteristics:
row.append(partition_read_abundance_sites[cluster_id].get(characteristic, 0))
            if total_contigs >= p.contig_co_cutoff:
list_to_append.append(partition_read_abundance_sites[cluster_id].get(characteristic, 0))
row.append(total_reads)
row.append(total_contigs)
if list_to_append: # Dont want to add empty arrays
read_composition_array.append(list_to_append)
part_site_read_abund_fh.writerow(row)
# http://stackoverflow.com/questions/19872530/python-sort-lists-based-on-their-sum
read_composition_array.sort(key=sum) # So easy it makes me want to cry
# If not using reads
else:
out_part_size_fh = open(p.output_fn + '-Partition-Contig-Sizes.tab', 'w')
for keys, values in new_parts.iteritems(): # partition: count
print >> out_part_size_fh, '{0}\t{1}'.format(keys, values) # print partition \t count
if values >= p.contig_co_cutoff:
cutoff.append(keys)
logger(logfile_fh, "Removing clusters containing less than {} members".format(p.contig_co_cutoff))
# Create a subgraph that includes only those above a specified cutoff value
subGraph = coGraph.subgraph([n for n, attrdict in coGraph.node.items() if attrdict['orig_partition'] in cutoff])
logger(logfile_fh, "Writing final network to disk...")
nx.write_graphml(subGraph, "{0}.graphml".format(p.output_fn))
logger(logfile_fh, "Network written to disk")
if p.tmp_remove:
    logger(logfile_fh, "Removing temporary network file...")
    os.remove(graph_tmp_out)
if p.mpc_fig or p.dr_fig:
from mpl_toolkits.mplot3d import Axes3D
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
if p.mpc_fig:
logger(logfile_fh, "Generating Membership Per Community Figure")
rcParams['ytick.direction'] = 'out'
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 10}
lines = {'linewidth': 0.5}
Data = [value for (key, value) in new_parts.iteritems() if key in cutoff]
Data.sort(reverse=True)
# Removes the "long tail" often associated with rare species
Data_NoFew = filter(lambda few: few >= 2, Data)
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(1, 1, 1)
N = len(Data_NoFew)
ind = np.arange(N)
width = 0.8
margin = 0.1
colors = []
for data in Data_NoFew:
if data >= p.contig_co_cutoff:
colors.append('b')
else:
colors.append('r')
ax.bar(ind + margin, Data_NoFew, width, color=colors, align='edge', alpha=.8, zorder=1, edgecolor="none", linewidth=0) # Removed label=
ax.set_ylabel('Members/Community')
ax.set_xlabel('Viral Group Rank')
ax.legend(loc='upper left', frameon=False)
fig.savefig(p.output_fn + "-Members-in-Each-Community.png", dpi=600, bbox_inches='tight')
if p.dr_fig:
logger(logfile_fh, "Generating NonSingleton Data Represented Figure.")
import matplotlib.cm as cm
from matplotlib.colors import Normalize
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 10}
lines = {'linewidth': 0.5}
matplotlib.rc('font', **font)
matplotlib.rc('lines', **lines)
fig = plt.figure(figsize=(7, 4))
ax = fig.add_subplot(1, 1, 1)
# new_parts = partition#, counts
Data = [value for (key, value) in new_parts.iteritems()]
N = 15 # Change to 16 if including 16 cutoff range
ind = np.arange(N)
width = 0.8
Total_Contigs = int(input_queries)
# Will create a list with values of the counts of each of the partitions (done above)
Total_Contigs_In_Network_list = [value for value in Data if value >= 2]
Total_Contigs_In_Network = sum(Total_Contigs_In_Network_list)
Total_Contigs_In_Gt1 = non_singleton_graph_node_count
data_represented_membership_size_cutoffs = [2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 100, 200, 300, 400, 500]
Data_Represented = []
for cutoff in data_represented_membership_size_cutoffs:
Data_prerepresented = []
for community_sizes in Data:
if community_sizes >= cutoff:
Data_prerepresented.append(community_sizes)
# Calculate % of data represented by the new communities that are above the cutoff
to_append = (sum(Data_prerepresented) / (Total_Contigs_In_Gt1 + .0)) * 100
Data_Represented.append(to_append)
my_cmap = cm.get_cmap('jet')
my_norm = Normalize(vmin=min(Data_Represented), vmax=max(Data_Represented))
ax.bar(ind, Data_Represented, width, color=my_cmap(my_norm(Data_Represented)), align='edge', alpha=1, zorder=1)
xlabels = ["{0}".format(x) for x in data_represented_membership_size_cutoffs]
ax.set_xlabel('Members/Group')
ax.set_ylabel('Contigs Represented (%)')
ax.set_xlim(0)
ax.set_ylim(0)
ax.set_xticks(ind + (width / 2)) # ind+width
ax.set_xticklabels((xlabels))
xtickNames = ax.set_xticklabels(xlabels)
plt.setp(xtickNames, rotation=0, fontsize=10)
sm = plt.cm.ScalarMappable(cmap=my_cmap, norm=Normalize(vmin=min(Data_Represented), vmax=max(Data_Represented)))
sm._A = []
plt.colorbar(sm)
fig.savefig(p.output_fn + "-NoSingles-Contig-Data-Represented.png", dpi=600, bbox_inches='tight')
if p.dr_fig:
logger(logfile_fh, "Generating Total Data Represented Figure.")
import matplotlib.cm as cm
from matplotlib.colors import Normalize
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 10}
lines = {'linewidth': 0.5}
matplotlib.rc('font', **font)
matplotlib.rc('lines', **lines)
fig = plt.figure(figsize=(7, 4))
ax = fig.add_subplot(1, 1, 1)
Data = [value for (key, value) in new_parts.iteritems()]
N = 15 # Change to 16 if including 16 cutoff range
ind = np.arange(N)
width = 0.8
Total_Contigs = int(input_queries) + .0
# Will create a list with values of the counts of each of the partitions (done above)
Total_Contigs_In_Network_list = [value for value in Data if value >= 1]
Total_Contigs_In_Network = sum(Total_Contigs_In_Network_list)
data_represented_membership_size_cutoffs = [2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 100, 200, 300, 400, 500]
Data_Represented = []
for cutoff in data_represented_membership_size_cutoffs:
Data_prerepresented = []
for community_sizes in Data:
if community_sizes >= cutoff:
Data_prerepresented.append(community_sizes)
# Calculate % of data represented by the new communities that are above the cutoff
to_append = (sum(Data_prerepresented) / (Total_Contigs + .0)) * 100
Data_Represented.append(to_append)
my_cmap = cm.get_cmap('jet')
my_norm = Normalize(vmin=min(Data_Represented), vmax=max(Data_Represented))
ax.bar(ind, Data_Represented, width, color=my_cmap(my_norm(Data_Represented)),
align='edge', alpha=1, zorder=1)
xlabels = ["{0}".format(x) for x in data_represented_membership_size_cutoffs]
ax.set_xlabel('Members/Group')
ax.set_ylabel('Contigs Represented (%)')
ax.set_xlim(0)
ax.set_ylim(0)
ax.set_xticks(ind + (width / 2)) # ind+width
ax.set_xticklabels(xlabels)
xtickNames = ax.set_xticklabels(xlabels)
plt.setp(xtickNames, rotation=0, fontsize=10)
sm = plt.cm.ScalarMappable(cmap=my_cmap, norm=Normalize(vmin=min(Data_Represented), vmax=max(Data_Represented)))
sm._A = []
plt.colorbar(sm)
fig.savefig(p.output_fn + "-Total-Data-Represented.png", dpi=600, bbox_inches='tight')
logger(logfile_fh, "Program Complete.")
logfile_fh.close()
|
chrisLanderson/rumen_virome
|
scripts/BLAST2Network.py
|
Python
|
mit
| 27,771
|
[
"BLAST"
] |
90b10c6ccb270ce4f11b71643d3169e0be203d3b0770a9199bce4cceafca1b50
|
import shutil
import os
modules = ["trimgalore", "fastqc", "kallisto", "star", "star-fusion", "picard", "picard_IS", "htseq-gene", "htseq-exon", "varscan", "gatk", 'jsplice']
module_names = {"trimgalore":"",
"fastqc":"",
"kallisto":"",
"star":"",
"star-fusion":"",
"picard":"",
"picard_IS":"",
"htseq-gene":"",
"htseq-exon":"",
"varscan":"",
"jsplice":"",
"gatk":""}
def get_menu(config, ns):
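    """Build the HTML sidebar menu, listing report pages only for the modules
    enabled in config; count-statistics pages are included only when ns > 1."""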
enabled = dict()
for module in modules:
if config.has_key(module):
if int(config[module][0].split("/")[0]) > 0:
enabled[module] = 1
menu = list()
menu.append('<h1><a #highlight="" href="./summary.html">Summary</a></h1>')
menu.append('<h1><a #highlight="" href="./hpc.html">HPC statistics</a></h1>')
if enabled.has_key("fastqc") or enabled.has_key("trimgalore"):
menu.append('<h1>Raw QC:</h1>')
if enabled.has_key("trimgalore"):
menu.append('<h2><a #highlight="" href="./trim.html">- TrimGalore/Cutadapt</a></h2>')
if enabled.has_key("fastqc"):
menu.append('<h2><a #highlight="" href="./fastqc.html">- FastQC</a></h2>')
if enabled.has_key("star") or enabled.has_key("picard") or enabled.has_key("htseq-gene") or enabled.has_key("htseq-exon"):
menu.append('<h1>Alignment QC:</h1>')
if enabled.has_key("picard"):
menu.append('<h2><a #highlight="" href="./picard.html">- Picard</a></h2>')
if enabled.has_key("picard_IS"):
menu.append('<h2><a #highlight="" href="./picard-is.html">- Picard Insert Size</a></h2>')
if enabled.has_key("star"):
menu.append('<h2><a #highlight="" href="./star.html">- STAR</a></h2>')
if enabled.has_key("kallisto"):
menu.append('<h2><a #highlight="" href="./kallisto.html">- KALLISTO</a></h2>')
if enabled.has_key("htseq-gene"):
menu.append('<h2><a #highlight="" href="./htseq-gene.html">- HTseq-Gene</a></h2>')
if enabled.has_key("htseq-exon"):
menu.append('<h2><a #highlight="" href="./htseq-exon.html">- HTseq-Exon</a></h2>')
if ns > 1:
if enabled.has_key("star") or enabled.has_key("htseq-gene") or enabled.has_key("htseq-exon") or enabled.has_key("kallisto"):
menu.append('<h1>Count statistics:</h1>')
menu.append('<h2><a #highlight="" href="./downloads.html">- DOWNLOADS</a></h2>')
if enabled.has_key("star"):
menu.append('<h2><a #highlight="" href="./star2.html">- STAR</a></h2>')
if enabled.has_key("kallisto"):
menu.append('<h2><a #highlight="" href="./kallisto2.html">- KALLISTO</a></h2>')
if enabled.has_key("htseq-gene"):
menu.append('<h2><a href="./htseq-gene2.html">- HTseq-Gene</a></h2>')
if enabled.has_key("htseq-exon"):
menu.append('<h2><a #highlight="" href="./htseq-exon2.html">- HTseq-Exon</a></h2>')
if enabled.has_key("gatk") or enabled.has_key("varscan"):
menu.append('<h1>Variant calling:</h1>')
if enabled.has_key("varscan"):
menu.append('<h2><a #highlight="" href="./varscan.html">- VARSCAN</a></h2>')
if enabled.has_key("gatk"):
menu.append('<h2><a #highlight="" href="./gatk.html">- GATK</a></h2>')
if enabled.has_key("star-fusion"):
menu.append('<h1>Gene fusions:</h1>')
menu.append('<h2><a #highlight="" href="./star-fusion.html">- Star-Fusion</a></h2>')
if enabled.has_key("jsplice"):
menu.append('<h1>Alternative splicing:</h1>')
menu.append('<h2><a #highlight="" href="./jsplice.html">- jSplice</a></h2>')
menu = "\n".join(menu)
return menu
def print_samples(path,config):
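    """Build the per-sample results table, linking each enabled module's
    output files or marking the sample as FAIL."""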
analysis = ['trimgalore', 'fastqc', 'kallisto', 'star', 'star-fusion', 'picard', "htseq-gene", "htseq-exon", "picard_IS", "varscan", 'gatk', 'jsplice']
sta= {"trimgalore":"TrimGalore", "fastqc":"FastQC","star":"STAR","star-fusion":"STAR-Fusion","picard":"PicardQC","kallisto":"Kallisto","htseq-gene":"HTseq-gene",
"htseq-exon":"HTseq-exon", "picard_IS":"Picard-InsertSize", "varscan":"VARSCAN", "gatk":"GATK", "jsplice":"jSplice"}
# SAMPLES LIST
samples = dict()
f = open(path + "/samples.list",'r')
hss = f.readline().strip("\n").split("\t")
idx = []
for i, ix in enumerate(hss):
if ix.startswith('FASTQ'):
idx.append(i)
hs = [hss[0]] + [hss[i] for i in idx]
for i in f:
i = i.strip("\n").split("\t")
if i[0] != "":
samples[i[0]] = [i[j] for j in idx]
f.close()
# scan
results = dict()
for i in analysis:
if config.has_key(i):
sok = dict()
if os.path.exists(path + "/results_" + i + "/samples_ok.txt"):
f = open(path + "/results_" + i + "/samples_ok.txt", 'r')
for j in f:
sok[j.strip("\n")] = 1
f.close()
results[i] = dict()
if i == "trimgalore":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
filess = y
for f in filess:
f = f.split("/")[-1]
link = "../results_trimgalore/" + f + "_trimming_report.txt"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["trimgalore"][x] = " / ".join(res)
elif i=="fastqc":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
filess = y
for f in filess:
f = f.split("/")[-1]
link = "../results_fastqc/"+f.replace(".fastq","").replace(".gz","")+"_fastqc/fastqc_report.html"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK",link)
res.append(link)
else:
res.append("FAIL")
results["fastqc"][x] = " / ".join(res)
elif i=="star":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_star/" + x + "_Aligned.out.sam"
link = '<a href="LINK" target="_blank">BAM-OK</a>'.replace("LINK", link)
res.append(link)
link = "../results_star/" + x + "_ReadsPerGene.out.tab"
link = '<a href="LINK" target="_blank">COUNTS-OK</a>'.replace("LINK", link)
res.append(link)
link = "../results_star/" + x + "_SJ.out.tab"
link = '<a href="LINK" target="_blank">SJ-OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("BAM-FAIL")
res.append("COUNTS-FAIL")
res.append("COUNTS-FAIL")
results["star"][x] = " / ".join(res)
elif i=="kallisto":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_kallisto/" + x + "/abundance.tsv"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["kallisto"][x] = " / ".join(res)
elif i=="star-fusion":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_star-fusion/" + x + "/star-fusion.fusion_candidates.final.abridged"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["star-fusion"][x] = " / ".join(res)
elif i=="picard_IS":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x) and os.path.exists(path + '/results_picard_IS/' + x + '.txt'):
link = "../results_picard_IS/" + x + ".txt"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["picard_IS"][x] = " / ".join(res)
elif i=="jsplice":
for x, y in sorted(samples.iteritems()):
res = []
if os.path.exists(path + '/results_jsplice/jSplice_results.html'):
link = '../results_jsplice/jSplice_results.html'
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["jsplice"][x] = " / ".join(res)
elif i=="picard":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_picard/" + x + ".general.qc"
link = '<a href="LINK" target="_blank">GENERAL-OK</a>'.replace("LINK", link)
res.append(link)
link = "../results_picard/" + x + ".protein_coding.qc"
link = '<a href="LINK" target="_blank">PC-OK</a>'.replace("LINK", link)
res.append(link)
link = "../results_picard/" + x + ".ribosomal.qc"
link = '<a href="LINK" target="_blank">RB-OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("GENERAL-FAIL")
res.append("PC-FAIL")
res.append("RB-FAIL")
results["picard"][x] = " / ".join(res)
elif i=="htseq-gene":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_htseq-gene/" + x + ".tab"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["htseq-gene"][x] = " / ".join(res)
elif i=="htseq-exon":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_htseq-exon/" + x + ".tab"
link = '<a href="LINK" target="_blank">OK</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["htseq-exon"][x] = " / ".join(res)
elif i=="varscan":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link = "../results_varscan/" + x + ".vcf"
link = '<a href="LINK" target="_blank">VCF</a>'.replace("LINK", link)
res.append(link)
else:
res.append("FAIL")
results["varscan"][x] = " / ".join(res)
elif i=="gatk":
for x, y in sorted(samples.iteritems()):
res = []
if sok.has_key(x):
link1 = "../results_gatk/" + x + ".vcf"
link1 = '<a href="LINK" target="_blank">VCF</a>'.replace("LINK", link1)
link2 = "../results_gatk/" + x + ".filt.vcf"
link2 = '<a href="LINK" target="_blank">VCF_FILT</a>'.replace("LINK", link2)
res.append(link1 + "/" + link2)
else:
res.append("FAIL")
results["gatk"][x] = " / ".join(res)
n = "<th bgcolor='#A8A8A8'>Sample</th>"
for i in hs[1:]:
n = n + "<th bgcolor='#A8A8A8'> Size "+i+" (Gb)</th>"
for i in range(len(analysis)):
if results.has_key(analysis[i]):
n = n +"<th bgcolor='#A8A8A8'>"+sta[analysis[i]]+"</th>"
thead = "<thead><tr>"+n+"</tr></thead>"
tab = list()
for i in sorted(samples.keys()):
n = ["<td bgcolor='#A8A8A8'>"+i+"</td>"]
for j in range(len(hs[1:])):
try:
n.append("<td bgcolor='#A8A8A8'>" + str(round(os.stat(samples[i][j]).st_size/1000000000.0, 2)) + "</td>")
except:
n.append("<td bgcolor='#A8A8A8'>NA</td>")
for a in analysis:
if results.has_key(a):
cl = "#00CC66"
if "FAIL" in results[a][i]:
cl = "#CC3300"
n.append("<td bgcolor='"+cl+"'>"+results[a][i]+"</td>")
tab.append("<tr>"+"".join(n)+"</tr>")
return '<table id="DT" class="display">' + thead + "<tbody>" + "\n".join(tab) + "</tbody></table>"
def config_file(path, fname):
f = open(path + "/" + fname,'r')
n = list()
for i in f:
i = i.strip("\n").split("\t")
if len(i) >= 2:
g = "<tr><td bgcolor='#00CC66'>"+i[0]+"</td><td bgcolor='#00CC66'>"+i[1]+"</td></tr>"
else:
g = "<tr><td colspan='2' bgcolor='#C0C0C0'>" + i[0] + "</td></tr>"
n.append(g)
f.close()
tab = "<table>"+"".join(n)+"</table>"
return tab
def print_table_default(datafile, index, select):
palette = ["#00FA9A", "#AFEEEE", "#D8BFD8", "#DEB887", "#D3D3D3", "#EEE8AA"]
if not os.path.exists(datafile):
return ""
f = open(datafile, 'r')
h = f.readline().strip("\n").split("\t")
if len(select) == 0:
select = range(len(h))
n = ""
for i in select:
n = n + "<th align='center' bgcolor='#A8A8A8'>" + h[i] + "</th>"
if index < 0:
n = '<table id="DT" class="display"><thead><tr>' + n + '</tr></thead><tbody>'
else:
n = '<table><thead><tr>' + n + '</tr></thead><tbody>'
M = dict()
r = 0
for i in f:
i = i.strip("\n").split("\t")
if len(i) > 0:
temp = ""
if index > -1:
if not M.has_key(i[index]):
M[i[index]] = palette[r % len(palette)]
r += 1
for k in select:
j = i[k]
if (j == "-1") or (j.startswith("NA ") or j.endswith(" NA") or j == "NA"):
temp = temp + "<td align='center' bgcolor='#CC3300'>" + j + "</td>"
else:
if index < 0:
temp = temp + "<td align='center' bgcolor='#00CC66'>" + j + "</td>"
else:
temp = temp + "<td align='center' bgcolor='" + M[i[index]] + "'>" + j + "</td>"
n = n + "<tr>" + temp + "</tr>"
n += "</tbody></table>"
return n
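# Hedged usage sketch (arguments are just examples): index=-1 renders a
# uniformly colored, sortable "DT" table of all columns; index=0 with
# select=[0, 2] would instead group row colors by the value in column 0 and
# keep only those two columns.
#
# html = print_table_default(path + "/outputs/stats_picard.txt", -1, [])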
def check_project(path):
print "> Checking project path..."
if path == "":
exit("Parameter -p is required.")
if not os.path.exists(path):
exit("Path to project folder not found.")
if path.endswith("/"):
path = path[0:(len(path)-1)]
if not os.path.exists(path + "/config.txt"):
exit("Project configuration file not found: " + path + "/config.txt")
if not os.path.exists(path + "/samples.list"):
exit("Project samples file not found: " + path + "/samples.list")
project = path.split("/")
project = project[len(project)-1] # project id
return project, path
def stats_picard(path,samples,config):
n = os.listdir(path)
hh = "\t".join(["sample_id","mRNA-Coding", "mRNA-Ribosomal", "mRNA-Others","Intron","Intergenic","Unmapped","Med-CVcov","Med-5bias","Med-3bias","Med-5/3bias"])
if config.has_key("picard") and ("results_picard" in n):
files = os.listdir(path+"/results_picard")
n = [".general.qc",".protein_coding.qc",".ribosomal.qc"]
names = ["Sample","BASES (N)","ALIGN (N)","ALIGN (%)","INTRON (N)","INTRON (%)",
"INTERGEN (N)","INTERGEN (%)","MRNA (N)","MRNA (%)","Protein coding (%)",
"Ribosomal (%)","Others (%)","Coding (%)","UTR (%)","MEDIAN CV COVERAGE","MEDIAN 5' BIAS","MEDIAN 3' BIAS","MEDIAN 5'-3' BIAS"]
g = ["MEDIAN_CV_COVERAGE","MEDIAN_5PRIME_BIAS","MEDIAN_3PRIME_BIAS","MEDIAN_5PRIME_TO_3PRIME_BIAS"]
out = open(path + "/outputs/stats_picard.txt",'w')
table = ['<tr><th align="center" colspan="2" bgcolor="#A8A8A8"></th><th align="center" colspan="2" bgcolor="#A8A8A8">ALIGNED BASES</th><th align="center" colspan="6" bgcolor="#A8A8A8">GENOME LOCATION</th><th align="center" colspan="3" bgcolor="#A8A8A8">MRNA TYPE</th><th align="center" colspan="2" bgcolor="#A8A8A8">MRNA LOCATION</th><th align="center" colspan="4" bgcolor="#A8A8A8">OTHERS</th></tr>']
header = ""
for i in names:
header = header+"<th bgcolor='#A8A8A8'>"+i+"</th>"
print >> out, hh
header = "<tr>"+header+"</tr>"
table.append(header)
for i in sorted(samples.keys()):
stats = [{},{},{}]
heads = list()
ex = 0
for k in range(3):
if i+n[k] in files:
f = open(path+"/results_picard"+"/"+i+n[k],'r')
nx = 0
kdiff0 = 0
for ii in f:
if ii.startswith("PF_BASES"):
head = ii.rstrip().split("\t")
nx = 1
for kk in head:
if not (kk in heads):
heads.append(kk)
elif nx == 1:
vals = ii.strip("\n").split("\t")
for kk in range(len(head)):
stats[k][head[kk]] = vals[kk]
if (vals[kk] != "") and (vals[kk] != "?"):
if float(vals[kk]) > 0:
kdiff0 += 1
nx = 0
if kdiff0 == 0:
ex = 1
break
f.close()
else:
ex = 1
break
if ex == 1:
tr = "<td bgcolor='#CC3300'>"+i+"</td>"
o = i
for ii in range(18):
tr += "<td bgcolor='#CC3300'>NA</td>"
o += "\tNA"
tr = "<tr>"+tr+"</tr>"
table.append(tr)
s = ["NA" for ii in range(10)]
else:
align = int(stats[0]["PF_ALIGNED_BASES"])
cod = int(stats[0]["CODING_BASES"])
utr = int(stats[0]["UTR_BASES"])
intr = int(stats[0]["INTRONIC_BASES"])
inter = int(stats[0]["INTERGENIC_BASES"])
mrna = cod + utr
pc = int(stats[1]["CODING_BASES"])+int(stats[1]["UTR_BASES"])
rb = int(stats[2]["CODING_BASES"])+int(stats[2]["UTR_BASES"])
ot = mrna - pc - rb
pc = str(round(100*float(pc)/align,3))
rb = str(round(100*float(rb)/align,3))
ot = str(round(100*float(ot)/align,3))
tr = "<td bgcolor='#B8B8B8'>"+i+"</td>"
tr +="<td bgcolor='#00CC66'>"+stats[0]["PF_BASES"]+"</td>"
tr +="<td bgcolor='#00CC66'>"+str(align)+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(align)/float(stats[0]["PF_BASES"]),2))+"</td>"
tr +="<td bgcolor='#00CC66'>"+str(intr)+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(intr)/align,2))+"</td>"
tr +="<td bgcolor='#00CC66'>"+str(inter)+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(inter)/align,2))+"</td>"
tr +="<td bgcolor='#00CC66'>"+str(mrna)+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(mrna)/align,2))+"</td>"
tr +="<td bgcolor='#99CC66'>"+pc+"</td>"
tr +="<td bgcolor='#99CC66'>"+rb+"</td>"
tr +="<td bgcolor='#99CC66'>"+ot+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(cod)/align,2))+"</td>"
tr +="<td bgcolor='#99CC66'>"+str(round(100*float(utr)/align,2))+"</td>"
o = i + "\t" + stats[0]["PF_BASES"] + "\t" + str(align) + "\t" + str(round(100*float(align)/float(stats[0]["PF_BASES"]),2)) + "\t" + str(intr) + "\t" + str(round(100*float(intr)/align,2)) + "\t" + str(inter) + "\t" + str(round(100*float(inter)/align,2)) + "\t" + str(mrna) + "\t" + str(round(100*float(mrna)/align,2)) + "\t" +pc+ "\t" +rb+ "\t" +ot+ "\t" +str(round(100*float(cod)/align,2))+ "\t" + str(round(100*float(utr)/align,2))
for ix in g:
if stats[0][ix]!="?":
tr +="<td bgcolor='#99CC66'>"+str(round(float(stats[0][ix]),3))+"</td>"
o += "\t" + str(round(float(stats[0][ix]),3))
else:
tr +="<td bgcolor='#99CC66'>0</td>"
o += "\t0"
tr = "<tr>"+tr+"</tr>"
table.append(tr)
o = o.split("\t")
st = 0
s = []
for ind in [10, 11, 12, 5, 7, 3]:
s.append(str(round(float(o[ind]) * float(o[2])/float(o[1]),3)))
if ind != 3:
st += float(o[ind]) * float(o[2])/float(o[1])
for ind in [15,16,17,18]:
if o[ind] != "0":
s.append(str(round(float(o[ind]) * 100,3)))
else:
s.append("0")
s[5] = str(round(100 - st,3))
print >> out, i + "\t" + "\t".join(s)
out.close()
return "<table>"+"\n".join(table)+"</table>"
else:
return ""
def stats_picard_2(path,samples,config):
n = os.listdir(path)
H = ['sample_id','MEDIAN_INSERT_SIZE','MEDIAN_ABSOLUTE_DEVIATION','MIN_INSERT_SIZE',
'MAX_INSERT_SIZE','MEAN_INSERT_SIZE','STANDARD_DEVIATION','READ_PAIRS', 'LINK_TXT', 'LINK_PDF']
hh = "\t".join(H)
if config.has_key("picard_IS") and ("results_picard_IS" in n):
files = os.listdir(path+"/results_picard_IS")
out = open(path + "/outputs/stats_picard2.txt",'w')
print >> out, hh
for i in sorted(samples.keys()):
if i + ".txt" in files:
f = open(path+"/results_picard_IS"+"/"+i+".txt",'r')
k = 0
while (1):
h = f.readline()
if h.startswith("MEDIAN_INSERT_SIZE"):
j = f.readline().strip("\n").split("\t")
h = h.strip("\n").split("\t")
break
k += 1
if k > 10 or len(h) == 0:
j = ['NA' for i in range(7)]
break
J = []
for k in range(len(h)):
if h[k] in H:
J.append(j[k])
print >> out, "\t".join([i] + J + ['<a href="../results_picard_IS/' + i + '.txt" target="_blank">+</a>', '<a href="../results_picard_IS/' + i + '.pdf" target="_blank">+</a>'])
f.close()
else:
print >> out, "\t".join([i] + ['NA' for i in range(9)])
out.close()
return 1
def skeleton(path, path2html):
print "> Building HTML and OUTPUT folders skeletons..."
print " - Path: " + path
print " - Libs: " + path2html
# Creates output directiories
if os.path.exists(path + "/outputs"):
os.system("rm -r " + path + "/outputs")
os.mkdir(path + "/outputs")
# Creates HTML directories
n = os.listdir(path)
if "HTML" in n:
os.system("rm -r " + path + "/HTML")
if not ("HTML" in os.listdir(path)):
os.mkdir(path + "/HTML")
os.mkdir(path + "/HTML/html")
# Copy lightbox and jquery
shutil.copy(path2html + "/html/style.css", path + "/HTML/html/style.css")
shutil.copy(path2html + "/html/lytebox.js", path + "/HTML/html/lytebox.js")
shutil.copy(path2html + "/html/lytebox.css", path + "/HTML/html/lytebox.css")
shutil.copy(path2html + "/html/jquery-1.12.0.js", path + "/HTML/html/jquery-1.12.0.js")
shutil.copy(path2html + "/html/jquery.dataTables.min.js", path + "/HTML/html/jquery.dataTables.min.js")
shutil.copy(path2html + "/html/jquery.dataTables.min.css", path + "/HTML/html/jquery.dataTables.min.css")
shutil.copy(path2html + "/html/dataTables.colReorder.min.js", path + "/HTML/html/dataTables.colReorder.min.js")
shutil.copy(path2html + "/html/amcharts.js", path + "/HTML/html/amcharts.js")
shutil.copy(path2html + "/html/serial.js", path + "/HTML/html/serial.js")
shutil.copy(path2html + "/html/xy.js", path + "/HTML/html/xy.js")
os.system("cp -r " + path2html + "/html/images " + path + "/HTML/html/")
# def check_samples(path):
# # Parses the samples file
# print "> Parsing samples file..."
# try:
# f = open(path + "/samples.list", 'r')
# i = f.readline().strip("\n").split("\t")
# index = [-1,-1,-1]
# if ("FASTQ_1" in i) and ("FASTQ_2" in i):
# k = "paired-end"
# for r in range(len(i)):
# if i[r]=="SampleID":
# index[0] = r
# if i[r]=="FASTQ_1":
# index[1] = r
# if i[r]=="FASTQ_2":
# index[2] = r
# else:
# k = "single-end"
# for r in range(len(i)):
# if i[r]=="SampleID":
# index[0] = r
# if i[r]=="FASTQ":
# index[1] = r
# samples = dict()
# for i in f:
# i = i.strip("\n").split("\t")
# if len(i)>1:
# if k=="paired-end":
# samples[i[index[0]]] = [[i[index[1]],i[index[2]]],["Type",k]]
# else:
# samples[i[index[0]]] = [[i[index[1]]],["Type",k]]
# f.close()
# return samples
# except:
# exit("Error checking samples file: " + path + "/samples.list")
def build_amcharts(input, output, prog, pname, path, html_table, project, lmenu):
out = open(output, 'w')
f = open(input, 'r')
it = ""
for i in f:
i = i.strip("\n")
if i.startswith("#ITERATOR"):
pattern = i.replace("#ITERATOR=","")
f2 = open(path + "/outputs/" + pname[1] + "_pca.txt", 'r')
h = f2.readline()
it = list()
for j in f2:
np = pattern
j = j.strip("\n").split("\t")
for k in range(len(j)):
np = np.replace("#VAR"+str(len(j)-k-1),j[len(j)-k-1])
it.append(np)
it = ", ".join(it)
f2.close()
else:
if prog + "2.html" in i:
i = i.replace("#HIGHLIGHT", 'style="color:#808080"')
print >> out, i.replace("#LATMENU",lmenu).replace("#PROG", pname[0]).replace("#PROJECT", project).replace("#SITERATOR", it).replace("#HIGHTLIGHT", "").replace("#TABLE", html_table)
f.close()
out.close()
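# Worked example of the #ITERATOR expansion above (row values hypothetical):
# given a template line
#     #ITERATOR={ id: '#VAR0', x: #VAR1, y: #VAR2 }
# and a <pname>_pca.txt row "S1<TAB>0.12<TAB>-0.34", the placeholders are
# substituted from the last column backwards (which keeps "#VAR1" from
# matching inside a hypothetical "#VAR10"), yielding:
#     { id: 'S1', x: 0.12, y: -0.34 }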
def check_config(path):
# Parses the configuration file
print "> Parsing configuration file..."
try:
z = ["trimgalore", "fastqc", "star", "star-fusion", "picard", "htseq-gene", "htseq-exon", "kallisto", "picard_IS", "varscan", "gatk", 'jsplice']
f = open(path + "/config.txt", 'r')
analysis = dict()
analysis["cluster"] = dict()
analysis["programs"] = dict()
for i in f:
if not i.startswith("#"):
i = i.strip("\n").split("\t")
if len(i) > 1:
if i[0] in z:
if int(i[1].split("/")[0]) > 0:
analysis[i[0]] = [i[1], "results_" + i[0], dict()]
elif i[0] in ["wt", "q"]:
analysis["cluster"][i[0]] = i[1]
elif i[0] == "star_args_own":
if analysis["programs"]["star_args"] == "own":
analysis["programs"]["star_args"] = i[1]
elif i[0] == "starfusion_own":
if analysis["programs"]["starfusion"] == "own":
analysis["programs"]["starfusion"] = i[1]
elif i[0] == "genome_build":
analysis[i[0]] = i[1]
else:
analysis["programs"][i[0]] = i[1]
f.close()
return analysis
except:
exit("Error checking configuration file: " + path + "/config.txt")
# def print_config(config,path):
# table = list()
# table.append(["Analysis", "Processors", "Folder", "Timestamp",
# "TStart","TEnd","Success","CPU-Time","MaxMemo",
# "AveMemo","MaxSwap","Parameters"])
# for i in modules:
# if config.has_key(i):
# n = check_log_cluster(path, config[i][1])
# st = []
# if len(config[i][2]) > 0:
# for v,w in config[i][2].iteritems():
# st.append(v+": "+w)
# st = "<br>".join(st)
# for j in range(len(n)):
# tt = [module_names[i]]+[config[i][0],"./"+config[i][1]]+n[j]+[st]
# table.append(tt)
# n = ""
# for i in table[0]:
# n = n+"<th bgcolor='#A8A8A8'>"+i+"</th>"
# n = ["<tr>"+n+"</tr>"]
# for i in table[1:]:
# temp = ""
# for j in i:
# if "NA" in j:
# temp = temp+"<td bgcolor='#CC3300'>"+j+"</td>"
# else:
# temp = temp+"<td bgcolor='#00CC66'>"+j+"</td>"
# n.append("<tr>"+temp+"</tr>")
# return n
# def check_log_cluster(path, val):
# t = os.listdir(path)
# if not (val in t):
# return ["NA","NA","NA","NA","NA","NA","NA"]
# t = os.listdir(path + "/" + val)
# t2 = list()
# for i in t:
# if i.startswith("log_cluster_") and (("scheduler" in i) == False):
# if len(i.split("_")) >= 4 :
# t2.append(i)
# if len(t2) == 0:
# return [["NA","NA","NA","NA","NA","NA","NA","NA"]]
# n = list()
# for jv in sorted(t2):
# f = open(path + "/" + val + "/" + jv,'r')
# ts = ""
# te = ""
# suc= "No"
# cpu_time = ""
# max_memo = ""
# ave_memo = ""
# max_swap = ""
# pid = "_".join(jv.split("_")[2:4]).replace(".txt","")
# for i in f:
# if i.startswith("Started at"):
# ts = i.rstrip().replace("Started at ","")
# if i.startswith("Results reported on"):
# te = i.rstrip().replace("Results reported on ","")
# if i.startswith("Successfully completed."):
# suc = "Yes"
# if "CPU time :" in i:
# cpu_time = " ".join(i.rstrip().split()[3:5])
# if "Max Memory :" in i:
# max_memo = " ".join(i.rstrip().split()[3:5])
# if "Average Memory :" in i:
# ave_memo = " ".join(i.rstrip().split()[3:5])
# if "Max Swap :" in i:
# max_swap = " ".join(i.rstrip().split()[3:5])
# f.close()
# n.append([pid,ts,te,suc,cpu_time,max_memo,ave_memo,max_swap])
# return n
def bar_getdata (filename, head, cols_bar, cols_line):
# LOAD DATA
if not os.path.exists(filename):
return ""
f = open(filename, 'r')
h = f.readline().strip("\n").split("\t")
if (len(cols_bar)==0) and (len(cols_line)==0):
cols_bar = range(1, len(h))
D = list()
for i in f:
i = i.strip("\n").split("\t")
n = list()
for j in range(len(h)):
if j == head:
n.append('"' + h[j] + '": "' + i[j] + '"')
elif (j in cols_bar) or (j in cols_line):
if i[j] != "NA":
n.append('"' + h[j] + '": ' + i[j])
else:
n.append('"' + h[j] + '": 0')
D.append("{" + ", ".join(n) + "}")
D = "var chartData = [" + ", ".join(D) + "];"
return D
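# Example of the string bar_getdata returns for a hypothetical two-column
# file whose header is "sample<TAB>reads" and whose single row is
# "S1<TAB>100", when called as bar_getdata(fname, 0, [], []):
#
#   var chartData = [{"sample": "S1", "reads": 100}];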
def build_from_template(prog, project, data, html_table, html_table2, output, template, lmenu):
out = open(output,'w')
f = open(template, 'r')
r = output.split("/")[-1]
for i in f:
i = i.strip("\n")
if r in i:
i = i.replace("#HIGHLIGHT", 'style="color:#808080"')
print >> out, i.replace("#LATMENU",lmenu).replace("#PROG", prog).replace("#PROJECT", project).replace("#DATA", data).replace("#HIGHTLIGHT", "").replace("#TABLE2", html_table2).replace("#TABLE", html_table)
f.close()
out.close()
|
HudsonAlpha/aRNAPipe
|
lib/html_lib.py
|
Python
|
mit
| 33,948
|
[
"HTSeq"
] |
9863c2916bfcd15a14b2f7412706a21d6a02d5a0bec962561d6656763061447b
|
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy.display
import contextlib
# Grab the python versions of the parser and ast modules.
ast = __import__("ast")
# The filename of the file we're parsing.
filename = None
new_variable_serial = 0
# Returns the name of a new variable.
@contextlib.contextmanager
def new_variable():
global new_variable_serial
new_variable_serial += 1
yield "_%d" % new_variable_serial
new_variable_serial -= 1
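# Illustration (added; not in the original) of how the serial counter
# behaves: nested uses get fresh names, while sequential uses reuse a name
# because the counter is decremented when each context exits.
#
# with new_variable() as a:        # a == "_1"
#     with new_variable() as b:    # b == "_2"
#         pass
# with new_variable() as c:        # c == "_1" again
#     pass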
def increment_lineno(node, amount):
for node in ast.walk(node):
if hasattr(node, 'lineno'):
node.lineno += amount
class LineNumberNormalizer(ast.NodeVisitor):
def __init__(self):
self.last_line = 1
def generic_visit(self, node):
if hasattr(node, 'lineno'):
self.last_line = max(self.last_line, node.lineno)
node.lineno = self.last_line
super(LineNumberNormalizer, self).generic_visit(node)
##############################################################################
# Parsing.
# The parser that things are being added to.
parser = None
class Positional(object):
"""
This represents a positional parameter to a function.
"""
def __init__(self, name):
self.name = name
if parser:
parser.add(self)
# Used to generate the documentation
all_keyword_names = set()
class Keyword(object):
"""
This represents an optional keyword parameter to a function.
"""
def __init__(self, name):
self.name = name
self.style = False
all_keyword_names.add(self.name)
if parser:
parser.add(self)
class Style(object):
"""
This represents a style parameter to a function.
"""
def __init__(self, name):
self.name = name
self.style = True
for j in renpy.style.prefix_subs:
all_keyword_names.add(j + self.name)
if parser:
parser.add(self)
class Parser(object):
def __init__(self, name):
# The name of this object.
self.name = name
# The positional arguments, keyword arguments, and child
# statements of this statement.
self.positional = [ ]
self.keyword = { }
self.children = { }
all_statements.append(self)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def add(self, i):
"""
Adds a clause to this parser.
"""
if isinstance(i, list):
for j in i:
self.add(j)
return
if isinstance(i, Positional):
self.positional.append(i)
elif isinstance(i, Keyword):
self.keyword[i.name] = i
elif isinstance(i, Style):
for j in renpy.style.prefix_subs:
self.keyword[j + i.name] = i
elif isinstance(i, Parser):
self.children[i.name] = i
def parse_statement(self, l, name, layout_mode=False):
word = l.word() or l.match(r'\$')
if word and word in self.children:
if layout_mode:
c = self.children[word].parse_layout(l, name)
else:
c = self.children[word].parse(l, name)
return c
else:
return None
def parse_layout(self, l, name):
l.error("The %s statement cannot be used as a container for the has statement." % self.name)
def parse_children(self, stmt, l, name):
l.expect_block(stmt)
l = l.subblock_lexer()
rv = [ ]
with new_variable() as child_name:
count = 0
while l.advance():
if len(l.block) != 1:
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count), l.number))
else:
child_name = name
c = self.parse_statement(l, child_name)
if c is None:
l.error('Expected screen language statement.')
rv.extend(c)
count += 1
return rv
def parse_eval(self, expr, lineno=1):
"""
Parses an expression for eval, and then strips off the module
and expr instances, and adjusts the line number.
"""
if isinstance(expr, unicode):
expr = renpy.python.escape_unicode(expr)
try:
rv = ast.parse(expr, 'eval').body[0].value
except SyntaxError, e:
raise renpy.parser.ParseError(
filename,
lineno + e[1][1] - 1,
"Syntax error while parsing python expression.",
e[1][3],
e[1][2])
increment_lineno(rv, lineno-1)
return rv
def parse_exec(self, code, lineno=1):
"""
Parses an expression for exec, then strips off the module and
adjusts the line number. Returns a list of statements.
"""
if isinstance(code, unicode):
code = renpy.python.escape_unicode(code)
try:
rv = ast.parse(code, 'exec')
except SyntaxError, e:
raise renpy.parser.ParseError(
filename,
lineno + e[1][1] - 1,
"Syntax error while parsing python code.",
e[1][3],
e[1][2])
increment_lineno(rv, lineno-1)
return rv.body
def parse_simple_expression(self, l):
lineno = l.number
expr = l.require(l.simple_expression)
return self.parse_eval(expr, lineno)
def parse(self, l, name):
"""
This is expected to parse a function statement, and to return
a list of python ast statements.
`l` the lexer.
`name` the name of the variable containing the name of the
current statement.
"""
raise NotImplementedError()
# A singleton value.
many = object()
class FunctionStatementParser(Parser):
"""
This is responsible for parsing function statements.
"""
def __init__(self, name, function, nchildren=0, unevaluated=False, scope=False):
super(FunctionStatementParser, self).__init__(name)
# Functions that are called when this statement runs.
self.function = function
# The number of children we have.
self.nchildren = nchildren
# True if we should evaluate arguments and children. False
# if we should just pass them into our child.
self.unevaluated = unevaluated
# Add us to the appropriate lists.
global parser
parser = self
if nchildren != 0:
childbearing_statements.append(self)
self.scope = scope
def parse_layout(self, l, name):
return self.parse(l, name, True)
def parse(self, l, name, layout_mode=False):
# The list of nodes this function returns.
rv = [ ]
# The line number of the current node.
lineno = l.number
if layout_mode and self.nchildren is not many:
l.error("The %s statement cannot be used as a layout." % self.name)
func = self.parse_eval(self.function, lineno)
call_node = ast.Call(
lineno=lineno,
col_offset=0,
func=func,
args=[ ],
keywords=[ ],
starargs=None,
kwargs=None,
)
seen_keywords = set()
# Parses a keyword argument from the lexer.
def parse_keyword(l):
name = l.word()
if name is None:
l.error('expected a keyword argument, colon, or end of line.')
if name not in self.keyword:
l.error('%r is not a keyword argument or valid child for the %s statement.' % (name, self.name))
if name in seen_keywords:
l.error('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
seen_keywords.add(name)
expr = self.parse_simple_expression(l)
call_node.keywords.append(
ast.keyword(arg=str(name), value=expr),
)
# We assume that the initial keyword has been parsed already,
# so we start with the positional arguments.
for _i in self.positional:
call_node.args.append(self.parse_simple_expression(l))
# Next, we allow keyword arguments on the starting line.
while True:
if l.match(':'):
l.expect_eol()
l.expect_block(self.name)
block = True
break
if l.eol():
l.expect_noblock(self.name)
block = False
break
parse_keyword(l)
rv.append(ast.Expr(value=call_node))
if self.nchildren == 1:
rv.extend(self.parse_exec('ui.child_or_fixed()'))
needs_close = (self.nchildren != 0)
# The index of the child we're adding to this statement.
child_index = 0
# The variable we store the child's name in.
with new_variable() as child_name:
old_l = l
# If we have a block, then parse each line.
if block:
l = l.subblock_lexer()
while l.advance():
state = l.checkpoint()
if l.keyword(r'has'):
if self.nchildren != 1:
l.error("The %s statement does not take a layout." % self.name)
if child_index != 0:
l.error("The has statement may not be given after a child has been supplied.")
c = self.parse_statement(l, child_name, layout_mode=True)
if c is None:
l.error('Has expects a child statement.')
# Remove the call to child_or_fixed.
rv.pop()
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, child_index)))
rv.extend(c)
needs_close = False
continue
c = self.parse_statement(l, child_name)
if c is not None:
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, child_index)))
rv.extend(c)
child_index += 1
continue
l.revert(state)
while not l.eol():
parse_keyword(l)
l = old_l
if layout_mode:
while l.advance():
c = self.parse_statement(l, child_name)
if c is not None:
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, child_index)))
rv.extend(c)
child_index += 1
else:
l.error("Expected a screen language statement.")
if needs_close:
rv.extend(self.parse_exec("ui.close()"))
if "id" not in seen_keywords:
call_node.keywords.append(ast.keyword(arg="id", value=self.parse_eval(name, lineno)))
if "scope" not in seen_keywords and self.scope:
call_node.keywords.append(ast.keyword(arg="scope", value=self.parse_eval("_scope", lineno)))
return rv
##############################################################################
# Definitions of screen language statements.
# Used to allow statements to take styles.
styles = [ ]
# All statements defined, and statements that take children.
all_statements = [ ]
childbearing_statements = [ ]
position_properties = [ Style(i) for i in [
"anchor",
"xanchor",
"yanchor",
"pos",
"xpos",
"ypos",
"align",
"xalign",
"yalign",
"xoffset",
"yoffset",
"maximum",
"xmaximum",
"ymaximum",
"area",
"clipping",
"xfill",
"yfill",
# no center, since it can conflict with the center transform.
"xcenter",
"ycenter",
] ]
text_properties = [ Style(i) for i in [
"antialias",
"black_color",
"bold",
"color",
"drop_shadow",
"drop_shadow_color",
"first_indent",
"font",
"size",
"hyperlink_functions",
"italic",
"justify",
"kerning",
"language",
"layout",
"line_leading",
"line_spacing",
"minwidth",
"min_width",
"newline_indent",
"outlines",
"rest_indent",
"ruby_style",
"slow_cps",
"slow_cps_multiplier",
"slow_abortable",
"strikethrough",
"text_align",
"text_y_fudge",
"underline",
"minimum",
"xminimum",
"yminimum",
] ]
window_properties = [ Style(i) for i in [
"background",
"foreground",
"left_margin",
"right_margin",
"bottom_margin",
"top_margin",
"xmargin",
"ymargin",
"left_padding",
"right_padding",
"top_padding",
"bottom_padding",
"xpadding",
"ypadding",
"size_group",
"minimum",
"xminimum",
"yminimum",
] ]
button_properties = [ Style(i) for i in [
"sound",
"mouse",
"focus_mask",
] ]
bar_properties = [ Style(i) for i in [
"bar_vertical",
"bar_invert",
"bar_resizing",
"left_gutter",
"right_gutter",
"top_gutter",
"bottom_gutter",
"left_bar",
"right_bar",
"top_bar",
"bottom_bar",
"thumb",
"thumb_shadow",
"thumb_offset",
"mouse",
"unscrollable",
] ]
box_properties = [ Style(i) for i in [
"box_layout",
"box_wrap",
"spacing",
"first_spacing",
"fit_first",
"minimum",
"xminimum",
"yminimum",
] ]
ui_properties = [
Keyword("at"),
Keyword("id"),
Keyword("style"),
Keyword("style_group"),
Keyword("focus"),
Keyword("default"),
]
def add(thing):
parser.add(thing)
##############################################################################
# UI statements.
FunctionStatementParser("null", "ui.null", 0)
Keyword("width")
Keyword("height")
add(ui_properties)
add(position_properties)
FunctionStatementParser("text", "ui.text", 0, scope=True)
Positional("text")
Keyword("slow")
Keyword("slow_done")
Keyword("substitute")
Keyword("scope")
add(ui_properties)
add(position_properties)
add(text_properties)
FunctionStatementParser("hbox", "ui.hbox", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("vbox", "ui.vbox", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("fixed", "ui.fixed", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("grid", "ui.grid", many)
Positional("cols")
Positional("rows")
Keyword("transpose")
Style("spacing")
add(ui_properties)
add(position_properties)
FunctionStatementParser("side", "ui.side", many)
Positional("positions")
Style("spacing")
add(ui_properties)
add(position_properties)
# Omit sizer, as we can always just put an xmaximum and ymaximum on an item.
for name in [ "window", "frame" ]:
FunctionStatementParser(name, "ui." + name, 1)
add(ui_properties)
add(position_properties)
add(window_properties)
FunctionStatementParser("key", "ui.key", 0)
Positional("key")
Keyword("action")
FunctionStatementParser("timer", "ui.timer", 0)
Positional("delay")
Keyword("action")
Keyword("repeat")
# Omit behaviors.
# Omit menu as being too high-level.
FunctionStatementParser("input", "ui.input", 0)
Keyword("default")
Keyword("length")
Keyword("allow")
Keyword("exclude")
Keyword("prefix")
Keyword("suffix")
Keyword("changed")
add(ui_properties)
add(position_properties)
add(text_properties)
FunctionStatementParser("image", "ui.image", 0)
Positional("im")
# Omit imagemap_compat for being too high level (and obsolete).
FunctionStatementParser("button", "ui.button", 1)
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("imagebutton", "ui.imagebutton", 0)
Keyword("auto")
Keyword("idle")
Keyword("hover")
Keyword("insensitive")
Keyword("selected_idle")
Keyword("selected_hover")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("image_style")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("textbutton", "ui.textbutton", 0, scope=True)
Positional("label")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("text_style")
Keyword("substitute")
Keyword("scope")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("label", "ui.label", 0, scope=True)
Positional("label")
Keyword("text_style")
add(ui_properties)
add(position_properties)
add(window_properties)
for name in [ "bar", "vbar" ]:
FunctionStatementParser(name, "ui." + name, 0)
Keyword("adjustment")
Keyword("range")
Keyword("value")
Keyword("changed")
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
add(bar_properties)
# Omit autobar. (behavior)
FunctionStatementParser("viewport", "ui.viewport", 1)
Keyword("child_size")
Keyword("mousewheel")
Keyword("draggable")
Keyword("xadjustment")
Keyword("yadjustment")
add(ui_properties)
add(position_properties)
# Omit conditional. (behavior)
FunctionStatementParser("imagemap", "ui.imagemap", many)
Keyword("ground")
Keyword("hover")
Keyword("insensitive")
Keyword("idle")
Keyword("selected_hover")
Keyword("selected_idle")
Keyword("auto")
Keyword("alpha")
Keyword("cache")
add(ui_properties)
add(position_properties)
FunctionStatementParser("hotspot", "ui.hotspot_with_child", 1)
Positional("spot")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("hotbar", "ui.hotbar", 0)
Positional("spot")
Keyword("adjustment")
Keyword("range")
Keyword("value")
add(ui_properties)
add(position_properties)
add(bar_properties)
FunctionStatementParser("transform", "ui.transform", 1)
Keyword("at")
Keyword("id")
for i in renpy.atl.PROPERTIES:
Style(i)
FunctionStatementParser("add", "ui.add", 0)
Positional("im")
Keyword("at")
Keyword("id")
for i in renpy.atl.PROPERTIES:
Style(i)
FunctionStatementParser("on", "ui.on", 0)
Positional("event")
Keyword("action")
FunctionStatementParser("drag", "ui.drag", 1)
Keyword("drag_name")
Keyword("draggable")
Keyword("droppable")
Keyword("drag_raise")
Keyword("dragged")
Keyword("dropped")
Keyword("drag_handle")
Keyword("drag_joined")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Style("child")
add(ui_properties)
add(position_properties)
FunctionStatementParser("draggroup", "ui.draggroup", many)
add(ui_properties)
add(position_properties)
FunctionStatementParser("mousearea", "ui.mousearea", 0)
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
##############################################################################
# Control-flow statements.
class PassParser(Parser):
def __init__(self, name):
super(PassParser, self).__init__(name)
def parse(self, l, name):
return [ ast.Pass(lineno=l.number, col_offset=0) ]
PassParser("pass")
class DefaultParser(Parser):
def __init__(self, name):
super(DefaultParser, self).__init__(name)
def parse(self, l, name):
name = l.require(l.word)
l.require(r'=')
rest = l.rest()
code = "_scope.setdefault(%r, (%s))" % (name, rest)
return self.parse_exec(code, l.number)
DefaultParser("default")
class UseParser(Parser):
def __init__(self, name):
super(UseParser, self).__init__(name)
childbearing_statements.append(self)
def parse(self, l, name):
lineno = l.number
target_name = l.require(l.word)
code = "renpy.use_screen(%r, _name=%s, _scope=_scope" % (target_name, name)
args = renpy.parser.parse_arguments(l)
if args:
for k, v in args.arguments:
if k is None:
l.error('The use statement only takes keyword arguments.')
code += ", %s=(%s)" % (k, v)
if args.extrapos:
l.error('The use statement only takes keyword arguments.')
if args.extrakw:
code += ", **(%s)" % args.extrakw
code += ")"
return self.parse_exec(code, lineno)
UseParser("use")
class IfParser(Parser):
def __init__(self, name):
super(IfParser, self).__init__(name)
childbearing_statements.append(self)
def parse(self, l, name):
with new_variable() as child_name:
count = 0
lineno = l.number
condition = self.parse_eval(l.require(l.python_expression), lineno)
l.require(':')
l.expect_eol()
body = self.parse_exec("%s = (%s, %d)" % (child_name, name, count))
body.extend(self.parse_children('if', l, child_name))
orelse = [ ]
rv = ast.If(test=condition, body=body, orelse=orelse, lineno=lineno, col_offset=0)
count += 1
state = l.checkpoint()
while l.advance():
old_orelse = orelse
lineno = l.number
if l.keyword("elif"):
condition = self.parse_eval(l.require(l.python_expression), lineno)
body = self.parse_exec("%s = (%s, %d)" % (child_name, name, count))
body.extend(self.parse_children('if', l, child_name))
orelse = [ ]
old_orelse.append(ast.If(test=condition, body=body, orelse=orelse, lineno=lineno, col_offset=0))
count += 1
state = l.checkpoint()
elif l.keyword("else"):
old_orelse.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count)))
old_orelse.extend(self.parse_children('if', l, child_name))
break
else:
l.revert(state)
break
return [ rv ]
IfParser("if")
class ForParser(Parser):
def __init__(self, name):
super(ForParser, self).__init__(name)
childbearing_statements.append(self)
def parse_tuple_pattern(self, l):
is_tuple = False
pattern = [ ]
while True:
lineno = l.number
if l.match(r"\("):
p = self.parse_tuple_pattern(l)
else:
p = l.name().encode("utf-8")
if not p:
break
pattern.append(ast.Name(id=p, ctx=ast.Store(), lineno=lineno, col_offset=0))
if l.match(r","):
is_tuple = True
else:
break
if not pattern:
l.error("Expected tuple pattern.")
if not is_tuple:
return pattern[0]
else:
return ast.Tuple(elts=pattern, ctx=ast.Store())
def parse(self, l, name):
lineno = l.number
pattern = self.parse_tuple_pattern(l)
l.require('in')
expression = self.parse_eval(l.require(l.python_expression), l.number)
l.require(':')
l.expect_eol()
with new_variable() as counter_name:
with new_variable() as child_name:
children = self.parse_exec("%s = (%s, %s)" % (child_name, name, counter_name))
children.extend(self.parse_children('for', l, child_name))
children.extend(self.parse_exec("%s += 1" % counter_name))
rv = self.parse_exec("%s = 0" % counter_name)
rv.append(ast.For(
target=pattern,
iter=expression,
body=children,
orelse=[],
lineno=lineno,
col_offset=0))
return rv
ForParser("for")
class PythonParser(Parser):
def __init__(self, name, one_line):
super(PythonParser, self).__init__(name)
self.one_line = one_line
def parse(self, l, name):
lineno = l.number
if self.one_line:
python_code = l.rest()
l.expect_noblock('one-line python statement')
else:
l.require(':')
l.expect_block('python block')
python_code = l.python_block()
lineno += 1
return self.parse_exec(python_code, lineno)
PythonParser("$", True)
PythonParser("python", False)
##############################################################################
# Add all_statements to the statements that take children.
for i in childbearing_statements:
i.add(all_statements)
##############################################################################
# Definition of the screen statement.
# class ScreenFunction(renpy.object.Object):
# def __init__(self, children):
# self.children = children
# def __call__(self, _name=(), _scope=None, **kwargs):
# for i, child in enumerate(self.children):
# child.evaluate(_name + (i,), _scope)
# def screen_function(positional, keyword, children):
# name = renpy.python.py_eval(positional[0].source)
# function = ScreenFunction(children)
# values = {
# "name" : name,
# "function" : function,
# }
# for k, v in keyword.iteritems():
# values[k] = renpy.python.py_eval(v.source)
# return values
# screen_stmt = FunctionStatementParser("screen", screen_function, unevaluated=True)
# Positional("name", Word)
# Keyword("modal", Expression)
# Keyword("zorder", Expression)
# Keyword("tag", Word)
# add(all_statements)
class ScreenLangScreen(renpy.object.Object):
"""
This represents a screen defined in the screen language.
"""
__version__ = 1
variant = "None"
# Predict should be false for screens created before
# prediction existed.
predict = "False"
def __init__(self):
# The name of the screen.
self.name = None
# Should this screen be declared as modal?
self.modal = "False"
# The screen's zorder.
self.zorder = "0"
# The screen's tag.
self.tag = None
# The PyCode object containing the screen's code.
self.code = None
# The variant of screen we're defining.
self.variant = "None" # expr.
# Should we predict this screen?
self.predict = "None" # expr.
def after_upgrade(self, version):
if version < 1:
self.modal = "False"
self.zorder = "0"
def define(self):
"""
Defines us as a screen.
"""
renpy.display.screen.define_screen(
self.name,
self,
modal=self.modal,
zorder=self.zorder,
tag=self.tag,
variant=renpy.python.py_eval(self.variant),
predict=renpy.python.py_eval(self.predict)
)
def __call__(self, _scope=None, **kwargs):
renpy.python.py_exec_bytecode(self.code.bytecode, locals=_scope)
class ScreenParser(Parser):
def __init__(self):
super(ScreenParser, self).__init__("screen")
def parse(self, l, name="_name"):
location = l.get_location()
screen = ScreenLangScreen()
def parse_keyword(l):
if l.match('modal'):
screen.modal = l.require(l.simple_expression)
return True
if l.match('zorder'):
screen.zorder = l.require(l.simple_expression)
return True
if l.match('tag'):
screen.tag = l.require(l.word)
return True
if l.match('variant'):
screen.variant = l.require(l.simple_expression)
return True
if l.match('predict'):
screen.predict = l.require(l.simple_expression)
return True
return False
lineno = l.number
screen.name = l.require(l.word)
while parse_keyword(l):
continue
l.require(':')
l.expect_eol()
l.expect_block('screen statement')
l = l.subblock_lexer()
rv = [ ]
count = 0
with new_variable() as child_name:
while l.advance():
if parse_keyword(l):
while parse_keyword(l):
continue
l.expect_eol()
continue
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count), l.number))
c = self.parse_statement(l, child_name)
if c is None:
l.error('Expected a screen language statement.')
rv.extend(c)
count += 1
node = ast.Module(body=rv, lineno=lineno, col_offset=0)
ast.fix_missing_locations(node)
LineNumberNormalizer().visit(node)
# Various bits of debugging code:
# print ast.dump(node, True, True)
# a = compile(node, 'foo', 'exec')
# import dis
# dis.dis(a)
# import unparse
# print
# print screen.name, "-----------------------------------------"
# unparse.Unparser(node)
screen.code = renpy.ast.PyCode(node, location, 'exec')
return screen
screen_parser = ScreenParser()
screen_parser.add(all_statements)
def parse_screen(l):
"""
Parses the screen statement.
"""
global filename
filename = l.filename
screen = screen_parser.parse(l)
return screen
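# Illustrative Ren'Py screen source (not part of this module) exercising the
# keywords and child statements ScreenParser handles above:
#
#     screen hello_world:
#         tag example
#         zorder 1
#         frame:
#             text "Hello, world."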
|
MSEMJEJME/tkot
|
renpy/screenlang.py
|
Python
|
gpl-2.0
| 33,978
|
[
"VisIt"
] |
d8d3b0ca076e0d4bbe019be9d887b04f45ec523b56d5a8cadd5588a209e3a8cf
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from shutil import copyfile
import glob
class Chombo(MakefilePackage):
"""The Chombo package provides a set of tools for implementing finite
difference and finite-volume methods for the solution of partial
differential equations on block-structured adaptively refined
logically rectangular (i.e. Cartesian) grids."""
homepage = "https://commons.lbl.gov/display/chombo"
url = "http://bitbucket.org/drhansj/chombo-xsdk.git"
# Use whatever path Brian V. and Terry L. agreed upon, but preserve version
version('3.2', git='http://bitbucket.org/drhansj/chombo-xsdk.git', commit='71d856c')
version('develop', git='http://bitbucket.org/drhansj/chombo-xsdk.git', tag='master')
# version('3.2', svn='https://anag-repo.lbl.gov/svn/Chombo/release/3.2')
variant('mpi', default=True, description='Enable MPI parallel support')
variant('hdf5', default=True, description='Enable HDF5 support')
variant('dims',
default='3',
values=('1', '2', '3', '4', '5', '6'),
multi=False,
description='Number of PDE dimensions [1-6]'
)
patch('hdf5-16api.patch', when='@3.2', level=0)
patch('Make.defs.local.template.patch', when='@3.2', level=0)
depends_on('blas')
depends_on('lapack')
depends_on('gmake', type='build')
depends_on('mpi', when='+mpi')
depends_on('hdf5', when='+hdf5')
depends_on('hdf5+mpi', when='+mpi+hdf5')
def edit(self, spec, prefix):
# Set fortran name mangling in Make.defs
defs_file = FileFilter('./lib/mk/Make.defs')
defs_file.filter('^\s*#\s*cppcallsfort\s*=\s*',
'cppcallsfort = -DCH_FORT_UNDERSCORE')
# Set remaining variables in Make.defs.local
# Make.defs.local.template.patch ensures lines for USE_TIMER,
# USE_LAPACK and lapackincflags are present
copyfile('./lib/mk/Make.defs.local.template',
'./lib/mk/Make.defs.local')
defs_file = FileFilter('./lib/mk/Make.defs.local')
# Unconditional settings
defs_file.filter('^\s*#\s*DEBUG\s*=\s*', 'DEBUG = FALSE')
defs_file.filter('^\s*#\s*OPT\s*=\s*', 'OPT = TRUE')
defs_file.filter('^\s*#\s*PIC\s*=\s*', 'PIC = TRUE')
# timer code frequently fails compiles. So disable it.
defs_file.filter('^\s*#\s*USE_TIMER\s*=\s*', 'USE_TIMER = FALSE')
# LAPACK setup
lapack_blas = spec['lapack'].libs + spec['blas'].libs
defs_file.filter('^\s*#\s*USE_LAPACK\s*=\s*', 'USE_LAPACK = TRUE')
defs_file.filter(
'^\s*#\s*lapackincflags\s*=\s*',
'lapackincflags = -I%s' % spec['lapack'].prefix.include)
defs_file.filter(
'^\s*#\s*syslibflags\s*=\s*',
'syslibflags = %s' % lapack_blas.ld_flags)
# Compilers and Compiler flags
defs_file.filter('^\s*#\s*CXX\s*=\s*', 'CXX = %s' % spack_cxx)
defs_file.filter('^\s*#\s*FC\s*=\s*', 'FC = %s' % spack_fc)
if '+mpi' in spec:
defs_file.filter(
'^\s*#\s*MPICXX\s*=\s*',
'MPICXX = %s' % self.spec['mpi'].mpicxx)
# Conditionally determined settings
defs_file.filter(
'^\s*#\s*MPI\s*=\s*',
'MPI = %s' % ('TRUE' if '+mpi' in spec else 'FALSE'))
defs_file.filter(
'^\s*#\s*DIM\s*=\s*',
'DIM = %s' % spec.variants['dims'].value)
# HDF5 settings
if '+hdf5' in spec:
defs_file.filter('^\s*#\s*USE_HDF5\s*=\s*', 'USE_HDF5 = TRUE')
defs_file.filter(
'^\s*#\s*HDFINCFLAGS\s*=.*',
'HDFINCFLAGS = -I%s' % spec['hdf5'].prefix.include)
defs_file.filter(
'^\s*#\s*HDFLIBFLAGS\s*=.*',
'HDFLIBFLAGS = %s' % spec['hdf5'].libs.ld_flags)
if '+mpi' in spec:
defs_file.filter(
'^\s*#\s*HDFMPIINCFLAGS\s*=.*',
'HDFMPIINCFLAGS = -I%s' % spec['hdf5'].prefix.include)
defs_file.filter(
'^\s*#\s*HDFMPILIBFLAGS\s*=.*',
'HDFMPILIBFLAGS = %s' % spec['hdf5'].libs.ld_flags)
def build(self, spec, prefix):
with working_dir('lib'):
gmake('all')
def install(self, spec, prefix):
with working_dir('lib'):
install_tree('include', prefix.include)
libfiles = glob.glob('lib*.a')
libfiles += glob.glob('lib*.so')
libfiles += glob.glob('lib*.dylib')
mkdirp(prefix.lib)
for lib in libfiles:
install(lib, prefix.lib)
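# Hedged usage sketch (not part of the recipe): with this package available
# to a Spack instance, a 2D MPI+HDF5 build could be requested as
#
#     spack install chombo@3.2 +mpi +hdf5 dims=2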
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/chombo/package.py
|
Python
|
lgpl-2.1
| 5,904
|
[
"Brian"
] |
fd089c24bc41d7200c15e77e9a8882d517c8edaa75ab7d64d3f305174f65f3d2
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visitor class for traversing Python expressions."""
from __future__ import unicode_literals
import contextlib
import textwrap
from pythonparser import algorithm
from pythonparser import ast
from grumpy.compiler import expr
from grumpy.compiler import util
class ExprVisitor(algorithm.Visitor):
"""Builds and returns a Go expression representing the Python nodes."""
# pylint: disable=invalid-name,missing-docstring
def __init__(self, stmt_visitor):
self.stmt_visitor = stmt_visitor
self.block = stmt_visitor.block
self.writer = stmt_visitor.writer
def generic_visit(self, node):
msg = 'expression node not yet implemented: ' + type(node).__name__
raise util.ParseError(node, msg)
def visit_Attribute(self, node):
with self.visit(node.value) as obj:
attr = self.block.alloc_temp()
self.writer.write_checked_call2(
attr, 'πg.GetAttr(πF, {}, {}, nil)',
obj.expr, self.block.root.intern(node.attr))
return attr
def visit_BinOp(self, node):
result = self.block.alloc_temp()
with self.visit(node.left) as lhs, self.visit(node.right) as rhs:
op_type = type(node.op)
if op_type in ExprVisitor._BIN_OP_TEMPLATES:
tmpl = ExprVisitor._BIN_OP_TEMPLATES[op_type]
self.writer.write_checked_call2(
result, tmpl, lhs=lhs.expr, rhs=rhs.expr)
else:
msg = 'binary op not implemented: {}'.format(op_type.__name__)
raise util.ParseError(node, msg)
return result
def visit_BoolOp(self, node):
result = self.block.alloc_temp()
with self.block.alloc_temp('bool') as is_true:
if isinstance(node.op, ast.And):
cond_expr = '!' + is_true.expr
else:
cond_expr = is_true.expr
end_label = self.block.genlabel()
num_values = len(node.values)
for i, n in enumerate(node.values):
with self.visit(n) as v:
self.writer.write('{} = {}'.format(result.expr, v.expr))
if i < num_values - 1:
self.writer.write_checked_call2(
is_true, 'πg.IsTrue(πF, {})', result.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if $cond_expr {
\tgoto Label$end_label
}"""), cond_expr=cond_expr, end_label=end_label)
self.writer.write_label(end_label)
return result
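# Sketch (simplified; temp names omitted) of the short-circuit flow the code
# above emits for `a and b`: each operand is copied into the result, and all
# but the last are tested with IsTrue so evaluation jumps past the remaining
# operands once the outcome is decided:
#
#   result = <a>
#   is_true = IsTrue(result)
#   if !is_true {
#       goto Label<end>
#   }
#   result = <b>
#   Label<end>: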
def visit_Call(self, node):
# Build positional arguments.
args = expr.nil_expr
if node.args:
args = self.block.alloc_temp('[]*πg.Object')
self.writer.write('{} = πF.MakeArgs({})'.format(args.expr,
len(node.args)))
for i, n in enumerate(node.args):
with self.visit(n) as a:
self.writer.write('{}[{}] = {}'.format(args.expr, i, a.expr))
varg = expr.nil_expr
if node.starargs:
varg = self.visit(node.starargs)
# Build keyword arguments
keywords = expr.nil_expr
if node.keywords:
values = []
for k in node.keywords:
values.append((util.go_str(k.arg), self.visit(k.value)))
keywords = self.block.alloc_temp('πg.KWArgs')
self.writer.write_tmpl('$keywords = πg.KWArgs{', keywords=keywords.name)
with self.writer.indent_block():
for k, v in values:
with v:
self.writer.write_tmpl('{$name, $value},', name=k, value=v.expr)
self.writer.write('}')
kwargs = expr.nil_expr
if node.kwargs:
kwargs = self.visit(node.kwargs)
# Invoke function with all parameters.
with args, varg, keywords, kwargs, self.visit(node.func) as func:
result = self.block.alloc_temp()
if varg is expr.nil_expr and kwargs is expr.nil_expr:
self.writer.write_checked_call2(result, '{}.Call(πF, {}, {})',
func.expr, args.expr, keywords.expr)
else:
self.writer.write_checked_call2(result,
'πg.Invoke(πF, {}, {}, {}, {}, {})',
func.expr, args.expr, varg.expr,
keywords.expr, kwargs.expr)
if node.args:
self.writer.write('πF.FreeArgs({})'.format(args.expr))
return result
def visit_Compare(self, node):
result = self.block.alloc_temp()
lhs = self.visit(node.left)
n = len(node.ops)
end_label = self.block.genlabel() if n > 1 else None
for i, (op, comp) in enumerate(zip(node.ops, node.comparators)):
rhs = self.visit(comp)
op_type = type(op)
if op_type in ExprVisitor._CMP_OP_TEMPLATES:
tmpl = ExprVisitor._CMP_OP_TEMPLATES[op_type]
self.writer.write_checked_call2(
result, tmpl, lhs=lhs.expr, rhs=rhs.expr)
elif isinstance(op, (ast.In, ast.NotIn)):
with self.block.alloc_temp('bool') as contains:
self.writer.write_checked_call2(
contains, 'πg.Contains(πF, {}, {})', rhs.expr, lhs.expr)
invert = '' if isinstance(op, ast.In) else '!'
self.writer.write('{} = πg.GetBool({}{}).ToObject()'.format(
result.name, invert, contains.expr))
elif isinstance(op, ast.Is):
self.writer.write('{} = πg.GetBool({} == {}).ToObject()'.format(
result.name, lhs.expr, rhs.expr))
elif isinstance(op, ast.IsNot):
self.writer.write('{} = πg.GetBool({} != {}).ToObject()'.format(
result.name, lhs.expr, rhs.expr))
else:
raise AssertionError('unrecognized compare op: {}'.format(
op_type.__name__))
if i < n - 1:
with self.block.alloc_temp('bool') as cond:
self.writer.write_checked_call2(
cond, 'πg.IsTrue(πF, {})', result.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if !$cond {
\tgoto Label$end_label
}"""), cond=cond.expr, end_label=end_label)
lhs.free()
lhs = rhs
rhs.free()
if end_label is not None:
self.writer.write_label(end_label)
return result
def visit_Dict(self, node):
with self.block.alloc_temp('*πg.Dict') as d:
self.writer.write('{} = πg.NewDict()'.format(d.name))
for k, v in zip(node.keys, node.values):
with self.visit(k) as key, self.visit(v) as value:
self.writer.write_checked_call1('{}.SetItem(πF, {}, {})',
d.expr, key.expr, value.expr)
result = self.block.alloc_temp()
self.writer.write('{} = {}.ToObject()'.format(result.name, d.expr))
return result
def visit_Set(self, node):
with self.block.alloc_temp('*πg.Set') as s:
self.writer.write('{} = πg.NewSet()'.format(s.name))
for e in node.elts:
with self.visit(e) as value:
self.writer.write_checked_call2(expr.blank_var, '{}.Add(πF, {})',
s.expr, value.expr)
result = self.block.alloc_temp()
self.writer.write('{} = {}.ToObject()'.format(result.name, s.expr))
return result
def visit_DictComp(self, node):
result = self.block.alloc_temp()
elt = ast.Tuple(elts=[node.key, node.value])
gen_node = ast.GeneratorExp(
elt=elt, generators=node.generators, loc=node.loc)
with self.visit(gen_node) as gen:
self.writer.write_checked_call2(
result, 'πg.DictType.Call(πF, πg.Args{{{}}}, nil)', gen.expr)
return result
def visit_ExtSlice(self, node):
result = self.block.alloc_temp()
if len(node.dims) <= util.MAX_DIRECT_TUPLE:
with contextlib.nested(*(self.visit(d) for d in node.dims)) as dims:
self.writer.write('{} = πg.NewTuple{}({}).ToObject()'.format(
result.name, len(dims), ', '.join(d.expr for d in dims)))
else:
with self.block.alloc_temp('[]*πg.Object') as dims:
self.writer.write('{} = make([]*πg.Object, {})'.format(
dims.name, len(node.dims)))
for i, dim in enumerate(node.dims):
with self.visit(dim) as s:
self.writer.write('{}[{}] = {}'.format(dims.name, i, s.expr))
self.writer.write('{} = πg.NewTuple({}...).ToObject()'.format(
result.name, dims.expr))
return result
def visit_GeneratorExp(self, node):
body = ast.Expr(value=ast.Yield(value=node.elt), loc=node.loc)
for comp_node in reversed(node.generators):
for if_node in reversed(comp_node.ifs):
body = ast.If(test=if_node, body=[body], orelse=[], loc=node.loc) # pylint: disable=redefined-variable-type
body = ast.For(target=comp_node.target, iter=comp_node.iter,
body=[body], orelse=[], loc=node.loc)
args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[])
node = ast.FunctionDef(name='<generator>', args=args, body=[body])
gen_func = self.stmt_visitor.visit_function_inline(node)
result = self.block.alloc_temp()
self.writer.write_checked_call2(
result, '{}.Call(πF, nil, nil)', gen_func.expr)
return result
def visit_IfExp(self, node):
else_label, end_label = self.block.genlabel(), self.block.genlabel()
result = self.block.alloc_temp()
with self.visit(node.test) as test, self.block.alloc_temp('bool') as cond:
self.writer.write_checked_call2(
cond, 'πg.IsTrue(πF, {})', test.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if !$cond {
\tgoto Label$else_label
}"""), cond=cond.expr, else_label=else_label)
with self.visit(node.body) as value:
self.writer.write('{} = {}'.format(result.name, value.expr))
self.writer.write('goto Label{}'.format(end_label))
self.writer.write_label(else_label)
with self.visit(node.orelse) as value:
self.writer.write('{} = {}'.format(result.name, value.expr))
self.writer.write_label(end_label)
return result
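  # Sketch of the emitted control flow for `a if t else b` (comment added for
  # illustration; labels are generated numbers):
  #   if !cond { goto Label<else> }
  #   result = <a>
  #   goto Label<end>
  # Label<else>:
  #   result = <b>
  # Label<end>: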
def visit_Index(self, node):
result = self.block.alloc_temp()
with self.visit(node.value) as v:
self.writer.write('{} = {}'.format(result.name, v.expr))
return result
def visit_Lambda(self, node):
ret = ast.Return(value=node.body, loc=node.loc)
func_node = ast.FunctionDef(
name='<lambda>', args=node.args, body=[ret])
return self.stmt_visitor.visit_function_inline(func_node)
def visit_List(self, node):
with self._visit_seq_elts(node.elts) as elems:
result = self.block.alloc_temp()
self.writer.write('{} = πg.NewList({}...).ToObject()'.format(
result.expr, elems.expr))
return result
def visit_ListComp(self, node):
result = self.block.alloc_temp()
gen_node = ast.GeneratorExp(
elt=node.elt, generators=node.generators, loc=node.loc)
with self.visit(gen_node) as gen:
self.writer.write_checked_call2(
result, 'πg.ListType.Call(πF, πg.Args{{{}}}, nil)', gen.expr)
return result
def visit_Name(self, node):
return self.block.resolve_name(self.writer, node.id)
def visit_Num(self, node):
if isinstance(node.n, int):
expr_str = 'NewInt({})'.format(node.n)
elif isinstance(node.n, long):
a = abs(node.n)
gobytes = ''
while a:
gobytes = hex(int(a&255)) + ',' + gobytes
a >>= 8
expr_str = 'NewLongFromBytes([]byte{{{}}})'.format(gobytes)
if node.n < 0:
expr_str = expr_str + '.Neg()'
elif isinstance(node.n, float):
expr_str = 'NewFloat({})'.format(node.n)
elif isinstance(node.n, complex):
expr_str = 'NewComplex(complex({}, {}))'.format(node.n.real, node.n.imag)
else:
msg = 'number type not yet implemented: ' + type(node.n).__name__
raise util.ParseError(node, msg)
return expr.GeneratedLiteral('πg.' + expr_str + '.ToObject()')
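  # Worked example (comment added for illustration): for node.n == 511L the
  # loop above yields gobytes == '0x1,0xff,' (most significant byte first),
  # so the literal becomes πg.NewLongFromBytes([]byte{0x1,0xff,}).ToObject().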
def visit_Slice(self, node):
result = self.block.alloc_temp()
lower = upper = step = expr.GeneratedLiteral('πg.None')
if node.lower:
lower = self.visit(node.lower)
if node.upper:
upper = self.visit(node.upper)
if node.step:
step = self.visit(node.step)
with lower, upper, step:
self.writer.write_checked_call2(
result, 'πg.SliceType.Call(πF, πg.Args{{{}, {}, {}}}, nil)',
lower.expr, upper.expr, step.expr)
return result
def visit_Subscript(self, node):
rhs = self.visit(node.slice)
result = self.block.alloc_temp()
with rhs, self.visit(node.value) as lhs:
self.writer.write_checked_call2(result, 'πg.GetItem(πF, {}, {})',
lhs.expr, rhs.expr)
return result
def visit_Str(self, node):
if isinstance(node.s, unicode):
expr_str = 'πg.NewUnicode({}).ToObject()'.format(
util.go_str(node.s.encode('utf-8')))
else:
expr_str = '{}.ToObject()'.format(self.block.root.intern(node.s))
return expr.GeneratedLiteral(expr_str)
def visit_Tuple(self, node):
result = self.block.alloc_temp()
if len(node.elts) <= util.MAX_DIRECT_TUPLE:
with contextlib.nested(*(self.visit(e) for e in node.elts)) as elts:
self.writer.write('{} = πg.NewTuple{}({}).ToObject()'.format(
result.name, len(elts), ', '.join(e.expr for e in elts)))
else:
with self._visit_seq_elts(node.elts) as elems:
self.writer.write('{} = πg.NewTuple({}...).ToObject()'.format(
result.expr, elems.expr))
return result
def visit_UnaryOp(self, node):
result = self.block.alloc_temp()
with self.visit(node.operand) as operand:
op_type = type(node.op)
if op_type in ExprVisitor._UNARY_OP_TEMPLATES:
self.writer.write_checked_call2(
result, ExprVisitor._UNARY_OP_TEMPLATES[op_type],
operand=operand.expr)
elif isinstance(node.op, ast.Not):
with self.block.alloc_temp('bool') as is_true:
self.writer.write_checked_call2(
is_true, 'πg.IsTrue(πF, {})', operand.expr)
self.writer.write('{} = πg.GetBool(!{}).ToObject()'.format(
result.name, is_true.expr))
else:
msg = 'unary op not implemented: {}'.format(op_type.__name__)
raise util.ParseError(node, msg)
return result
def visit_Yield(self, node):
if node.value:
value = self.visit(node.value)
else:
value = expr.GeneratedLiteral('πg.None')
resume_label = self.block.genlabel(is_checkpoint=True)
self.writer.write('πF.PushCheckpoint({})'.format(resume_label))
self.writer.write('return {}, nil'.format(value.expr))
self.writer.write_label(resume_label)
result = self.block.alloc_temp()
self.writer.write('{} = πSent'.format(result.name))
return result
_BIN_OP_TEMPLATES = {
ast.BitAnd: 'πg.And(πF, {lhs}, {rhs})',
ast.BitOr: 'πg.Or(πF, {lhs}, {rhs})',
ast.BitXor: 'πg.Xor(πF, {lhs}, {rhs})',
ast.Add: 'πg.Add(πF, {lhs}, {rhs})',
ast.Div: 'πg.Div(πF, {lhs}, {rhs})',
# TODO: Support "from __future__ import division".
ast.FloorDiv: 'πg.FloorDiv(πF, {lhs}, {rhs})',
ast.LShift: 'πg.LShift(πF, {lhs}, {rhs})',
ast.Mod: 'πg.Mod(πF, {lhs}, {rhs})',
ast.Mult: 'πg.Mul(πF, {lhs}, {rhs})',
ast.Pow: 'πg.Pow(πF, {lhs}, {rhs})',
ast.RShift: 'πg.RShift(πF, {lhs}, {rhs})',
ast.Sub: 'πg.Sub(πF, {lhs}, {rhs})',
}
_CMP_OP_TEMPLATES = {
ast.Eq: 'πg.Eq(πF, {lhs}, {rhs})',
ast.Gt: 'πg.GT(πF, {lhs}, {rhs})',
ast.GtE: 'πg.GE(πF, {lhs}, {rhs})',
ast.Lt: 'πg.LT(πF, {lhs}, {rhs})',
ast.LtE: 'πg.LE(πF, {lhs}, {rhs})',
ast.NotEq: 'πg.NE(πF, {lhs}, {rhs})',
}
_UNARY_OP_TEMPLATES = {
ast.Invert: 'πg.Invert(πF, {operand})',
ast.UAdd: 'πg.Pos(πF, {operand})',
ast.USub: 'πg.Neg(πF, {operand})',
}
def _visit_seq_elts(self, elts):
result = self.block.alloc_temp('[]*πg.Object')
self.writer.write('{} = make([]*πg.Object, {})'.format(
result.expr, len(elts)))
for i, e in enumerate(elts):
with self.visit(e) as elt:
self.writer.write('{}[{}] = {}'.format(result.expr, i, elt.expr))
return result
def _node_not_implemented(self, node):
msg = 'node not yet implemented: ' + type(node).__name__
raise util.ParseError(node, msg)
visit_SetComp = _node_not_implemented
|
S-YOU/grumpy
|
compiler/expr_visitor.py
|
Python
|
apache-2.0
| 16,850
|
[
"VisIt"
] |
59a65b6dbfe1e112c8dbaf10d4c66202803a8ae5b9ccf7f32ae1865cd471975e
|
#!/usr/bin/env python
#author: Peter Thorpe, September 2016. The James Hutton Institute, Dundee, UK.
#Title:
#script to perform stats on the coverage files already generated
#imports
import os
import sys
import numpy
from sys import stdin,argv
import datetime
from optparse import OptionParser
###########################################################################
# functions
###########################################################################
try:
# New in Python 3.4
from statistics import mean
except ImportError:
def mean(list_of_values):
"""Calculate the mean average of a list of numbers."""
        # Quick and dirty: assumes this is already a list, not an iterator,
        # so we don't have to worry about getting the divisor.
# Explicit float(...) to allow for Python 2 division.
return sum(list_of_values) / float(len(list_of_values))
assert mean([1,2,3,4,5]) == 3
def parse_result_file(blast):
    """Read in the blast tab file. Reads the whole file into memory.
    Returns a list, one list item per blast hit.
    """
    with open(blast) as in_file:
        data = in_file.read().split("\n")
    data1 = [line.rstrip("\n") for line in data
             if line.strip() != ""]
    return data1
def convert_to_int(results):
    """Convert a list of strings to a list
    of integers."""
    results = [int(i) for i in results if i != ""]
    return results
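# Quick self-check (added for illustration, mirroring the mean() assert above):
assert convert_to_int(["3", "1", ""]) == [3, 1]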
def stat_tests(in_list):
"""function to return stats on a given list.
returns min_cov, max_cov, mean_cov, standard_dev
"""
min_cov = min(in_list)
max_cov = max(in_list)
mean_cov = mean(in_list)
standard_dev = numpy.std(in_list)
assert min_cov <= mean_cov <= max_cov
return min_cov, max_cov, mean_cov, standard_dev
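# Quick self-check (added for illustration): the mean of this list is 5.0 and
# its population standard deviation (numpy.std's default) is exactly 2.0.
assert stat_tests([2, 4, 4, 4, 5, 5, 7, 9]) == (2, 9, 5.0, 2.0)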
def write_out_stats(ITS_cov, GFF, all_genes_cov, out_file):
"""function to write out summary stats. """
# call function to get list of coverage per file.
number_of_ITS_blast_hits = len(parse_result_file(GFF))
try:
ITS_cov_str = parse_result_file(ITS_cov)
#print ITS_cov_str
ITS_cov = convert_to_int(ITS_cov_str)
    except Exception:
        raise ValueError("something wrong with ITS cov. file")
try:
all_genes_cov_str = parse_result_file(all_genes_cov)
all_genes_cov = convert_to_int(all_genes_cov_str)
    except Exception:
        raise ValueError("something wrong with all genes cov. file")
# out file to write to
summary_stats_out = open(out_file, "w")
title = "#gene_class\tmin_cov\tmax_cov\tmean_cov\tstandard_dev\n"
summary_stats_out.write(title)
# call stats function
ITSmin_cov, ITSmax_cov, ITSmean_cov, ITSstandard_dev = stat_tests(ITS_cov)
ITS_data_formatted = "ITS:\t%s\t%s\t%s\t%s\n" %(ITSmin_cov,\
ITSmax_cov, ITSmean_cov, ITSstandard_dev)
#write out ITS results
summary_stats_out.write(ITS_data_formatted)
GENEmin_cov, GENEmax_cov, GENEmean_cov, GENEstandard_dev = stat_tests(all_genes_cov)
GENE_data_formatted = "allGenes:\t%s\t%s\t%s\t%s\n" %(GENEmin_cov,\
GENEmax_cov, GENEmean_cov, GENEstandard_dev)
summary_stats_out.write(GENE_data_formatted)
blast_hits_info = "\nITS_blast_hit = %s \n" %(number_of_ITS_blast_hits)
ratio_info = "ITS to gene ratio = %.1f \n" %(float(ITSmean_cov) / GENEmean_cov)
summary_stats_out.write(blast_hits_info)
summary_stats_out.write(ratio_info)
#close the write file
summary_stats_out.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to generate summary stats for ITS coverage and all genes coverage
$ summary_stats.py --ITS ITS.cov --all all_gene.cov -o summary.out
ITS GFF file needed to count the number of ITS blast hits
"""
parser = OptionParser(usage=usage)
parser.add_option("-i", "--ITS", dest="ITS_cov", default=None,
help="coverage file for ITS regions",
metavar="FILE")
parser.add_option("-g", "--GFF", dest="GFF", default=None,
help="ITS GFF file",
metavar="FILE")
parser.add_option("-a", "--all_genes_cov", dest="all_genes_cov",
default=None,
help="the coverage file for all genes",
metavar="FILE")
parser.add_option("-o", "--out_file", dest="out_file",
default="stats.out",
help="outfile for the ITS and allgene stats")
(options, args) = parser.parse_args()
ITS_cov = options.ITS_cov
GFF = options.GFF
all_genes_cov = options.all_genes_cov
out_file = options.out_file
#run the program
file_list = [ITS_cov, GFF, all_genes_cov]
for i in file_list:
if not os.path.isfile(i):
print("sorry, couldn't open the file: ", "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys.exit("\n\nInput ITS file not found: %s" % i)
# call the top function
write_out_stats(ITS_cov, GFF, all_genes_cov, out_file)
|
widdowquinn/THAPBI
|
ITS_region_genomic_coverage/python_pipeline_draft/supporting_scripts/Summary_stats.py
|
Python
|
mit
| 5,169
|
[
"BLAST"
] |
b709e82ce99a4de28bdacb88aa28daebaeddbe8d3a8f9ad143179a3997d32479
|
from ase import Atom, Atoms
from gpaw import GPAW
from gpaw.test import equal
a = 5.0
d = 1.0
x = d / 3**0.5
atoms = Atoms([Atom('C', (0.0, 0.0, 0.0)),
Atom('H', (x, x, x)),
Atom('H', (-x, -x, x)),
Atom('H', (x, -x, -x)),
Atom('H', (-x, x, -x))],
cell=(a, a, a),
pbc=False)
atoms.positions[:] += a / 2
calc = GPAW(h=0.25, nbands=4, convergence={'eigenstates': 7.8e-10})
atoms.calc = calc
energy = atoms.get_potential_energy()
niter = calc.get_number_of_iterations()
# The three eigenvalues e[1], e[2], and e[3] must be degenerate:
e = calc.get_eigenvalues()
print e[1] - e[3]
equal(e[1], e[3], 9.3e-8)
energy_tolerance = 0.0003
niter_tolerance = 0
equal(energy, -23.6277, energy_tolerance)
# Calculate non-selfconsistent PBE eigenvalues:
from gpaw.xc.tools import vxc
epbe0 = e - vxc(calc)[0, 0] + vxc(calc, 'PBE')[0, 0]
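# i.e. the standard first-order estimate: eps_PBE ~ eps - <v_xc> + <v_xc(PBE)>,
# where <v_xc> uses the ground-state functional; the [0, 0] index presumably
# selects spin 0 and k-point 0 (comment added for clarity).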
# Calculate selfconsistent PBE eigenvalues:
calc.set(xc='PBE')
energy = atoms.get_potential_energy()
epbe = calc.get_eigenvalues()
de = epbe[1] - epbe[0]
de0 = epbe0[1] - epbe0[0]
print de, de0
equal(de, de0, 0.001)
|
robwarm/gpaw-symm
|
gpaw/test/degeneracy.py
|
Python
|
gpl-3.0
| 1,168
|
[
"ASE",
"GPAW"
] |
322614c08888d2d40f1fd2f1e33fb6d2da8cbb201e6d62ba5ced07e66c2610d7
|
"""Command-line utility for training"""
import argparse
from model import train
import numpy as np
import pickle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', help='Path to the data')
parser.add_argument('--runs', type=int, default=10, help='Number of independent runs of the algorithm')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs')
parser.add_argument('--batch_train', type=int, default=512, help='Batch size')
parser.add_argument('--batch_test', type=int, default=512, help='Batch size for testing')
    parser.add_argument('--hidden_units', type=int, default=64, help='Number of hidden units of the LSTM cells')
parser.add_argument('--beta', type=float, default=5e-4, help='Coefficient of the L2 regularization')
parser.add_argument('--dropout', type=float, default=1., help='Probability to keep a given neuron (dropout)')
parser.add_argument('--inception', action='store_true', help='If specified, trains the inception-like net')
parser.add_argument('--concat', action='store_true', help='If specified, the outputs of the LSTM cells are concatenated instead of averaged')
parser.add_argument('--bidirectional', action='store_true', help='If specified, bidirectional LSTM is used (inception-like net uses bidirectional LSTM regardless of this parameter)')
args = parser.parse_args()
with open(args.path, 'rb') as f:
data = pickle.load(f)
precision = []
recall = []
run_n = args.runs
args = vars(args)
del args['path']
del args['runs']
for i in range(run_n):
print('*' * 40)
print('[RUN %i/%i]' % (i + 1, run_n))
print('*' * 40)
results = train(data, **args)
precision.append(results['precision'])
recall.append(results['recall'])
print('*' * 40)
print('*' * 40)
print('Final results averaged over %i runs:' % run_n)
print('Recall %.4f (%.4f), precision %.4f (%.4f)' % (np.mean(recall), np.std(recall), np.mean(precision), np.std(precision)))
print('***** Parameters *****')
print(results)
print('*' * 40)
print('*' * 40)
if __name__ == "__main__":
main()
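# Example invocation (hypothetical path and values, added for illustration):
#   python train.py data.pkl --runs 5 --epochs 20 --hidden_units 128 --bidirectional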
|
michael135/count-vector-paper-experiments
|
train.py
|
Python
|
bsd-2-clause
| 2,287
|
[
"NEURON"
] |
e0abd0b94cb913ace939a7a77ac0c6732826b8b09efc4fbd42a1339cac350e71
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The duplicate song removal logic for OpenLP.
"""
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import Registry, translate
from openlp.core.ui.wizard import OpenLPWizard, WizardStrings
from openlp.core.utils import AppLocation
from openlp.plugins.songs.lib import delete_song
from openlp.plugins.songs.lib.db import Song, MediaFile
from openlp.plugins.songs.forms.songreviewwidget import SongReviewWidget
from openlp.plugins.songs.lib.songcompare import songs_probably_equal
log = logging.getLogger(__name__)
class DuplicateSongRemovalForm(OpenLPWizard):
"""
This is the Duplicate Song Removal Wizard. It provides functionality to
search for and remove duplicate songs in the database.
"""
log.info('DuplicateSongRemovalForm loaded')
def __init__(self, plugin):
"""
Instantiate the wizard, and run any extra setup we need to.
``parent``
The QWidget-derived parent of the wizard.
``plugin``
The songs plugin.
"""
self.duplicate_song_list = []
self.review_current_count = 0
self.review_total_count = 0
# Used to interrupt ongoing searches when cancel is clicked.
self.break_search = False
super(DuplicateSongRemovalForm, self).__init__(Registry().get('main_window'),
plugin, 'duplicateSongRemovalWizard', ':/wizards/wizard_duplicateremoval.bmp', False)
self.setMinimumWidth(730)
def custom_signals(self):
"""
Song wizard specific signals.
"""
self.finish_button.clicked.connect(self.on_wizard_exit)
self.cancel_button.clicked.connect(self.on_wizard_exit)
def add_custom_pages(self):
"""
Add song wizard specific pages.
"""
# Add custom pages.
self.searching_page = QtGui.QWizardPage()
self.searching_page.setObjectName('searching_page')
self.searching_vertical_layout = QtGui.QVBoxLayout(self.searching_page)
self.searching_vertical_layout.setObjectName('searching_vertical_layout')
self.duplicate_search_progress_bar = QtGui.QProgressBar(self.searching_page)
self.duplicate_search_progress_bar.setObjectName('duplicate_search_progress_bar')
self.duplicate_search_progress_bar.setFormat(WizardStrings.PercentSymbolFormat)
self.searching_vertical_layout.addWidget(self.duplicate_search_progress_bar)
self.found_duplicates_edit = QtGui.QPlainTextEdit(self.searching_page)
self.found_duplicates_edit.setUndoRedoEnabled(False)
self.found_duplicates_edit.setReadOnly(True)
self.found_duplicates_edit.setObjectName('found_duplicates_edit')
self.searching_vertical_layout.addWidget(self.found_duplicates_edit)
self.searching_page_id = self.addPage(self.searching_page)
self.review_page = QtGui.QWizardPage()
self.review_page.setObjectName('review_page')
self.review_layout = QtGui.QVBoxLayout(self.review_page)
self.review_layout.setObjectName('review_layout')
self.review_scroll_area = QtGui.QScrollArea(self.review_page)
self.review_scroll_area.setObjectName('review_scroll_area')
self.review_scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.review_scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.review_scroll_area.setWidgetResizable(True)
self.review_scroll_area_widget = QtGui.QWidget(self.review_scroll_area)
self.review_scroll_area_widget.setObjectName('review_scroll_area_widget')
self.review_scroll_area_layout = QtGui.QHBoxLayout(self.review_scroll_area_widget)
self.review_scroll_area_layout.setObjectName('review_scroll_area_layout')
self.review_scroll_area_layout.setSizeConstraint(QtGui.QLayout.SetMinAndMaxSize)
self.review_scroll_area_layout.setMargin(0)
self.review_scroll_area_layout.setSpacing(0)
self.review_scroll_area.setWidget(self.review_scroll_area_widget)
self.review_layout.addWidget(self.review_scroll_area)
self.review_page_id = self.addPage(self.review_page)
        # Add a dummy page to the end, to prevent the finish button from appearing and the next button from
        # disappearing on the review page.
self.dummy_page = QtGui.QWizardPage()
self.dummy_page_id = self.addPage(self.dummy_page)
def retranslateUi(self):
"""
Song wizard localisation.
"""
self.setWindowTitle(translate('Wizard', 'Wizard'))
self.title_label.setText(WizardStrings.HeaderStyle % translate('OpenLP.Ui',
'Welcome to the Duplicate Song Removal Wizard'))
self.information_label.setText(translate("Wizard",
'This wizard will help you to remove duplicate songs from the song database. You will have a chance to '
'review every potential duplicate song before it is deleted. So no songs will be deleted without your '
'explicit approval.'))
self.searching_page.setTitle(translate('Wizard', 'Searching for duplicate songs.'))
self.searching_page.setSubTitle(translate('Wizard', 'Please wait while your songs database is analyzed.'))
self.update_review_counter_text()
self.review_page.setSubTitle(translate('Wizard',
'Here you can decide which songs to remove and which ones to keep.'))
def update_review_counter_text(self):
"""
Set the wizard review page header text.
"""
self.review_page.setTitle(translate('Wizard', 'Review duplicate songs (%s/%s)') % \
(self.review_current_count, self.review_total_count))
def custom_page_changed(self, page_id):
"""
Called when changing the wizard page.
``page_id``
ID of the page the wizard changed to.
"""
# Hide back button.
self.button(QtGui.QWizard.BackButton).hide()
if page_id == self.searching_page_id:
self.application.set_busy_cursor()
try:
self.button(QtGui.QWizard.NextButton).hide()
# Search duplicate songs.
max_songs = self.plugin.manager.get_object_count(Song)
if max_songs == 0 or max_songs == 1:
self.duplicate_search_progress_bar.setMaximum(1)
self.duplicate_search_progress_bar.setValue(1)
self.notify_no_duplicates()
return
# With x songs we have x*(x - 1) / 2 comparisons.
max_progress_count = max_songs * (max_songs - 1) // 2
self.duplicate_search_progress_bar.setMaximum(max_progress_count)
songs = self.plugin.manager.get_all_objects(Song)
for outer_song_counter in range(max_songs - 1):
for inner_song_counter in range(outer_song_counter + 1, max_songs):
if songs_probably_equal(songs[outer_song_counter], songs[inner_song_counter]):
duplicate_added = self.add_duplicates_to_song_list(songs[outer_song_counter],
songs[inner_song_counter])
if duplicate_added:
self.found_duplicates_edit.appendPlainText(songs[outer_song_counter].title + " = " +
songs[inner_song_counter].title)
self.duplicate_search_progress_bar.setValue(self.duplicate_search_progress_bar.value() + 1)
# The call to process_events() will keep the GUI responsive.
self.application.process_events()
if self.break_search:
return
self.review_total_count = len(self.duplicate_song_list)
if self.review_total_count == 0:
self.notify_no_duplicates()
else:
self.button(QtGui.QWizard.NextButton).show()
finally:
self.application.set_normal_cursor()
elif page_id == self.review_page_id:
self.process_current_duplicate_entry()
def notify_no_duplicates(self):
"""
Notifies the user, that there were no duplicates found in the database.
"""
self.button(QtGui.QWizard.FinishButton).show()
self.button(QtGui.QWizard.FinishButton).setEnabled(True)
self.button(QtGui.QWizard.NextButton).hide()
self.button(QtGui.QWizard.CancelButton).hide()
QtGui.QMessageBox.information(self, translate('Wizard', 'Information'),
translate('Wizard', 'No duplicate songs have been found in the database.'),
QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Ok))
def add_duplicates_to_song_list(self, search_song, duplicate_song):
"""
Inserts a song duplicate (two similar songs) to the duplicate song list.
If one of the two songs is already part of the duplicate song list,
don't add another duplicate group but add the other song to that group.
Returns True if at least one of the songs was added, False if both were already
member of a group.
``search_song``
The song we searched the duplicate for.
``duplicate_song``
The duplicate song.
"""
duplicate_group_found = False
duplicate_added = False
for duplicate_group in self.duplicate_song_list:
# Skip the first song in the duplicate lists, since the first one has to be an earlier song.
            if search_song in duplicate_group and duplicate_song not in duplicate_group:
duplicate_group.append(duplicate_song)
duplicate_group_found = True
duplicate_added = True
break
            elif search_song not in duplicate_group and duplicate_song in duplicate_group:
duplicate_group.append(search_song)
duplicate_group_found = True
duplicate_added = True
break
elif search_song in duplicate_group and duplicate_song in duplicate_group:
duplicate_group_found = True
duplicate_added = False
break
if not duplicate_group_found:
self.duplicate_song_list.append([search_song, duplicate_song])
duplicate_added = True
return duplicate_added
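        # Worked example (comment added for clarity): adding (A, B) creates the
        # group [A, B]; a later (B, C) extends it to [A, B, C] and returns True;
        # a subsequent (A, C) finds both songs already grouped and returns False.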
def on_wizard_exit(self):
"""
Once the wizard is finished, refresh the song list,
since we potentially removed songs from it.
"""
self.break_search = True
self.plugin.media_item.on_search_text_button_clicked()
def setDefaults(self):
"""
Set default form values for the song import wizard.
"""
self.restart()
self.duplicate_search_progress_bar.setValue(0)
self.found_duplicates_edit.clear()
def validateCurrentPage(self):
"""
Controls whether we should switch to the next wizard page. This method loops
on the review page as long as there are more song duplicates to review.
"""
if self.currentId() == self.review_page_id:
# As long as it's not the last duplicate list entry we revisit the review page.
if len(self.duplicate_song_list) == 1:
return True
else:
self.proceed_to_next_review()
return False
return super(DuplicateSongRemovalForm, self).validateCurrentPage()
def remove_button_clicked(self, song_review_widget):
"""
Removes a song from the database, removes the GUI element representing the
song on the review page, and disable the remove button if only one duplicate
is left.
``song_review_widget``
The SongReviewWidget whose song we should delete.
"""
# Remove song from duplicate song list.
self.duplicate_song_list[-1].remove(song_review_widget.song)
# Remove song from the database.
delete_song(song_review_widget.song.id, self.plugin)
# Remove GUI elements for the song.
self.review_scroll_area_layout.removeWidget(song_review_widget)
song_review_widget.setParent(None)
# Check if we only have one duplicate left:
# 2 stretches + 1 SongReviewWidget = 3
# The SongReviewWidget is then at position 1.
if len(self.duplicate_song_list[-1]) == 1:
self.review_scroll_area_layout.itemAt(1).widget().song_remove_button.setEnabled(False)
def proceed_to_next_review(self):
"""
Removes the previous review UI elements and calls process_current_duplicate_entry.
"""
# Remove last duplicate group.
self.duplicate_song_list.pop()
# Remove all previous elements.
for i in reversed(list(range(self.review_scroll_area_layout.count()))):
item = self.review_scroll_area_layout.itemAt(i)
if isinstance(item, QtGui.QWidgetItem):
# The order is important here, if the .setParent(None) call is done
# before the .removeItem() call, a segfault occurs.
widget = item.widget()
self.review_scroll_area_layout.removeItem(item)
widget.setParent(None)
else:
self.review_scroll_area_layout.removeItem(item)
# Process next set of duplicates.
self.process_current_duplicate_entry()
def process_current_duplicate_entry(self):
"""
Update the review counter in the wizard header, add song widgets for
the current duplicate group to review, if it's the last
duplicate song group, hide the "next" button and show the "finish" button.
"""
# Update the counter.
self.review_current_count = self.review_total_count - (len(self.duplicate_song_list) - 1)
self.update_review_counter_text()
# Add song elements to the UI.
if len(self.duplicate_song_list) > 0:
self.review_scroll_area_layout.addStretch(1)
for duplicate in self.duplicate_song_list[-1]:
song_review_widget = SongReviewWidget(self.review_page, duplicate)
song_review_widget.song_remove_button_clicked.connect(self.remove_button_clicked)
self.review_scroll_area_layout.addWidget(song_review_widget)
self.review_scroll_area_layout.addStretch(1)
# Change next button to finish button on last review.
if len(self.duplicate_song_list) == 1:
self.button(QtGui.QWizard.FinishButton).show()
self.button(QtGui.QWizard.FinishButton).setEnabled(True)
self.button(QtGui.QWizard.NextButton).hide()
self.button(QtGui.QWizard.CancelButton).hide()
def _get_main_window(self):
"""
Adds the main window to the class dynamically.
"""
if not hasattr(self, '_main_window'):
self._main_window = Registry().get('main_window')
return self._main_window
main_window = property(_get_main_window)
def _get_application(self):
"""
        Adds the application to the class dynamically.
Windows needs to access the application in a dynamic manner.
"""
if os.name == 'nt':
return Registry().get('application')
else:
if not hasattr(self, '_application'):
self._application = Registry().get('application')
return self._application
application = property(_get_application)
|
marmyshev/item_title
|
openlp/plugins/songs/forms/duplicatesongremovalform.py
|
Python
|
gpl-2.0
| 17,812
|
[
"Brian"
] |
0eb173feba1e1aaa7725cdf51bad34c4dc48ae575990298ba8db6c92f64beaf9
|
import os
import numpy as np
from numpy.linalg import inv
from datetime import datetime
import models.sxrd_new1 as model
from models.utils import UserVars
import batchfile.locate_path as batch_path
import dump_files.locate_path as output_path
import models.domain_creator as domain_creator
import supportive_functions.create_plots as create_plots
import models.domain_creator_sorbate as domain_creator_sorbate
import supportive_functions.make_parameter_table_GenX_5_beta as make_grid
##==========================================<program begins from here>=========================================##
COUNT_TIME=False
if COUNT_TIME:t_0=datetime.now()
VERSION=1.1#version number, to make it easier to keep the code compatible with gx files based on old-version scripts
##<global handles>##
RUN=False
BATCH_PATH_HEAD,OUTPUT_FILE_PATH=batch_path.module_path_locator(),output_path.module_path_locator()
F1F2=np.loadtxt(os.path.join(BATCH_PATH_HEAD,'Zr_K_edge_Aug17_2016.f1f2'))#the energy column should NOT have duplicate values after rounding up to 0 digit. If so, cut off rows of duplicate energy!
RAXR_EL,E0,NUMBER_RAXS_SPECTRA,RAXR_FIT_MODE,FREEZE='Zr',18007,21,'MD',True#FREEZE=True means the resonant element does not contribute to the non-resonant structure factor; FREEZE=False means it does.
NUMBER_DOMAIN,COHERENCE=2,True
HEIGHT_OFFSET=-2.6685#if set to 0, the top atomic layer is at 2.6685 in fractional unit before relaxation
XY_OFFSET=[0,0]#takes effect only for structural atoms (does not include Gaussian atoms)
GROUP_SCHEME=[[1,0]]#means group Domain1 and Domain2 for inplane and out-of-plane movement; set Domain2=Domain1 inside the sim func
##<setting slabs>##
#unitcell = model.UnitCell(5.1988, 9.0266, 20.1058, 90, 95.782, 90)
unitcell = model.UnitCell(5.1988, 9.0266, 20.1058, 90, 95.782, 90)
inst = model.Instrument(wavel = .833, alpha = 2.0)
bulk, Domain1, Domain2 = model.Slab(T_factor='u'), model.Slab(c = 1.0,T_factor='u'), model.Slab(c = 1.0,T_factor='u')
domain_creator.add_atom_in_slab(bulk,os.path.join(BATCH_PATH_HEAD,'muscovite_001_bulk_u_corrected.str'),height_offset=HEIGHT_OFFSET)
domain_creator.add_atom_in_slab(Domain1,os.path.join(BATCH_PATH_HEAD,'muscovite_001_surface_Al_u_corrected.str'),attach='_D1',height_offset=HEIGHT_OFFSET)
domain_creator.add_atom_in_slab(Domain2,os.path.join(BATCH_PATH_HEAD,'muscovite_001_surface_Si_u_corrected.str'),attach='_D2',height_offset=HEIGHT_OFFSET)
##<coordination system definition>##
x0_v,y0_v,z0_v=np.array([1.,0.,0.]),np.array([0.,1.,0.]),np.array([0.,0.,1.])
f1=lambda x1,y1,z1,x2,y2,z2:np.array([[np.dot(x2,x1),np.dot(x2,y1),np.dot(x2,z1)],\
[np.dot(y2,x1),np.dot(y2,y1),np.dot(y2,z1)],\
[np.dot(z2,x1),np.dot(z2,y1),np.dot(z2,z1)]])
BASIS=np.array([unitcell.a, unitcell.b, unitcell.c])
#BASIS_SET=[[1,0,0],[0,1,0],[0.10126,0,1.0051136]]
BASIS_SET=[[1,0,0],[0,1,0],[np.tan(unitcell.beta-np.pi/2.),0,1./np.cos(unitcell.beta-np.pi/2.)]]
T=inv(np.transpose(f1(x0_v,y0_v,z0_v,*BASIS_SET)))
T_INV=inv(T)
##<Adding sorbates>##to be set##
#domain1
BUILD_GRID=3#FOR cubic structure only
LEVEL,CAP,EXTRA_SORBATE=13,[],[]
SYMMETRY,SWITCH_EXTRA_SORBATE=False,[True]*10
MIRROR_EXTRA_SORBATE=[True]*10
#NUMBER_SORBATE_LAYER,NUMBER_EL_MOTIF=1,LEVEL+len(CAP)*2+len(EXTRA_SORBATE)#1 if monomer, 2 if dimmer and so on, for square_antiprism only
NUMBER_SORBATE_LAYER=1
NUMBER_EL_MOTIF=None
if isinstance(BUILD_GRID, list):
    NUMBER_EL_MOTIF=len(BUILD_GRID)
elif isinstance(BUILD_GRID, int):
    NUMBER_EL_MOTIF=BUILD_GRID**3
INFO_LIB={'basis':BASIS,'sorbate_el':'Zr','coordinate_el':'O','T':T,'T_INV':T_INV,'oligomer_type':'polymer'}#polymer_new_rot if square_antiprism
for i in range(NUMBER_SORBATE_LAYER):
vars()['rgh_domain1_set'+str(i+1)]=UserVars()
geo_lib_domain1={'cent_point_offset_x':0,'cent_point_offset_y':0,'cent_point_offset_z':0,'r':2.2,'theta':59.2641329,'rot_x':0,'rot_y':0,'rot_z':0,'shift_btop':0,'shift_mid':0,'shift_cap':0,'rot_ang_attach1':0,'rot_ang_attach2':0,'rot_ang_attach3':0}
Domain1,vars()['rgh_domain1_set'+str(i+1)]=domain_creator.add_sorbate_new(domain=Domain1,anchored_atoms=[],func=domain_creator_sorbate.OS_cubic_oligomer,geo_lib=geo_lib_domain1,info_lib=INFO_LIB,domain_tag='_D1',rgh=vars()['rgh_domain1_set'+str(i+1)],index_offset=[i*2*NUMBER_EL_MOTIF,NUMBER_EL_MOTIF+i*2*NUMBER_EL_MOTIF],xy_offset=XY_OFFSET,height_offset=HEIGHT_OFFSET,symmetry_couple=SYMMETRY,level=LEVEL,cap=CAP,attach_sorbate_number=EXTRA_SORBATE,first_or_second=SWITCH_EXTRA_SORBATE,mirror=MIRROR_EXTRA_SORBATE,build_grid=BUILD_GRID)
##<Adding Gaussian peaks>##
NUMBER_GAUSSIAN_PEAK, EL_GAUSSIAN_PEAK, FIRST_PEAK_HEIGHT=0,'O',5
GAUSSIAN_OCC_INIT, GAUSSIAN_LAYER_SPACING, GAUSSIAN_U_INIT=1,2,0.1
GAUSSIAN_SHAPE, GAUSSIAN_RMS='Flat',2
Domain1, Gaussian_groups,Gaussian_group_names=domain_creator.add_gaussian(domain=Domain1,el=EL_GAUSSIAN_PEAK,number=NUMBER_GAUSSIAN_PEAK,first_peak_height=FIRST_PEAK_HEIGHT,spacing=GAUSSIAN_LAYER_SPACING,u_init=GAUSSIAN_U_INIT,occ_init=GAUSSIAN_OCC_INIT,height_offset=HEIGHT_OFFSET,c=unitcell.c,domain_tag='_D1',shape=GAUSSIAN_SHAPE,gaussian_rms=GAUSSIAN_RMS)
for i in range(len(Gaussian_groups)):vars()[Gaussian_group_names[i]]=Gaussian_groups[i]
rgh_gaussian=domain_creator.define_gaussian_vars(rgh=UserVars(),domain=Domain1,shape=GAUSSIAN_SHAPE)
'''WARNING! Choose one way to freeze element. Errors will appear if using both ways.'''
##<Freeze Elements by specifing values>##
U_RAXS_LIST=[]
OC_RAXS_LIST=[]
X_RAXS_LIST=[]
Y_RAXS_LIST=[]
Z_RAXS_LIST=np.array([])/unitcell.c - 1.
el_freezed=RAXR_EL
Domain1=domain_creator.add_freezed_els(domain=Domain1,el=el_freezed,u=U_RAXS_LIST,oc=OC_RAXS_LIST,x=X_RAXS_LIST,y=Y_RAXS_LIST,z=Z_RAXS_LIST)
##<Freeze Elements using adding_gaussian function>##
NUMBER_GAUSSIAN_PEAK_FREEZE, EL_GAUSSIAN_PEAK_FREEZE, FIRST_PEAK_HEIGHT_FREEZE=0,RAXR_EL,5
GAUSSIAN_OCC_INIT_FREEZE, GAUSSIAN_LAYER_SPACING_FREEZE, GAUSSIAN_U_INIT_FREEZE=1,2,0.1
GAUSSIAN_SHAPE_FREEZE, GAUSSIAN_RMS_FREEZE='Flat',2
Domain1, Gaussian_groups_freeze,Gaussian_group_names_freeze=domain_creator.add_gaussian(domain=Domain1,el=EL_GAUSSIAN_PEAK_FREEZE,number=NUMBER_GAUSSIAN_PEAK_FREEZE,first_peak_height=FIRST_PEAK_HEIGHT_FREEZE,spacing=GAUSSIAN_LAYER_SPACING_FREEZE,u_init=GAUSSIAN_U_INIT_FREEZE,occ_init=GAUSSIAN_OCC_INIT_FREEZE,height_offset=HEIGHT_OFFSET,c=unitcell.c,domain_tag='_D1',shape=GAUSSIAN_SHAPE_FREEZE,gaussian_rms=GAUSSIAN_RMS_FREEZE,freeze_tag=True)
for i in range(len(Gaussian_groups_freeze)):vars()[Gaussian_group_names_freeze[i]]=Gaussian_groups_freeze[i]
rgh_gaussian_freeze=domain_creator.define_gaussian_vars(rgh=UserVars(),domain=Domain1,shape=GAUSSIAN_SHAPE_FREEZE)
##<Define atom groups>##
#surface atoms
group_number=5##to be set##(number of groups to be considered for model fit)
groups,group_names,atom_group_info=domain_creator.setup_atom_group_muscovite(domain=[Domain1,Domain2],group_number=group_number)
for i in range(len(groups)):vars()[group_names[i]]=groups[i]
#sorbate_atoms
sorbate_id_list_domain1,sorbate_group_names_domain1=domain_creator.generate_sorbate_ids(Domain1,NUMBER_SORBATE_LAYER,INFO_LIB['sorbate_el'],NUMBER_EL_MOTIF,symmetry=SYMMETRY,level=CAP)
sorbate_atom_group_info=[{'domain':Domain1,'ref_id_list':sorbate_id_list_domain1,'ref_group_names':sorbate_group_names_domain1,'ref_sym_list':[],'domain_tag':''}]
sorbate_groups,sorbate_group_names=domain_creator.setup_atom_group(gp_info=sorbate_atom_group_info)
for i in range(len(sorbate_groups)):vars()[sorbate_group_names[i]]=sorbate_groups[i]
##<Define other pars>##
rgh=domain_creator.define_global_vars(rgh=UserVars(),domain_number=NUMBER_DOMAIN)#global vars
rgh_raxs=domain_creator.define_raxs_vars(rgh=UserVars(),number_spectra=NUMBER_RAXS_SPECTRA,number_domain=1)#RAXR spectra pars
rgh_dlw=domain_creator.define_diffused_layer_water_vars(rgh=UserVars())#Diffused Layered water pars
rgh_dls=domain_creator.define_diffused_layer_sorbate_vars(rgh=UserVars())#Diffused Layered sorbate pars
##<make fit table file>##
if not RUN:
table_container=[]
rgh_instance_list=[rgh]+groups+sorbate_groups+Gaussian_groups+[rgh_gaussian]+[rgh_gaussian_freeze]+[vars()['rgh_domain1_set'+str(i+1)] for i in range(NUMBER_SORBATE_LAYER)]+[rgh_dlw,rgh_dls]
rgh_instance_name_list=['rgh']+group_names+sorbate_group_names+Gaussian_group_names+['rgh_gaussian']+['rgh_gaussian_freeze']+['rgh_domain1_set'+str(i+1) for i in range(NUMBER_SORBATE_LAYER)]+['rgh_dlw','rgh_dls']
table_container=make_grid.set_table_input_all(container=table_container,rgh_instance_list=rgh_instance_list,rgh_instance_name_list=rgh_instance_name_list,par_file=os.path.join(BATCH_PATH_HEAD,'pars_ranges.txt'))
#raxs pars
table_container=make_grid.set_table_input_raxs(container=table_container,rgh_group_instance=rgh_raxs,rgh_group_instance_name='rgh_raxs',par_range={'a':[0,20],'b':[-5,5],'c':[0,1],'A':[0,2],'P':[0,1]},number_spectra=NUMBER_RAXS_SPECTRA,number_domain=1)
#build up the tab file
make_grid.make_table(container=table_container,file_path=os.path.join(OUTPUT_FILE_PATH,'par_table.tab'))
##<fitting function part>##
if COUNT_TIME:t_1=datetime.now()
VARS=vars()
def Sim(data,VARS=VARS):
##<update the basis info>##
INFO_LIB['basis']=np.array([unitcell.a, unitcell.b, unitcell.c])
##<Extract pars>##
layered_water_pars=vars(rgh_dlw)
layered_sorbate_pars=vars(rgh_dls)
raxs_vars=vars(rgh_raxs)
##<update sorbates>##
[domain_creator.update_sorbate_new(domain=Domain1,anchored_atoms=[],func=domain_creator_sorbate.OS_cubic_oligomer,info_lib=INFO_LIB,domain_tag='_D1',rgh=VARS['rgh_domain1_set'+str(i+1)],index_offset=[i*2*NUMBER_EL_MOTIF,NUMBER_EL_MOTIF+i*2*NUMBER_EL_MOTIF],xy_offset=XY_OFFSET,height_offset=HEIGHT_OFFSET,level=LEVEL,symmetry_couple=SYMMETRY,cap=CAP,attach_sorbate_number=EXTRA_SORBATE,first_or_second=SWITCH_EXTRA_SORBATE,mirror=MIRROR_EXTRA_SORBATE,build_grid=BUILD_GRID) for i in range(NUMBER_SORBATE_LAYER)]#domain1
##<update gaussian peaks>##
if NUMBER_GAUSSIAN_PEAK>0:
domain_creator.update_gaussian(domain=Domain1,rgh=rgh_gaussian,groups=Gaussian_groups,el=EL_GAUSSIAN_PEAK,number=NUMBER_GAUSSIAN_PEAK,height_offset=HEIGHT_OFFSET,c=unitcell.c,domain_tag='_D1',shape=GAUSSIAN_SHAPE,print_items=False,use_cumsum=True)
if NUMBER_GAUSSIAN_PEAK_FREEZE>0:
domain_creator.update_gaussian(domain=Domain1,rgh=rgh_gaussian_freeze,groups=Gaussian_groups_freeze,el=EL_GAUSSIAN_PEAK_FREEZE,number=NUMBER_GAUSSIAN_PEAK_FREEZE,height_offset=HEIGHT_OFFSET,c=unitcell.c,domain_tag='_D1',shape=GAUSSIAN_SHAPE_FREEZE,print_items=False,use_cumsum=True,freeze_tag=True)
##<link groups>##
[eval(each_command) for each_command in domain_creator.link_atom_group(gp_info=atom_group_info,gp_scheme=GROUP_SCHEME)]
##<format domains>##
domain={'domains':[Domain1,Domain2],'layered_water_pars':layered_water_pars,'layered_sorbate_pars':layered_sorbate_pars,\
'global_vars':rgh,'raxs_vars':raxs_vars,'F1F2':F1F2,'E0':E0,'el':RAXR_EL,'freeze':FREEZE}
sample = model.Sample(inst, bulk, domain, unitcell,coherence=COHERENCE,surface_parms={'delta1':0.,'delta2':0.})
##<calculate structure factor>##
F,fom_scaler=[],[]
i=0
for data_set in data:
f=np.array([])
h = data_set.extra_data['h']
k = data_set.extra_data['k']
x = data_set.x
y = data_set.extra_data['Y']
LB = data_set.extra_data['LB']
dL = data_set.extra_data['dL']
if x[0]>100:
i+=1
rough = (1-rgh.beta)/((1-rgh.beta)**2 + 4*rgh.beta*np.sin(np.pi*(y-LB)/dL)**2)**0.5
else:
rough = (1-rgh.beta)/((1-rgh.beta)**2 + 4*rgh.beta*np.sin(np.pi*(x-LB)/dL)**2)**0.5
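        # The expression above is the standard beta-model CTR roughness factor
        # R = (1-beta)/sqrt((1-beta)^2 + 4*beta*sin(pi*(l-LB)/dL)^2); x[0]>100
        # presumably flags an energy scan (RAXR), where y holds L instead of x
        # (comment added for clarity).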
f=rough*abs(sample.calculate_structure_factor(h,k,x,y,index=i,fit_mode=RAXR_FIT_MODE,height_offset=HEIGHT_OFFSET*unitcell.c,version=VERSION))
F.append(f*f)
fom_scaler.append(1)
if COUNT_TIME:
t_2=datetime.now()
##<print structure/plotting files>##
if not RUN:
domain_creator.print_structure_files_muscovite_new(domain_list=[Domain1,Domain2],z_shift=0.8+HEIGHT_OFFSET,number_gaussian=NUMBER_GAUSSIAN_PEAK,el=RAXR_EL,matrix_info=INFO_LIB,save_file=OUTPUT_FILE_PATH)
create_plots.generate_plot_files(output_file_path=OUTPUT_FILE_PATH,sample=sample,rgh=rgh,data=data,fit_mode=RAXR_FIT_MODE,z_min=0,z_max=50,RAXR_HKL=[0,0,20],height_offset=HEIGHT_OFFSET*BASIS[2],version=VERSION,freeze=FREEZE)
#make sure the tab_file is saved in the dumped files directory before running this function
domain_creator.print_data_for_publication_B3_muscovite(N_sorbate=NUMBER_GAUSSIAN_PEAK+len(U_RAXS_LIST)+NUMBER_GAUSSIAN_PEAK_FREEZE,domain=Domain1,z_shift=0.8+HEIGHT_OFFSET+0.8666,save_file=os.path.join(OUTPUT_FILE_PATH,'temp_publication_data_muscovite.xyz'),tab_file=os.path.join(OUTPUT_FILE_PATH,'best_fit_pars.tab'))
#then do this command inside shell to extract the errors for A and P: model.script_module.create_plots.append_errors_for_A_P(par_instance=model.parameters,dump_file=os.path.join(model.script_module.OUTPUT_FILE_PATH,'temp_plot_raxr_A_P_Q'),raxs_rgh='rgh_raxs')
make_dummy_data,combine_data_sets=False,False
if make_dummy_data:
domain_creator.make_dummy_data(file=os.path.join(OUTPUT_FILE_PATH,'temp_dummy_data.dat'),data=data,I=F)
if combine_data_sets:
domain_creator.combine_all_datasets(file=os.path.join(OUTPUT_FILE_PATH,'temp_full_dataset.dat'),data=data)
if COUNT_TIME:
t_3=datetime.now()
print "It took "+str(t_1-t_0)+" seconds to setup"
print "It took "+str(t_2-t_1)+" seconds to do calculation for one generation"
print "It took "+str(t_3-t_2)+" seconds to generate output files"
return F,1,fom_scaler
##========================================<program ends here>========================================================##
|
jackey-qiu/genx_pc_qiu
|
scripts/Manual to use RAXS CTR for muscovite polymer system_debug.py
|
Python
|
gpl-3.0
| 13,908
|
[
"Gaussian"
] |
4ea496856cbb426adb6e90981eeef86c5d7f5dd37e33ed90230c61b3e99999d8
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to validate the requirements for data migration."""
import logging
from hive_to_bigquery.properties_reader import PropertiesReader
logger = logging.getLogger('Hive2BigQuery')
LOCATION_HELP_URL = "https://cloud.google.com/bigquery/docs/dataset-locations#data-locations"
class ResourceValidator(object):
"""Validates all the user provided resources.
Validates the existence of resources such as Hive database, Hive table,
GCS bucket, BigQuery dataset and also validates the compatibility between
the GCS bucket location and BigQuery dataset location.
"""
def __init__(self):
pass
@staticmethod
def check_location_compatibility(bq_dataset_location, gcs_bucket_location):
"""Checks the compatibility of the BigQuery dataset location and GCS
bucket location to support loading data.
Args:
bq_dataset_location (str): BigQuery dataset location.
gcs_bucket_location (str): GCS bucket location.
Returns:
boolean: True if compatible, False if not.
"""
# Update these locations if there are any new additions to BigQuery
# locations.
# List of BigQuery multi-regional locations.
bq_multi_regional_locations = ['EU']
# List of BigQuery regional locations.
bq_regional_locations = [
'asia-east1',
'asia-northeast1',
'asia-southeast1',
'australia-southeast1',
'europe-north1',
'europe-west2',
'us-east4',
]
# Mapping of BigQuery multi-regional location to supported GCS bucket
# locations.
bq_gcs_loc_map = {
"EU": [
"EU",
"europe-north1",
"europe-west1",
"europe-west2",
"europe-west3",
"europe-west4",
]
}
if bq_dataset_location == "US":
# If your dataset is in the US multi-regional location, you can
# load data from a Cloud Storage bucket in any regional or
# multi-regional location.
return True
elif bq_dataset_location in bq_multi_regional_locations:
# If your BigQuery dataset is in a multi-regional location,
# the Cloud Storage bucket containing the data you're loading
# must be in a regional or multi-regional bucket in the same
# location.
if gcs_bucket_location in bq_gcs_loc_map[bq_dataset_location]:
return True
elif bq_dataset_location in bq_regional_locations:
# If your dataset is in a regional location, your Cloud Storage
# bucket must be a regional bucket in the same location.
if bq_dataset_location == gcs_bucket_location:
return True
else:
# Handle any new additions to the BigQuery supported locations.
pass
return False
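    # Illustrative examples (comment added, not original code):
    #   check_location_compatibility('US', 'asia-east1')        -> True
    #   check_location_compatibility('EU', 'europe-west2')      -> True
    #   check_location_compatibility('asia-east1', 'us-east4')  -> False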
@staticmethod
def validate(hive_component, gcs_component, bq_component):
"""Method to validate the resources.
Args:
hive_component (:class:`HiveComponent`): Instance of
HiveComponent to connect to Hive.
gcs_component (:class:`GCSStorageComponent`): Instance of
GCSStorageComponent to do GCS operations.
bq_component (:class:`BigQueryComponent`): Instance of
BigQueryComponent to do BigQuery operations.
"""
if hive_component.check_database_exists(
PropertiesReader.get('hive_database')):
logger.debug("Hive database %s found",
PropertiesReader.get('hive_database'))
else:
logger.error("Hive database %s doesn't exist",
PropertiesReader.get('hive_database'))
return False
if hive_component.check_table_exists(
PropertiesReader.get('hive_database'),
PropertiesReader.get('hive_table_name')):
logger.debug("Hive table %s found in database %s",
PropertiesReader.get('hive_table_name'),
PropertiesReader.get('hive_database'))
else:
logger.error("Hive table %s doesn't exist in database %s",
PropertiesReader.get('hive_table_name'),
PropertiesReader.get('hive_database'))
return False
if gcs_component.check_bucket_exists(
PropertiesReader.get('gcs_bucket_name')):
logger.debug("GCS Bucket %s found",
PropertiesReader.get('gcs_bucket_name'))
else:
logger.error("GCS bucket %s does not exist",
PropertiesReader.get('gcs_bucket_name'))
return False
if bq_component.check_dataset_exists(
PropertiesReader.get('dataset_id')):
logger.debug("BigQuery dataset %s found",
PropertiesReader.get('dataset_id'))
else:
logger.error("BigQuery dataset %s does not exist",
PropertiesReader.get('dataset_id'))
return False
bq_dataset_location = bq_component.get_dataset_location(
PropertiesReader.get('dataset_id'))
gcs_bucket_location = gcs_component.get_bucket_location(
PropertiesReader.get('gcs_bucket_name'))
# Checks whether the BigQuery dataset location and GCS bucket
# location are compatible, since location constraints do not allow
# loading data if locations are not compatible.
if ResourceValidator.check_location_compatibility(
bq_dataset_location, gcs_bucket_location):
logger.debug(
"Dataset location %s and GCS Bucket location %s matches",
bq_dataset_location, gcs_bucket_location)
else:
logger.critical(
"Dataset location %s and GCS Bucket location %s do not match",
bq_dataset_location, gcs_bucket_location)
logger.critical("Visit %s for more information", LOCATION_HELP_URL)
return False
return True
|
CloudVLab/professional-services
|
tools/hive-bigquery/hive_to_bigquery/resource_validator.py
|
Python
|
apache-2.0
| 6,856
|
[
"VisIt"
] |
1fe35446e85d7d63aa8f3feb115b75ace83325152e07aa82a2c8cc24d2177847
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAllelicimbalance(RPackage):
"""Provides a framework for allelic specific expression
investigation using RNA-seq data."""
homepage = "http://bioconductor.org/packages/AllelicImbalance/"
url = "https://git.bioconductor.org/packages/AllelicImbalance"
version('1.14.0', git='https://git.bioconductor.org/packages/AllelicImbalance', commit='35958534945819baafde0e13d1eb4d05a514142c')
depends_on('r@3.4.0:3.4.9', when='@1.14.0')
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-variantannotation', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-gviz', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-latticeextra', type=('build', 'run'))
depends_on('r-gridextra', type=('build', 'run'))
depends_on('r-seqinr', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-nlme', type=('build', 'run'))
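# Example usage (added for illustration): `spack install r-allelicimbalance@1.14.0`
# builds this package together with the R dependencies declared above.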
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-allelicimbalance/package.py
|
Python
|
lgpl-2.1
| 2,759
|
[
"Bioconductor"
] |
7b50ef05b7b8581c151fe39421aec998cbebf3e75dff62268401a3d2bdf2e4fe
|
#!/usr/bin/env python
# This code is a direct translation of the Tcl code in
# ImagePlaneWidget.tcl. It could easily be written using a nice class
# to do the job but the present code should definitely make for an
# illustrative example.
# This example demonstrates how to use the vtkImagePlaneWidget
# to probe a 3D image dataset with three orthogonal planes.
# Buttons are provided to:
# a) capture the render window display to a tiff file
# b) x,y,z buttons reset the widget to orthonormal
# positioning, set the horizontal slider to move the
# associated widget along its normal, and set the
# camera to face the widget
# c) right clicking on x,y,z buttons pops up a menu to set
# the associated widget's reslice interpolation mode
import vtk
import Tkinter
from vtk.tk.vtkTkRenderWindowInteractor import \
vtkTkRenderWindowInteractor
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Start by loading some data.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
v16.Update()
xMin, xMax, yMin, yMax, zMin, zMax = v16.GetExecutive().GetWholeExtent(v16.GetOutputInformation(0))
spacing = v16.GetOutput().GetSpacing()
sx, sy, sz = spacing
origin = v16.GetOutput().GetOrigin()
ox, oy, oz = origin
# An outline is shown for context.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# The shared picker enables us to use 3 planes at one time
# and gets the picking order right
picker = vtk.vtkCellPicker()
picker.SetTolerance(0.005)
# The 3 image plane widgets are used to probe the dataset.
planeWidgetX = vtk.vtkImagePlaneWidget()
planeWidgetX.DisplayTextOn()
planeWidgetX.SetInputConnection(v16.GetOutputPort())
planeWidgetX.SetPlaneOrientationToXAxes()
planeWidgetX.SetSliceIndex(32)
planeWidgetX.SetPicker(picker)
planeWidgetX.SetKeyPressActivationValue("x")
prop1 = planeWidgetX.GetPlaneProperty()
prop1.SetColor(1, 0, 0)
planeWidgetY = vtk.vtkImagePlaneWidget()
planeWidgetY.DisplayTextOn()
planeWidgetY.SetInputConnection(v16.GetOutputPort())
planeWidgetY.SetPlaneOrientationToYAxes()
planeWidgetY.SetSliceIndex(32)
planeWidgetY.SetPicker(picker)
planeWidgetY.SetKeyPressActivationValue("y")
prop2 = planeWidgetY.GetPlaneProperty()
prop2.SetColor(1, 1, 0)
planeWidgetY.SetLookupTable(planeWidgetX.GetLookupTable())
# for the z-slice, turn off texture interpolation:
# interpolation is now nearest neighbour, to demonstrate
# cross-hair cursor snapping to pixel centers
planeWidgetZ = vtk.vtkImagePlaneWidget()
planeWidgetZ.DisplayTextOn()
planeWidgetZ.SetInputConnection(v16.GetOutputPort())
planeWidgetZ.SetPlaneOrientationToZAxes()
planeWidgetZ.SetSliceIndex(46)
planeWidgetZ.SetPicker(picker)
planeWidgetZ.SetKeyPressActivationValue("z")
prop3 = planeWidgetZ.GetPlaneProperty()
prop3.SetColor(0, 0, 1)
planeWidgetZ.SetLookupTable(planeWidgetX.GetLookupTable())
# Create the RenderWindow and Renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# Add the outline actor to the renderer, set the background color and size
ren.AddActor(outlineActor)
renWin.SetSize(600, 600)
ren.SetBackground(0.1, 0.1, 0.2)
current_widget = planeWidgetZ
mode_widget = planeWidgetZ
# Create the GUI
# We first create the supporting functions (callbacks) for the GUI
#
# Align the camera so that it faces the desired widget
def AlignCamera():
#global ox, oy, oz, sx, sy, sz, xMax, xMin, yMax, yMin, zMax, \
# zMin, slice_number
#global current_widget
cx = ox+(0.5*(xMax-xMin))*sx
cy = oy+(0.5*(yMax-yMin))*sy
    cz = oz+(0.5*(zMax-zMin))*sz
vx, vy, vz = 0, 0, 0
nx, ny, nz = 0, 0, 0
iaxis = current_widget.GetPlaneOrientation()
if iaxis == 0:
vz = -1
nx = ox + xMax*sx
cx = ox + slice_number*sx
elif iaxis == 1:
vz = -1
ny = oy+yMax*sy
cy = oy+slice_number*sy
else:
vy = 1
nz = oz+zMax*sz
cz = oz+slice_number*sz
px = cx+nx*2
py = cy+ny*2
pz = cz+nz*3
camera = ren.GetActiveCamera()
camera.SetViewUp(vx, vy, vz)
camera.SetFocalPoint(cx, cy, cz)
camera.SetPosition(px, py, pz)
camera.OrthogonalizeViewUp()
ren.ResetCameraClippingRange()
renWin.Render()
# Capture the display and place in a tiff
def CaptureImage():
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkTIFFWriter()
w2i.SetInput(renWin)
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
writer.SetFileName("image.tif")
renWin.Render()
writer.Write()
# Align the widget back into orthonormal position,
# set the slider to reflect the widget's position,
# call AlignCamera to set the camera facing the widget
def AlignXaxis():
global xMax, xMin, current_widget, slice_number
po = planeWidgetX.GetPlaneOrientation()
if po == 3:
planeWidgetX.SetPlaneOrientationToXAxes()
slice_number = (xMax-xMin)/2
planeWidgetX.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetX.GetSliceIndex()
current_widget = planeWidgetX
slice.config(from_=xMin, to=xMax)
slice.set(slice_number)
AlignCamera()
def AlignYaxis():
global yMin, yMax, current_widget, slice_number
po = planeWidgetY.GetPlaneOrientation()
if po == 3:
planeWidgetY.SetPlaneOrientationToYAxes()
slice_number = (yMax-yMin)/2
planeWidgetY.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetY.GetSliceIndex()
current_widget = planeWidgetY
slice.config(from_=yMin, to=yMax)
slice.set(slice_number)
AlignCamera()
def AlignZaxis():
    global zMin, zMax, current_widget, slice_number
po = planeWidgetZ.GetPlaneOrientation()
if po == 3:
planeWidgetZ.SetPlaneOrientationToZAxes()
slice_number = (zMax-zMin)/2
planeWidgetZ.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetZ.GetSliceIndex()
current_widget = planeWidgetZ
slice.config(from_=zMin, to=zMax)
slice.set(slice_number)
AlignCamera()
# Set the widget's reslice interpolation mode
# to the corresponding popup menu choice
def SetInterpolation():
global mode_widget, mode
if mode.get() == 0:
mode_widget.TextureInterpolateOff()
else:
mode_widget.TextureInterpolateOn()
mode_widget.SetResliceInterpolate(mode.get())
renWin.Render()
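# (Added note: the IntVar values map directly onto VTK's reslice modes,
# 0 = nearest neighbour, 1 = linear, 2 = cubic, which is why the popup
# radiobuttons below use value=0/1/2.)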
# Share the popup menu among buttons, keeping track of associated
# widget's interpolation mode
def buttonEvent(event, arg=None):
global mode, mode_widget, popm
if arg == 0:
mode_widget = planeWidgetX
elif arg == 1:
mode_widget = planeWidgetY
elif arg == 2:
mode_widget = planeWidgetZ
else:
return
mode.set(mode_widget.GetResliceInterpolate())
popm.entryconfigure(arg, variable=mode)
popm.post(event.x + event.x_root, event.y + event.y_root)
def SetSlice(sl):
global current_widget
current_widget.SetSliceIndex(int(sl))
ren.ResetCameraClippingRange()
renWin.Render()
###
# Now actually create the GUI
root = Tkinter.Tk()
root.withdraw()
top = Tkinter.Toplevel(root)
# Define a quit method that exits cleanly.
def quit(obj=root):
obj.quit()
# Popup menu
popm = Tkinter.Menu(top, tearoff=0)
mode = Tkinter.IntVar()
mode.set(1)
popm.add_radiobutton(label="nearest", variable=mode, value=0,
command=SetInterpolation)
popm.add_radiobutton(label="linear", variable=mode, value=1,
command=SetInterpolation)
popm.add_radiobutton(label="cubic", variable=mode, value=2,
command=SetInterpolation)
display_frame = Tkinter.Frame(top)
display_frame.pack(side="top", anchor="n", fill="both", expand="false")
# Buttons
ctrl_buttons = Tkinter.Frame(top)
ctrl_buttons.pack(side="top", anchor="n", fill="both", expand="false")
quit_button = Tkinter.Button(ctrl_buttons, text="Quit", command=quit)
capture_button = Tkinter.Button(ctrl_buttons, text="Tif",
command=CaptureImage)
x_button = Tkinter.Button(ctrl_buttons, text="x", command=AlignXaxis)
y_button = Tkinter.Button(ctrl_buttons, text="y", command=AlignYaxis)
z_button = Tkinter.Button(ctrl_buttons, text="z", command=AlignZaxis)
x_button.bind("<Button-3>", lambda e: buttonEvent(e, 0))
y_button.bind("<Button-3>", lambda e: buttonEvent(e, 1))
z_button.bind("<Button-3>", lambda e: buttonEvent(e, 2))
for i in (quit_button, capture_button, x_button, y_button, z_button):
i.pack(side="left", expand="true", fill="both")
# Create the render widget
renderer_frame = Tkinter.Frame(display_frame)
renderer_frame.pack(padx=3, pady=3,side="left", anchor="n",
fill="both", expand="false")
render_widget = vtkTkRenderWindowInteractor(renderer_frame,
rw=renWin, width=600,
height=600)
for i in (render_widget, display_frame):
i.pack(side="top", anchor="n",fill="both", expand="false")
# Add a slice scale to browse the current slice stack
slice_number = Tkinter.IntVar()
slice_number.set(current_widget.GetSliceIndex())
slice = Tkinter.Scale(top, from_=zMin, to=zMax, orient="horizontal",
command=SetSlice,variable=slice_number,
label="Slice")
slice.pack(fill="x", expand="false")
# Done with the GUI.
###
# Set the interactor for the widgets
iact = render_widget.GetRenderWindow().GetInteractor()
planeWidgetX.SetInteractor(iact)
planeWidgetX.On()
planeWidgetY.SetInteractor(iact)
planeWidgetY.On()
planeWidgetZ.SetInteractor(iact)
planeWidgetZ.On()
# Create an initial interesting view
cam1 = ren.GetActiveCamera()
cam1.Elevation(110)
cam1.SetViewUp(0, 0, -1)
cam1.Azimuth(45)
ren.ResetCameraClippingRange()
# Render it
render_widget.Render()
iact.Initialize()
renWin.Render()
iact.Start()
# Start Tkinter event loop
root.mainloop()
|
ashray/VTK-EVM
|
Examples/GUI/Python/ImagePlaneWidget.py
|
Python
|
bsd-3-clause
| 10,228
|
[
"VTK"
] |
e8b7771e2f83f98668493854261c008052350c53cdd29c9974d951ab9eeeb0dc
|
## Automatically adapted for numpy.oldnumeric Mar 26, 2007 by alter_code1.py
## class ChainSeperator:
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## $Revision$
## last $Author$
## last $Date$
"""
Separate PDB into continuous peptide chains for XPlor. Remove duplicate
peptide chains. Required by pdb2xplor.py
This is vintage code. See L{Biskit.PDBCleaner} for a more recent
version (though yet lacking some functions).
@todo: Create an override for the chain comparison if one wants
to keep identical chains (i.e. homodimers)
"""
## from Blast2Seq import * # compare 2 sequences
from molUtils import singleAA
import Biskit.tools as T
from LogFile import LogFile
from Scientific.IO.PDB import *
import numpy.oldnumeric as N
import string
from difflib import SequenceMatcher
import re
class ChainSeparator:
"""
Open PDB file; give back one chain whenever next() is
called. This class is used by the pdb2xplor script.
This class constitutes vintage code. See
L{Biskit.PDBCleaner} and L{Biskit.Mod.TemplateCleaner} for a more
recent implementation of PDB cleaning.
@todo: The removal of duplicate chains should be transferred to
the PDBCleaner so that this class can be retired
"""
def __init__(self, fname, outPath='', chainIdOffset=0,
capBreaks=0, chainMask=0, log=None ):
"""
@param fname: pdb filename
@type fname: str
@param outPath: path for log file
@type outPath: str
@param chainIdOffset: start chain numbering at this offset
@type chainIdOffset: int
@param capBreaks: add ACE and NME to N- and C-term. of chain breaks [0]
@type capBreaks: 0|1
@param chainMask: chain mask for overriding the default sequence identity [None]
@type chainMask: [1|0]
@param log: LogFile object
@type log: object
"""
self.pdb = Structure(fname);
self.fname = fname
self.outPath = T.absfile( outPath )
self.chainIdOffset = chainIdOffset
self.capBreaks = capBreaks
self.log = LogFile( T.absfile(outPath)+'/' + self.pdbname()+'.log')
if log:
self.log = log
self.chains = self.pdb.peptide_chains
self.counter = -1
self.threshold = 0.9 # sequence identity between multiple copies in PDB
self._expressionCheck(
"[^\n].*[Hh][Oo][Mm][Oo].?[Dd][Ii][Mm][eE][Rr].*\n", 'HOMODIMER')
self._expressionCheck("[^\n].*[Tt][Rr][Ii][Mm][Ee][Rr].*\n", 'TRIMER')
self._hetatomCheck()
self.log.add("Separate chains: \n------------------")
self._removeDuplicateChains(chainMask) # keep only one copy of molecule
self._separateChainBreaks()
self._assign_seg_ids() # new segment id for each chain
def pdbname(self):
"""
Extract pdb code from file name.
@return: (assumed) pdb code
@rtype: str
"""
return T.stripFilename(self.pdb.filename)
def _expressionCheck(self, findExpression, findClean):
"""
Check and report if the regular expression 'findExpression'
exists in the PDB-file. Use this to locate data in the REMARK
section of a pdb file. Prints a warning to stdOut if the
regular expression is found.
@param findExpression: regular expression
@type findExpression: str
@param findClean: clean name of regular expression
@type findClean: str
"""
pdb = open(self.fname,'r')
pdbFile = pdb.read()
searchResult = re.findall(findExpression,pdbFile)
warningMessage = """
WARNING! The text string '%s' was found in the PDB-file.
If this PDB-file contains a homodimer, one of the chains will be
deleted by this script. To avoid this, prepare the file for Xplor manually.\n""" %\
( findClean )
warningMessage2 = """--------------------------------------------\n"""
if len(searchResult) != 0:
self.log.add(warningMessage)
self.log.add("String found in line(s): \n")
for i in range(0,len(searchResult)):
self.log.add(searchResult[i])
self.log.add(warningMessage2)
pdb.close()
def _hetatomCheck(self):
"""
Check and report if there are any non-water HETATMs in the PDB-file
"""
pdb = open(self.fname,'r')
pdbFile = pdb.read()
findExpression = "HETATM.*\n"
searchResult = re.findall(findExpression,pdbFile)
## keep only non-water HETATM records
searchResult = [line for line in searchResult if line[17:20] != "HOH" and line[0:6] == "HETATM"]
warningMessage = """
WARNING! The PDB-file contains coordinates for non-water HETATMs.
If you want to keep the HETATMs, prepare the file for Xplor manually.\n"""
warningMessage2 = "\n"+ 80*"-" + "\n"
if len(searchResult) != 0:
self.log.add(warningMessage)
self.log.add("String found in line(s): \n")
for i in range(0,len(searchResult)):
self.log.add(searchResult[i][0:-1])
self.log.add(warningMessage2)
pdb.close()
def _compareSequences( self, seq1, seq2 ):
"""
@param seq1: sequence 1 to compare
@type seq1: str
@param seq2: sequence 2 to compare
@type seq2: str
@return: identity (0.0 - 1.0) between the two sequences
@rtype: float
"""
# compare the 2 sequences
## blast = Blast2Seq( seq1, seq2 )
## id = blast.run()
matcher = SequenceMatcher( None, ''.join(seq1) , ''.join(seq2) )
return matcher.ratio()
def _removeDuplicateChains(self, chainMask=None):
"""
Get rid of identical chains by comparing all chains with Blast2seq.
@param chainMask: chain mask for overriding the
chain identity checking (default: None)
@type chainMask: [int]
@return: number of chains removed
@rtype: int
"""
chainCount = len(self.chains)
matrix = 1.0 * N.zeros((chainCount,chainCount))
chain_ids = []
## create identity matrix for all chains against all chains
for i in range(0, chainCount):
chain_ids = chain_ids + [self.chains[i].chain_id] # collect for log file
for j in range(i, len(self.chains)):
# convert 3-letter-code res list into 1-letter-code String
seq1 = singleAA( self.chains[i].sequence() )
seq2 = singleAA( self.chains[j].sequence() )
## if len(seq1) > len(seq2): # take shorter sequence
## # aln len at least half the len of the shortest sequence
## alnCutoff = len(seq2) * 0.5
## else:
## alnCutoff = len(seq1) * 0.5
## if id['aln_len'] > alnCutoff:
## matrix[i,j] = id['aln_id']
## else: # aln length too short, ignore
## matrix[i,j] = 0
matrix[i,j] = self._compareSequences( seq1, seq2 )
## report activity
self.log.add("\n Chain ID's of compared chains: "+str(chain_ids))
self.log.add(" Cross-Identity between chains:\n"+str(matrix))
self.log.add(" Identity threshold used: "+str(self.threshold))
## override the automatic chain deletion by supplying a
## chain mask to this function
if chainMask:
if len(chainMask) == chainCount:
self.chains = N.compress(chainMask, self.chains)
self.log.add("NOTE: chain mask %s used for removing chains.\n"%chainMask)
else:
self.log.add("########## ERROR ###############")
self.log.add("# Chain mask is only %i chains long"%len(chainMask))
self.log.add("# when a mask of length %i is needed"%chainCount)
self.log.add("# No cleaning will be performed.\n")
if not chainMask:
## look at diagonals in "identity matrix"
## (each chain against each)
duplicate = len(self.chains)
for offset in range(1,chainCount):
diag = N.diagonal(matrix, offset ,0,1)
# diagonal of 1's mark begin of duplicate
avg = 1.0 * N.sum(diag)/len(diag)
if (avg >= self.threshold):
duplicate = offset
break
self.chains = self.chains[:duplicate]
self.log.add("NOTE: Identity matrix will be used for removing identical chains.")
## report activity
self.log.add(str(chainCount - len(self.chains))+\
" chains have been removed.\n")
# how many chains have been removed?
return (chainCount - len(self.chains))
def _assign_seg_ids(self):
"""
Assign new segment id to each chain.
"""
counter = self.chainIdOffset
for chain in self.chains:
## Assemble segid from pdb code + one letter out of A to Z
chain.segment_id = self.pdbname()[:3] + string.uppercase[counter]
counter = counter + 1
try: # report changed segment ids
chain_id = chain.chain_id
self.log.add("changed segment ID of chain "+chain_id+\
" to "+chain.segment_id)
except:
T.errWriteln("_assign_seg_ids(): logerror")
def _sequentialDist(self, chain, cutoff, atom):
"""
Calculate sequential atom-atom distance, report residues with
longer distance than cutoff (chain break positions).
@param chain: Scientific.IO.PDB.PeptideChain object
@type chain: object
@param cutoff: threshold for reporting gap (chain break)
@type cutoff: float
@param atom: type of atoms to check (i.e. 'CA')
@type atom: str
@return: list of chain break positions (residue index for each
first residue of two that are too distant)
@rtype: list of int
"""
distanceList = []
v0 = Vector( 0,0,0 )
jump = 1
for res in range(0,len(chain)-2):
try:
v1 = Vector(chain[res][atom].position.array)
## ignore CA with 0,0,0 coordinate
if v1 != v0:
jump = 1
v2 = Vector(chain[ res+jump ][atom].position.array)
## look for next CA with non-zero coordinate
while v2 == v0 and jump + res < len( chain ):
jump += 1
v2 = Vector(chain[ res+jump ][atom].position.array)
if (v1 - v2).length() > cutoff * jump:
distanceList = distanceList + [res + jump - 1]
except:
self.log.add(
"_sequentialDist():\nError while checking CA-CA distance"+\
" between residues "+str(chain[res].name)+\
str(chain[res].number)+" and "+\
str(chain[res+jump].name)+\
str(chain[res+jump].number)+ " in chain "+chain.chain_id)
self.log.add("Error: " + T.lastError() )
return distanceList
## def _sequentialDist(self, chain, cutoff, atom):
## """
## Calculate sequential atom-atom distance, report residues with
## longer distance than cutoff (chain break positions).
## chain - PDB.PeptideChain
## cutoff - float, threshold for reporting gap (chain break)
## atom - str, type of atoms to check (i.e. 'CA')
## -> [int, int, ...], list of chain break positions (residue index
## for each first residue of two that are too distant)
## """
## distanceList = []
## for residue in range(0,len(chain)-1):
## # iterate through residue 1 to ter-1
## try:
## vectorAtom1 = Vector(chain[residue][atom].position.array)
## vectorAtom2 = Vector(chain[residue+1][atom].position.array)
## if (vectorAtom1 - vectorAtom2).length() > cutoff:
## distanceList = distanceList + [residue]
## except:
## self.log.add(
## "_sequentialDist():\nError while checking CA-CA distance"+ \
## " between residues "+str(chain[residue].name)+\
## str(chain[residue].number)+" and "+str(chain[residue+1].name)+\
## str(chain[residue+1].number)+ " in chain "+chain.chain_id)
## self.log.add("Error: " + T.lastError() )
## return distanceList
def _separateChainBreaks(self):
"""
Separate chains with breaks into 2 chains.
The new chain(s) is/are added to the internal PDB instance
(self.chains).
"""
fragments = []
for chain in self.chains:
# res number of residues before a break
breaks = self._sequentialDist(chain, 4.5, 'CA')
self.log.add(str(len(breaks)) + " breaks found in chain " +\
"(" + str(len(chain)) \
+ " residues) " + chain.chain_id + ": "+str(breaks))
previous = 0
ncap_next = 0
for breakRes in breaks:
residues = chain.residues[previous:breakRes+1]
previous = breakRes + 1
chainNew = PeptideChain(residues, chain.chain_id,
chain.segment_id)
if ncap_next:
self.__nCap( chainNew )
ncap_next = 0
if self.capBreaks:
## add N-Methyl to c terminal
self.__cCap( chainNew )
ncap_next = 1
fragments = fragments + [chainNew]
chainNew = PeptideChain(chain.residues[previous:], chain.chain_id,
chain.segment_id)
if ncap_next:
self.__nCap( chainNew )
fragments = fragments + [chainNew]
self.chains = fragments
def __nCap( self, pep_chain ):
"""
Add acetyl capping to N-terminal of peptide chain
"""
n = (pep_chain[0].number or 1) - 1
r = AminoAcidResidue('ACE', number=n, atoms=[Atom('CA', Vector(0,0,0),
element='C')])
pep_chain.residues = [r] + pep_chain.residues
self.log.add('Capping chain break with ACE %i' % n)
def __cCap( self, pep_chain ):
"""
Add methylamine capping to C-terminal of peptide chain
"""
n = (pep_chain[-1].number or len(pep_chain)) + 1
r = AminoAcidResidue('NME', number=n, atoms=[Atom('CA', Vector(0,0,0),
element='C')])
pep_chain.residues = pep_chain.residues + [r]
self.log.add('Capping chain break with NME %i' % n)
def extractWaters(self):
"""
Write waters into separate pdb file, called |pdbCode|_waters.pdb.
"""
try:
fTarget = self.outPath + '/' +\
self.pdbname()[:4] + '_waters.pdb'
pdb = PDBFile( fTarget, mode='w' )
waters = []
for key in ['HOH', 'DOD']:
if self.pdb.molecules.has_key( key ):
waters += self.pdb.molecules[ key ]
pdb.nextChain(chain_id='', segment_id='1XWW')
for w in waters:
pdb.nextResidue('TIP3')
## XPLOR wants "ATOM" not "HETATM":
pdb.het_flag = 0
pdb.writeAtom('OH2', w.atoms['O'].position)
## keep TIP3 waters as well
if len(waters) == 0:
try:
TIP3_waters = self.pdb.molecules[ 'TIP3' ]
except:
TIP3_waters = []
for w in TIP3_waters:
pdb.nextResidue('TIP3')
## XPLOR wants "ATOM" not "HETATM":
pdb.het_flag = 0
pdb.writeAtom('OH2', w.atoms['OH2'].position)
pdb.writeAtom('H1', w.atoms['H1'].position)
pdb.writeAtom('H2', w.atoms['H2'].position)
pdb.close()
except:
T.errWriteln("Error writing waters to %s: " % fTarget )
T.errWriteln( T.lastError() )
def next(self):
"""
Return next 'clean', non-redundant, non-broken chain from PDB
@return: Scientific.IO.PDB.PeptideChain, completed chain OR
None if no chain is left
@rtype: chain object OR None
"""
self.counter = self.counter + 1
if (len(self.chains) > self.counter):
return self.chains[self.counter]
else:
return None
#############
## TESTING
#############
import Biskit.test as BT
class Test(BT.BiskitTest):
"""Test ChainSeparator """
def prepare(self):
self.fname = T.testRoot() + '/com/1BGS_original.pdb'
self.outPath = T.tempDir()
def cleanUp(self):
T.tryRemove( self.sep.log.fname )
def test_ChainSeparator( self ):
"""ChainSeparator test"""
self.sep = ChainSeparator( self.fname, self.outPath, 1)
self.chain = self.sep.next()
i=1
all_chains = []
while self.chain is not None:
if self.local:
print 'Chain %i:'%i, ''.join(singleAA(self.chain.sequence() ) )
all_chains += self.chain.sequence()
self.chain = self.sep.next()
i += 1
if self.local:
print 'ChainSeparator log file written to: %s'%self.sep.log.fname
r = ''.join( singleAA( all_chains ) )
self.assertEqual(r, self.EXPECTED)
EXPECTED='AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRAQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIRKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILSKKAVINGEQIRSISDLHQTLKKELALPEYYGENLDALWDALTGWVEYPLVLEWRQFEQSKQLTENGAESVLQVFREAKAEGADITIILS'
if __name__ == '__main__':
BT.localTest()
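## ------------------------------------------------------------------
## Hedged usage sketch (added; not part of the original module): this is
## how a caller such as pdb2xplor typically consumes ChainSeparator:
## pull chains one by one until next() returns None. The PDB file name
## below is a hypothetical placeholder.
def _example_usage(pdb_file='1ABC.pdb', out_path='.'):
    """Illustrative only; never called by the module itself."""
    sep = ChainSeparator(pdb_file, out_path)
    chain = sep.next()
    while chain is not None:
        print ''.join(singleAA(chain.sequence()))
        chain = sep.next()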
|
ostrokach/biskit
|
Biskit/ChainSeparator.py
|
Python
|
gpl-3.0
| 19,938
|
[
"BLAST"
] |
ada8b4708462069ea433cf94840512c48d2e7ae65430b8304f2d50789cd384e9
|
### import the simple module from the paraview package
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
total_proc=2
#Render={}
#Disp={}
#get active source.
#ActiveReader= GetActiveSource()
# get active view
#renderView1 = GetActiveViewOrCreate('RenderView')
for i in range(total_proc):
name='H5PartReader'+str(i+1)
# find source
Render= FindSource(name)
# set active source
SetActiveSource(Render)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# get display properties
Disp = GetDisplayProperties(Render, view=renderView1)
# set scalar coloring
ColorBy(Disp, ('POINTS', 'Vz'))
# rescale color and/or opacity maps used to include current data range
Disp.RescaleTransferFunctionToDataRange(True)
# show color bar/color legend
Disp.SetScalarBarVisibility(renderView1, True)
# get color transfer function/color map for 'Vz'
vzLUT = GetColorTransferFunction('Vz')
# get opacity transfer function/opacity map for 'Vz'
vzPWF = GetOpacityTransferFunction('Vz')
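# Hedged follow-up sketch (added; not in the original script): once each
# reader is colored by Vz, a render (and optionally a screenshot) would
# display the result. Render() and SaveScreenshot() are standard
# paraview.simple calls; the file name is a hypothetical placeholder.
# Render()
# SaveScreenshot('post_show_Vz.png', renderView1)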
|
zhixuanc/Volcano_Plume_SPH
|
bin/post_show_Vz.py
|
Python
|
gpl-3.0
| 1,106
|
[
"ParaView"
] |
6d44b2f13988e4b70ed14c550787bc4d5535f4ff83b4d7db5ed62eb67df8979b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package provides the packages and modules to perform IO from various
input file formats and pymatgen objects.
"""
|
sonium0/pymatgen
|
pymatgen/io/__init__.py
|
Python
|
mit
| 233
|
[
"pymatgen"
] |
b4e9057404e4852f60b9f9e3e6de19e084226959db1612eba5313f8803b970ba
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Save Tower CSV data as NetCDF
# <markdowncell>
# ### Set local variables
# <codecell>
url='http://geoport.whoi.edu/thredds/fileServer/usgs/data2/notebook/data/CR3000_SN3557_Table1_MainTowerCR3000_ground_V6.CR3.txt'
input_data="data.txt"
output_dir="/data"
output_file="julia.nc"
fillvalue=-9999.9
# <markdowncell>
# ### Download the data
# <codecell>
import urllib
urllib.urlretrieve(url, input_data)
# <codecell>
import pandas as pd
df = pd.read_csv(input_data,skiprows=[0,2,3],
parse_dates=True,
index_col='TIMESTAMP',
low_memory=False,
na_values=['NAN',''],
tupleize_cols=True)
df = df.fillna(fillvalue)
df.head()
# <markdowncell>
# ### Simple plot
# <codecell>
import matplotlib.pyplot as plt
%matplotlib inline
df[['Tsoil10cmTree_Avg','Tsoil20cmTree_Avg']].plot(figsize=(12,4));
# <markdowncell>
# ### Create netCDF file
# <codecell>
import numpy as np
def pd_to_secs(df):
# convert a pandas datetime index to seconds since 1970
import calendar
return np.asarray([ calendar.timegm(x.timetuple()) for x in df.index ], dtype=np.int64)
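# Sanity check (added; not in the original notebook): the Unix epoch maps
# to 0 seconds, so e.g. an index holding 1970-01-01 00:00:01 yields
# array([1]).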
def cf_safe_name(name):
# Create a CF safe name for a group/dimension/variable
import re
if isinstance(name, basestring):
if re.match('^[0-9_]', name):
# Add a letter to the front
name = "v_{}".format(name)
return re.sub(r'[^_a-zA-Z0-9]', "_", name)
return name
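# Examples (added for illustration): cf_safe_name('10cm Temp') returns
# 'v_10cm_Temp' and cf_safe_name('Tsoil(avg)') returns 'Tsoil_avg_'.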
# <codecell>
import os
out_file = os.path.join(output_dir, output_file)
if os.path.isfile(out_file):
os.remove(out_file)
from pyaxiom.netcdf.sensors import TimeSeries
ts = TimeSeries(output_dir,
latitude=0.39,
longitude=36.7,
station_name='urn:ioos:station:edu.princeton.ecohydrolab:MainTower',
global_attributes={},
times=pd_to_secs(df),
verticals=[10],
output_filename=output_file)
# <codecell>
for c in df.columns[::-1]:
# Add units based on column name?
var_attributes = dict()
ts.add_variable(cf_safe_name(c), df[c].values, attributes=var_attributes, fillvalue=fillvalue)
# <codecell>
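# Hedged verification sketch (added; not part of the original notebook):
# re-open the file just written with the netCDF4 library to confirm the
# variables were created.
import netCDF4
nc = netCDF4.Dataset(out_file)
print(sorted(nc.variables.keys()))
nc.close()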
|
rsignell-usgs/notebook
|
People/Julia/tower_to_netcdf.py
|
Python
|
mit
| 2,275
|
[
"NetCDF"
] |
523491774c3e0e673df9c190ef1870233dd0986c8d25932740c9833c5bfaf2cc
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_health_check_services import (
RegionHealthCheckServicesClient,
)
from google.cloud.compute_v1.services.region_health_check_services import pagers
from google.cloud.compute_v1.services.region_health_check_services import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
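# For example (illustrative): a client whose DEFAULT_ENDPOINT contains
# "localhost" is rewritten to "foo.googleapis.com", while the production
# default "compute.googleapis.com" is returned unchanged.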
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(None) is None
assert (
RegionHealthCheckServicesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
RegionHealthCheckServicesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionHealthCheckServicesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionHealthCheckServicesClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
RegionHealthCheckServicesClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionHealthCheckServicesClient, "rest"),]
)
def test_region_health_check_services_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.RegionHealthCheckServicesRestTransport, "rest"),],
)
def test_region_health_check_services_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionHealthCheckServicesClient, "rest"),]
)
def test_region_health_check_services_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_region_health_check_services_client_get_transport_class():
transport = RegionHealthCheckServicesClient.get_transport_class()
available_transports = [
transports.RegionHealthCheckServicesRestTransport,
]
assert transport in available_transports
transport = RegionHealthCheckServicesClient.get_transport_class("rest")
assert transport == transports.RegionHealthCheckServicesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
"rest",
),
],
)
@mock.patch.object(
RegionHealthCheckServicesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionHealthCheckServicesClient),
)
def test_region_health_check_services_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
RegionHealthCheckServicesClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
RegionHealthCheckServicesClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
"rest",
"true",
),
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
RegionHealthCheckServicesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionHealthCheckServicesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_health_check_services_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [RegionHealthCheckServicesClient])
@mock.patch.object(
RegionHealthCheckServicesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionHealthCheckServicesClient),
)
def test_region_health_check_services_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
"rest",
),
],
)
def test_region_health_check_services_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
"rest",
None,
),
],
)
def test_region_health_check_services_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.DeleteRegionHealthCheckServiceRequest, dict,]
)
def test_delete_unary_rest(request_type):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteRegionHealthCheckServiceRequest,
):
transport_class = transports.RegionHealthCheckServicesRestTransport
request_init = {}
request_init["health_check_service"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["healthCheckService"] = "health_check_service_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "healthCheckService" in jsonified_request
assert jsonified_request["healthCheckService"] == "health_check_service_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("healthCheckService", "project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionHealthCheckServicesRestInterceptor(),
)
client = RegionHealthCheckServicesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteRegionHealthCheckServiceRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteRegionHealthCheckServiceRequest
):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteRegionHealthCheckServiceRequest(),
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
)
def test_delete_unary_rest_error():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.GetRegionHealthCheckServiceRequest, dict,]
)
def test_get_rest(request_type):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckService(
creation_timestamp="creation_timestamp_value",
description="description_value",
fingerprint="fingerprint_value",
health_checks=["health_checks_value"],
health_status_aggregation_policy="health_status_aggregation_policy_value",
id=205,
kind="kind_value",
name="name_value",
network_endpoint_groups=["network_endpoint_groups_value"],
notification_endpoints=["notification_endpoints_value"],
region="region_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckService.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.HealthCheckService)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.fingerprint == "fingerprint_value"
assert response.health_checks == ["health_checks_value"]
assert (
response.health_status_aggregation_policy
== "health_status_aggregation_policy_value"
)
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.network_endpoint_groups == ["network_endpoint_groups_value"]
assert response.notification_endpoints == ["notification_endpoints_value"]
assert response.region == "region_value"
assert response.self_link == "self_link_value"
def test_get_rest_required_fields(
request_type=compute.GetRegionHealthCheckServiceRequest,
):
transport_class = transports.RegionHealthCheckServicesRestTransport
request_init = {}
request_init["health_check_service"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["healthCheckService"] = "health_check_service_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "healthCheckService" in jsonified_request
assert jsonified_request["healthCheckService"] == "health_check_service_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckService()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckService.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (
set(()) & set(("healthCheckService", "project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionHealthCheckServicesRestInterceptor(),
)
client = RegionHealthCheckServicesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.HealthCheckService.to_json(
compute.HealthCheckService()
)
request = compute.GetRegionHealthCheckServiceRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.HealthCheckService()
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
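# Editorial note: the interceptor test above wires mock `pre_get`/`post_get`
# hooks into the REST transport. `pre_get` receives and may rewrite the
# (request, metadata) pair before transcoding; `post_get` receives the decoded
# response before it is handed back to the caller. The test only asserts that
# each hook fires exactly once around the round trip.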
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetRegionHealthCheckServiceRequest
):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckService()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckService.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetRegionHealthCheckServiceRequest(),
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
)
def test_get_rest_error():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.InsertRegionHealthCheckServiceRequest, dict,]
)
def test_insert_unary_rest(request_type):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["health_check_service_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"fingerprint": "fingerprint_value",
"health_checks": ["health_checks_value_1", "health_checks_value_2"],
"health_status_aggregation_policy": "health_status_aggregation_policy_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"network_endpoint_groups": [
"network_endpoint_groups_value_1",
"network_endpoint_groups_value_2",
],
"notification_endpoints": [
"notification_endpoints_value_1",
"notification_endpoints_value_2",
],
"region": "region_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertRegionHealthCheckServiceRequest,
):
transport_class = transports.RegionHealthCheckServicesRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("healthCheckServiceResource", "project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionHealthCheckServicesRestInterceptor(),
)
client = RegionHealthCheckServicesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertRegionHealthCheckServiceRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation()
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertRegionHealthCheckServiceRequest
):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["health_check_service_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"fingerprint": "fingerprint_value",
"health_checks": ["health_checks_value_1", "health_checks_value_2"],
"health_status_aggregation_policy": "health_status_aggregation_policy_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"network_endpoint_groups": [
"network_endpoint_groups_value_1",
"network_endpoint_groups_value_2",
],
"notification_endpoints": [
"notification_endpoints_value_1",
"notification_endpoints_value_2",
],
"region": "region_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
health_check_service_resource=compute.HealthCheckService(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertRegionHealthCheckServiceRequest(),
project="project_value",
region="region_value",
health_check_service_resource=compute.HealthCheckService(
creation_timestamp="creation_timestamp_value"
),
)
def test_insert_unary_rest_error():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.ListRegionHealthCheckServicesRequest, dict,]
)
def test_list_rest(request_type):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckServicesList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckServicesList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(
request_type=compute.ListRegionHealthCheckServicesRequest,
):
transport_class = transports.RegionHealthCheckServicesRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckServicesList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckServicesList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionHealthCheckServicesRestInterceptor(),
)
client = RegionHealthCheckServicesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.HealthCheckServicesList.to_json(
compute.HealthCheckServicesList()
)
request = compute.ListRegionHealthCheckServicesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.HealthCheckServicesList()
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListRegionHealthCheckServicesRequest
):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.HealthCheckServicesList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", region="region_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.HealthCheckServicesList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListRegionHealthCheckServicesRequest(),
project="project_value",
region="region_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.HealthCheckServicesList(
items=[
compute.HealthCheckService(),
compute.HealthCheckService(),
compute.HealthCheckService(),
],
next_page_token="abc",
),
compute.HealthCheckServicesList(items=[], next_page_token="def",),
compute.HealthCheckServicesList(
items=[compute.HealthCheckService(),], next_page_token="ghi",
),
compute.HealthCheckServicesList(
items=[compute.HealthCheckService(), compute.HealthCheckService(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.HealthCheckServicesList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1", "region": "sample2"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.HealthCheckService) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
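# Editorial note: the pager returned by `client.list` supports two traversal
# modes, both exercised above -- iterating the pager yields individual
# `HealthCheckService` items across page boundaries, while `.pages` yields one
# `HealthCheckServicesList` per underlying HTTP response. A hedged sketch:
#
#   pager = client.list(request={"project": "p", "region": "r"})
#   items = list(pager)           # flattened items; pages fetched lazily
#   tokens = [page.raw_page.next_page_token
#             for page in client.list(request={"project": "p",
#                                              "region": "r"}).pages]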
@pytest.mark.parametrize(
"request_type", [compute.PatchRegionHealthCheckServiceRequest, dict,]
)
def test_patch_unary_rest(request_type):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request_init["health_check_service_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"fingerprint": "fingerprint_value",
"health_checks": ["health_checks_value_1", "health_checks_value_2"],
"health_status_aggregation_policy": "health_status_aggregation_policy_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"network_endpoint_groups": [
"network_endpoint_groups_value_1",
"network_endpoint_groups_value_2",
],
"notification_endpoints": [
"notification_endpoints_value_1",
"notification_endpoints_value_2",
],
"region": "region_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_patch_unary_rest_required_fields(
request_type=compute.PatchRegionHealthCheckServiceRequest,
):
transport_class = transports.RegionHealthCheckServicesRestTransport
request_init = {}
request_init["health_check_service"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).patch._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["healthCheckService"] = "health_check_service_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).patch._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "healthCheckService" in jsonified_request
assert jsonified_request["healthCheckService"] == "health_check_service_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "patch",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.patch_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_patch_unary_rest_unset_required_fields():
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.patch._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(
("healthCheckService", "healthCheckServiceResource", "project", "region",)
)
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_patch_unary_rest_interceptors(null_interceptor):
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionHealthCheckServicesRestInterceptor(),
)
client = RegionHealthCheckServicesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "post_patch"
) as post, mock.patch.object(
transports.RegionHealthCheckServicesRestInterceptor, "pre_patch"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.PatchRegionHealthCheckServiceRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation()
client.patch_unary(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_patch_unary_rest_bad_request(
transport: str = "rest", request_type=compute.PatchRegionHealthCheckServiceRequest
):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
request_init["health_check_service_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"fingerprint": "fingerprint_value",
"health_checks": ["health_checks_value_1", "health_checks_value_2"],
"health_status_aggregation_policy": "health_status_aggregation_policy_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"network_endpoint_groups": [
"network_endpoint_groups_value_1",
"network_endpoint_groups_value_2",
],
"notification_endpoints": [
"notification_endpoints_value_1",
"notification_endpoints_value_2",
],
"region": "region_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.patch_unary(request)
def test_patch_unary_rest_flattened():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"health_check_service": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
health_check_service_resource=compute.HealthCheckService(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.patch_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}"
% client.transport._host,
args[1],
)
def test_patch_unary_rest_flattened_error(transport: str = "rest"):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.patch_unary(
compute.PatchRegionHealthCheckServiceRequest(),
project="project_value",
region="region_value",
health_check_service="health_check_service_value",
health_check_service_resource=compute.HealthCheckService(
creation_timestamp="creation_timestamp_value"
),
)
def test_patch_unary_rest_error():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionHealthCheckServicesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionHealthCheckServicesClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionHealthCheckServicesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionHealthCheckServicesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionHealthCheckServicesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionHealthCheckServicesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.RegionHealthCheckServicesRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_health_check_services_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionHealthCheckServicesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_health_check_services_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionHealthCheckServicesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_region_health_check_services_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionHealthCheckServicesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_health_check_services_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionHealthCheckServicesTransport()
adc.assert_called_once()
def test_region_health_check_services_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionHealthCheckServicesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_health_check_services_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionHealthCheckServicesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_health_check_services_host_no_port(transport_name):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_health_check_services_host_with_port(transport_name):
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionHealthCheckServicesClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionHealthCheckServicesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionHealthCheckServicesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionHealthCheckServicesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionHealthCheckServicesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionHealthCheckServicesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionHealthCheckServicesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionHealthCheckServicesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionHealthCheckServicesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionHealthCheckServicesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionHealthCheckServicesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionHealthCheckServicesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionHealthCheckServicesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionHealthCheckServicesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionHealthCheckServicesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionHealthCheckServicesTransport, "_prep_wrapped_messages"
) as prep:
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionHealthCheckServicesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionHealthCheckServicesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = RegionHealthCheckServicesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
RegionHealthCheckServicesClient,
transports.RegionHealthCheckServicesRestTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-compute
|
tests/unit/gapic/compute_v1/test_region_health_check_services.py
|
Python
|
apache-2.0
| 97,275
|
[
"Octopus"
] |
bd0f998c65e84d1b9a52d1b05f4b2e7e5d8c4535ce2b13fd98605f7566080157
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables as variables_lib
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
# Acceptable channels last formats (robust to H, W, D order).
_CHANNELS_LAST_FORMATS = frozenset({
"NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC",
"NWHDC"
})
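# Illustration (editorial): the frozenset makes channels-last detection a
# single membership test that tolerates any ordering of the spatial axes,
# e.g. "NHWC" in _CHANNELS_LAST_FORMATS and "NDWHC" in _CHANNELS_LAST_FORMATS
# are both True, while "NCHW" is not.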
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections_abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
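# Worked examples (editorial sketch) of `_get_sequence` for n=2 spatial dims:
#   _get_sequence(None, 2, 3, "strides")         -> [1, 1, 1, 1]  # identity
#   _get_sequence(2, 2, 3, "strides")            -> [1, 2, 2, 1]  # broadcast, NHWC
#   _get_sequence([2, 3], 2, 1, "strides")       -> [1, 1, 2, 3]  # NCHW layout
#   _get_sequence([1, 2, 2, 1], 2, 3, "strides") -> [1, 2, 2, 1]  # passthrough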
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.shape
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape` and filter_shape passed to the
constructor.
Arguments:
input_shape: static input shape, i.e. input.shape.
filter_shape: static filter shape, i.e. filter.shape.
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
num_batch_dims: (Optional.) The number of batch dimensions in the input;
if not provided, the default of `1` is used.
"""
def __init__(
self,
input_shape,
filter_shape,
padding,
data_format=None,
strides=None,
name=None,
num_batch_dims=1):
# filter shape is always rank num_spatial_dims + 2
# and num_spatial_dims == input_shape.ndims - num_batch_dims - 1
if input_shape.ndims is not None:
filter_shape = filter_shape.with_rank(
input_shape.ndims - num_batch_dims + 1)
self.padding = padding
self.name = name
# input shape is == num_spatial_dims + num_batch_dims + 1
# and filter_shape is always rank num_spatial_dims + 2
if filter_shape.ndims is not None:
input_shape = input_shape.with_rank(
filter_shape.ndims + num_batch_dims - 1)
if input_shape.ndims is None:
raise ValueError(
"Rank of convolution must be known, but saw input_shape.ndims == {}"
.format(input_shape.ndims))
if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5:
raise ValueError(
"`input_shape.ndims - num_batch_dims + 1` must be at least 3 and at "
"most 5 but saw `input_shape.ndims == {}` and `num_batch_dims == {}`"
.format(input_shape.ndims, num_batch_dims))
conv_dims = input_shape.ndims - num_batch_dims - 1
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = _conv3d_expanded_batch
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
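# Editorial note on the design above: shape and data_format validation happens
# once in `_NonAtrousConvolution.__init__`, which resolves `conv_op` to the
# rank-specific primitive (conv1d / conv2d / _conv3d_expanded_batch).
# `__call__` is then a thin dispatch, so callers that reuse the object (e.g.
# inside `convolution`) pay the validation cost once per layer rather than
# once per step.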
def squeeze_batch_dims(inp, op, inner_rank, name=None):
"""Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
op: A callable that takes a single input tensor and returns a single.
output tensor.
inner_rank: A python integer.
name: A string.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
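  For example, a minimal sketch (shapes are illustrative, and `tf.matmul`
  stands in for an arbitrary single-input/single-output `op`):
  ```python
  x = tf.ones([2, 3, 4, 5])  # batch_shape = [2, 3], inner shape = [4, 5]
  w = tf.ones([5, 7])
  # Internally reshapes to [6, 4, 5], applies op, then restores [2, 3, ...].
  y = squeeze_batch_dims(x, lambda t: tf.matmul(t, w), inner_rank=2)
  # y.shape == (2, 3, 4, 7)
  ```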
"""
with ops.name_scope(name, "squeeze_batch_dims", [inp]):
inp = ops.convert_to_tensor(inp, name="input")
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(
inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(
out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
@tf_export("nn.dilation2d", v1=[])
@dispatch.add_dispatch_support
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
                      strides[1] * y + dilations[1] * dy,
                      strides[2] * x + dilations[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
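  For example, a minimal sketch (an all-zero filter of the window size makes
  this equivalent to max-pooling):
  ```python
  image = tf.random.normal([1, 5, 5, 1])
  kernel = tf.zeros([3, 3, 1])
  out = tf.nn.dilation2d(image, kernel, strides=[1, 1, 1, 1],
                         padding="SAME", data_format="NHWC",
                         dilations=[1, 1, 1, 1])
  # out.shape == (1, 5, 5, 1)
  ```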
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export(v1=["nn.dilation2d"])
@dispatch.add_dispatch_support
def dilation2d_v1( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
rates=None,
padding=None,
name=None,
filters=None,
dilations=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
@dispatch.add_dispatch_support
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID"),
      adjusted_dilation_rate,
      adjusted_crops)
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
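  For example, a minimal sketch of atrous average pooling (window size and
  shapes are illustrative):
  ```python
  x = tf.random.normal([1, 8, 8, 3])
  def op(converted_input, num_spatial_dims, padding):
    return tf.nn.pool(converted_input, window_shape=[3, 3],
                      pooling_type="AVG", padding=padding)
  # Samples the 3x3 window at every other pixel in each spatial dimension.
  y = tf.nn.with_space_to_batch(x, dilation_rate=[2, 2], padding="VALID",
                                op=op)
  # y.shape == (1, 4, 4, 3)
  ```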
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`spatial_dims` passed to the constructor.
Arguments
input_shape: static shape of input. i.e. input.shape.
dilation_rate: see `with_space_to_batch`.
padding: see `with_space_to_batch`.
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see `with_space_to_batch`.
    spatial_dims: see `with_space_to_batch`.
    data_format: see `with_space_to_batch`.
    num_batch_dims: (Optional.) Number of batch dims in `input_shape`.
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None,
num_batch_dims=1):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
if dilation_rate.shape.ndims not in (None, 1):
raise ValueError(
"rate must be rank 1 but saw {}".format(dilation_rate.shape.ndims))
if not dilation_rate.shape.is_fully_defined():
raise ValueError("rate must have known shape, but saw {}"
.format(dilation_rate.shape))
num_spatial_dims = dilation_rate.shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = num_batch_dims + 1
else:
starting_spatial_dim = num_batch_dims
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a monotonically increasing sequence of "
"positive integers, but saw: {}".format(orig_spatial_dims))
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
"input tensor must have rank at least {}, but saw rank {}"
.format(expected_input_rank, input_shape.ndims))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive, but saw: {}"
.format(const_rate))
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
padding, explicit_paddings = convert_padding(padding)
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
elif padding == "EXPLICIT":
base_paddings = (np.array(explicit_paddings)
.reshape([num_spatial_dims + 2, 2]))
# Remove batch and channel dimensions
if data_format is not None and data_format.startswith("NC"):
self.base_paddings = base_paddings[2:]
else:
self.base_paddings = base_paddings[1:-1]
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
    orig: Tensor of shape `[len(spatial_dims), ...]`.
    fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
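  For example, a hand-worked sketch: with `orig == [2, 3]`, `fill_value == 1`,
  and `spatial_dims == [1, 3]`, the result is `[2, 1, 3]` (dimension 2 is
  filled with `fill_value`).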
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
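  For example, a minimal sketch:
  ```python
  strides, dilation_rate = _get_strides_and_dilation_rate(2, None, [2, 2])
  # strides == np.array([1, 1]), dilation_rate == np.array([2, 2])
  ```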
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
filters=None,
dilations=None): # pylint: disable=g-doc-args
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
[comment here](https://tensorflow.org/api_guides/python/nn#Convolution).
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
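  For example, a minimal sketch of a 2-D atrous convolution (shapes are
  illustrative, using this endpoint's argument names):
  ```python
  x = tf.ones([1, 10, 10, 3])  # NHWC
  w = tf.ones([3, 3, 3, 8])    # spatial_filter_shape + [in, out] channels
  y = tf.nn.convolution(x, w, padding="SAME", dilation_rate=[2, 2])
  # y.shape == (1, 10, 10, 8)
  ```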
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
return convolution_internal(
input,
filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation_rate,
name=name)
@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
def convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
call_from_convolution=True,
num_spatial_dims=None):
"""Internal function which performs rank agnostic convolution.
Args:
input: See `convolution`.
filters: See `convolution`.
strides: See `convolution`.
padding: See `convolution`.
data_format: See `convolution`.
dilations: See `convolution`.
name: See `convolution`.
call_from_convolution: See `convolution`.
    num_spatial_dims: (Optional.) An integer, the number of spatial
      dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value of
      `num_spatial_dims` is `1`, `2`, and `3`, respectively.
      This argument is only required to disambiguate the rank of `batch_shape`
      when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
      backwards compatibility, if `num_spatial_dims is None` and
      `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
      `1` (i.e., the input is expected to be
      `[batch_size, num_channels] + input_spatial_shape`
      or `[batch_size] + input_spatial_shape + [num_channels]`).
Returns:
    A tensor with the same dtype as `input`, with the convolution output
    shape described in `convolution`.
Raises:
ValueError: If input and filter both have unknown shapes, or if
`num_spatial_dims` is provided and incompatible with the value
estimated from `filters.shape`.
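  For example, a minimal sketch with two batch dimensions (shapes are
  illustrative):
  ```python
  x = tf.ones([2, 4, 10, 10, 3])  # batch_shape = [2, 4], channels-last
  w = tf.ones([3, 3, 3, 8])
  y = convolution_internal(x, w, padding="SAME")
  # num_spatial_dims is inferred as 2 and num_batch_dims as 2, so
  # y.shape == (2, 4, 10, 10, 8)
  ```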
"""
if (not isinstance(filters, variables_lib.Variable) and
not tensor_util.is_tensor(filters)):
with ops.name_scope("convolution_internal", None, [filters, input]):
      filters = ops.convert_to_tensor(filters, name="filters")
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tensor(input)):
with ops.name_scope("convolution_internal", None, [filters, input]):
input = ops.convert_to_tensor(input, name="input")
filters_rank = filters.shape.rank
inputs_rank = input.shape.rank
if num_spatial_dims is None:
if filters_rank:
num_spatial_dims = filters_rank - 2
elif inputs_rank:
num_spatial_dims = inputs_rank - 2
else:
raise ValueError("rank of input or filter must be known")
elif filters_rank and filters_rank - 2 != num_spatial_dims:
raise ValueError(
"inconsistent estimate of spatial dims ({}) vs. actual passed "
"num_spatial_dims ({}). n was estimated as len(filters.shape) - 2, "
"but filters shape is: {}".format(filters_rank, num_spatial_dims,
filters.shape))
if inputs_rank:
num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.
else:
num_batch_dims = 1 # By default, assume single batch dimension.
if num_spatial_dims not in {1, 2, 3}:
raise ValueError(
"num_spatial_dims (input.shape.ndims - num_batch_dims - 1) must be one "
"of 1, 2 or 3 but saw {}. num_batch_dims: {}.".format(
num_spatial_dims, num_batch_dims))
if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
channel_index = num_batch_dims + num_spatial_dims
else:
channel_index = num_batch_dims
if dilations is None:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = False
else:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = any(i != 1 for i in dilations)
strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
has_tpu_context = device_context.enclosing_tpu_context() is not None
if name:
default_name = None
elif not has_tpu_context or call_from_convolution:
default_name = "convolution"
elif num_spatial_dims == 2: # Most common case.
default_name = "Conv2D"
elif num_spatial_dims == 3:
default_name = "Conv3D"
else:
default_name = "conv1d"
with ops.name_scope(name, default_name, [input, filters]) as name:
# Fast path for TPU or if no dilation, as gradient only supported on TPU
# for dilations.
if not is_dilated_conv or has_tpu_context:
if num_spatial_dims == 2: # Most common case.
op = _conv2d_expanded_batch
elif num_spatial_dims == 3:
op = _conv3d_expanded_batch
else:
op = conv1d
return op(
input,
filters,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
if channel_index == 1:
strides = strides[2:]
dilations = dilations[2:]
else:
strides = strides[1:-1]
dilations = dilations[1:-1]
op = Convolution(
tensor_shape.as_shape(input.shape),
tensor_shape.as_shape(filters.shape),
padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format,
num_spatial_dims=num_spatial_dims)
return op(input, filters)
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`num_spatial_dims` passed to the constructor.
Arguments
    input_shape: static shape of input, i.e. input.shape. It is
      `batch_shape + input_spatial_shape + [num_channels]` if `data_format`
      does not start with `NC`, or
      `batch_shape + [num_channels] + input_spatial_shape` if `data_format`
      starts with `NC`.
    filter_shape: static shape of the filter, i.e. filter.shape.
padding: The padding algorithm, must be "SAME" or "VALID".
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: A string or `None`. Specifies whether the channel dimension of
the `input` and output is the last dimension (if `data_format` is `None`
or does not start with `NC`), or the first post-batch dimension (i.e. if
`data_format` starts with `NC`).
    num_spatial_dims: (Usually optional.) Python integer, the number of
      spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value
      of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
      This argument is only required to disambiguate the rank of `batch_shape`
      when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
      backwards compatibility, if `num_spatial_dims is None` and
      `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
      `1` (i.e., the input is expected to be
      `[batch_size, num_channels] + input_spatial_shape`
      or `[batch_size] + input_spatial_shape + [num_channels]`).
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
num_spatial_dims=None):
"""Helper function for convolution."""
num_batch_dims = None
filter_shape = tensor_shape.as_shape(filter_shape)
input_shape = tensor_shape.as_shape(input_shape)
if filter_shape.ndims is not None:
if (num_spatial_dims is not None and
filter_shape.ndims != num_spatial_dims + 2):
raise ValueError(
"Expected filter_shape.ndims == num_spatial_dims + 2, "
"but saw filter_shape.ndims == {} and num_spatial_dims == {}"
.format(filter_shape.ndims, num_spatial_dims))
else:
num_spatial_dims = filter_shape.ndims - 2
if input_shape.ndims is not None and num_spatial_dims is not None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
num_spatial_dims = input_shape.ndims - 2
else:
if input_shape.ndims is not None:
if input_shape.ndims < num_spatial_dims + 2:
raise ValueError(
"Expected input_shape.ndims >= num_spatial_dims + 2, but saw "
"input_shape.ndims == {} and num_spatial_dims == {}"
.format(input_shape.ndims, num_spatial_dims))
else:
if num_batch_dims is None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
raise ValueError(
"Cannot estimate num_spatial_dims since input_shape.ndims is None, "
"filter_shape.ndims is None, and argument num_spatial_dims is also "
"None.")
if num_batch_dims is None:
num_batch_dims = 1
if num_batch_dims < 1:
raise ValueError(
"num_batch_dims should be >= 1, but saw {}. num_batch_dims was "
"estimated as `input_shape.ndims - num_spatial_dims - 1` and "
"num_spatial_dims was either provided or estimated as "
"`filter_shape.ndims - 2`. input_shape.ndims: {}, "
"num_spatial_dims: {}, filter_shape.ndims: {}"
.format(num_batch_dims, input_shape.ndims, num_spatial_dims,
filter_shape.ndims))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_spatial_dims + num_batch_dims)
spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)
else:
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_batch_dims)
spatial_dims = range(
num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)
filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
if not (input_channels_dim % filter_dim).is_compatible_with(0):
raise ValueError("The number of input channels is not divisible by the "
"corresponding number of output filters. Received: "
"input channels={}, output filters={}".format(
input_channels_dim, filter_dim))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.padding = padding
self.name = name
self.dilation_rate = dilation_rate
self.num_batch_dims = num_batch_dims
self.num_spatial_dims = num_spatial_dims
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format,
num_batch_dims=num_batch_dims)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name,
num_batch_dims=self.num_batch_dims)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
# TPU convolution supports dilations greater than 1.
if device_context.enclosing_tpu_context() is not None:
return convolution_internal(
inp,
filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilations=self.dilation_rate,
name=self.name,
call_from_convolution=False,
num_spatial_dims=self.num_spatial_dims)
else:
return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
@dispatch.add_dispatch_support
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None,
dilations=None):
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution`.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Alias for dilation_rate.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
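  For example, a minimal sketch of 2-D max pooling (shapes are illustrative):
  ```python
  x = tf.ones([1, 6, 6, 1])
  y = tf.nn.pool(x, window_shape=[2, 2], pooling_type="MAX",
                 padding="VALID", strides=[2, 2])
  # y.shape == (1, 3, 3, 1)
  ```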
"""
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
@dispatch.add_dispatch_support
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution`.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value
      of strides is > 1, then all values of dilations must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to
      "VALID". See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
      [1]*N. If any value of dilations is > 1, then all values of strides
      must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
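  For example, a minimal sketch of 2-D average pooling (shapes are
  illustrative):
  ```python
  x = tf.ones([1, 7, 7, 1])
  y = tf.nn.pool(x, window_shape=[3, 3], pooling_type="AVG",
                 strides=[1, 1], padding="SAME")
  # y.shape == (1, 7, 7, 1)
  ```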
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
@dispatch.add_dispatch_support
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: (Chen et al., 2015). The same operation is
investigated further in (Yu et al., 2016). Previous works that effectively
use atrous convolution in different ways are, among others,
(Sermanet et al., 2014) and (Giusti et al., 2013).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:
        [batch, height - rate * (filter_height - 1),
         width - rate * (filter_width - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
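  For example, a minimal sketch (shapes are illustrative):
  ```python
  value = tf.ones([1, 10, 10, 3])
  filters = tf.ones([3, 3, 3, 8])
  out = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")
  # out.shape == (1, 10, 10, 8)
  ```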
References:
Multi-Scale Context Aggregation by Dilated Convolutions:
[Yu et al., 2016](https://arxiv.org/abs/1511.07122)
([pdf](https://arxiv.org/pdf/1511.07122.pdf))
Semantic Image Segmentation with Deep Convolutional Nets and Fully
Connected CRFs:
[Chen et al., 2015](http://arxiv.org/abs/1412.7062)
([pdf](https://arxiv.org/pdf/1412.7062))
OverFeat - Integrated Recognition, Localization and Detection using
Convolutional Networks:
[Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
([pdf](https://arxiv.org/pdf/1312.6229.pdf))
Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
[Giusti et al., 2013]
(https://ieeexplore.ieee.org/abstract/document/6738831)
([pdf](https://arxiv.org/pdf/1302.1700.pdf))
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def convert_padding(padding, expected_length=4):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
expected_length: Expected number of entries in the padding list when
explicit padding is used.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
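  For example, a minimal sketch:
  ```python
  convert_padding("SAME")
  # ("SAME", [])
  convert_padding([[0, 0], [1, 2], [3, 4], [0, 0]])
  # ("EXPLICIT", [0, 0, 1, 2, 3, 4, 0, 0])
  ```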
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != expected_length:
raise ValueError("When padding is a list, it must be of size %d. Got "
"padding of size: %d" % (expected_length, len(padding)))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export(v1=["nn.conv1d"])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(
value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `value`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of `batch_shape + [in_width,
in_channels]`. The `"NCW"` format stores data as `batch_shape +
[in_channels, in_width]`.
name: A name for the operation (optional).
input: Alias for value.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
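  For example, a minimal sketch (shapes are illustrative):
  ```python
  x = tf.ones([4, 100, 8])  # batch_shape + [in_width, in_channels]
  w = tf.ones([5, 8, 16])   # [filter_width, in_channels, out_channels]
  y = tf.nn.conv1d(x, w, stride=2, padding="SAME")
  # y.shape == (4, 50, 16)
  ```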
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to batch_shape + [1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = -3
channel_index = 2
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = -2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
if value.shape.ndims in (4, 3, 2, 1, 0, None):
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
else:
result = squeeze_batch_dims(
value,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
),
inner_rank=3,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
@dispatch.add_dispatch_support
def conv1d_v2(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format="NWC",
dilations=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with `"NC"`, a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `input`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
      in which case the data is stored in the order of
      `batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data
      as `batch_shape + [in_channels, in_width]`.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
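  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 10, 3])
  >>> kernel = tf.ones([5, 3, 2])  # [filter_width, in_channels, out_channels]
  >>> tf.nn.conv1d(x, kernel, stride=2, padding='SAME').shape
  TensorShape([1, 5, 2])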
"""
return conv1d(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name,
dilations=dilations)
@tf_export("nn.conv1d_transpose")
@dispatch.add_dispatch_support
def conv1d_transpose(
input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
rather than an actual deconvolution.
Args:
input: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
    filters: A 3-D `Tensor` with the same type as `input` and shape
      `[filter_width, output_channels, in_channels]`. `filters`'
      `in_channels` dimension must match that of `input`.
output_shape: A 1-D `Tensor`, containing three elements, representing the
output shape of the deconvolution op.
strides: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. `'NWC'` and `'NCW'` are supported.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
    ValueError: If input/output depth does not match `filter`'s shape, if
      `output_shape` is not a 3-element vector, if `padding` is other than
      `'VALID'` or `'SAME'`, or if `data_format` is invalid.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
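  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 8, 4])
  >>> kernel = tf.ones([3, 2, 4])  # [filter_width, output_channels, in_channels]
  >>> tf.nn.conv1d_transpose(x, kernel, output_shape=[1, 16, 2],
  ...                        strides=2).shape
  TensorShape([1, 16, 2])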
"""
with ops.name_scope(name, "conv1d_transpose",
[input, filters, output_shape]) as name:
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
channel_index = 2
elif data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
# Reshape the input tensor to [batch, 1, in_width, in_channels]
strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
input = array_ops.expand_dims(input, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
output_shape = list(output_shape) if not isinstance(
output_shape, ops.Tensor) else output_shape
output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
output_shape[spatial_start_dim:]], 0)
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
@dispatch.add_dispatch_support
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors.
The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]`
are considered batch dimensions (`batch_shape`).
Given an input tensor of shape
`batch_shape + [in_height, in_width, in_channels]` and a filter / kernel
tensor of shape `[filter_height, filter_width, in_channels, out_channels]`,
this op performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Usage Example:
>>> x_in = np.array([[
... [[2], [1], [2], [0], [1]],
... [[1], [3], [2], [2], [3]],
... [[1], [1], [3], [3], [0]],
... [[2], [2], [0], [1], [1]],
... [[0], [0], [3], [1], [2]], ]])
>>> kernel_in = np.array([
... [ [[2, 0.1]], [[3, 0.2]] ],
... [ [[0, 0.3]],[[1, 0.4]] ], ])
>>> x = tf.constant(x_in, dtype=tf.float32)
>>> kernel = tf.constant(kernel_in, dtype=tf.float32)
>>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
<tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A Tensor of rank at least 4. The dimension order is interpreted according
to the value of `data_format`; with the all-but-inner-3 dimensions acting
as batch dimensions. See below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
`batch_shape + [height, width, channels]`.
Alternatively, the format could be "NCHW", the data storage order of:
`batch_shape + [channels, height, width]`.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, dilations in the batch and
      depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input` and the same outer batch shape.
"""
# pylint: enable=line-too-long
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
@dispatch.add_dispatch_support
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, dilations in the batch and
      depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `input`.
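  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 5, 5, 1])
  >>> kernel = tf.ones([2, 2, 1, 1])
  >>> tf.compat.v1.nn.conv2d(x, kernel, strides=[1, 1, 1, 1],
  ...                        padding='VALID').shape
  TensorShape([1, 4, 4, 1])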
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
@dispatch.add_dispatch_support
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
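  Example (illustrative only; `dy` stands in for an upstream gradient of the
  matching output shape):
  >>> x = tf.ones([1, 4, 4, 1])
  >>> dy = tf.ones([1, 3, 3, 1])
  >>> tf.compat.v1.nn.conv2d_backprop_filter(
  ...     x, filter_sizes=[2, 2, 1, 1], out_backprop=dy,
  ...     strides=[1, 1, 1, 1], padding='VALID').shape
  TensorShape([2, 2, 1, 1])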
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_backprop_input"])
@dispatch.add_dispatch_support
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter=None,
out_backprop=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `filter`.
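  Example (illustrative only; `dy` stands in for an upstream gradient of the
  matching output shape):
  >>> kernel = tf.ones([2, 2, 1, 1])
  >>> dy = tf.ones([1, 3, 3, 1])
  >>> tf.compat.v1.nn.conv2d_backprop_input(
  ...     input_sizes=[1, 4, 4, 1], filter=kernel, out_backprop=dy,
  ...     strides=[1, 1, 1, 1], padding='VALID').shape
  TensorShape([1, 4, 4, 1])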
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
@dispatch.add_dispatch_support
def conv2d_transpose(
value=None,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimensions. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
input: Alias for value.
filters: Alias for filter.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, dilations in the batch and
      depth dimensions must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
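  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 3, 3, 1])
  >>> kernel = tf.ones([2, 2, 2, 1])  # [height, width, output_channels, in_channels]
  >>> tf.compat.v1.nn.conv2d_transpose(
  ...     x, kernel, output_shape=[1, 6, 6, 2],
  ...     strides=[1, 2, 2, 1]).shape
  TensorShape([1, 6, 6, 2])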
"""
value = deprecated_argument_lookup("input", input, "value", value)
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
return conv2d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv2d_transpose_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
  `conv2d` rather than an actual deconvolution.
Args:
input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
    filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
      width, output_channels, in_channels]`. `filters`' `in_channels` dimension
      must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
    strides: An int or list of `ints` that has length `1`, `2` or `4`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `H` and `W` dimensions. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
    padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: A string. 'NHWC' and 'NCHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, dilations in the batch and
      depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
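  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 3, 3, 1])
  >>> kernel = tf.ones([2, 2, 2, 1])  # [height, width, output_channels, in_channels]
  >>> tf.nn.conv2d_transpose(x, kernel, output_shape=[1, 6, 6, 2],
  ...                        strides=2).shape
  TensorShape([1, 6, 6, 2])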
"""
with ops.name_scope(name, "conv2d_transpose",
                      [input, filters, output_shape]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv2d_expanded_batch(
input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format,
dilations,
name):
"""Helper function for `convolution_internal`; handles expanded batches."""
# Try really hard to avoid modifying the legacy name scopes - return early.
input_rank = input.shape.rank
if input_rank is None or input_rank < 5:
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export("nn.atrous_conv2d_transpose")
@dispatch.add_dispatch_support
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
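  Example (illustrative only; the shapes, `rate` and values are arbitrary):
  >>> x = tf.ones([1, 8, 8, 3])
  >>> kernel = tf.ones([3, 3, 2, 3])  # [height, width, out_channels, in_channels]
  >>> tf.nn.atrous_conv2d_transpose(
  ...     x, kernel, output_shape=[1, 8, 8, 2], rate=2,
  ...     padding='SAME').shape
  TensorShape([1, 8, 8, 2])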
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(
tensor_shape.TensorShape([4])):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, tuple):
output_shape = list(output_shape)
if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if we reach this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export(v1=["nn.depthwise_conv2d_native"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native")
def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D depthwise convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`. 1-D of length 4. The stride of the sliding
window for each dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
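  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 5, 5, 3])
  >>> kernel = tf.ones([2, 2, 3, 2])  # channel_multiplier = 2
  >>> tf.compat.v1.nn.depthwise_conv2d_native(
  ...     x, kernel, strides=[1, 1, 1, 1], padding='VALID').shape
  TensorShape([1, 4, 4, 6])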
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native(
input,
filter,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_input",
v1=[
"nn.depthwise_conv2d_native_backprop_input",
"nn.depthwise_conv2d_backprop_input"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input")
def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`. An integer vector representing the
shape of `input`, based on `data_format`. For example, if `data_format`
is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape `[filter_height, filter_width,
in_channels, depthwise_multiplier]`.
out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with
shape based on `data_format`. For example, if `data_format` is 'NHWC'
then out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
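  Example (illustrative only; `dy` stands in for an upstream gradient of the
  matching output shape):
  >>> kernel = tf.ones([2, 2, 3, 2])
  >>> dy = tf.ones([1, 4, 4, 6])
  >>> tf.nn.depthwise_conv2d_backprop_input(
  ...     input_sizes=[1, 5, 5, 3], filter=kernel, out_backprop=dy,
  ...     strides=[1, 1, 1, 1], padding='VALID').shape
  TensorShape([1, 5, 5, 3])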
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_input(
input_sizes,
filter,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_filter",
v1=[
"nn.depthwise_conv2d_native_backprop_filter",
"nn.depthwise_conv2d_backprop_filter"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter")
def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape based on `data_format`. For example,
if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.
filter_sizes: A `Tensor` of type `int32`. An integer vector representing the
tensor shape of `filter`, where `filter` is a 4-D `[filter_height,
filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape
based on `data_format`. For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
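  Example (illustrative only; `dy` stands in for an upstream gradient of the
  matching output shape):
  >>> x = tf.ones([1, 5, 5, 3])
  >>> dy = tf.ones([1, 4, 4, 6])
  >>> tf.nn.depthwise_conv2d_backprop_filter(
  ...     x, filter_sizes=[2, 2, 3, 2], out_backprop=dy,
  ...     strides=[1, 1, 1, 1], padding='VALID').shape
  TensorShape([2, 2, 3, 2])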
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_filter(
input,
filter_sizes,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv3d_expanded_batch(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations=None,
name=None):
"""Helper function for `conv3d`; handles expanded batches."""
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (5, 4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv3d(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv3d,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=4,
name=name)
@tf_export("nn.conv3d", v1=[])
@dispatch.add_dispatch_support
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return _conv3d_expanded_batch(input, filters, strides, padding, data_format,
dilations, name)
@tf_export(v1=["nn.conv3d"])
@dispatch.add_dispatch_support
def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
padding=None,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name=None,
filters=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
return gen_nn_ops.conv3d(
input, filter, strides, padding, data_format, dilations, name)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
@dispatch.add_dispatch_support
def conv3d_transpose(
value,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NDHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string, either `'NDHWC'` or `'NCDHW'`, specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
input: Alias of value.
filters: Alias of filter.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W`
      dimensions. By default the `N` and `C` dimensions are set to 1. If set to
      k > 1, there will be k-1 skipped cells between each filter element on
      that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. If a 5-d tensor is given, dilations
      in the batch and depth dimensions must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
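  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 4, 4, 4, 1])
  >>> kernel = tf.ones([2, 2, 2, 2, 1])  # [d, h, w, output_channels, in_channels]
  >>> tf.compat.v1.nn.conv3d_transpose(
  ...     x, kernel, output_shape=[1, 8, 8, 8, 2],
  ...     strides=[1, 2, 2, 2, 1]).shape
  TensorShape([1, 8, 8, 8, 2])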
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
value = deprecated_argument_lookup("input", input, "value", value)
return conv3d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv3d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
depth, height, width]` for `NCDHW` data format.
    filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
      height, width, output_channels, in_channels]`. `filters`' `in_channels`
      dimension must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
    strides: An int or list of `ints` that has length `1`, `3` or `5`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the `D`, `H` and `W` dimensions. By
      default the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W`
      dimensions. By default the `N` and `C` dimensions are set to 1. If set to
      k > 1, there will be k-1 skipped cells between each filter element on
      that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. If a 5-d tensor is given, dilations
      in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
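  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([1, 4, 4, 4, 1])
  >>> kernel = tf.ones([2, 2, 2, 2, 1])  # [d, h, w, output_channels, in_channels]
  >>> tf.nn.conv3d_transpose(x, kernel, output_shape=[1, 8, 8, 8, 2],
  ...                        strides=2).shape
  TensorShape([1, 8, 8, 8, 2])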
"""
with ops.name_scope(name, "conv3d_transpose",
                      [input, filters, output_shape]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
strides = _get_sequence(strides, 3, channel_index, "strides")
dilations = _get_sequence(dilations, 3, channel_index, "dilations")
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
CONV_TRANSPOSE_OPS = (
conv1d_transpose,
conv2d_transpose_v2,
conv3d_transpose_v2,
)
@tf_export("nn.conv_transpose")
@dispatch.add_dispatch_support
def conv_transpose(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format=None,
dilations=None,
name=None):
"""The transpose of `convolution`.
This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of `convolution`
rather than an actual deconvolution.
Args:
input: An N+2 dimensional `Tensor` of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC". It must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
    filters: An N+2 dimensional `Tensor` with the same type as `input` and
      shape `spatial_filter_shape + [out_channels, in_channels]`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
    strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
      stride of the sliding window for each dimension of `input`. If a single
      value is given it is replicated in the spatial dimensions. By default
      the `N` and `C` dimensions are set to 1. The dimension order is
      determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the spatial dimensions. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details.
name: A name for the operation (optional). If not specified "conv_transpose"
is used.
Returns:
    A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
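  Example (illustrative only; a 1-D case with arbitrary shapes, dispatched to
  `conv1d_transpose` based on the length of `output_shape`):
  >>> x = tf.ones([1, 8, 3])
  >>> kernel = tf.ones([3, 2, 3])  # [filter_width, output_channels, in_channels]
  >>> tf.nn.conv_transpose(x, kernel, output_shape=[1, 16, 2],
  ...                      strides=2).shape
  TensorShape([1, 16, 2])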
"""
with ops.name_scope(name, "conv_transpose",
                      [input, filters, output_shape]) as name:
if tensor_util.is_tensor(output_shape):
n = output_shape.shape[0] - 2
elif isinstance(output_shape, collections_abc.Sized):
n = len(output_shape) - 2
else:
raise ValueError("output_shape must be a tensor or sized collection.")
if not 1 <= n <= 3:
raise ValueError(
"output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))
op = CONV_TRANSPOSE_OPS[n-1]
return op(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def _tf_deterministic_ops():
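  """Returns True if deterministic op functionality has been requested.
  Determinism is requested by setting the environment variable
  TF_DETERMINISTIC_OPS to "1" or "true" (case-insensitive). The variable is
  read only once and the result is memoized on the function object itself.
  """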
if _tf_deterministic_ops.value is None:
tf_deterministic_ops = os.environ.get("TF_DETERMINISTIC_OPS")
if tf_deterministic_ops is not None:
tf_deterministic_ops = tf_deterministic_ops.lower()
_tf_deterministic_ops.value = (
tf_deterministic_ops == "true" or tf_deterministic_ops == "1")
return _tf_deterministic_ops.value
_tf_deterministic_ops.value = None
@tf_export("nn.bias_add")
@dispatch.add_dispatch_support
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
    data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the
      default) is specified then 'N...C' is assumed.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
Raises:
    ValueError: if data format is unrecognized, if `value` has fewer than two
      dimensions when `data_format` is 'N...C'/`None`, if `value` has fewer
      than three dimensions when `data_format` is 'NC...', if `bias` does not
      have exactly one dimension (i.e. is not a vector), or if the size of
      `bias` does not match the size of the channel dimension of `value`.
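  Example (illustrative only; the shapes and values are arbitrary):
  >>> x = tf.ones([2, 2, 3])
  >>> tf.nn.bias_add(x, tf.constant([1., 2., 3.]))[0, 0].numpy()
  array([2., 3., 4.], dtype=float32)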
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
# TODO(duncanriach): Implement deterministic functionality at CUDA kernel
# level.
if _tf_deterministic_ops():
# Note that this code does not implement the same error checks as the
# pre-existing C++ ops.
if data_format == "NCHW":
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(
array_ops.rank(value) - 2, dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
else:
return gen_nn_ops.bias_add(
value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
  This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
@dispatch.add_dispatch_support
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
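  Example (illustrative only; the input values are arbitrary):
  >>> x = tf.constant([-1., 2.])
  >>> tf.nn.crelu(x).numpy()
  array([0., 2., 1., 0.], dtype=float32)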
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
@dispatch.add_dispatch_support
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
@dispatch.add_dispatch_support
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
Convolutional Deep Belief Networks on CIFAR-10:
Krizhevsky et al., 2010
([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
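  Example (illustrative only; the input values are arbitrary):
  >>> tf.nn.relu6(tf.constant([-3., 1., 8.])).numpy()
  array([0., 1., 6.], dtype=float32)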
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
@dispatch.add_dispatch_support
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
(https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
References:
Rectifier Nonlinearities Improve Neural Network Acoustic Models:
[Maas et al., 2013]
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.cast(features, dtypes.float32)
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
@tf_export("nn.gelu", v1=[])
@dispatch.add_dispatch_support
def gelu(features, approximate=False, name=None):
"""Compute the Gaussian Error Linear Unit (GELU) activation function.
  Gaussian error linear unit (GELU) computes `x * P(X <= x)`, where
  `X ~ N(0, 1)`, i.e. `P` is the standard normal CDF. The GELU nonlinearity
  weights inputs by their value, rather than gating inputs by their sign as
  in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.nn.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.nn.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
features: A `Tensor` representing preactivation values.
approximate: An optional `bool`. Defaults to `False`. Whether to enable
approximation.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
[Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415).
"""
with ops.name_scope(name, "Gelu", [features]):
features = ops.convert_to_tensor(features, name="features")
if approximate:
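      # 0.7978845608... below is sqrt(2 / pi); together with 0.044715 these
      # are the constants of the tanh approximation to GELU.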
coeff = math_ops.cast(0.044715, features.dtype)
return 0.5 * features * (
1.0 + math_ops.tanh(0.7978845608028654 *
(features + coeff * math_ops.pow(features, 3))))
else:
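      # 1.4142135623... is sqrt(2); this branch is the exact erf-based
      # formulation.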
return 0.5 * features * (1.0 + math_ops.erf(
features / math_ops.cast(1.4142135623730951, features.dtype)))
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
def _wrap_2d_function(inputs, compute_op, dim=-1, name=None):
"""Helper function for ops that accept and return 2d inputs of same shape.
It reshapes and transposes the inputs into a 2-D Tensor and then invokes
the given function. The output would be transposed and reshaped back.
If the given function returns a tuple of tensors, each of them will be
transposed and reshaped.
Args:
inputs: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: The function to wrap. Must accept the input tensor as its first
      argument, and a second keyword argument `name`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same shape as inputs. If compute_op returns multiple
    tensors, each of them has the same shape as the input.
Raises:
InvalidArgumentError: if `inputs` is empty or `dim` is beyond the last
dimension of `inputs`.
"""
def _swap_axis(input_tensor, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
input_tensor,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
inputs = ops.convert_to_tensor(inputs)
# We need its original shape for shape inference.
shape = inputs.get_shape()
is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
if is_last_dim:
return compute_op(inputs, name=name)
dim_val = dim
if isinstance(dim, ops.Tensor):
dim_val = tensor_util.constant_value(dim)
if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
raise errors_impl.InvalidArgumentError(
None, None,
"Dimension (%d) must be in the range [%d, %d) where %d is the number of"
" dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
shape.ndims))
# If dim is not the last dimension, we have to do a transpose so that we can
# still perform the op on its last dimension.
# In case dim is negative (and is not last dimension -1), add shape.ndims
ndims = array_ops.rank(inputs)
if not isinstance(dim, ops.Tensor):
if dim < 0:
dim += ndims
else:
dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(inputs)
dim_axis = dim % shape.ndims
inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1))
# Do the actual call on its last dimension.
def fix_output(output):
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since transpose may erase its static shape.
output.set_shape(shape)
return output
outputs = compute_op(inputs)
if isinstance(outputs, tuple):
return tuple(fix_output(output) for output in outputs)
else:
return fix_output(outputs)
@tf_export(v1=["nn.softmax", "math.softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
See: https://en.wikipedia.org/wiki/Softmax_function
Example usage:
>>> tf.nn.softmax([-1, 0., 1.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
Args:
logits: A non-empty `Tensor`, or an object whose type has a registered
`Tensor` conversion function. Must be one of the following types:
      `half`, `float32`, `float64`. See also `convert_to_tensor`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
TypeError: If no conversion function is registered for `logits` to
Tensor.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
@dispatch.add_dispatch_support
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
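  Example usage (illustrative; mirrors the example for the v1 endpoint):
  >>> tf.nn.softmax([-1, 0., 1.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>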
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
@dispatch.add_dispatch_support
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
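  Example usage (an illustrative eager-mode sketch; for two equal logits the
  log-probabilities are both `log(1/2)`):
  >>> tf.math.log_softmax([0., 0.]).numpy()
  array([-0.6931472, -0.6931472], dtype=float32)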
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
Usage:
>>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
>>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
>>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([0.16984604, 0.82474494], dtype=float32)>
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None,
axis=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
`tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
axis: Alias for dim.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
dim = deprecated_argument_lookup("axis", axis, "dim", dim)
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
  case the last dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Per-label activations (typically a linear output) of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
`float64`. These activation energies are interpreted as unnormalized log
probabilities.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
  case the last dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
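  Usage (an illustrative eager-mode sketch; the per-row losses follow from
  applying softmax to each row and taking the negative log-probability of
  the labeled class):
  >>> logits = tf.constant([[2., -5., .5, -.1],
  ...                       [0., 0., 1.9, 1.4],
  ...                       [-100., 100., -100., -100.]])
  >>> labels = tf.constant([0, 3, 1])
  >>> tf.nn.sparse_softmax_cross_entropy_with_logits(
  ...     labels=labels, logits=logits).numpy()
  array([0.29750752, 1.1448325 , 0.        ], dtype=float32)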
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1},
num_classes]` and dtype `float16`, `float32`, or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
return sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
@dispatch.add_dispatch_support
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin
"""Performs the avg pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
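  For example (an illustrative eager-mode sketch with a rank-3 input, which
  dispatches to the 1-D pooling op):
  >>> x = tf.constant([[[1.], [2.], [3.], [4.]]])
  >>> tf.nn.avg_pool(x, ksize=2, strides=2, padding="VALID").numpy()
  array([[[1.5],
          [3.5]]], dtype=float32)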
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
avg_pooling_ops = {
1: avg_pool1d,
2: gen_nn_ops.avg_pool,
3: gen_nn_ops.avg_pool3d
}
op = avg_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
@dispatch.add_dispatch_support
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
name=None, input=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = deprecation.deprecated_argument_lookup(
"input", input, "value", value)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool2d", v1=[])
@dispatch.add_dispatch_support
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
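  For example (an illustrative eager-mode sketch; a single 2x2 window is
  averaged to one value):
  >>> x = tf.constant([[[[1.], [2.]], [[3.], [4.]]]])
  >>> tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID").numpy()
  array([[[[2.5]]]], dtype=float32)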
Args:
input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool2D", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool1d")
@dispatch.add_dispatch_support
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
Note internally this op reshapes and uses the underlying 2d operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
    The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool1D", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
@tf_export("nn.avg_pool3d")
@dispatch.add_dispatch_support
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
  window in `input`.
Args:
    input: A 5-D `Tensor` of shape
      `[batch, depth, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
name: Optional name for the operation.
Returns:
    A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.avg_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
@dispatch.add_dispatch_support
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
"""Performs the max pooling on the input.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
if (isinstance(padding, (list, tuple)) and n == 3):
raise ValueError("Explicit padding is not yet supported with an input "
"tensor of rank 5")
max_pooling_ops = {
1: max_pool1d,
2: max_pool2d,
3: gen_nn_ops.max_pool3d
}
op = max_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
@dispatch.add_dispatch_support
def max_pool(value,
ksize,
strides,
padding,
data_format="NHWC",
name=None,
input=None): # pylint: disable=redefined-builtin
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the input tensor.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "MaxPool", [value]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
if ((np.isscalar(ksize) and ksize == 0) or
(isinstance(ksize,
(list, tuple, np.ndarray)) and any(v == 0 for v in ksize))):
raise ValueError("ksize cannot be zero.")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
@dispatch.add_dispatch_support
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
"""Performs the max pooling on the input.
Note internally this op reshapes and uses the underlying 2d operation.
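  For example (an illustrative eager-mode sketch; the maximum is taken over
  each non-overlapping window of size 2):
  >>> x = tf.constant([[[1.], [2.], [3.], [4.]]])
  >>> tf.nn.max_pool1d(x, ksize=2, strides=2, padding="VALID").numpy()
  array([[[2.],
          [4.]]], dtype=float32)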
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NWC"`, this should be in the form `[[0, 0], [pad_left,
      pad_right], [0, 0]]`. When explicit padding is used and data_format is
`"NCW"`, this should be in the form `[[0, 0], [0, 0], [pad_left,
pad_right]]`. When using explicit padding, the size of the paddings cannot
be greater than the sliding window size.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool1d", [input]) as name:
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
padding, explicit_paddings = convert_padding(padding, 3)
if padding == "EXPLICIT":
explicit_paddings = [0, 0] + explicit_paddings
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
@dispatch.add_dispatch_support
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool2d", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
@dispatch.add_dispatch_support
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 5-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is: [batch, in_channels, in_depth, in_height,
in_width].
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.max_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v2(
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
include_batch_in_index=False,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
`include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c`
if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
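  For example (an illustrative eager-mode sketch; the maximum `4.` sits at
  position `[0, 1, 1, 0]`, so its flattened index is
  `(1 * 2 + 1) * 1 + 0 = 3`):
  >>> x = tf.constant([[[[1.], [2.]], [[3.], [4.]]]])
  >>> output, argmax = tf.nn.max_pool_with_argmax(
  ...     x, ksize=2, strides=2, padding="VALID")
  >>> output.numpy()
  array([[[[4.]]]], dtype=float32)
  >>> argmax.numpy()
  array([[[[3]]]])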
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
include_batch_in_index: An optional `boolean`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
ksize = _get_sequence(ksize, 2, 3, "ksize")
strides = _get_sequence(strides, 2, 3, "strides")
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
include_batch_in_index=include_batch_in_index,
name=name)
@tf_export(v1=["nn.max_pool_with_argmax"])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
Targmax=None,
name=None,
output_dtype=None,
include_batch_in_index=False):
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
Targmax = deprecated_argument_lookup(
"output_dtype", output_dtype, "Targmax", Targmax)
if Targmax is None:
Targmax = dtypes.int64
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
"""Calculates the compute resources needed for Conv3D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_time = int(filter_shape[0])
filter_height = int(filter_shape[1])
filter_width = int(filter_shape[2])
filter_in_depth = int(filter_shape[3])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_in_depth * filter_time *
filter_height * filter_width * 2))
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
@dispatch.add_dispatch_support
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
"""Computes matmul(x, weights) + biases.
  This is a deprecated version of `xw_plus_b` that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
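  # For example (illustrative): for x of shape [2, 3] and noise_shape
  # [None, 3], the unknown dimension is filled in from x, giving [2, 3].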
  # If noise_shape is None, return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
    # If not possible, let the op handle it.
    # In eager mode an exception will be raised.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None):
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
    keep_prob: (deprecated) Alias for `(1 - rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
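# Note (added for clarity): under the v1 API the deprecated keep_prob
# argument maps onto rate as rate = 1 - keep_prob, so for example
# dropout(x, keep_prob=0.8) and dropout(x, rate=0.2) are equivalent calls.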
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
When converting 1.x code, please use named arguments to ensure behavior stays
consistent.
See also: `tf.keras.layers.Dropout` for a dropout layer.
[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
models. Input elements are randomly set to zero (and the other elements are
rescaled). This encourages each node to be independently useful, as it cannot
rely on the output of other nodes.
More precisely: With probability `rate` elements of `x` are set to `0`.
The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
expected value is preserved.
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
array([[2., 0., 0., 2., 2.],
[2., 2., 2., 2., 2.],
[2., 0., 2., 0., 2.]], dtype=float32)
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
array([[0., 0., 0., 5., 5.],
[0., 5., 0., 5., 0.],
[5., 0., 5., 0., 5.]], dtype=float32)
>>> tf.nn.dropout(x, rate = 0.0) == x
<tf.Tensor: shape=(3, 5), dtype=bool, numpy=
array([[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True]])>
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. This is useful for dropping whole
channels from an image or sequence. For example:
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,10])
>>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability
that each element is dropped. For example, setting rate=0.1 would drop
10% of input elements.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with ops.name_scope(name, "dropout", [x]) as name:
is_rate_number = isinstance(rate, numbers.Real)
if is_rate_number and (rate < 0 or rate >= 1):
raise ValueError("rate must be a scalar tensor or a float in the "
"range [0, 1), got %g" % rate)
x = ops.convert_to_tensor(x, name="x")
x_dtype = x.dtype
if not x_dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going "
"to be scaled. Got a %s tensor instead." % x_dtype)
is_executing_eagerly = context.executing_eagerly()
if not tensor_util.is_tensor(rate):
if is_rate_number:
keep_prob = 1 - rate
scale = 1 / keep_prob
scale = ops.convert_to_tensor(scale, dtype=x_dtype)
ret = gen_math_ops.mul(x, scale)
else:
raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
else:
rate.get_shape().assert_has_rank(0)
rate_dtype = rate.dtype
if rate_dtype != x_dtype:
if not rate_dtype.is_compatible_with(x_dtype):
raise ValueError(
"Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
(x_dtype.name, rate_dtype.name, rate))
rate = gen_math_ops.cast(rate, x_dtype, name="rate")
one_tensor = constant_op.constant(1, dtype=x_dtype)
ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger
# than rate.
#
# NOTE: Random uniform can only generate 2^23 distinct floats on [1.0, 2.0);
# the values are shifted onto [0.0, 1.0) by subtracting 1.0.
random_tensor = random_ops.random_uniform(
noise_shape, seed=seed, dtype=x_dtype)
# NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
# hence a >= comparison is used.
keep_mask = random_tensor >= rate
ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
if not is_executing_eagerly:
ret.set_shape(x.get_shape())
return ret
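# Illustrative sketch (not part of the original module): the 1 / (1 - rate)
# rescaling keeps the expected sum of the output equal to that of the input.
# A quick empirical check, assuming a TensorFlow 2.x installation.
def _dropout_scaling_sketch():
  import tensorflow as tf  # hypothetical standalone usage
  x = tf.ones([100000])
  y = tf.nn.dropout(x, rate=0.3)
  # Kept elements are scaled by 1 / 0.7 ~= 1.43; the sample mean stays ~1.0.
  return float(tf.reduce_mean(y))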
@tf_export("math.top_k", "nn.top_k")
@dispatch.add_dispatch_support
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
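# Illustrative usage (a sketch, not part of the original module), assuming
# TensorFlow is installed: top_k on a vector returns values and indices.
def _top_k_sketch():
  import tensorflow as tf
  values, indices = tf.math.top_k([1., 5., 3., 4.], k=2)
  # values  -> [5., 4.]
  # indices -> [1, 3]
  return values, indices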
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th smallest value for the last dimension.
Note that n is zero-indexed.
If the input is a vector (rank-1), finds the entry which is the n-th smallest
value in the vector and outputs its value as a scalar tensor.
For matrices (resp. higher rank input), computes the entry which is the
n-th smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[-1])`
reverse: An optional `bool`. Defaults to `False`.
When set to True, find the nth-largest value in the vector and vice
versa.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
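# Illustrative usage (a sketch, not part of the original module): per-row
# order statistics via the nth_element wrapper above, assuming TensorFlow
# is installed. n is zero-indexed, so n=1 selects the second-smallest value.
def _nth_element_sketch():
  import tensorflow as tf
  x = tf.constant([[7., 2., 5.], [1., 9., 3.]])
  return nth_element(x, 1)  # -> [5., 3.]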
@tf_export(v1=["nn.fractional_max_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
This is a deprecated version of `fractional_max_pool`.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check (Graham, 2015) for the difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name)
@tf_export("nn.fractional_max_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_max_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`.
Pooling ratio for each dimension of `value`, currently only supports row
and col dimension and should be >= 1.0. For example, a valid pooling ratio
looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0
because we don't allow pooling on batch and channels dimensions. 1.44 and
1.73 are pooling ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check the paper (Graham, 2015) for the difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
twice. The result would be [20, 16] for fractional max pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")
if seed == 0:
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
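# Illustrative usage (a sketch, not part of the original module), assuming
# TensorFlow 2.x: a non-integer reduction ratio of 1.5 on height and width.
def _fractional_max_pool_sketch():
  import tensorflow as tf
  x = tf.reshape(tf.range(1., 37.), [1, 6, 6, 1])
  # Batch and channel ratios must be 1.0; only row/col dims are pooled.
  output, rows, cols = tf.nn.fractional_max_pool(
      x, pooling_ratio=[1.0, 1.5, 1.5, 1.0])
  return output.shape  # approximately [1, 4, 4, 1]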
@tf_export(v1=["nn.fractional_avg_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
This is a deprecated version of `fractional_avg_pool`.
Fractional average pooling is similar to fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check the paper (Graham, 2015) for the difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
twice. The result would be [20, 16] for fractional avg pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_avg_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
Fractional average pooling is similar to fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check the paper (Graham, 2015) for the difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
twice. The result would be [20, 16] for fractional avg pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if seed == 0:
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@tf_export(v1=["nn.erosion2d"])
@dispatch.add_dispatch_support
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified, "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
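# Illustrative sketch (not part of the original module): a pure-NumPy 1-D
# analog of the duality used above, erosion(x) == -dilation(-x) with a
# reflected kernel. The flat (all-zero) structuring element makes erosion a
# sliding-window minimum; values and sizes here are arbitrary.
def _erosion_duality_sketch():
  import numpy as np
  value = np.array([3., 2., 4., 6., 5.])
  kernel = np.zeros(3)  # flat structuring element; reflection is a no-op here
  def dilate(v, k):
    pad = len(k) // 2
    vp = np.pad(v, pad, mode='constant', constant_values=-np.inf)
    return np.array([max(vp[i + j] + k[j] for j in range(len(k)))
                     for i in range(len(v))])
  erosion = -dilate(-value, kernel[::-1])
  return erosion  # -> [2., 2., 2., 4., 5.]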
@tf_export("nn.erosion2d", v1=[])
@dispatch.add_dispatch_support
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified, "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
@dispatch.add_dispatch_support
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is finite (not inf, -inf, or nan) and among
the top `k` predictions among all predictions for example `i`. Note that the
behavior of `InTopK` differs from the `TopK` op in its handling of ties; if
multiple classes have the same prediction value and straddle the top-`k`
boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
@dispatch.add_dispatch_support
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
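# Illustrative sketch (not part of the original module): tie handling,
# assuming TensorFlow 2.x. Classes 1 and 2 tie at the top-1 boundary, so a
# target of either class counts as being in the top 1.
def _in_top_k_sketch():
  import tensorflow as tf
  predictions = tf.constant([[0.1, 0.4, 0.4, 0.1]])
  targets = tf.constant([2])
  return tf.math.in_top_k(targets, predictions, k=1)  # -> [True]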
tf_export(v1=["nn.quantized_avg_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool))
tf_export(v1=["nn.quantized_conv2d"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d))
tf_export(v1=["nn.quantized_relu_x"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x))
tf_export(v1=["nn.quantized_max_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool))
@tf_export("nn.isotonic_regression", v1=[])
@dispatch.add_dispatch_support
def isotonic_regression(inputs, decreasing=True, axis=-1):
r"""Solves isotonic regression problems along the given axis.
For each vector x, the problem solved is
$$\argmin_{y_1 >= y_2 >= ... >= y_n} \sum_i (x_i - y_i)^2.$$
As the solution is component-wise constant, a second tensor is returned that
encodes the segments. The problems are solved over the given axis.
Consider the following example, where we solve a batch of two problems. The
first input is [3, 1, 2], while the second is [1, 3, 4] (as the axis is 1).
>>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32)
>>> y, segments = tf.nn.isotonic_regression(x, axis=1)
>>> y # The solution.
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[3. , 1.5 , 1.5 ],
[2.6666667, 2.6666667, 2.6666667]], dtype=float32)>
Note that the first solution has two blocks [3] and [1.5, 1.5]. The second
solution is constant, and thus has a single segment. These segments are
exactly what the second returned tensor encodes:
>>> segments
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[0, 1, 1],
[0, 0, 0]], dtype=int32)>
Args:
inputs: A tensor holding the inputs.
decreasing: If set to False, the inequalities in the optimization
constraints are flipped.
axis: The axis along which the problems should be solved.
Returns:
output: The solutions, with the same shape as the input.
segments: An int32 tensor, same shape as the input indicating the segments
that have the same value. Specifically, those positions that have the same
value correspond to the same segment. These values start at zero, and are
monotonically increasing for each solution.
"""
type_promotions = {
    # Float types get mapped to themselves, int8/16 to float32, rest to double
    dtypes.float32: dtypes.float32,
    dtypes.half: dtypes.half,
    dtypes.bfloat16: dtypes.bfloat16,
    dtypes.int8: dtypes.float32,
    dtypes.int16: dtypes.float32,
}
inputs = ops.convert_to_tensor(inputs)
try:
output_dtype = type_promotions[inputs.dtype]
except KeyError:
output_dtype = dtypes.float64
def compute_on_matrix(matrix, name=None):
iso_fn = functools.partial(
gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name)
if decreasing:
return iso_fn(matrix)
else:
output, segments = iso_fn(-matrix)
return -output, segments
return _wrap_2d_function(inputs, compute_on_matrix, axis)
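# Illustrative sketch (not part of the original module): an increasing fit
# via decreasing=False, assuming TensorFlow 2.x. Pool-adjacent-violators
# merges the out-of-order pair (3., 2.) into a single block of 2.5.
def _isotonic_regression_sketch():
  import tensorflow as tf
  x = tf.constant([[1., 3., 2.]])
  y, segments = tf.nn.isotonic_regression(x, decreasing=False)
  # y        -> [[1. , 2.5, 2.5]]
  # segments -> [[0, 1, 1]]
  return y, segments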
|
davidzchen/tensorflow
|
tensorflow/python/ops/nn_ops.py
|
Python
|
apache-2.0
| 236,626
|
[
"Gaussian"
] |
68930cfc5c92b62bceab08631712d9fe425cb5681722e832e31090cf41b6263d
|
# -*- coding: utf-8 -*-
"""
equip.analysis.flow
~~~~~~~~~~~~~~~~~~~
Extract the control flow graphs from the bytecode.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import opcode
from operator import itemgetter, attrgetter
from itertools import tee, izip
from .graph import DiGraph, Edge, Node, Walker, EdgeVisitor
from .graph.dominators import DominatorTree
from .block import BasicBlock
from ..utils.log import logger
from ..bytecode.utils import show_bytecode
BREAK_LOOP = 80
RETURN_VALUE = 83
FOR_ITER = 93
JUMP_FORWARD = 110
JUMP_IF_FALSE_OR_POP = 111
JUMP_IF_TRUE_OR_POP = 112
JUMP_ABSOLUTE = 113
POP_JUMP_IF_FALSE = 114
POP_JUMP_IF_TRUE = 115
JUMP_OPCODES = opcode.hasjabs + opcode.hasjrel
SETUP_LOOP = 120
SETUP_EXCEPT = 121
SETUP_FINALLY = 122
RAISE_VARARGS = 130
SETUP_WITH = 143
NO_FALL_THROUGH = (JUMP_ABSOLUTE, JUMP_FORWARD)
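# Illustrative sketch (not part of the original module): how jump targets
# are resolved below. In CPython 2.x bytecode an instruction with an
# argument occupies 3 bytes (1 opcode byte + 2 argument bytes), so a
# relative jump at offset `index` with argument `arg` lands at
# arg + index + 3, while an absolute jump targets `arg` directly.
def _relative_jump_target_sketch(index, arg):
  return arg + index + 3

assert _relative_jump_target_sketch(10, 6) == 19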
class ControlFlow(object):
"""
Performs the control-flow analysis on a ``Declaration`` object. It iterates
over its bytecode and builds the basic blocks. The final representation
leverages the ``DiGraph`` structure, and contains an instance of the
``DominatorTree``.
"""
E_TRUE = 'TRUE'
E_FALSE = 'FALSE'
E_UNCOND = 'UNCOND'
E_COND = 'COND'
E_EXCEPT = 'EXCEPT'
E_FINALLY = 'FINALLY'
E_RETURN = 'RETURN'
E_RAISE = 'RAISE'
E_END_LOOP = 'END_LOOP'
N_ENTRY = 'ENTRY'
N_IMPLICIT_RETURN = 'IMPLICIT_RETURN'
N_UNKNOWN = 'UNKNOWN'
N_LOOP = 'LOOP'
N_IF = 'IF'
N_EXCEPT = 'EXCEPT'
N_CONDITION = 'CONDITION'
CFG_TMP_RETURN = -1
CFG_TMP_BREAK = -2
CFG_TMP_RAISE = -3
def __init__(self, decl):
self._decl = decl
self._blocks = None
self._block_idx_map = {}
self._block_nodes = {}
self._frames = None
self._graph = None
self._entry = None
self._exit = None
self._entry_node = None
self._exit_node = None
self._dom = None
self.analyze()
@property
def decl(self):
return self._decl
@decl.setter
def decl(self, value):
self._decl = value
@property
def entry(self):
return self._entry
@entry.setter
def entry(self, value):
self._entry = value
@property
def entry_node(self):
return self._entry_node
@entry_node.setter
def entry_node(self, value):
self._entry_node = value
@property
def exit(self):
return self._exit
@exit.setter
def exit(self, value):
self._exit = value
@property
def exit_node(self):
return self._exit_node
@exit_node.setter
def exit_node(self, value):
self._exit_node = value
@property
def blocks(self):
"""
Returns the basic blocks created during the control flow analysis.
"""
return self._blocks
@property
def block_indices_dict(self):
"""
Returns the mapping between bytecode indices and basic blocks.
"""
return self._block_idx_map
@property
def block_nodes_dict(self):
"""
Returns the mapping between basic blocks and CFG nodes.
"""
return self._block_nodes
@property
def frames(self):
return self._frames
@property
def graph(self):
"""
Returns the underlying graph that holds the CFG.
"""
return self._graph
@property
def dominators(self):
"""
Returns the ``DominatorTree`` that contains:
- Dominator tree (dict of IDom)
- Post dominator tree (dict of PIDom)
- Dominance frontier (dict of CFG node -> set CFG nodes)
"""
if self._dom is None:
self._dom = DominatorTree(self)
return self._dom
def analyze(self):
"""
Performs the CFA and stores the resulting CFG.
"""
bytecode = self.decl.bytecode
self.entry = BasicBlock(BasicBlock.ENTRY, self.decl, -1)
self.exit = BasicBlock(BasicBlock.IMPLICIT_RETURN, self.decl, -1)
self._blocks = ControlFlow.make_blocks(self.decl, bytecode)
self.__build_flowgraph(bytecode)
# logger.debug("CFG(%s) :=\n%s", self.decl, self.graph.to_dot())
def __build_flowgraph(self, bytecode):
g = DiGraph(multiple_edges=False)
self.entry_node = g.make_add_node(kind=ControlFlow.N_ENTRY, data=self._entry)
self.exit_node = g.make_add_node(kind=ControlFlow.N_IMPLICIT_RETURN, data=self._exit)
self._block_idx_map = {}
self._block_nodes = {}
# Connect entry/implicit return blocks
last_block_index, last_block = -1, None
for block in self.blocks:
self._block_idx_map[block.index] = block
node_kind = ControlFlow.get_kind_from_block(block)
block_node = g.make_add_node(kind=node_kind, data=block)
self._block_nodes[block] = block_node
if block.index == 0:
g.make_add_edge(self.entry_node,
self._block_nodes[block],
kind=ControlFlow.E_UNCOND)
if block.index >= last_block_index:
last_block = block
last_block_index = block.index
g.make_add_edge(self._block_nodes[last_block],
self.exit_node,
kind=ControlFlow.E_UNCOND)
sorted_blocks = sorted(self.blocks, key=attrgetter('_index'))
i, length = 0, len(sorted_blocks)
while i < length:
cur_block = sorted_blocks[i]
if cur_block.jumps:
# Connect the current block to its jump targets
for (jump_index, branch_kind) in cur_block.jumps:
if jump_index <= ControlFlow.CFG_TMP_RETURN:
continue
target_block = self._block_idx_map[jump_index]
g.make_add_edge(self._block_nodes[cur_block],
self._block_nodes[target_block],
kind=branch_kind)
i += 1
self._graph = g
self.__finalize()
def __finalize(self):
def has_true_false_branches(list_edges):
has_true, has_false = False, False
for edge in list_edges:
if edge.kind == ControlFlow.E_TRUE: has_true = True
elif edge.kind == ControlFlow.E_FALSE: has_false = True
return has_true and has_false
def get_cfg_tmp_values(node):
values = set()
for (jump_index, branch_kind) in node.data.jumps:
if jump_index <= ControlFlow.CFG_TMP_RETURN:
values.add(jump_index)
return values
def get_parent_loop(node):
class BwdEdges(EdgeVisitor):
def __init__(self):
EdgeVisitor.__init__(self)
self.edges = []
def visit(self, edge):
self.edges.append(edge)
visitor = BwdEdges()
walker = Walker(self.graph, visitor, backwards=True)
walker.traverse(node)
parents = visitor.edges
node_bc_index = node.data.index
for parent_edge in parents:
parent = parent_edge.source
if parent.kind != ControlFlow.N_LOOP:
continue
# Find the loop in which the break/current node is nested
if parent.data.index < node_bc_index and parent.data.end_target > node_bc_index:
return parent
return None
# Burn N_CONDITION nodes
for node in self.graph.nodes:
out_edges = self.graph.out_edges(node)
if len(out_edges) < 2 or not has_true_false_branches(out_edges):
continue
node.kind = ControlFlow.N_CONDITION
# Handle return/break statements:
# - blocks with returns are simply connected to the IMPLICIT_RETURN
# and previous out edges removed
# - blocks with breaks are connected to the end of the current loop
# and previous out edges removed
for node in self.graph.nodes:
cfg_tmp_values = get_cfg_tmp_values(node)
if not cfg_tmp_values:
continue
if ControlFlow.CFG_TMP_BREAK in cfg_tmp_values:
parent_loop = get_parent_loop(node)
if not parent_loop:
logger.error("Cannot find parent loop for %s", node)
continue
target_block = self._block_idx_map[parent_loop.data.end_target]
out_edges = self.graph.out_edges(node)
for edge in out_edges:
self.graph.remove_edge(edge)
self.graph.make_add_edge(node,
self.block_nodes_dict[target_block],
kind=ControlFlow.E_UNCOND)
if ControlFlow.CFG_TMP_RETURN in cfg_tmp_values:
# Remove existing out edges and add a RETURN edge to the IMPLICIT_RETURN
out_edges = self.graph.out_edges(node)
for edge in out_edges:
self.graph.remove_edge(edge)
self.graph.make_add_edge(node,
self._exit_node,
kind=ControlFlow.E_RETURN)
BLOCK_NODE_KIND = {
BasicBlock.UNKNOWN: N_UNKNOWN,
BasicBlock.ENTRY: N_ENTRY,
BasicBlock.IMPLICIT_RETURN: N_IMPLICIT_RETURN,
BasicBlock.LOOP: N_LOOP,
BasicBlock.IF: N_IF,
BasicBlock.EXCEPT: N_EXCEPT,
}
@staticmethod
def get_kind_from_block(block):
return ControlFlow.BLOCK_NODE_KIND[block.kind]
@staticmethod
def get_pairs(iterable):
a, b = tee(iterable)
next(b, None)
return izip(a, b)
@staticmethod
def make_blocks(decl, bytecode):
"""
Returns the set of ``BasicBlock`` that are encountered in the current bytecode.
Each block is annotated with its qualified jump targets (if any).
:param decl: The current declaration object.
:param bytecode: The bytecode associated with the declaration object.
"""
blocks = set()
block_map = {} # bytecode index -> block
i, length = 0, len(bytecode)
start_index = [j for j in range(length) if bytecode[j][0] == 0][0]
prev_co = bytecode[start_index][5]
slice_bytecode = [tpl for tpl in bytecode[start_index:] if tpl[5] == prev_co]
# logger.debug("Current bytecode:\n%s", show_bytecode(slice_bytecode))
slice_length = len(slice_bytecode)
known_targets = ControlFlow.find_targets(slice_bytecode)
known_targets.add(0)
known_targets.add(1 + max([tpl[0] for tpl in slice_bytecode]))
known_targets = list(known_targets)
known_targets.sort()
# logger.debug("Targets: %s", [d for d in ControlFlow.get_pairs(known_targets)])
slice_bytecode_indexed = {}
idx = 0
for l in slice_bytecode:
index = l[0]
slice_bytecode_indexed[index] = (l, idx)
idx += 1
for start_index, end_index in ControlFlow.get_pairs(known_targets):
index, lineno, op, arg, cflow_in, code_object = slice_bytecode_indexed[start_index][0]
block_kind = ControlFlow.block_kind_from_op(op)
cur_block = BasicBlock(block_kind, decl, start_index)
cur_block.length = end_index - start_index - 1
i = slice_bytecode_indexed[start_index][1]
try:
length = slice_bytecode_indexed[end_index][1]
if length >= slice_length:
length = slice_length
except KeyError:
length = slice_length
while i < length:
index, lineno, op, arg, cflow_in, code_object = slice_bytecode[i]
if op in JUMP_OPCODES:
jump_address = arg
if op in opcode.hasjrel:
jump_address = arg + index + 3
if op in (SETUP_FINALLY, SETUP_EXCEPT, SETUP_WITH):
kind = ControlFlow.E_UNCOND
if op == SETUP_FINALLY: kind = ControlFlow.E_FINALLY
if op in (SETUP_EXCEPT, SETUP_WITH): kind = ControlFlow.E_EXCEPT
cur_block.end_target = jump_address
cur_block.add_jump(jump_address, kind)
elif op in (JUMP_ABSOLUTE, JUMP_FORWARD):
cur_block.add_jump(jump_address, ControlFlow.E_UNCOND)
elif op in (POP_JUMP_IF_FALSE, JUMP_IF_FALSE_OR_POP, FOR_ITER):
cur_block.add_jump(jump_address, ControlFlow.E_FALSE)
elif op in (POP_JUMP_IF_TRUE, JUMP_IF_TRUE_OR_POP):
cur_block.add_jump(jump_address, ControlFlow.E_TRUE)
elif op == SETUP_LOOP:
cur_block.kind = BasicBlock.LOOP
cur_block.end_target = jump_address
elif op == RETURN_VALUE:
cur_block.has_return_path = True
cur_block.add_jump(ControlFlow.CFG_TMP_RETURN, ControlFlow.E_RETURN)
elif op == BREAK_LOOP:
cur_block.has_return_path = True
cur_block.add_jump(ControlFlow.CFG_TMP_BREAK, ControlFlow.E_UNCOND)
elif op == RAISE_VARARGS:
cur_block.has_return_path = False
cur_block.add_jump(ControlFlow.CFG_TMP_RAISE, ControlFlow.E_UNCOND)
i += 1
# If the last opcode is not in NO_FALL_THROUGH, we connect the fall-through
if not cur_block.has_return_path and op not in NO_FALL_THROUGH and i < slice_length:
kind = ControlFlow.E_UNCOND
if op in (POP_JUMP_IF_FALSE, JUMP_IF_FALSE_OR_POP, FOR_ITER):
kind = ControlFlow.E_TRUE
if op in (POP_JUMP_IF_TRUE, JUMP_IF_TRUE_OR_POP):
kind = ControlFlow.E_FALSE
cur_block.fallthrough = True
fallthrough_address = slice_bytecode[i][0]
cur_block.add_jump(fallthrough_address, kind)
else:
cur_block.fallthrough = False
block_map[start_index] = cur_block
blocks.add(cur_block)
return blocks
@staticmethod
def block_kind_from_op(op):
if op in (FOR_ITER,):
return BasicBlock.LOOP
# Cannot make the decision at this point, need to await the finalization
# of the CFG
return BasicBlock.UNKNOWN
@staticmethod
def find_targets(bytecode):
targets = set()
i, length = 0, len(bytecode)
while i < length:
index, lineno, op, arg, cflow_in, code_object = bytecode[i]
if op in JUMP_OPCODES:
jump_address = arg
if op in opcode.hasjrel:
jump_address = arg + index + 3
targets.add(jump_address)
if op not in NO_FALL_THROUGH:
targets.add(bytecode[i + 1][0])
i += 1
return targets
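# Illustrative sketch (not part of the original module): get_pairs yields
# consecutive (start, end) windows over the sorted jump targets, which
# make_blocks uses to slice the bytecode into basic blocks.
def _get_pairs_sketch():
  pairs = list(ControlFlow.get_pairs([0, 4, 9, 13]))
  assert pairs == [(0, 4), (4, 9), (9, 13)]
  return pairs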
|
sukwon0709/equip
|
equip/analysis/flow.py
|
Python
|
apache-2.0
| 13,747
|
[
"VisIt"
] |
9beb44e0a67dfb9a54fa5c1bea9e83cd8ccc4938fbdfb815d2514a06c4a565f1
|
import multiprocessing
bind = "127.0.0.1:8000"
workers = multiprocessing.cpu_count() * 2 + 1
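# Heuristic from the Gunicorn docs: (2 x num_cores) + 1 workers; tune per workload.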
worker_connections = 3000
keepalive = 1
daemon = True
loglevel = 'info'
pidfile = '/var/run/moe.pid'
errorlog = '/var/log/gunicorn.error.log'
accesslog = '/var/log/gunicorn.access.log'
|
bung87/moto-moe
|
conf/gunicorn_conf.py
|
Python
|
mit
| 279
|
[
"MOE"
] |
3a551317b87f05ff156db82494a8532dd3fec37054c0ad6c449c5dbdb46a8586
|
from setuptools import setup, find_packages
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='SetSimilaritySearch', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.7', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='A Python library of set similarity search algorithms', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/ekzhu/SetSimilaritySearch', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Eric Zhu', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='ekzhu@cs.toronto.edu', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish
#"License :: OSI Approved :: Apache Software License 2.0 (Apache-2.0)",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='set similarity search all pairs', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
# packages=find_packages(exclude=['contrib', 'docs', 'test*']), # Required
packages=['SetSimilaritySearch'],
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
#'dev': ['check-manifest'],
'test': ['coverage', 'nose'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
scripts = [
'scripts/all_pairs.py',
]
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
# project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
# },
)
|
ekzhu/SetSimilaritySearch
|
setup.py
|
Python
|
apache-2.0
| 8,192
|
[
"VisIt"
] |
81c419cfb7926a99fa5efb82faa9ce5bdad2a8cfe9f62fea21af7d51347eba99
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import numpy as np
from skbio import DNA, RNA, Protein, GeneticCode
from skbio.sequence._nucleotide_mixin import NucleotideMixin
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty
from skbio.metadata import IntervalMetadata
# This file contains tests for functionality of sequence types which implement
# NucleotideMixin. Currently this means DNA and RNA. These types are so
# similar that the testing logic can be shared and parameterized across
# different test data.
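# Illustrative sketch (not part of the original test module): the behavior
# exercised by the tests below, assuming scikit-bio is installed.
def _complement_behavior_sketch():
  from skbio import DNA
  assert str(DNA('AACG').complement()) == 'TTGC'
  assert str(DNA('AACG').complement(reverse=True)) == 'CGTT'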
class TestNucleotideSequence(unittest.TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str,
lambda s: np.frombuffer(s.encode('ascii'), dtype='|S1'),
lambda s: np.frombuffer(s.encode('ascii'), dtype=np.uint8)])
dna_str = 'ACGTMRWSYKVHDBN.-'
dna_comp_str = 'TGCAKYWSRMBDHVN.-'
dna_rev_comp_str = '-.NVHDBMRSWYKACGT'
rna_str = 'ACGUMRWSYKVHDBN.-'
rna_comp_str = 'UGCAKYWSRMBDHVN.-'
rna_rev_comp_str = '-.NVHDBMRSWYKACGU'
qual = tuple(range(len(dna_str)))
self.dna = (DNA, dna_str)
self.rna = (RNA, rna_str)
dna_comp = self.dna + (dna_comp_str,)
rna_comp = self.rna + (rna_comp_str,)
dna_comp_qual = dna_comp + (qual,)
rna_comp_qual = rna_comp + (qual,)
self.all_combos_comp_qual = (dna_comp_qual, rna_comp_qual)
dna_rev_comp = self.dna + (dna_rev_comp_str,)
rna_rev_comp = self.rna + (rna_rev_comp_str,)
self.all_combos_rev_comp = (dna_rev_comp, rna_rev_comp)
dna_rev_comp_qual = dna_rev_comp + (qual,)
rna_rev_comp_qual = rna_rev_comp + (qual,)
self.all_combos_rev_comp_qual = \
(dna_rev_comp_qual, rna_rev_comp_qual)
def test_instantiation_with_no_implementation(self):
class NucleotideSequenceSubclassNoImplementation(NucleotideMixin):
pass
with self.assertRaises(TypeError) as cm:
NucleotideSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("complement_map", str(cm.exception))
# TODO: remove when nondegenerate_chars is removed
def test_nondegenerate_chars(self):
dna = (DNA, "ACGT")
rna = (RNA, "ACGU")
for constructor, nondegenerate in (dna, rna):
exp = set(nondegenerate)
self.assertEqual(constructor('').nondegenerate_chars, exp)
self.assertEqual(constructor.nondegenerate_chars, exp)
def test_definite_chars(self):
dna = (DNA, "ACGT")
rna = (RNA, "ACGU")
for constructor, definite_char in (dna, rna):
exp = set(definite_char)
self.assertEqual(constructor('').definite_chars, exp)
self.assertEqual(constructor.definite_chars, exp)
def test_degenerate_map(self):
dna_exp = (DNA, {
'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T'])
})
rna_exp = (RNA, {
'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U'])
})
for constructor, degenerate in (dna_exp, rna_exp):
self.assertEqual(constructor('').degenerate_map, degenerate)
self.assertEqual(constructor.degenerate_map, degenerate)
def test_complement_map(self):
dna_exp = (DNA, {
'-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
})
rna_exp = (RNA, {
'-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
})
for constructor, comp_map in (dna_exp, rna_exp):
self.assertEqual(constructor('').complement_map, comp_map)
self.assertEqual(constructor.complement_map, comp_map)
# immutable
constructor.complement_map['A'] = 'X'
constructor.complement_map['C'] = 'W'
self.assertEqual(constructor.complement_map, comp_map)
with self.assertRaises(AttributeError):
constructor('').complement_map = {'W': 'X'}
def test_translate_ncbi_table_id(self):
for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
# default
obs = seq.translate()
self.assertEqual(obs, Protein('KFMH'))
obs = seq.translate(9)
self.assertEqual(obs, Protein('NFMH'))
def test_translate_genetic_code_object(self):
gc = GeneticCode('M' * 64, '-' * 64)
for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
obs = seq.translate(gc)
self.assertEqual(obs, Protein('MMMM'))
def test_translate_passes_parameters_through(self):
exp = Protein('MW')
for seq in RNA('UAAAUUGUGGUAA'), DNA('TAAATTGTGGTAA'):
# mix of args and kwargs
obs = seq.translate(13, reading_frame=2, start='require',
stop='require')
self.assertEqual(obs, exp)
# kwargs only
obs = seq.translate(genetic_code=13, reading_frame=2,
start='require', stop='require')
self.assertEqual(obs, exp)
# args only
obs = seq.translate(13, 2, 'require', 'require')
self.assertEqual(obs, exp)
def test_translate_preserves_metadata(self):
metadata = {'foo': 'bar', 'baz': 42}
positional_metadata = {'foo': range(3)}
for seq in (RNA('AUG', metadata=metadata,
positional_metadata=positional_metadata),
DNA('ATG', metadata=metadata,
positional_metadata=positional_metadata)):
obs = seq.translate()
# metadata retained, positional metadata dropped
self.assertEqual(obs,
Protein('M', metadata={'foo': 'bar', 'baz': 42}))
def test_translate_invalid_id(self):
for seq in RNA('AUG'), DNA('ATG'):
with self.assertRaisesRegex(ValueError, r'table_id.*42'):
seq.translate(42)
def test_translate_six_frames_ncbi_table_id(self):
# rc = CAAUUU
for seq in RNA('AAAUUG'), DNA('AAATTG'):
# default
obs = list(seq.translate_six_frames())
self.assertEqual(obs, [Protein('KL'), Protein('N'), Protein('I'),
Protein('QF'), Protein('N'), Protein('I')])
obs = list(seq.translate_six_frames(9))
self.assertEqual(obs, [Protein('NL'), Protein('N'), Protein('I'),
Protein('QF'), Protein('N'), Protein('I')])
def test_translate_six_frames_genetic_code_object(self):
gc = GeneticCode('M' * 64, '-' * 64)
for seq in RNA('AAAUUG'), DNA('AAATTG'):
obs = list(seq.translate_six_frames(gc))
self.assertEqual(obs, [Protein('MM'), Protein('M'), Protein('M'),
Protein('MM'), Protein('M'), Protein('M')])
def test_translate_six_frames_passes_parameters_through(self):
for seq in RNA('UUUAUGUGGUGA'), DNA('TTTATGTGGTGA'):
# mix of args and kwargs
obs = next(seq.translate_six_frames(11, start='require',
stop='require'))
self.assertEqual(obs, Protein('MW'))
# kwargs only
obs = next(seq.translate_six_frames(genetic_code=11,
start='require',
stop='require'))
self.assertEqual(obs, Protein('MW'))
# args only
obs = next(seq.translate_six_frames(11, 'require', 'require'))
self.assertEqual(obs, Protein('MW'))
def test_translate_six_frames_preserves_metadata(self):
metadata = {'foo': 'bar', 'baz': 42}
positional_metadata = {'foo': range(3)}
for seq in (RNA('AUG', metadata=metadata,
positional_metadata=positional_metadata),
DNA('ATG', metadata=metadata,
positional_metadata=positional_metadata)):
obs = list(seq.translate_six_frames())[:2]
# metadata retained, positional metadata dropped
self.assertEqual(
obs,
[Protein('M', metadata={'foo': 'bar', 'baz': 42}),
Protein('', metadata={'foo': 'bar', 'baz': 42})])
def test_translate_six_frames_invalid_id(self):
for seq in RNA('AUG'), DNA('ATG'):
with self.assertRaisesRegex(ValueError, r'table_id.*42'):
seq.translate_six_frames(42)
def test_repr(self):
# basic sanity checks for custom repr stats. more extensive testing is
# performed on Sequence.__repr__
for seq in DNA(''), RNA(''):
obs = repr(seq)
# obtained from super()
self.assertIn('has gaps: False', obs)
# custom to DNA/RNA
self.assertIn('GC-content: 0.00%', obs)
for seq in DNA('ACGT'), RNA('ACGU'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 50.00%', obs)
for seq in DNA('CST'), RNA('CSU'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 66.67%', obs)
for seq in DNA('GCSSCG'), RNA('GCSSCG'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 100.00%', obs)
for seq in DNA('-GCSSCG.'), RNA('-GCSSCG.'):
obs = repr(seq)
self.assertIn('has gaps: True', obs)
self.assertIn('GC-content: 100.00%', obs)
def test_complement_without_reverse_empty(self):
for constructor in (DNA, RNA):
# without optional attributes
comp = constructor('').complement()
self.assertEqual(comp, constructor(''))
# with optional attributes
comp = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []},
interval_metadata=IntervalMetadata(0)).complement()
self.assertEqual(
comp,
constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_without_reverse_non_empty(self):
for (constructor, seq_str, comp_str,
qual) in self.all_combos_comp_qual:
comp = constructor(seq_str).complement()
self.assertEqual(comp, constructor(comp_str))
im = IntervalMetadata(len(seq_str))
im.add([(0, 1)], metadata={'gene': 'p53'})
comp = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual},
interval_metadata=im).complement()
self.assertEqual(
comp,
constructor(
comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual},
interval_metadata=im))
def test_complement_with_reverse_empty(self):
for constructor in (DNA, RNA):
rc = constructor('').complement(reverse=True)
self.assertEqual(rc, constructor(''))
rc = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []},
interval_metadata=IntervalMetadata(0)).complement(reverse=True)
self.assertEqual(
rc,
constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_with_reverse_non_empty(self):
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
rc = constructor(seq_str).complement(reverse=True)
self.assertEqual(rc, constructor(rev_comp_str))
length = len(seq_str)
im = IntervalMetadata(length)
im.add([(0, 1)], metadata={'gene': 'p53'})
im_rc = IntervalMetadata(length)
im_rc.add([(length-1, length)], metadata={'gene': 'p53'})
original = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={
'quality': qual},
interval_metadata=im)
rc = original.complement(reverse=True)
self.assertEqual(
rc,
constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
list(qual)[::-1]},
interval_metadata=im_rc))
# assert the original object is not changed
self.assertIsNot(original.interval_metadata, im)
self.assertEqual(original.interval_metadata, im)
def test_reverse_complement(self):
# light tests because this just calls
# NucleotideSequence.complement(reverse=True), which is tested more
# extensively
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
rc = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual}).reverse_complement()
self.assertEqual(
rc,
constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': list(qual)[::-1]}))
def test_is_reverse_complement_varied_types(self):
tested = 0
for constructor, seq_str, rev_comp_str in self.all_combos_rev_comp:
seq_kinds = self.sequence_kinds.union(frozenset([constructor]))
for sequence in seq_kinds:
tested += 1
seq1 = constructor(seq_str)
seq2 = sequence(rev_comp_str)
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertEqual(tested, 8)
def test_is_reverse_complement_empty(self):
for constructor in (DNA, RNA):
seq1 = constructor('')
self.assertTrue(seq1.is_reverse_complement(seq1))
# optional attributes are ignored, only the sequence is compared
seq2 = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
np.array([], dtype=np.int64)})
self.assertTrue(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_metadata_ignored(self):
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
seq1 = constructor(seq_str)
seq2 = constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual})
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_non_reverse_complements(self):
for constructor in (DNA, RNA):
# same length
seq1 = constructor('ACAG')
seq2 = constructor('AAAA')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
# different length
seq1 = constructor('ACAG')
seq2 = constructor('AAAAA')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_type_mismatch(self):
for Class in (DNA, RNA):
class DifferentSequenceClass(GrammaredSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB")}
@classproperty
def definite_chars(cls):
return set("ABC")
@classproperty
def default_gap_char(cls):
return '-'
@classproperty
def gap_chars(cls):
return set('-.')
seq1 = Class('ABC')
seq2 = DifferentSequenceClass('ABC')
with self.assertRaisesRegex(TypeError,
r"Cannot use.*and "
"DifferentSequenceClass together"):
seq1.is_reverse_complement(seq2)
def test_motif_purine_run(self):
dna = (DNA, "AARC--TCRG", "AA-RC--TCR-G")
rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
all_sets = (dna, rna)
for constructor, run1, run2 in all_sets:
seq = constructor("")
self.assertEqual(list(seq.find_motifs("purine-run")), [])
seq = constructor(run1)
self.assertEqual(list(seq.find_motifs("purine-run")),
[slice(0, 3), slice(8, 10)])
seq = constructor(run2)
self.assertEqual(list(seq.find_motifs("purine-run", min_length=3,
ignore=seq.gaps())),
[slice(0, 4)])
def test_motif_pyrimidine_run(self):
dna = (DNA, "AARC--TCRA", "AA-RC--TCR-A")
rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
all_sets = (dna, rna)
for constructor, run1, run2 in all_sets:
seq = constructor("")
self.assertEqual(list(seq.find_motifs("pyrimidine-run")), [])
seq = constructor(run1)
self.assertEqual(list(seq.find_motifs("pyrimidine-run")),
[slice(3, 4), slice(6, 8)])
seq = constructor(run2)
self.assertEqual(list(seq.find_motifs("pyrimidine-run",
min_length=3,
ignore=seq.gaps())),
[slice(4, 9)])
def test_gc_frequency_and_gc_content(self):
universal_sets = (('', 0, 0.0), ('ADDDH', 0, 0.0), ('ACGA', 2, 0.5),
('ACGS', 3, 0.75), ('AAAAAAAG', 1, 0.125),
('CCC', 3, 1.0), ('GGG', 3, 1.0), ('SSS', 3, 1.0),
('CGS', 3, 1.0), ('----....', 0, 0.0),
('G--..', 1, 1.0), ('ACGA', 2, 0.5))
dna = (DNA, universal_sets + (('ATMRWYKVHDBN.-', 0, 0.0),))
rna = (RNA, universal_sets + (('AUMRWYKVHDBN.-', 0, 0.0),))
for constructor, current_set in (dna, rna):
for seq_str, count, ratio in current_set:
seq = constructor(seq_str)
self.assertEqual(count, seq.gc_frequency())
self.assertEqual(count, seq.gc_frequency(relative=False))
self.assertEqual(ratio, seq.gc_frequency(relative=True))
self.assertEqual(ratio, seq.gc_content())
if __name__ == "__main__":
unittest.main()
|
gregcaporaso/scikit-bio
|
skbio/sequence/tests/test_nucleotide_sequences.py
|
Python
|
bsd-3-clause
| 21,247
|
[
"scikit-bio"
] |
750fb6580903ec9b8792c8420948e6f6cefa003d3ed2cf4da674a239972c25c5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements the pickler objects used in abinitio.
"""
from __future__ import unicode_literals, division, print_function
import pickle
from pymatgen.core.periodic_table import Element
class PmgPickler(pickle.Pickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_id(self, obj):
"""Instead of pickling as a regular class instance, we emit a persistent ID."""
if isinstance(obj, Element):
# Here, our persistent ID is simply a tuple, containing a tag and a key
return obj.__class__.__name__, obj.symbol
else:
# If obj does not have a persistent ID, return None. This means obj needs to be pickled as usual.
return None
class PmgUnpickler(pickle.Unpickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_load(self, pid):
"""
This method is invoked whenever a persistent ID is encountered.
Here, pid is the tuple returned by PmgPickler.
"""
try:
type_tag, key_id = pid
except Exception as exc:
# Sometimes we get a string such as ('Element', u'C') instead
# of a real tuple. Use ast to evaluate the expression (much safer than eval).
import ast
type_tag, key_id = ast.literal_eval(pid)
#raise pickle.UnpicklingError("Exception:\n%s\npid: %s\ntype(pid)%s" %
# (str(exc), str(pid), type(pid)))
if type_tag == "Element":
return Element(key_id)
else:
# Always raise an error if the correct object cannot be returned.
# Otherwise, the unpickler would silently treat None as the object referenced by the persistent ID.
raise pickle.UnpicklingError("unsupported persistent object with pid %s" % pid)
def pmg_pickle_load(filobj, **kwargs):
"""
Loads a pickle file and deserializes it with PmgUnpickler.
Args:
filobj: File-like object
\*\*kwargs: Any of the keyword arguments supported by PmgUnpickler
Returns:
Deserialized object.
"""
#return pickle.load(filobj, **kwargs)
return PmgUnpickler(filobj, **kwargs).load()
def pmg_pickle_dump(obj, filobj, **kwargs):
"""
Dump an object to a pickle file using PmgPickler.
Args:
obj (object): Object to dump.
fileobj: File-like object
\*\*kwargs: Any of the keyword arguments supported by PmgPickler
"""
#return pickle.dump(obj, filobj)
#print(type(obj), type(filobj))
return PmgPickler(filobj, **kwargs).dump(obj)
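# Minimal round-trip sketch (illustrative, not part of the original module).
# Element is an Enum singleton in pymatgen, so the object restored through the
# persistent-ID path is the very same instance that was pickled.
if __name__ == "__main__":
    import io

    buf = io.BytesIO()
    pmg_pickle_dump({"el": Element("Fe"), "x": 1.23}, buf)
    buf.seek(0)
    data = pmg_pickle_load(buf)
    assert data["el"] is Element("Fe")
    assert data["x"] == 1.23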
|
aykol/pymatgen
|
pymatgen/serializers/pickle_coders.py
|
Python
|
mit
| 2,874
|
[
"pymatgen"
] |
cbb483b6b3c3e8e1458af4922341a3e07f141767bd86f73ff14c11e2ce45d1a1
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides a class used to describe the elastic tensor,
including methods used to fit the elastic tensor from linear response
stress-strain data
"""
import itertools
import warnings
from collections import OrderedDict
import numpy as np
import sympy as sp
from monty.dev import deprecated
from scipy.integrate import quad
from scipy.optimize import root
from scipy.special import factorial
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.core.tensors import (
DEFAULT_QUAD,
SquareTensor,
Tensor,
TensorCollection,
get_uvec,
)
from pymatgen.core.units import Unit
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Maarten de Jong, Ian Winter, Shyam Dwaraknath, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Production"
__date__ = "July 24, 2018"
class NthOrderElasticTensor(Tensor):
"""
An object representing an nth-order tensor expansion
of the stress-strain constitutive equations
"""
GPa_to_eV_A3 = Unit("GPa").get_conversion_factor(Unit("eV ang^-3"))
symbol = "C"
def __new__(cls, input_array, check_rank=None, tol=1e-4):
"""
Args:
input_array (array-like): array defining the tensor
check_rank (int): rank to enforce on input_array
tol (float): tolerance for the Voigt symmetry check
"""
obj = super().__new__(cls, input_array, check_rank=check_rank)
if obj.rank % 2 != 0:
raise ValueError("ElasticTensor must have even rank")
if not obj.is_voigt_symmetric(tol):
warnings.warn("Input elastic tensor does not satisfy standard voigt symmetries")
return obj.view(cls)
@property
def order(self):
"""
Order of the elastic tensor
"""
return self.rank // 2
def calculate_stress(self, strain):
"""
Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
"""
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
stress_matrix = self.einsum_sequence([strain] * (self.order - 1)) / factorial(self.order - 1)
return Stress(stress_matrix)
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
e_density = np.sum(self.calculate_stress(strain) * strain) / self.order
if convert_GPa_to_eV:
e_density *= self.GPa_to_eV_A3 # Conversion factor for GPa to eV/A^3
return e_density
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
Args:
strains (Nx3x3 array-like): strain matrices used in the fit
stresses (Nx3x3 array-like): stress matrices used in the fit
eq_stress (3x3 array-like): equilibrium (zero-strain) stress
order (int): order of the expansion to fit
tol (float): tolerance for the fit
Returns:
NthOrderElasticTensor of the requested order fit from the data
"""
return cls(diff_fit(strains, stresses, eq_stress, order, tol)[order - 2])
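# Usage sketch (illustrative values, not from the original module): for a
# second-order tensor the stress is the double contraction C : strain. With
# an isotropic tensor built from lam = 50 GPa and mu = 25 GPa, a pure
# dilation eps * I yields sigma = (3 * lam + 2 * mu) * eps * I.
if __name__ == "__main__":
    lam, mu = 50.0, 25.0
    c_voigt = np.zeros((6, 6))
    c_voigt[:3, :3] = lam
    c_voigt[np.arange(3), np.arange(3)] = lam + 2 * mu
    c_voigt[np.arange(3, 6), np.arange(3, 6)] = mu
    c2 = NthOrderElasticTensor.from_voigt(c_voigt)
    stress = c2.calculate_stress(0.01 * np.eye(3))
    assert np.allclose(stress, (3 * lam + 2 * mu) * 0.01 * np.eye(3))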
def raise_error_if_unphysical(f):
"""
Wrapper for functions or properties that should raise an error
if tensor is unphysical.
"""
def wrapper(self, *args, **kwargs):
"""
Args:
self ():
*args ():
**kwargs ():
Returns:
"""
if self.k_vrh < 0 or self.g_vrh < 0:
raise ValueError("Bulk or shear modulus is negative, property cannot be determined")
return f(self, *args, **kwargs)
return wrapper
class ElasticTensor(NthOrderElasticTensor):
"""
This class extends Tensor to describe the 3x3x3x3
second-order elastic tensor, C_{ijkl}, with various
methods for estimating other properties derived from
the second order elastic tensor
"""
def __new__(cls, input_array, tol=1e-4):
"""
Create an ElasticTensor object. The constructor throws an error if
the shape of the input_matrix argument is not 3x3x3x3, i. e. in true
tensor notation. Issues a warning if the input_matrix argument does
not satisfy standard symmetries. Note that the constructor uses
__new__ rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
input_array (3x3x3x3 array-like): the 3x3x3x3 array-like
representing the elastic tensor
tol (float): tolerance for initial symmetry test of tensor
"""
obj = super().__new__(cls, input_array, check_rank=4, tol=tol)
return obj.view(cls)
@property
def compliance_tensor(self):
"""
returns the Voigt-notation compliance tensor,
which is the matrix inverse of the
Voigt-notation elastic tensor
"""
s_voigt = np.linalg.inv(self.voigt)
return ComplianceTensor.from_voigt(s_voigt)
@property
def k_voigt(self):
"""
returns the K_v bulk modulus
"""
return self.voigt[:3, :3].mean()
@property
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (
2.0 * self.voigt[:3, :3].trace() - np.triu(self.voigt[:3, :3]).sum() + 3 * self.voigt[3:, 3:].trace()
) / 15.0
@property
def k_reuss(self):
"""
returns the K_r bulk modulus
"""
return 1.0 / self.compliance_tensor.voigt[:3, :3].sum()
@property
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15.0 / (
8.0 * self.compliance_tensor.voigt[:3, :3].trace()
- 4.0 * np.triu(self.compliance_tensor.voigt[:3, :3]).sum()
+ 3.0 * self.compliance_tensor.voigt[3:, 3:].trace()
)
@property
def k_vrh(self):
"""
returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus
"""
return 0.5 * (self.k_voigt + self.k_reuss)
@property
def g_vrh(self):
"""
returns the G_vrh (Voigt-Reuss-Hill) average shear modulus
"""
return 0.5 * (self.g_voigt + self.g_reuss)
@property
def y_mod(self):
"""
Calculates Young's modulus (in SI units) using the
Voigt-Reuss-Hill averages of bulk and shear moduli
"""
return 9.0e9 * self.k_vrh * self.g_vrh / (3.0 * self.k_vrh + self.g_vrh)
def directional_poisson_ratio(self, n, m, tol=1e-8):
"""
Calculates the poisson ratio for a specific direction
relative to a second, orthogonal direction
Args:
n (3-d vector): principal direction
m (3-d vector): secondary direction orthogonal to n
tol (float): tolerance for testing of orthogonality
"""
n, m = get_uvec(n), get_uvec(m)
if not np.abs(np.dot(n, m)) < tol:
raise ValueError("n and m must be orthogonal")
v = self.compliance_tensor.einsum_sequence([n] * 2 + [m] * 2)
v *= -1 / self.compliance_tensor.einsum_sequence([n] * 4)
return v
def directional_elastic_mod(self, n):
"""
Calculates directional elastic modulus for a specific vector
"""
n = get_uvec(n)
return self.einsum_sequence([n] * 4)
@raise_error_if_unphysical
def trans_v(self, structure):
"""
Calculates transverse sound velocity (in SI units) using the
Voigt-Reuss-Hill average shear modulus
Args:
structure: pymatgen structure object
Returns: transverse sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
if self.g_vrh < 0:
raise ValueError("k_vrh or g_vrh is negative, sound velocity is undefined")
return (1e9 * self.g_vrh / mass_density) ** 0.5
@raise_error_if_unphysical
def long_v(self, structure):
"""
Calculates longitudinal sound velocity (in SI units)
using the Voigt-Reuss-Hill average bulk and shear moduli
Args:
structure: pymatgen structure object
Returns: longitudinal sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
if self.g_vrh < 0:
raise ValueError("k_vrh or g_vrh is negative, sound velocity is undefined")
return (1e9 * (self.k_vrh + 4.0 / 3.0 * self.g_vrh) / mass_density) ** 0.5
@raise_error_if_unphysical
def snyder_ac(self, structure):
"""
Calculates Snyder's acoustic sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's acoustic sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
num_density = 1e30 * nsites / volume
tot_mass = sum(e.atomic_mass for e in structure.species)
avg_mass = 1.6605e-27 * tot_mass / natoms
return (
0.38483
* avg_mass
* ((self.long_v(structure) + 2.0 * self.trans_v(structure)) / 3.0) ** 3.0
/ (300.0 * num_density ** (-2.0 / 3.0) * nsites ** (1.0 / 3.0))
)
@raise_error_if_unphysical
def snyder_opt(self, structure):
"""
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return (
1.66914e-23
* (self.long_v(structure) + 2.0 * self.trans_v(structure))
/ 3.0
/ num_density ** (-2.0 / 3.0)
* (1 - nsites ** (-1.0 / 3.0))
)
@raise_error_if_unphysical
def snyder_total(self, structure):
"""
Calculates Snyder's total sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's total sound velocity (in SI units)
"""
return self.snyder_ac(structure) + self.snyder_opt(structure)
@raise_error_if_unphysical
def clarke_thermalcond(self, structure):
"""
Calculates Clarke's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Clarke's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum(e.atomic_mass for e in structure.species)
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 0.87 * 1.3806e-23 * avg_mass ** (-2.0 / 3.0) * mass_density ** (1.0 / 6.0) * self.y_mod ** 0.5
@raise_error_if_unphysical
def cahill_thermalcond(self, structure):
"""
Calculates Cahill's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Cahill's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.3806e-23 / 2.48 * num_density ** (2.0 / 3.0) * (self.long_v(structure) + 2 * self.trans_v(structure))
@raise_error_if_unphysical
def debye_temperature(self, structure):
"""
Estimates the debye temperature from longitudinal and
transverse sound velocities
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
v0 = structure.volume * 1e-30 / structure.num_sites
vl, vt = self.long_v(structure), self.trans_v(structure)
vm = 3 ** (1.0 / 3.0) * (1 / vl ** 3 + 2 / vt ** 3) ** (-1.0 / 3.0)
td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi ** 2 / v0) ** (1.0 / 3.0)
return td
@deprecated(
"debye_temperature_from_sound_velocities is now the default"
"debye_temperature function, this one will be removed."
)
@raise_error_if_unphysical
def debye_temperature_from_sound_velocities(self, structure):
"""
Estimates Debye temperature from sound velocities
"""
return self.debye_temperature(structure)
@property
def universal_anisotropy(self):
"""
returns the universal anisotropy value
"""
return 5.0 * self.g_voigt / self.g_reuss + self.k_voigt / self.k_reuss - 6.0
@property
def homogeneous_poisson(self):
"""
returns the homogeneous poisson ratio
"""
return (1.0 - 2.0 / 3.0 * self.g_vrh / self.k_vrh) / (2.0 + 2.0 / 3.0 * self.g_vrh / self.k_vrh)
def green_kristoffel(self, u):
"""
Returns the Green-Kristoffel tensor for a second-order tensor
"""
return self.einsum_sequence([u, u], "ijkl,i,l")
@property
def property_dict(self):
"""
returns a dictionary of properties derived from the elastic tensor
"""
props = [
"k_voigt",
"k_reuss",
"k_vrh",
"g_voigt",
"g_reuss",
"g_vrh",
"universal_anisotropy",
"homogeneous_poisson",
"y_mod",
]
return {prop: getattr(self, prop) for prop in props}
def get_structure_property_dict(self, structure, include_base_props=True, ignore_errors=False):
"""
returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False
"""
s_props = [
"trans_v",
"long_v",
"snyder_ac",
"snyder_opt",
"snyder_total",
"clarke_thermalcond",
"cahill_thermalcond",
"debye_temperature",
]
if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):
sp_dict = {prop: None for prop in s_props}
else:
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
@classmethod
def from_pseudoinverse(cls, strains, stresses):
"""
Class method to fit an elastic tensor from stress/strain
data. Method uses Moore-Penrose pseudoinverse to invert
the s = C*e equation with elastic tensor, stress, and
strain in voigt notation
Args:
strains (Nx3x3 array-like): list or array of strains
stresses (Nx3x3 array-like): list or array of stresses
"""
# convert the stress/strain to Nx6 arrays of voigt-notation
warnings.warn(
"Pseudoinverse fitting of Strain/Stress lists may yield "
"questionable results from vasp data, use with caution."
)
stresses = np.array([Stress(stress).voigt for stress in stresses])
with warnings.catch_warnings(record=True):
strains = np.array([Strain(strain).voigt for strain in strains])
voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
return cls.from_voigt(voigt_fit)
@classmethod
def from_independent_strains(cls, strains, stresses, eq_stress=None, vasp=False, tol=1e-10):
"""
Constructs the elastic tensor least-squares fit of independent strains
Args:
strains (list of Strains): list of strain objects to fit
stresses (list of Stresses): list of stress objects to use in fit
corresponding to the list of strains
eq_stress (Stress): equilibrium stress to use in fitting
vasp (boolean): flag for whether the stress tensor should be
converted based on vasp units/convention for stress
tol (float): tolerance for removing near-zero elements of the
resulting tensor
"""
strain_states = [tuple(ss) for ss in np.eye(6)]
ss_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress)
if not set(strain_states) <= set(ss_dict.keys()):
raise ValueError(f"Missing independent strain states: {set(strain_states) - set(ss_dict)}")
if len(set(ss_dict.keys()) - set(strain_states)) > 0:
warnings.warn("Extra strain states in strain-stress pairs are neglected in independent strain fitting")
c_ij = np.zeros((6, 6))
for i in range(6):
istrains = ss_dict[strain_states[i]]["strains"]
istresses = ss_dict[strain_states[i]]["stresses"]
for j in range(6):
c_ij[i, j] = np.polyfit(istrains[:, i], istresses[:, j], 1)[0]
if vasp:
c_ij *= -0.1 # Convert units/sign convention of vasp stress tensor
c = cls.from_voigt(c_ij)
c = c.zeroed(tol)
return c
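# Usage sketch (illustrative values, not from the original module): an
# isotropic tensor with lam = 50 GPa and mu = 25 GPa has k = lam + 2 * mu / 3
# and g = mu under both the Voigt and Reuss averages; the same tensor is then
# recovered from synthetic stress-strain data via the pseudoinverse fit.
if __name__ == "__main__":
    lam, mu = 50.0, 25.0
    c_voigt = np.zeros((6, 6))
    c_voigt[:3, :3] = lam
    c_voigt[np.arange(3), np.arange(3)] = lam + 2 * mu
    c_voigt[np.arange(3, 6), np.arange(3, 6)] = mu
    et = ElasticTensor.from_voigt(c_voigt)
    assert np.isclose(et.k_vrh, lam + 2 * mu / 3)
    assert np.isclose(et.g_vrh, mu)
    strains = [Strain.from_voigt(0.01 * v) for v in np.eye(6)]
    stresses = [Stress.from_voigt(c_voigt @ (0.01 * v)) for v in np.eye(6)]
    fit = ElasticTensor.from_pseudoinverse(strains, stresses)
    assert np.allclose(fit.voigt, c_voigt)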
class ComplianceTensor(Tensor):
"""
This class represents the compliance tensor, and exists
primarily to keep the voigt-conversion scheme consistent
since the compliance tensor has a unique vscale
"""
def __new__(cls, s_array):
"""
Args:
s_array (array-like): compliance tensor array in full (3x3x3x3) notation
"""
vscale = np.ones((6, 6))
vscale[3:] *= 2
vscale[:, 3:] *= 2
obj = super().__new__(cls, s_array, vscale=vscale)
return obj.view(cls)
class ElasticTensorExpansion(TensorCollection):
"""
This class is a sequence of elastic tensors corresponding
to an elastic tensor expansion, which can be used to
calculate stress and energy density and inherits all
of the list-based properties of TensorCollection
(e. g. symmetrization, voigt conversion, etc.)
"""
def __init__(self, c_list):
"""
Initialization method for ElasticTensorExpansion
Args:
c_list (list or tuple): sequence of Tensor inputs
or tensors from which the elastic tensor
expansion is constructed.
"""
c_list = [NthOrderElasticTensor(c, check_rank=4 + i * 2) for i, c in enumerate(c_list)]
super().__init__(c_list)
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None, tol=1e-10, order=3):
"""
Generates an elastic tensor expansion via the fitting function
defined below in diff_fit
"""
c_list = diff_fit(strains, stresses, eq_stress, order, tol)
return cls(c_list)
@property
def order(self):
"""
Order of the elastic tensor expansion, i. e. the order of the
highest included set of elastic constants
"""
return self[-1].order
def calculate_stress(self, strain):
"""
Calculates the total stress by summing each order's
contribution, using Einstein summation
"""
return sum(c.calculate_stress(strain) for c in self)
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
return sum(c.energy_density(strain, convert_GPa_to_eV) for c in self)
def get_ggt(self, n, u):
"""
Gets the Generalized Gruneisen tensor for a given
third-order elastic tensor expansion.
Args:
n (3x1 array-like): normal mode direction
u (3x1 array-like): polarization direction
"""
gk = self[0].einsum_sequence([n, u, n, u])
result = -(
2 * gk * np.outer(u, u) + self[0].einsum_sequence([n, n]) + self[1].einsum_sequence([n, u, n, u])
) / (2 * gk)
return result
def get_tgt(self, temperature=None, structure=None, quad=None):
"""
Gets the thermodynamic Gruneisen tensor (TGT) via an
integration of the GGT weighted by the directional heat
capacity.
See refs:
R. N. Thurston and K. Brugger, Phys. Rev. 113, A1604 (1964).
K. Brugger Phys. Rev. 137, A1826 (1965).
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be
dictionary with "points" and "weights" keys defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
if temperature and not structure:
raise ValueError("If using temperature input, you must also include structure")
quad = quad if quad else DEFAULT_QUAD
points = quad["points"]
weights = quad["weights"]
num, denom, c = np.zeros((3, 3)), 0, 1
for p, w in zip(points, weights):
gk = ElasticTensor(self[0]).green_kristoffel(p)
rho_wsquareds, us = np.linalg.eigh(gk)
us = [u / np.linalg.norm(u) for u in np.transpose(us)]
for u in us:
# TODO: this should be benchmarked
if temperature:
c = self.get_heat_capacity(temperature, structure, p, u)
num += c * self.get_ggt(p, u) * w
denom += c * w
return SquareTensor(num / denom)
def get_gruneisen_parameter(self, temperature=None, structure=None, quad=None):
"""
Gets the single average gruneisen parameter from the TGT.
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be
dictionary with "points" and "weights" keys defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
return np.trace(self.get_tgt(temperature, structure, quad)) / 3.0
def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
"""
Gets the directional heat capacity for a higher order tensor
expansion as a function of direction and polarization.
Args:
temperature (float): Temperature in kelvin
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
cutoff (float): if hbar * omega exceeds kt * cutoff, the mode is
treated as frozen out and 0 is returned
"""
k = 1.38065e-23
kt = k * temperature
hbar_w = 1.05457e-34 * self.omega(structure, n, u)
if hbar_w > kt * cutoff:
return 0.0
c = k * (hbar_w / kt) ** 2
c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1) ** 2
return c * 6.022e23
def omega(self, structure, n, u):
"""
Finds directional frequency contribution to the heat
capacity from direction and polarization
Args:
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
"""
l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
l0 *= 1e-10 # in A
weight = float(structure.composition.weight) * 1.66054e-27 # in kg
vol = structure.volume * 1e-30 # in m^3
vel = (1e9 * self[0].einsum_sequence([n, u, n, u]) / (weight / vol)) ** 0.5
return vel / l0
def thermal_expansion_coeff(self, structure, temperature, mode="debye"):
"""
Gets thermal expansion coefficient from third-order constants.
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
mode (string): mode for finding average heat-capacity,
current supported modes are 'debye' and 'dulong-petit'
"""
soec = ElasticTensor(self[0])
v0 = structure.volume * 1e-30 / structure.num_sites
if mode == "debye":
td = soec.debye_temperature(structure)
t_ratio = temperature / td
def integrand(x):
return (x ** 4 * np.exp(x)) / (np.exp(x) - 1) ** 2
cv = 9 * 8.314 * t_ratio ** 3 * quad(integrand, 0, t_ratio ** -1)[0]
elif mode == "dulong-petit":
cv = 3 * 8.314
else:
raise ValueError("Mode must be debye or dulong-petit")
tgt = self.get_tgt(temperature, structure)
alpha = np.einsum("ijkl,ij", soec.compliance_tensor, tgt)
alpha *= cv / (1e9 * v0 * 6.022e23)
return SquareTensor(alpha)
def get_compliance_expansion(self):
"""
Gets a compliance tensor expansion from the elastic
tensor expansion.
"""
# TODO: this might have a general form
if not self.order <= 4:
raise ValueError("Compliance tensor expansion only supported for fourth-order and lower")
ce_exp = [ElasticTensor(self[0]).compliance_tensor]
einstring = "ijpq,pqrsuv,rskl,uvmn->ijklmn"
ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1], ce_exp[-1], ce_exp[-1]))
if self.order == 4:
# Four terms in the Fourth-Order compliance tensor
# pylint: disable=E1130
einstring_1 = "pqab,cdij,efkl,ghmn,abcdefgh"
tensors_1 = [ce_exp[0]] * 4 + [self[-1]]
temp = -np.einsum(einstring_1, *tensors_1)
einstring_2 = "pqab,abcdef,cdijmn,efkl"
einstring_3 = "pqab,abcdef,efklmn,cdij"
einstring_4 = "pqab,abcdef,cdijkl,efmn"
for es in [einstring_2, einstring_3, einstring_4]:
temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0])
ce_exp.append(temp)
return TensorCollection(ce_exp)
def get_strain_from_stress(self, stress):
"""
Gets the strain from a stress state according
to the compliance expansion corresponding to the
tensor expansion.
"""
compl_exp = self.get_compliance_expansion()
strain = 0
for n, compl in enumerate(compl_exp):
strain += compl.einsum_sequence([stress] * (n + 1)) / factorial(n + 1)
return strain
def get_effective_ecs(self, strain, order=2):
"""
Returns the effective elastic constants
from the elastic tensor expansion.
Args:
strain (Strain or 3x3 array-like): strain condition
under which to calculate the effective constants
order (int): order of the ecs to be returned
"""
ec_sum = 0
for n, ecs in enumerate(self[order - 2 :]):
ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
return ec_sum
def get_wallace_tensor(self, tau):
"""
Gets the Wallace Tensor for determining yield strength
criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor
"""
b = 0.5 * (
np.einsum("ml,kn->klmn", tau, np.eye(3))
+ np.einsum("km,ln->klmn", tau, np.eye(3))
+ np.einsum("nl,km->klmn", tau, np.eye(3))
+ np.einsum("kn,lm->klmn", tau, np.eye(3))
+ -2 * np.einsum("kl,mn->klmn", tau, np.eye(3))
)
strain = self.get_strain_from_stress(tau)
b += self.get_effective_ecs(strain)
return b
def get_symmetric_wallace_tensor(self, tau):
"""
Gets the symmetrized wallace tensor for determining
yield strength criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor.
"""
wallace = self.get_wallace_tensor(tau)
return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))
def get_stability_criteria(self, s, n):
"""
Gets the stability criteria from the symmetric
Wallace tensor from an input vector and stress
value.
Args:
s (float): Stress value at which to evaluate
the stability criteria
n (3x1 array-like): direction of the applied
stress
"""
n = get_uvec(n)
stress = s * np.outer(n, n)
sym_wallace = self.get_symmetric_wallace_tensor(stress)
return np.linalg.det(sym_wallace.voigt)
def get_yield_stress(self, n):
"""
Gets the yield stress for a given direction
Args:
n (3x1 array-like): direction for which to find the
yield stress
"""
# TODO: root finding could be more robust
comp = root(self.get_stability_criteria, -1, args=n)
tens = root(self.get_stability_criteria, 1, args=n)
return (comp.x, tens.x)
# TODO: abstract this for other tensor fitting procedures
def diff_fit(strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
nth order elastic constant fitting function based on
central-difference derivatives with respect to distinct
strain states. The algorithm is summarized as follows:
1. Identify distinct strain states as sets of indices
for which nonzero strain values exist, typically
[(0), (1), (2), (3), (4), (5), (0, 1) etc.]
2. For each strain state, find and sort strains and
stresses by strain value.
3. Find the first through (order - 1)th derivatives of each stress
with respect to scalar variable corresponding to
the smallest perturbation in the strain.
4. Use the pseudoinverse of a matrix-vector expression
corresponding to the parameterized stress-strain
relationship and multiply that matrix by the respective
calculated first or second derivatives from the
previous step.
5. Place the calculated nth-order elastic
constants appropriately.
Args:
order (int): order of the elastic tensor set to return
strains (nx3x3 array-like): Array of 3x3 strains
to use in fitting of ECs
stresses (nx3x3 array-like): Array of 3x3 stresses
to use in fitting ECs. These should be PK2 stresses.
eq_stress (3x3 array-like): stress corresponding to
equilibrium strain (i. e. "0" strain state).
If not specified, function will try to find
the state in the list of provided stresses
and strains. If not found, defaults to 0.
tol (float): value for which strains below
are ignored in identifying strain states.
Returns:
Set of tensors corresponding to nth order expansion of
the stress/strain relation
"""
strain_state_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress, tol=tol, add_eq=True, sort=True)
# Collect derivative data
c_list = []
dei_dsi = np.zeros((order - 1, 6, len(strain_state_dict)))
for n, (strain_state, data) in enumerate(strain_state_dict.items()):
hvec = data["strains"][:, strain_state.index(1)]
for i in range(1, order):
coef = get_diff_coeff(hvec, i)
dei_dsi[i - 1, :, n] = np.dot(coef, data["stresses"])
m, absent = generate_pseudo(list(strain_state_dict.keys()), order)
for i in range(1, order):
cvec, carr = get_symbol_list(i + 1)
svec = np.ravel(dei_dsi[i - 1].T)
cmap = dict(zip(cvec, np.dot(m[i - 1], svec)))
c_list.append(v_subs(carr, cmap))
return [Tensor.from_voigt(c) for c in c_list]
def find_eq_stress(strains, stresses, tol=1e-10):
"""
Finds stress corresponding to zero strain state in stress-strain list
Args:
strains (Nx3x3 array-like): array corresponding to strains
stresses (Nx3x3 array-like): array corresponding to stresses
tol (float): tolerance to find zero strain state
"""
stress_array = np.array(stresses)
strain_array = np.array(strains)
eq_stress = stress_array[np.all(abs(strain_array) < tol, axis=(1, 2))]
if eq_stress.size != 0:
all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()
if len(eq_stress) > 1 and not all_same:
raise ValueError(
"Multiple stresses found for equilibrium strain"
" state, please specify equilibrium stress or "
" remove extraneous stresses."
)
eq_stress = eq_stress[0]
else:
warnings.warn("No eq state found, returning zero voigt stress")
eq_stress = Stress(np.zeros((3, 3)))
return eq_stress
def get_strain_state_dict(strains, stresses, eq_stress=None, tol=1e-10, add_eq=True, sort=True):
"""
Creates a dictionary of voigt-notation stress-strain sets
keyed by "strain state", i. e. a tuple corresponding to
the non-zero entries in ratios to the lowest nonzero value,
e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
This allows strains to be collected in stencils so as to
evaluate parameterized finite difference derivatives
Args:
strains (Nx3x3 array-like): strain matrices
stresses (Nx3x3 array-like): stress matrices
eq_stress (Nx3x3 array-like): equilibrium stress
tol (float): tolerance for sorting strain states
add_eq (bool): flag for whether to add eq_strain
to stress-strain sets for each strain state
sort (bool): flag for whether to sort strain states
Returns:
OrderedDict with strain state keys and dictionaries
with stress-strain data corresponding to strain state
"""
# Recast stress/strains
vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains]) # pylint: disable=E1101
vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses]) # pylint: disable=E1101
# Collect independent strain states:
independent = {tuple(np.nonzero(vstrain)[0].tolist()) for vstrain in vstrains}
strain_state_dict = OrderedDict()
if add_eq:
if eq_stress is not None:
veq_stress = Stress(eq_stress).voigt
else:
veq_stress = find_eq_stress(strains, stresses).voigt
for n, ind in enumerate(independent):
# match strains with templates
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)
template = np.tile(template, [vstresses.shape[0], 1])
mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
mstresses = vstresses[mode]
mstrains = vstrains[mode]
# Get "strain state", i.e. ratio of each value to minimum strain
min_nonzero_ind = np.argmin(np.abs(np.take(mstrains[-1], ind)))
min_nonzero_val = np.take(mstrains[-1], ind)[min_nonzero_ind]
strain_state = mstrains[-1] / min_nonzero_val
strain_state = tuple(strain_state)
if add_eq:
# add zero strain state
mstrains = np.vstack([mstrains, np.zeros(6)])
mstresses = np.vstack([mstresses, veq_stress])
# sort strains/stresses by strain values
if sort:
mstresses = mstresses[mstrains[:, ind[0]].argsort()]
mstrains = mstrains[mstrains[:, ind[0]].argsort()]
strain_state_dict[strain_state] = {"strains": mstrains, "stresses": mstresses}
return strain_state_dict
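# Usage sketch (illustrative values, not from the original module): two
# uniaxial strains along voigt index 0 collapse into the single strain state
# (1, 0, 0, 0, 0, 0); the equilibrium (zero-strain) point is appended
# automatically and the stencil is sorted by strain value.
if __name__ == "__main__":
    strains = [np.diag([0.01, 0.0, 0.0]), np.diag([0.02, 0.0, 0.0])]
    stresses = [np.diag([1.0, 0.3, 0.3]), np.diag([2.0, 0.6, 0.6])]
    ssd = get_strain_state_dict(strains, stresses)
    (state, data), = ssd.items()
    assert state == (1.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    assert data["strains"][:, 0].tolist() == [0.0, 0.01, 0.02]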
def generate_pseudo(strain_states, order=3):
"""
Generates the pseudoinverse for a given set of strains.
Args:
strain_states (Nx6 array-like): a list of voigt-notation
"strain-states", i. e. perturbed indices of the strain
as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)
order (int): order of pseudoinverse to calculate
Returns:
mis: pseudo inverses for each order tensor, these can
be multiplied by the central difference derivative
of the stress with respect to the strain state
absent_syms: symbols of the tensor absent from the PI
expression
"""
s = sp.Symbol("s")
nstates = len(strain_states)
ni = np.array(strain_states) * s
mis, absent_syms = [], []
for degree in range(2, order + 1):
cvec, carr = get_symbol_list(degree)
sarr = np.zeros((nstates, 6), dtype=object)
for n, strain_v in enumerate(ni):
# Get expressions
exps = carr.copy()
for i in range(degree - 1):
exps = np.dot(exps, strain_v)
exps /= factorial(degree - 1, exact=True)  # scipy's factorial; np.math is not a public NumPy API
sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
svec = sarr.ravel()
present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
absent_syms += [set(cvec) - present_syms]
m = np.zeros((6 * nstates, len(cvec)))
for n, c in enumerate(cvec):
m[:, n] = v_diff(svec, c)
mis.append(np.linalg.pinv(m))
return mis, absent_syms
def get_symbol_list(rank, dim=6):
"""
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
"""
indices = list(itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros([dim] * rank, dtype=object)
for n, idx in enumerate(indices):
c_vec[n] = sp.Symbol("c_" + "".join([str(i) for i in idx]))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return c_vec, c_arr
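# Quick illustration (not from the original module): at rank 2 in
# 6-dimensional voigt space there are C(6 + 2 - 1, 2) = 21 distinct symbols,
# and index-transposed entries such as (0, 1) and (1, 0) share one symbol.
if __name__ == "__main__":
    c_vec, c_arr = get_symbol_list(2)
    assert len(c_vec) == 21
    assert c_arr[0, 1] == c_arr[1, 0]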
def subs(entry, cmap):
"""
Sympy substitution function, primarily for the purposes
of numpy vectorization
Args:
entry (symbol or exp): sympy expr to undergo subs
cmap (dict): map for symbols to values to use in subs
Returns:
Evaluated expression with substitution
"""
return entry.subs(cmap)
# Vectorized functions
v_subs = np.vectorize(subs)
v_diff = np.vectorize(sp.diff)
def get_diff_coeff(hvec, n=1):
"""
Helper function to find difference coefficients of a
derivative on an arbitrary mesh.
Args:
hvec (1D array-like): sampling stencil
n (int): degree of derivative to find
"""
hvec = np.array(hvec, dtype=np.float64)  # np.float_ was removed in NumPy 2.0
acc = len(hvec)
exp = np.column_stack([np.arange(acc)] * acc)
a = np.vstack([hvec] * acc) ** exp
b = np.zeros(acc)
b[n] = factorial(n)
return np.linalg.solve(a, b)
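# Sanity check (not from the original module): on the symmetric stencil
# (-h, 0, h) the first-derivative weights reduce to the familiar central
# difference (-1/(2h), 0, 1/(2h)).
if __name__ == "__main__":
    assert np.allclose(get_diff_coeff([-0.01, 0.0, 0.01], n=1),
                       [-50.0, 0.0, 50.0])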
|
vorwerkc/pymatgen
|
pymatgen/analysis/elasticity/elastic.py
|
Python
|
mit
| 40,737
|
[
"VASP",
"pymatgen"
] |
890e97b64c452c6a7f6ee79bd6533b2f8769b071b739b8a443a3913ccf54b025
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading images, parsing protobufs, etc."""
import math
import os
import random
from google.protobuf import text_format
import matplotlib as plt
import matplotlib.cm # pylint: disable=unused-import
import numpy as np
from skimage.draw import ellipse
from skimage.exposure import adjust_gamma
from skimage.filters import gaussian
from skimage.transform import ProjectiveTransform
from skimage.transform import resize
from skimage.transform import warp
import tensorflow as tf
import yaml
from keypose import data_pb2 as pb
try:
import cv2 # pylint: disable=g-import-not-at-top
except ImportError as e:
print(e)
# Top level keypose directory.
KEYPOSE_PATH = os.path.join(os.getcwd(), 'keypose')
# Read image, including .exr images.
def read_image(fname):
ext = os.path.splitext(fname)[1]
if ext == '.exr':
print('reading exr file')
image = cv2.imread(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
else:
image = cv2.imread(fname)
assert image is not None, 'Cannot open %s' % fname
return image
# Camera array to camera protobuf
def cam_array_to_pb(cam):
cam_pb = pb.Camera()
cam_pb.fx = cam[0]
cam_pb.fy = cam[1]
cam_pb.cx = cam[2]
cam_pb.cy = cam[3]
cam_pb.baseline = cam[4]
cam_pb.resx = cam[5]
cam_pb.resy = cam[6]
return cam_pb
# Camera protobuf to camera array
def cam_pb_to_array(cam_pb):
return [
cam_pb.fx, cam_pb.fy, cam_pb.cx, cam_pb.cy, cam_pb.baseline, cam_pb.resx,
cam_pb.resy
]
# Dummy transform that always produces 0 vector as result.
def dummy_to_uvd():
to_uvd = np.zeros((4, 4))
to_uvd[3, 3] = 1.0
return to_uvd
# Zero uvd stack.
def dummy_keys_uvd(num):
uvd = np.zeros(4)
uvd[3] = 1.0
return [uvd for _ in range(num)]
def k_matrix_from_camera(camera):
return np.array([
[camera.fx, 0, camera.cx], # fx, cx
[0, camera.fy, camera.cy], # fy, cy
[0, 0, 1]
])
def q_matrix_from_camera(camera):
return np.array([
[1.0, 0, 0, -camera.cx], # cx
[0, 1.0, 0, -camera.cy], # cy
[0, 0, 0, camera.fx], # fx
[0, 0, 1.0 / camera.baseline, 0]
]) # baseline
def p_matrix_from_camera(camera):
return np.array([
[camera.fx, 0, camera.cx, 0], # fx, cx
[0, camera.fy, camera.cy, 0], # fy, cy
[0, 0, 0, camera.fx * camera.baseline], # fx*baseline
[0, 0, 1.0, 0]
])
# Parse a transform protobuf.
def get_transform(trans_pb):
transform = np.array(trans_pb.element)
if transform.size == 16:
return transform.reshape((4, 4))
return None
def get_keypoints(targ_pb):
"""Parse a keypoint protobuf."""
uvds = []
visible = []
focal_length = targ_pb.camera.fx
baseline = targ_pb.camera.baseline
for kp_pb in targ_pb.keypoints:
uvds.append(
np.array([kp_pb.u, kp_pb.v, focal_length * baseline / kp_pb.z, 1]))
visible.append(kp_pb.visible)
transform = get_transform(targ_pb.transform)
if transform is None:
world_t_uvd = None
else:
world_t_uvd = np.linalg.inv(transform).dot(
q_matrix_from_camera(targ_pb.camera))
return np.array(uvds), world_t_uvd, np.array(visible)
# NOTE: xyzs are NOT world coords. xy are percent image coords, z is depth (m).
def get_contents_pb(targ_pb):
uvds, xyzs, visible = get_keypoints(targ_pb)
cam = targ_pb.camera
transform = get_transform(targ_pb.transform)
if transform is None:
return cam, None, None, uvds, xyzs, visible, transform
uvd_t_world = p_matrix_from_camera(cam).dot(transform)
world_t_uvd = np.linalg.inv(transform).dot(q_matrix_from_camera(cam))
return cam, uvd_t_world, world_t_uvd, uvds, xyzs, visible, transform
# Reading text-formatted protobuf files
def read_from_text_file(path, proto):
with open(path, 'r') as file:
text_format.Parse(file.read(), proto)
return proto
def read_target_pb(path):
target_pb = pb.KeyTargets()
read_from_text_file(path, target_pb)
return target_pb
def read_keys_pb(path):
target_pb = pb.KeyTargets()
read_from_text_file(path, target_pb)
return target_pb.kp_target
# Returns a dict of the filename strings in a TfSet.
def read_tfset(path):
data_pb = pb.TfSet()
read_from_text_file(path, data_pb)
return data_pb
def make_tfset(train_names, val_names, name):
tfset_pb = pb.TfSet()
tfset_pb.train[:] = train_names
tfset_pb.val[:] = val_names
tfset_pb.name = name
return tfset_pb
# Read the contents of a target protobuf.
def read_contents_pb(path):
return get_contents_pb(read_target_pb(path).kp_target)
# Make a DataParams protobuf.
def make_data_params(resx, resy, num_kp, cam):
data_pb = pb.DataParams()
data_pb.camera.CopyFrom(cam)
data_pb.resx = resx
data_pb.resy = resy
data_pb.num_kp = num_kp
return data_pb
# Read contents of a camera protobuf.
def read_data_params(path):
data_pb = pb.DataParams()
read_from_text_file(path, data_pb)
cam = data_pb.camera
return data_pb.resx, data_pb.resy, data_pb.num_kp, cam
def write_to_text_file(path, proto):
with open(path, 'w') as file:
file.write(str(proto))
def project_np(mat, vec):
"""Projects homogeneous 3D XYZ coordinates to image uvd coordinates."""
# <vec> has shape [4, N].
# <mat> has shape [4, 4]
# Return has shape [4, N].
p = mat.dot(vec)
# Using <3:4> instead of <3> preserves shape.
p = p / (p[3:4, :] + 1.0e-10)
return p
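# Usage sketch (illustrative camera values, not from the original module):
# with fx == fy the P and Q matrices are projective inverses, so a
# (u, v, d, 1) point projected through Q and back through P is recovered
# after homogeneous normalization.
if __name__ == "__main__":
    cam_pb = cam_array_to_pb([600.0, 600.0, 320.0, 240.0, 0.1, 640, 480])
    pmat = p_matrix_from_camera(cam_pb)
    qmat = q_matrix_from_camera(cam_pb)
    uvd = np.array([[400.0], [300.0], [50.0], [1.0]])
    xyz = project_np(qmat, uvd)  # pixel + disparity -> homogeneous 3D point
    assert np.allclose(project_np(pmat, xyz), uvd)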
# Maximum number of projection frames for losses.
MAX_TARGET_FRAMES = 5
# Standard image input parameters for the model.
DEFAULT_PARAMS = """
batch_size: 32
dset_dir: ''
model_dir: ''
steps: 80000
model_params:
model_params:
use_regress: True # Use regression or integration.
batchnorm: [0.999, 1.0e-8, False]
num_filters: 48 # Number of filters across DCNN.
filter_size: 3
max_dilation: 32
dilation_rep: 1
dropout: 0.0
disp_default: 30
sym: [0]
input_sym: [0] # Mix up symmetric ground truth values.
use_stereo: true
visible: true # Use only samples with visible keypoints.
crop: [180, 120, 30] # Crop patch size, WxH, plus disp offset.
dither: 20.0 # Amount to dither the crop, in pixels.
loss_kp: 1.0
loss_kp_step: [0, 200]
loss_prob: 0.001
loss_proj: 2.5
loss_proj_step: [10000, 20000]
loss_reg: 0.01
loss_dispmap: 1.0
loss_dispmap_step: [5000, 10000]
noise: 4.0
occ_fraction: 0.2
kp_occ_radius: 0.0 # Radius for occlusion for real-world keypoints.
blur: [1.0, 4.0] # Min, max in pixels.
motion: [0, 0, 0, 0] # Motion blur, min/max in pixels, min/max in angle (deg).
gamma: [0.8, 1.2]
rot: [0.0, 0.0, 0.0] # In degrees, [X-axis, Y-axis, Z-axis]
# Homography parameters [X-axis, Y-axis]
shear: [] # Use X-axis only for stereo [10,0].
scale: [] # [min, max] applied only on the Y axis.
flip: [] # Use Y-axis only for stereo [False,True].
"""
def get_params(param_file=None,
cam_file=None,
cam_image_file=None,
defaults=DEFAULT_PARAMS):
"""Returns default or overridden user-specified parameters, and cam params."""
# param_file points to a yaml string.
# cam_file points to a camera pbtxt.
# cam_image_file points to a camera pbtxt.
params = ConfigParams(defaults)
if param_file:
params.merge_yaml(param_file)
mparams = params.model_params
cam = None
if cam_file:
print('Model camera params file name is: %s' % cam_file)
resx, resy, num_kp, cam = read_data_params(cam_file)
mparams.resx = resx
mparams.resy = resy
mparams.num_kp = num_kp
mparams.modelx = resx
mparams.modely = resy
mparams.disp_default /= mparams.resx # Convert to fraction of image.
if mparams.crop:
mparams.modelx = int(mparams.crop[0])
mparams.modely = int(mparams.crop[1])
cam_image = None
if cam_image_file:
print('Image camera params file name is: %s' % cam_image_file)
_, _, _, cam_image = read_data_params(cam_image_file)
print('MParams:', mparams.make_dict())
return params, cam, cam_image
# General configuration class for referencing parameters using dot notation.
class ConfigParams:
"""General configuration class for referencing params using dot notation."""
def __init__(self, init=None):
if init:
self.merge_yaml(init)
def make_dict(self):
ret = {}
for k in self.__dict__:
val = self.__dict__[k]
if isinstance(val, self.__class__):
ret[k] = val.make_dict()
else:
ret[k] = val
return ret
# No nesting.
def make_shallow_dict(self):
ret = {}
for k in self.__dict__:
val = self.__dict__[k]
if not isinstance(val, self.__class__):
ret[k] = val
return ret
def read_yaml(self, fname):
with open(fname, 'r') as f:
ret = self.merge_yaml(f)
return ret
def write_yaml(self, fname):
with open(fname, 'w') as f:
yaml.dump(self.make_dict(), f, default_flow_style=None)
def merge_dict(self, p_dict):
"""Merges a dictionary into this params."""
for k in p_dict:
val = p_dict[k]
if k == 'default_file':
print('Default file:', val)
fname = os.path.join(KEYPOSE_PATH, val + '.yaml')
print('Default file fname:', fname)
self.read_yaml(fname)
continue
if isinstance(val, dict):
if getattr(self, k, None):
sub_params = getattr(self, k)
sub_params.merge_dict(val)
else:
sub_params = self.__class__()
sub_params.merge_dict(val)
setattr(self, k, sub_params)
else:
setattr(self, k, val)
def merge_yaml(self, yaml_str):
try:
ret = yaml.safe_load(yaml_str)
except yaml.YAMLError as exc:
print('Error in loading yaml string')
print(exc)
return False
self.merge_dict(ret)
return True
# Takes a string of the form 'a=1,b=2,c=[1,2],d=abcd', etc.
# No nesting.
def merge_str(self, p_str):
items = p_str.split('=')
assert len(items) >= 2
# pylint: disable=g-complex-comprehension
items = items[:1] + [
item for v in items[1:-1] for item in v.rsplit(',', 1)
] + items[-1:]
y_list = [items[i] + ': ' + items[i + 1] for i in range(0, len(items), 2)]
y_str = '\n'.join(y_list)
self.merge_yaml(y_str)
# Returns a string suitable for reading back in. Does not allow
# nested ConfigParams.
def __repr__(self):
# Checks float repr for not having a decimal point in scientific notation.
# Checks for None and returns empty string.
def yaml_str(obj):
if isinstance(obj, type(None)):
return ''
if isinstance(obj, list):
return '[' + ','.join([yaml_str(x) for x in obj]) + ']'
ret = str(obj)
if isinstance(obj, float):
if 'e' in ret and '.' not in ret:
return '.0e'.join(ret.split('e'))
return ret
ivars = self.make_shallow_dict()
s = ','.join([k + '=' + yaml_str(ivars[k]) for k in ivars])
return s.replace(' ', '')
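# Usage sketch (illustrative keys, not from the original module): parameters
# can be seeded from a yaml string and then overridden with the compact
# 'key=value,key=value' form accepted by merge_str.
if __name__ == "__main__":
    params = ConfigParams('batch_size: 32\nmodel_params:\n  num_filters: 48')
    params.merge_str('batch_size=16,crop=[180,120,30]')
    assert params.batch_size == 16
    assert params.model_params.num_filters == 48
    assert params.crop == [180, 120, 30]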
# uint8 image [0,255] to float [0,1]
def image_uint8_to_float(image):
if image.dtype == np.float32:
return image
image = image.astype(np.float32) * (1.0 / 255.0)
image_np = np.clip(image, 0.0, 1.0)
return image_np
def resize_image(image, cam, cam_image, targs_pb):
"""Resize an image using scaling and cropping; changes kps_pb to correspond."""
resx, resy = int(cam_image.resx), int(cam_image.resy)
nxs, nys = int(cam.resx), int(cam.resy)
fx = cam_image.fx
nfx = cam.fx
scale = fx / nfx
crop = resy - nys * scale, resx - nxs * scale
cropx, cropy = 0, 0
if crop[0] > 1.5:
cropy = int(round(crop[0] * 0.5))
if crop[1] > 1.5:
cropx = int(round(crop[1] * 0.5))
# Resize image.
image = image[cropy:resy - cropy, cropx:resx - cropx, :]
image = resize(image, (nys, nxs), mode='constant') # Converts to float.
image = image.astype(np.float32)
def scale_cam(cam_pb):
cam_pb.fx /= scale
cam_pb.fy /= scale
cam_pb.cx = (cam_pb.cx - cropx) / scale
cam_pb.cy = (cam_pb.cy - cropy) / scale
cam_pb.resx = nxs
cam_pb.resy = nys
def resize_target(targ_pb):
scale_cam(targ_pb.camera)
for kp in targ_pb.keypoints:
kp.u = (kp.u - cropx) / scale
kp.v = (kp.v - cropy) / scale
kp.x = kp.u / nxs
kp.y = kp.v / nys
if kp.u < 0 or kp.u >= nxs or kp.v < 0 or kp.v >= nys:
kp.visible = 0.0
resize_target(targs_pb.kp_target)
for targ_pb in targs_pb.proj_targets:
resize_target(targ_pb)
return image
def rotation_transform(x_rot, y_rot, z_rot):
"""Creates a rotation transform with rotations around the three axes.
Args:
x_rot: rotate around X axis (degrees).
y_rot: rotate around Y axis (degrees).
z_rot: rotate around Z axis (degrees).
Returns:
4x4 transform.
"""
x_rot = np.pi * x_rot / 180.0
xt = np.array([[1.0, 0, 0, 0], [0, np.cos(x_rot), -np.sin(x_rot), 0.0],
[0, np.sin(x_rot), np.cos(x_rot), 0.0], [0, 0, 0, 1]])
y_rot = np.pi * y_rot / 180.0
yt = np.array([[np.cos(y_rot), 0, np.sin(y_rot), 0], [0, 1, 0, 0],
[-np.sin(y_rot), 0, np.cos(y_rot), 0], [0, 0, 0, 1]])
z_rot = np.pi * z_rot / 180.0
zt = np.array([[np.cos(z_rot), -np.sin(z_rot), 0, 0],
[np.sin(z_rot), np.cos(z_rot), 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]])
return xt.dot(yt).dot(zt)
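# Minimal sanity check (added illustration, not part of the original file):
# any rotation_transform result should be orthonormal with determinant +1.
def _demo_rotation_transform():
  rot = rotation_transform(10.0, -20.0, 30.0)
  assert np.allclose(rot[:3, :3].dot(rot[:3, :3].T), np.eye(3), atol=1e-6)
  assert np.isclose(np.linalg.det(rot[:3, :3]), 1.0)
  return rot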
# rotation of the camera
def rotate_camera(rotation, image, camera, transform, key_pts):
"""Rotates a camera around its optical axis.
Args:
rotation: 4x4 rotation transform, c'_R_c
image: Camera image, h x w x 4 (or 3).
camera: 7-element camera parameters (fx, fy, cx, cy, baseline, resx, resy)
transform: 4x4 transform matrix, w_T_c (camera-to-world).
key_pts: Nx4 u,v,d,w image keypoints, in pixel coordinates.
Returns:
Rotated image, zero-filled
Updated transform w_T_c' = w_T_c * P * c_R_c' * Q
updated keypoints u,v,d,w, Nx4
visibility vector for keypoint, N
Keypoints are converted by P * c'_R_c * Q
"""
def in_bounds(pt, bounds):
if (pt[0] >= 0 and pt[0] <= bounds[0] and pt[1] >= 0 and
pt[1] <= bounds[1]):
return 1.0
else:
return 0.0
cam_pb = cam_array_to_pb(camera)
pmat = p_matrix_from_camera(cam_pb)
qmat = q_matrix_from_camera(cam_pb)
tp = np.dot(transform, np.dot(pmat, np.dot(np.linalg.inv(rotation), qmat)))
key_pts_p = project_np(
np.dot(pmat, np.dot(rotation, qmat)), np.transpose(key_pts))
kmat = k_matrix_from_camera(cam_pb)
hmat = kmat.dot(rotation[:3, :3].transpose()).dot(np.linalg.inv(kmat))
image_np = np.clip(image, 0.0, 0.9999)
warped = warp(image_np, ProjectiveTransform(matrix=hmat))
visible = np.array([
in_bounds(key_pts_p[:2, i], camera[5:7]) for i in range(key_pts.shape[0])
])
return warped, tp, key_pts_p.transpose(), visible
def warp_homography(res, scale, shear, flip):
"""Returns a homography for image scaling, shear and flip.
Args:
res: resolution of the image, [x_res, y_res].
scale: scale factor [x_scale, y_scale].
shear: shear in [x_deg, y_deg].
flip: boolean [x_flip, y_flip].
"""
center_mat = np.eye(3)
center_mat[0, 2] = -res[0] / 2.0
center_mat[1, 2] = -res[1] / 2.0
cmat_inv = np.linalg.inv(center_mat)
flip_mat = np.eye(3)
if flip[0]:
flip_mat[0, 0] = -1
if flip[1]:
flip_mat[1, 1] = -1
shear_mat = np.eye(3)
shear_mat[0, 1] = math.tan(math.radians(shear[0]))
shear_mat[1, 0] = math.tan(math.radians(shear[1]))
scale_mat = np.eye(3)
scale_mat[0, 0] = scale[0]
scale_mat[1, 1] = scale[1]
return cmat_inv.dot(scale_mat.dot(shear_mat.dot(flip_mat.dot(center_mat))))
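# Quick check (added illustration): with unit scale, zero shear and no flips,
# warp_homography reduces to the identity, so warping is a no-op.
def _demo_warp_homography_identity():
  hom = warp_homography([640, 480], [1.0, 1.0], [0.0, 0.0], [False, False])
  assert np.allclose(hom, np.eye(3))
  return hom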
def do_rotation(image, image2, transform, camera, key_pts, visible, rotation):
"""Add a random rotation about the camera centerpoint.
Args:
image: HxWx4 image, left image if stereo; can be uint8 or float.
image2: HxWx4 image, right image if stereo, or None
transform: 4x4 to_world transform.
camera: 7-element camera parameters.
key_pts: Nx4 uvdw keypoints.
visible: Visibility predicate for keypoints.
rotation: Rotation as 3-tuple, XYZ axes.
Returns:
image: Warped by random rotation, float32.
transform: Updated to_world transform.
key_pts: updated uvdw keypoints.
visible: visibility vector for the keypoints.
"""
image = image_uint8_to_float(image)
image2 = image_uint8_to_float(image2)
area, _ = get_area(image)
if area < 10: # Something is wrong here.
return image, image2, transform, key_pts, visible
rotation = (float(rotation[0]), float(rotation[1]), float(rotation[2]))
while True:
rot = rotation_transform(
random.uniform(-rotation[0], rotation[0]),
random.uniform(-rotation[1], rotation[1]),
random.uniform(-rotation[2], rotation[2]))
image_p, transform_p, key_pts_p, visible_p = rotate_camera(
rot, image, camera, transform, key_pts)
area_p, _ = get_area(image_p)  # Area of the rotated image, not the original.
if float(area_p) / area > 0.6:
if image2 is not None:
image2_p, _, _, _ = rotate_camera(rot, image2, camera, transform,
key_pts)
else:
image2_p = image_p
break
# Warp function converts images to float64, this converts back.
return (image_p.astype(np.float32), image2_p.astype(np.float32),
transform_p.astype(np.float32), key_pts_p.astype(np.float32),
visible_p.astype(np.float32))
def do_2d_homography(image, image2, scale, shear, flip, mirrored, split):
"""Add random 2D transforms to input images.
Images are warped according to the 2D transforms of scaling,
shear and flip. The 2D homogenous transform inverse is returned,
so that keypoints can be adjusted after they are predicted.
Transforms that preserve horizontal epipolar lines are vertical flip,
X-axis shear, mirroring, and scaling.
TODO: visibility analysis.
Args:
image: HxWx4 image, left image if stereo; can be uint8 or float.
image2: HxWx4 image, right image if stereo, or None
scale: floating point bounds, uniform random scale, e.g., [0.8, 1.2].
shear: x,y shear max bounds for uniform random shear, in degrees.
flip: [True, True] if images are randomly flipped horizontal, vertical.
mirrored: True if images are mirrored.
split: train / eval split.
Returns:
image: Warped by random transform, float32.
image2: Warped by same random transform, float32.
homography: 3x3 homography matrix for the warp.
"""
if (not mirrored) and not (split == 'train' and (scale or shear or flip)):
return image, image2, np.eye(3, dtype=np.float32)
if not scale:
scale = [1.0, 1.0]
if not shear:
shear = [0.0, 0.0]
if not flip:
flip = [False, False]
image = image_uint8_to_float(image)
image2 = image_uint8_to_float(image2)
if mirrored:
flip = [False, True]
else:
flip = [random.choice([False, flip[0]]), random.choice([False, flip[1]])]
hom = warp_homography((image.shape[1], image.shape[0]), [
1.0, random.uniform(scale[0], scale[1])
], [random.uniform(-shear[0], shear[0]),
random.uniform(-shear[1], shear[1])], flip)
if np.allclose(hom, np.eye(3)):
return image, image2, np.eye(3, dtype=np.float32)
hom_inv = np.linalg.inv(hom)
image_p = warp_2d(image, hom_inv)
image2_p = warp_2d(image2, hom_inv)
# Warp function converts images to float64, this converts back.
return (image_p.astype(np.float32), image2_p.astype(np.float32),
hom_inv.astype(np.float32))
def warp_2d(image, hom):
image_np = np.clip(image, 0.0, 0.9999)
warped = warp(image_np, ProjectiveTransform(matrix=hom))
return warped
# Returns a 2D gaussian centered on mean (pix coords, float) with
# variance var (pixels, float).
def gauss2d(mean, var, size):
x = np.arange(0, size[0])
y = np.arange(0, size[1])
x, y = np.meshgrid(x, y)
mx, my = mean
vx, vy = var
return np.float32(1. / (2. * np.pi * vx * vy) *
np.exp(-((x - mx)**2. / (2. * vx**2.) + (y - my)**2. /
(2. * vy**2.))))
# Normalize so the peak is 1.0.
def norm_gauss2d(mean, var, size):
g2d = gauss2d(mean, var, size)
m = np.max(g2d)
if m <= 0.0:
return g2d
return g2d * (1.0 / np.max(g2d))
# Make an inverse gaussian for exclusion of a prob field.
def inv_gauss(mean, var, size):
g = gauss2d(mean, var, size)
m = np.max(g)
g_inv = (m - g) * (1.0 / m)
return g_inv
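# Illustration (added, not in the original file): norm_gauss2d peaks at 1.0 at
# the mean, while inv_gauss is 0.0 there, an exclusion mask around a keypoint.
def _demo_gaussians():
  g = norm_gauss2d((8.0, 8.0), (2.0, 2.0), (16, 16))
  g_inv = inv_gauss((8.0, 8.0), (2.0, 2.0), (16, 16))
  assert np.isclose(g[8, 8], 1.0) and np.isclose(g_inv[8, 8], 0.0)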
def project_uvd(mat, uvd, offset):
uvw = np.array([uvd[0] - offset[0], uvd[1] - offset[1], 1.0])
uvwt = mat.dot(uvw)
return uvwt[:2] / (uvwt[2] + 1e-10)
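# Worked example (added): with an identity homography and zero offset,
# project_uvd returns the input pixel coordinates unchanged.
def _demo_project_uvd():
  uv = project_uvd(np.eye(3), np.array([100.0, 50.0, 1.2]), (0.0, 0.0))
  assert np.allclose(uv, [100.0, 50.0])
  return uv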
def do_vertical_flip(image, image2, mirrored):
"""Flip image vertically.
The 2D homogenous transform inverse is returned,
so that keypoints can be adjusted after they are predicted.
Args:
image: HxWx4 image, left image if stereo; can be uint8 or float.
image2: HxWx4 image, right image if stereo, or None.
mirrored: True if image is mirrored.
Returns:
image: flipped vertically.
image2: flipped vertically.
homography: 3x3 homography matrix for vertical flip.
"""
if not mirrored:
return image, image2, np.eye(3, dtype=np.float32)
image = image_uint8_to_float(image)
image2 = image_uint8_to_float(image2)
image_p = np.flipud(image)
image2_p = np.flipud(image2)
hom = warp_homography(
(image.shape[1], image.shape[0]),
[1.0, 1.0], # Scale (none).
[0.0, 0.0], # Shear (none).
[False, True])
return image_p, image2_p, np.linalg.inv(hom).astype(np.float32)
# Returns gaussian 2Ds with shape [num_kpts, h, w].
# keys_uvd has shape [num_kps, 4]
def do_spatial_prob(keys_uvd, hom, offset, var, size):
hom_inv = np.linalg.inv(hom)
uvs = [project_uvd(hom_inv, uvd, offset) for uvd in keys_uvd]
probs = [inv_gauss(uv, [var, var], size) for uv in uvs]
return np.array(probs, dtype=np.float32)
# Largest fraction of object that can be occluded.
def do_occlude(image, image2, occ_fraction=0.0):
"""Add an elliptical occlusion to the RGBA image.
Args:
image: RGBA images with A channel @ 255 for valid object.
image2: RGBA images, right stereo.
occ_fraction: fraction of image to be occluded.
Returns:
Modified image.
Modifies image in place.
"""
area, inds = get_area(image)
if area < 50:
return image, image2
radius = 2.0 * np.sqrt(area / np.pi) * occ_fraction
for _ in range(0, 1):
i = random.randint(0, area - 1)
ind = (inds[0][i], inds[1][i])
rr, cc = ellipse(
ind[0],
ind[1],
random.uniform(1.0, radius),
random.uniform(1.0, radius),
shape=image.shape[0:2],
rotation=random.uniform(-1.0, 1.0) * np.pi)
image[rr, cc, 3] = 0
image2[rr, cc, 3] = 0
return image, image2
def do_motion_blur(image, distance, angle):
"""Random fake motion blur by compositing in the x,y plane of the image.
Args:
image: tensor of images, floating point [0,1], 3 or 4 channels.
distance: how far to move, in pixels <min, max>.
angle: how much to rotate, in degrees <min, max>.
Returns:
Updated image with motion blur.
"""
if not distance[1]: # No blur.
return image
dist = random.uniform(*distance) * 0.5
ang = math.radians(random.uniform(*angle))
x, y = dist * math.cos(ang), dist * math.sin(ang)
rows, cols = image.shape[:2]
im = np.array([[1, 0, x], [0, 1, y]])
im1 = cv2.warpAffine(image, im, (cols, rows))
im = np.array([[1, 0, 2 * x], [0, 1, 2 * y]])
im2 = cv2.warpAffine(image, im, (cols, rows))
im = np.array([[1, 0, -x], [0, 1, -y]])
im3 = cv2.warpAffine(image, im, (cols, rows))
im = np.array([[1, 0, -2 * x], [0, 1, -2 * y]])
im4 = cv2.warpAffine(image, im, (cols, rows))
im = (image + im1 + im2 + im3 + im4) * 0.2
np.clip(im, 0.0, 1.0, im)
return im
def do_composite(image, bg_fname, sigma, motion, noise, gamma):
"""Composite a background image onto the foreground.
Args:
image: original image, floating point [0,1].
bg_fname: background image file name, None or empty string if none.
sigma: blur in pixels; single value or range.
motion: 4-tuple <min_pix_move, max_pix_move, min_deg_angle, max_deg angle>.
noise: pixel noise in the range 0-255, either single value or range.
gamma: gamma correction to be applied to image, 0 for none.
Returns:
Updated image.
"""
def make_random(x):
# Arg x can be list, tuple, numpy.ndarray
if isinstance(x, list) or isinstance(x, tuple):
x = np.array(x)
assert isinstance(
x, np.ndarray), 'Argument to do_composite must be list or array'
if x.size == 0:
return None
elif x.size == 1:
return x[0]
else:
return random.uniform(x[0], x[1])
if motion[1]:
image = do_motion_blur(image, motion[:2], motion[2:])
ys, xs, _ = image.shape
if bg_fname:
scene = read_image(bg_fname) * (1.0 / 255.0)
if random.choice([True, False]):
scene = np.flipud(scene)
if random.choice([True, False]):
scene = np.fliplr(scene)
yss, xss = scene.shape[0], scene.shape[1]
assert yss > ys, 'Background image must be larger than training image'
assert xss > xs, 'Background image must be larger than training image'
# Adjust gamma of image.
gamma = make_random(gamma)
if gamma is not None:
image[:, :, :3] = adjust_gamma(image[:, :, :3], gamma)
# Add noise to object.
noise = make_random(noise)
if noise is not None:
image[:, :, :3] += np.random.randn(ys, xs, 3) * noise * 0.5 / 255.
np.clip(image, 0.0, 1.0, image)
# Cut out ellipse where image alpha is 0.
if bg_fname:
ul_y = random.randint(0, yss - ys - 1)
ul_x = random.randint(0, xss - xs - 1)
scene_crop = scene[ul_y:ul_y + ys, ul_x:ul_x + xs, :]
mask = image[:, :, 3]
rgbmask = np.stack([mask, mask, mask], axis=2)
image[:, :, :3] = scene_crop * (1.0 - rgbmask) + image[:, :, :3] * rgbmask
else:
image[image[:, :, 3] == 0, 0:3] = 0.5
# Add gaussian blur and noise.
sigma = make_random(sigma)
im = np.copy(image[:, :, :3]) # Need copy to preserve float32
gaussian(image[:, :, :3], sigma, multichannel=True, output=im)
# Add noise to whole scene, after blur.
if noise is not None:
im[:, :, :3] += np.random.randn(ys, xs, 3) * noise * 0.5 / 255.
np.clip(im, 0.0, 1.0, im)
return im
# Returns area in pixels.
# Works for both uint8 and float32 images.
def get_area(image):
inds = np.nonzero(image[:, :, 3] > 0)
area = inds[0].shape[0]
return area, inds
def do_occlude_crop(image,
image2,
key_pts,
key_pts_r,
crop,
visible,
dither,
var_offset=False):
"""Crop area around the object.
Crop is [W, H, R], where R is the right-disparity offset; or else [].
Images can be either floating-point or uint8.
Args:
image: left image.
image2: right image.
key_pts: left keypoints.
key_pts_r: right keypoints.
crop: crop is [W, H, R], where R is the right-disparity offset; or else [].
visible: visibility status of keypoints, modified by function.
dither: amount to dither the crop.
var_offset: vary the offset between left and right images.
Returns:
image: cropped left image.
image2: cropped right image.
offset: offset of the crop in the original image.
visible: visibility status of the keypoints.
"""
offset = np.array([0, 0, 0], dtype=np.float32)
crop = np.array(crop)
if crop.size == 0:
return image, image2, offset, visible
nxs, nys = crop[0], crop[1]
def do_crop(im, left_x, top_y, margin=10.0):
y, x, _ = im.shape
x -= margin
y -= margin
right_x = left_x + nxs
bot_y = top_y + nys
if (left_x < margin or left_x > x or right_x < margin or right_x > x or
top_y < margin or top_y > y or bot_y < margin or bot_y > y):
visible[:] = 0.0
return im[0:nys, 0:nxs, :]
return im[top_y:bot_y, left_x:right_x, :]
centroid = np.mean(key_pts, axis=0)[0:2]
centroid += np.random.uniform(low=-dither, high=dither, size=(2))
off_x = int(centroid[0] - nxs / 2 - (nxs - nys) / 2)
off_y = int(centroid[1] - nys / 2)
image = do_crop(image, off_x, off_y)
off_d = crop[2]
if var_offset:
off_d = int(centroid[0] - np.mean(key_pts_r, axis=0)[0])
image2 = do_crop(image2, off_x - off_d, off_y)
offset = np.array([off_x, off_y, off_d], dtype=np.float32)
return image, image2, offset, visible
# Meshing functions.
def farthest_point_sampling(point_set, k):
"""Find the k most spread-out poses of a set of poses; translation only."""
num_pts = point_set.shape[0]
start_idx = np.random.randint(num_pts)
existing_set = np.expand_dims(point_set[start_idx], axis=0)
rest_set = np.copy(point_set)
rest_set = np.delete(rest_set, start_idx, 0)  # np.delete returns a copy.
existing_indices = [start_idx]
rest_indices = np.arange(num_pts)
rest_indices = np.delete(rest_indices, start_idx)
for _ in range(k - 1):
dist = (
np.sum(np.square(existing_set), axis=1, keepdims=True) +
np.sum(np.square(rest_set.T), axis=0, keepdims=True) -
np.dot(existing_set, rest_set.T) * 2)
min_dist = dist.min(axis=0)
max_idx = min_dist.argmax()
existing_set = np.concatenate(
[existing_set, np.expand_dims(rest_set[max_idx], axis=0)], axis=0)
existing_indices.append(rest_indices[max_idx])
rest_set = np.delete(rest_set, max_idx, 0)
rest_indices = np.delete(rest_indices, max_idx)
return existing_set, existing_indices
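# Usage sketch (added illustration): pick 3 well-spread points from a cloud.
def _demo_farthest_point_sampling():
  pts = np.random.rand(100, 3)
  subset, indices = farthest_point_sampling(pts, 3)
  assert subset.shape == (3, 3) and len(indices) == 3
  return subset, indices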
# Class for holding a CAD object and its keypoints.
class MeshObj:
"""Class for holding a CAD object and its keypoints."""
def __init__(self):
self.keypoints = np.array([])
self.vertices = np.array([])
self.large_points = True
def read_obj(self, path, num=300):
"""Read in a .obj file, parse into vertices and keypoints."""
# Vertices are in label "o mesh".
# Keypoints are in label "o kp.NNN".
# Just read vertices, not other elements of the .obj file.
kps = {}
with open(path, 'r') as f:
line = f.readline()
while True:
if not line:
break
if len(line) > 2 and line[:2] == 'o ':
# New mesh.
name = line[2:-1]
vertices, line = self._read_sub_obj(f)
if 'mesh' in name:
self.vertices = vertices
if 'kp' in name: # Keypoint object.
kp_num = int(name.split('.')[1])
kp_val = np.mean(vertices, axis=0)
kps[kp_num] = kp_val
else:
line = f.readline()
keypoints = [kps[i] for i in range(len(kps))]
if not keypoints:
print('Did not find any keypoints')
return
self.keypoints = np.array(keypoints)
self.keypoints = np.concatenate(
[self.keypoints, np.ones((self.keypoints.shape[0], 1))], axis=-1)
self.xyzw = np.concatenate(
[self.vertices, np.ones((self.vertices.shape[0], 1))], axis=-1)
self.make_reduced(num)
def make_reduced(self, num):
self._make_colored_subset(num)
self.xyzw_reduced = np.concatenate(
[self.vertices_reduced,
np.ones((self.vertices_reduced.shape[0], 1))],
axis=-1)
def _read_sub_obj(self, f):
"""Read in all the vertices."""
# First read to the beginning of vertices.
line = ''
vertices = []
while True:
line = f.readline()
if not line:
return np.array(vertices), line  # EOF before a vertex line: return what we have.
if 'v ' in line:
break
# Now process all vertices.
while True:
elems = line[:-1].split(' ')
if elems[0] != 'v':
break
vertices.append(np.array([float(x) for x in elems[1:]]))
line = f.readline()
if not line:
break
return np.array(vertices), line
# Pick a subset of colored points for display.
def _make_colored_subset(self, num):
self.vertices_reduced, _ = farthest_point_sampling(self.vertices, num)
colors = plt.cm.get_cmap('spring')
zs = self.vertices_reduced[:, 2]
self.reduced_colors = (255 *
colors(np.interp(zs, (zs.min(), zs.max()),
(0, 1)))[:, :3]).astype(np.uint8)
def project_to_uvd(self, xyzs, p_matrix):
"""Does a transform from CAD coords to kps coords, then projects to uvd."""
kps_t_mesh = ortho_procrustes(self.keypoints, xyzs.T[:, :3])
uvd_t_mesh = p_matrix.dot(kps_t_mesh)
self.uvds = project_np(uvd_t_mesh, self.xyzw.T).T
self.uvds_reduced = project_np(uvd_t_mesh, self.xyzw_reduced.T).T
def draw_points(self, image, offsets=(0, 0)):
"""Draws u,v points on an image as circles."""
for i, pt in enumerate(self.uvds_reduced):
u = int(pt[0] - offsets[0])
v = int(pt[1] - offsets[1])
if u < 0 or u >= image.shape[1] or v < 0 or v >= image.shape[0]:
continue
image[v, u, :] = self.reduced_colors[i, :]
if self.large_points:
if u > 0 and v > 0 and u < image.shape[1] - 1 and v < image.shape[0] - 1:
for ui in range(-1, 2):
for vi in range(-1, 2):
image[v + vi, u + ui, :] = self.reduced_colors[i, :]
return image
def segmentation(self, size):
"""Create segmentation image using morphology operations."""
mask = np.zeros(size)
for pt in self.uvds:
u = int(pt[0])
v = int(pt[1])
if u < 0 or u >= size[1] or v < 0 or v >= size[0]:
continue
mask[v, u] = 255
kernel = np.ones((3, 3), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) # Converts to float.
mask_dilated = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)
border = mask_dilated - mask
return mask.astype(np.uint8), border.astype(np.uint8)
# Orthogonal procrustes method for two sets of 3D points.
# Also does the initial translation to centroid.
# Should work in degenerate cases: 1 or 2 points.
def ortho_procrustes(p_c, p_s):
"""Return R,t of the best estimate transform on point clouds p_c and p_s."""
# No scaling. Transform is from p_c to p_s, i.e., T * p_c ~= p_s.
# Format of args is a numpy array, nx3 or nx4, each row a 3D point.
p_c = p_c[:, :3]
p_s = p_s[:, :3]
cm = np.mean(p_c, 0)
pcn = p_c - cm # get mean of each dimension, subtract
sm = np.mean(p_s, 0)
psn = p_s - sm
a_mat = psn.transpose().dot(pcn)
u, _, vt = np.linalg.svd(a_mat)
dd = np.eye(3)
dd[2, 2] = np.linalg.det(u.dot(vt))
rot = u.dot(dd.dot(vt)) # Should check for orthogonality.
t = sm - rot.dot(cm)
tfm = np.eye(4)
tfm[0:3, 0:3] = rot
tfm[0:3, 3] = t
return tfm
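# Verification sketch (added, not in the original file): ortho_procrustes
# should recover a known rigid transform applied to a point cloud.
def _demo_ortho_procrustes():
  src = np.random.rand(10, 3)
  rot = rotation_transform(15.0, -5.0, 40.0)[:3, :3]
  t = np.array([0.5, -1.0, 2.0])
  dst = src.dot(rot.T) + t  # Apply rot and t to every point.
  tfm = ortho_procrustes(src, dst)
  assert np.allclose(tfm[:3, :3], rot, atol=1e-6)
  assert np.allclose(tfm[:3, 3], t, atol=1e-6)
  return tfm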
# Read in CAD model from a .obj file.
# <path> is a file path from the data_tools/objects/ directory.
# <num> is the number of points to use in the reduced set.
def read_mesh(path, num=300):
obj = MeshObj()
print('Reading obj file %s' % path)
obj.read_obj(path, num)
print('Obj file has %d vertices and %d keypoints' %
(len(obj.vertices), len(obj.keypoints)))
return obj
# Error functions.
def project(tmat, tvec, tvec_transpose=False):
"""Projects homogeneous 3D XYZ coordinates to image uvd coordinates."""
# <tvec> has shape [[N,] batch_size, 4, num_kp] or [batch_size, num_kp, 4].
# <tmat> has shape [[N,] batch_size, 4, 4]
# Return has shape [[N,] batch_size, 4, num_kp]
tp = tf.matmul(tmat, tvec, transpose_b=tvec_transpose)
# Using <3:4> instead of <3> preserves shape.
tp = tp / (tp[Ellipsis, 3:4, :] + 1.0e-10)
return tp
def world_error(labels, xyzw):
xyzw = tf.transpose(xyzw, [0, 2, 1]) # [batch, 4, num_kp]
# [batch, 4, num_kp]
gt_world_coords = project(labels['to_world_L'], labels['keys_uvd_L'], True)
sub = xyzw[:, :3, :] - gt_world_coords[:, :3, :]
wd = tf.square(sub)
wd = tf.reduce_sum(wd, axis=[-2]) # [batch, num_kp] result.
wd = tf.sqrt(wd)
return wd # [batch, num_kp]
| google-research/google-research | keypose/utils.py | Python | apache-2.0 | 36,285 | ["Gaussian"] | d1906c16e041b12462d8d15ace96fbfbfa4b0197d116ed104434a3e6c192bba9 |
# #
# Copyright 2009-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Set of file tools.
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Ward Poelmans (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
:author: Sotiris Fragkiskos (NTUA, CERN)
:author: Davide Vanzo (ACCRE, Vanderbilt University)
:author: Damian Alvarez (Forschungszentrum Juelich GmbH)
:author: Maxime Boissonneault (Compute Canada)
"""
import datetime
import difflib
import fileinput
import glob
import hashlib
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import zlib
from xml.etree import ElementTree
from easybuild.base import fancylogger
from easybuild.tools import run
# import build_log must stay, to ensure use of EasyBuildLog
from easybuild.tools.build_log import EasyBuildError, dry_run_msg, print_msg
from easybuild.tools.config import build_option
from easybuild.tools.py2vs3 import std_urllib, string_type
from easybuild.tools.utilities import nub
try:
import requests
HAVE_REQUESTS = True
except ImportError:
HAVE_REQUESTS = False
_log = fancylogger.getLogger('filetools', fname=False)
# easyblock class prefix
EASYBLOCK_CLASS_PREFIX = 'EB_'
# character map for encoding strings
STRING_ENCODING_CHARMAP = {
r' ': "_space_",
r'!': "_exclamation_",
r'"': "_quotation_",
r'#': "_hash_",
r'$': "_dollar_",
r'%': "_percent_",
r'&': "_ampersand_",
r'(': "_leftparen_",
r')': "_rightparen_",
r'*': "_asterisk_",
r'+': "_plus_",
r',': "_comma_",
r'-': "_minus_",
r'.': "_period_",
r'/': "_slash_",
r':': "_colon_",
r';': "_semicolon_",
r'<': "_lessthan_",
r'=': "_equals_",
r'>': "_greaterthan_",
r'?': "_question_",
r'@': "_atsign_",
r'[': "_leftbracket_",
r'\'': "_apostrophe_",
r'\\': "_backslash_",
r']': "_rightbracket_",
r'^': "_circumflex_",
r'_': "_underscore_",
r'`': "_backquote_",
r'{': "_leftcurly_",
r'|': "_verticalbar_",
r'}': "_rightcurly_",
r'~': "_tilde_",
}
CHECKSUM_TYPE_MD5 = 'md5'
CHECKSUM_TYPE_SHA256 = 'sha256'
DEFAULT_CHECKSUM = CHECKSUM_TYPE_MD5
# map of checksum types to checksum functions
CHECKSUM_FUNCTIONS = {
'adler32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.adler32)),
'crc32': lambda p: calc_block_checksum(p, ZlibChecksum(zlib.crc32)),
CHECKSUM_TYPE_MD5: lambda p: calc_block_checksum(p, hashlib.md5()),
'sha1': lambda p: calc_block_checksum(p, hashlib.sha1()),
CHECKSUM_TYPE_SHA256: lambda p: calc_block_checksum(p, hashlib.sha256()),
'sha512': lambda p: calc_block_checksum(p, hashlib.sha512()),
'size': lambda p: os.path.getsize(p),
}
CHECKSUM_TYPES = sorted(CHECKSUM_FUNCTIONS.keys())
EXTRACT_CMDS = {
# gzipped or gzipped tarball
'.gtgz': "tar xzf %(filepath)s",
'.gz': "gunzip -c %(filepath)s > %(target)s",
'.tar.gz': "tar xzf %(filepath)s",
'.tgz': "tar xzf %(filepath)s",
# bzipped or bzipped tarball
'.bz2': "bunzip2 -c %(filepath)s > %(target)s",
'.tar.bz2': "tar xjf %(filepath)s",
'.tb2': "tar xjf %(filepath)s",
'.tbz': "tar xjf %(filepath)s",
'.tbz2': "tar xjf %(filepath)s",
# xzipped or xzipped tarball
'.tar.xz': "unxz %(filepath)s --stdout | tar x",
'.txz': "unxz %(filepath)s --stdout | tar x",
'.xz': "unxz %(filepath)s",
# tarball
'.tar': "tar xf %(filepath)s",
# zip file
'.zip': "unzip -qq %(filepath)s",
# iso file
'.iso': "7z x %(filepath)s",
# tar.Z: using compress (LZW), but can be handled with gzip so use 'z'
'.tar.z': "tar xzf %(filepath)s",
}
class ZlibChecksum(object):
"""
wrapper class for adler32 and crc32 checksums to
match the interface of the hashlib module
"""
def __init__(self, algorithm):
self.algorithm = algorithm
self.checksum = algorithm(b'') # use the same starting point as the module
self.blocksize = 64 # The same as md5/sha1
def update(self, data):
"""Calculates a new checksum using the old one and the new data"""
self.checksum = self.algorithm(data, self.checksum)
def hexdigest(self):
"""Return hex string of the checksum"""
return '0x%x' % (self.checksum & 0xffffffff)  # '%x' renders the value as hex.
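# Illustrative usage (added example, not part of the original module):
# ZlibChecksum mimics the hashlib update()/hexdigest() interface.
def _demo_zlib_checksum():
    checksum = ZlibChecksum(zlib.adler32)
    checksum.update(b'easybuild')
    return checksum.hexdigest()  # hex string, e.g. '0x...'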
def is_readable(path):
"""Return whether file at specified location exists and is readable."""
try:
return os.path.exists(path) and os.access(path, os.R_OK)
except OSError as err:
raise EasyBuildError("Failed to check whether %s is readable: %s", path, err)
def read_file(path, log_error=True, mode='r'):
"""Read contents of file at given path, in a robust way."""
txt = None
try:
with open(path, mode) as handle:
txt = handle.read()
except IOError as err:
if log_error:
raise EasyBuildError("Failed to read %s: %s", path, err)
return txt
def write_file(path, data, append=False, forced=False, backup=False, always_overwrite=True, verbose=False):
"""
Write given contents to file at given path;
overwrites current file contents without backup by default!
:param path: location of file
:param data: contents to write to file
:param append: append to existing file rather than overwrite
:param forced: force actually writing file in (extended) dry run mode
:param backup: back up existing file before overwriting or modifying it
:param always_overwrite: don't require --force to overwrite an existing file
:param verbose: be verbose, i.e. inform where backup file was created
"""
# early exit in 'dry run' mode
if not forced and build_option('extended_dry_run'):
dry_run_msg("file written: %s" % path, silent=build_option('silent'))
return
if os.path.exists(path):
if not append:
if always_overwrite or build_option('force'):
_log.info("Overwriting existing file %s", path)
else:
raise EasyBuildError("File exists, not overwriting it without --force: %s", path)
if backup:
backed_up_fp = back_up_file(path)
_log.info("Existing file %s backed up to %s", path, backed_up_fp)
if verbose:
print_msg("Backup of %s created at %s" % (path, backed_up_fp), silent=build_option('silent'))
# figure out mode to use for open file handle
# cfr. https://docs.python.org/3/library/functions.html#open
mode = 'a' if append else 'w'
# special care must be taken with binary data in Python 3
if sys.version_info[0] >= 3 and isinstance(data, bytes):
mode += 'b'
# note: we can't use try-except-finally, because Python 2.4 doesn't support it as a single block
try:
mkdir(os.path.dirname(path), parents=True)
with open(path, mode) as handle:
handle.write(data)
except IOError as err:
raise EasyBuildError("Failed to write to %s: %s", path, err)
def resolve_path(path):
"""
Return fully resolved path for given path.
:param path: path that (maybe) contains symlinks
"""
try:
resolved_path = os.path.realpath(path)
except (AttributeError, OSError, TypeError) as err:
raise EasyBuildError("Resolving path %s failed: %s", path, err)
return resolved_path
def symlink(source_path, symlink_path, use_abspath_source=True):
"""
Create a symlink at the specified path to the given path.
:param source_path: source file path
:param symlink_path: symlink file path
:param use_abspath_source: resolves the absolute path of source_path
"""
if use_abspath_source:
source_path = os.path.abspath(source_path)
try:
os.symlink(source_path, symlink_path)
_log.info("Symlinked %s to %s", source_path, symlink_path)
except OSError as err:
raise EasyBuildError("Symlinking %s to %s failed: %s", source_path, symlink_path, err)
def remove_file(path):
"""Remove file at specified path."""
# early exit in 'dry run' mode
if build_option('extended_dry_run'):
dry_run_msg("file %s removed" % path, silent=build_option('silent'))
return
try:
# note: file may also be a broken symlink...
if os.path.exists(path) or os.path.islink(path):
os.remove(path)
except OSError as err:
raise EasyBuildError("Failed to remove file %s: %s", path, err)
def remove_dir(path):
"""Remove directory at specified path."""
# early exit in 'dry run' mode
if build_option('extended_dry_run'):
dry_run_msg("directory %s removed" % path, silent=build_option('silent'))
return
try:
if os.path.exists(path):
rmtree2(path)
except OSError as err:
raise EasyBuildError("Failed to remove directory %s: %s", path, err)
def remove(paths):
"""
Remove single file/directory or list of files and directories
:param paths: path(s) to remove
"""
if isinstance(paths, string_type):
paths = [paths]
_log.info("Removing %d files & directories", len(paths))
for path in paths:
if os.path.isfile(path):
remove_file(path)
elif os.path.isdir(path):
remove_dir(path)
else:
raise EasyBuildError("Specified path to remove is not an existing file or directory: %s", path)
def change_dir(path):
"""
Change to directory at specified location.
:param path: location to change to
:return: previous location we were in
"""
# determining the current working directory can fail if we're in a non-existing directory
try:
cwd = os.getcwd()
except OSError as err:
_log.debug("Failed to determine current working directory (but proceeding anyway: %s", err)
cwd = None
try:
os.chdir(path)
except OSError as err:
raise EasyBuildError("Failed to change from %s to %s: %s", cwd, path, err)
return cwd
def extract_file(fn, dest, cmd=None, extra_options=None, overwrite=False, forced=False):
"""
Extract file at given path to specified directory
:param fn: path to file to extract
:param dest: location to extract to
:param cmd: extract command to use (derived from filename if not specified)
:param extra_options: extra options to pass to extract command
:param overwrite: overwrite existing unpacked file
:param forced: force extraction in (extended) dry run mode
:return: path to directory (in case of success)
"""
if not os.path.isfile(fn) and not build_option('extended_dry_run'):
raise EasyBuildError("Can't extract file %s: no such file", fn)
mkdir(dest, parents=True)
# use absolute pathnames from now on
abs_dest = os.path.abspath(dest)
# change working directory
_log.debug("Unpacking %s in directory %s.", fn, abs_dest)
change_dir(abs_dest)
if not cmd:
cmd = extract_cmd(fn, overwrite=overwrite)
else:
# complete command template with filename
cmd = cmd % fn
if not cmd:
raise EasyBuildError("Can't extract file %s with unknown filetype", fn)
if extra_options:
cmd = "%s %s" % (cmd, extra_options)
run.run_cmd(cmd, simple=True, force_in_dry_run=forced)
return find_base_dir()
def which(cmd, retain_all=False, check_perms=True):
"""
Return (first) path in $PATH for specified command, or None if command is not found
:param retain_all: returns *all* locations to the specified command in $PATH, not just the first one
:param check_perms: check whether candidate path has read/exec permissions before accepting it as a match
"""
if retain_all:
res = []
else:
res = None
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
cmd_path = os.path.join(path, cmd)
# only accept path if command is there
if os.path.isfile(cmd_path):
_log.info("Command %s found at %s", cmd, cmd_path)
if check_perms:
# check if read/executable permissions are available
if not os.access(cmd_path, os.R_OK | os.X_OK):
_log.info("No read/exec permissions for %s, so continuing search...", cmd_path)
continue
if retain_all:
res.append(cmd_path)
else:
res = cmd_path
break
if not res:
_log.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)" % (cmd, paths))
return res
def det_common_path_prefix(paths):
"""Determine common path prefix for a given list of paths."""
if not isinstance(paths, list):
raise EasyBuildError("det_common_path_prefix: argument must be of type list (got %s: %s)", type(paths), paths)
elif not paths:
return None
# initial guess for common prefix
prefix = paths[0]
found_common = False
while not found_common and prefix != os.path.dirname(prefix):
prefix = os.path.dirname(prefix)
found_common = all([p.startswith(prefix) for p in paths])
if found_common:
# prefix may be empty string for relative paths with a non-common prefix
return prefix.rstrip(os.path.sep) or None
else:
return None
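# Worked example (added illustration): common prefix of two install paths.
def _demo_det_common_path_prefix():
    paths = ['/opt/software/GCC/8.3.0/bin', '/opt/software/GCC/8.3.0/lib64']
    return det_common_path_prefix(paths)  # -> '/opt/software/GCC/8.3.0'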
def is_alt_pypi_url(url):
"""Determine whether specified URL is already an alternate PyPI URL, i.e. whether it contains a hash."""
# example: .../packages/5b/03/e135b19fadeb9b1ccb45eac9f60ca2dc3afe72d099f6bd84e03cb131f9bf/easybuild-2.7.0.tar.gz
alt_url_regex = re.compile('/packages/[a-f0-9]{2}/[a-f0-9]{2}/[a-f0-9]{60}/[^/]+$')
res = bool(alt_url_regex.search(url))
_log.debug("Checking whether '%s' is an alternate PyPI URL using pattern '%s'...: %s",
url, alt_url_regex.pattern, res)
return res
def pypi_source_urls(pkg_name):
"""
Fetch list of source URLs (incl. source filename) for specified Python package from PyPI, using 'simple' PyPI API.
"""
# example: https://pypi.python.org/simple/easybuild
# see also:
# - https://www.python.org/dev/peps/pep-0503/
# - https://wiki.python.org/moin/PyPISimple
simple_url = 'https://pypi.python.org/simple/%s' % re.sub(r'[-_.]+', '-', pkg_name.lower())
tmpdir = tempfile.mkdtemp()
urls_html = os.path.join(tmpdir, '%s_urls.html' % pkg_name)
if download_file(os.path.basename(urls_html), simple_url, urls_html) is None:
_log.debug("Failed to download %s to determine available PyPI URLs for %s", simple_url, pkg_name)
res = []
else:
parsed_html = ElementTree.parse(urls_html)
if hasattr(parsed_html, 'iter'):
res = [a.attrib['href'] for a in parsed_html.iter('a')]
else:
res = [a.attrib['href'] for a in parsed_html.getiterator('a')]
# links are relative, transform them into full URLs; for example:
# from: ../../packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
# to: https://pypi.python.org/packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
res = [re.sub('.*/packages/', 'https://pypi.python.org/packages/', x) for x in res]
return res
def derive_alt_pypi_url(url):
"""Derive alternate PyPI URL for given URL."""
alt_pypi_url = None
# example input URL: https://pypi.python.org/packages/source/e/easybuild/easybuild-2.7.0.tar.gz
pkg_name, pkg_source = url.strip().split('/')[-2:]
cand_urls = pypi_source_urls(pkg_name)
# md5 for old PyPI, sha256 for new PyPi (Warehouse)
regex = re.compile('.*/%s(?:#md5=[a-f0-9]{32}|#sha256=[a-f0-9]{64})$' % pkg_source.replace('.', '\\.'), re.M)
for cand_url in cand_urls:
res = regex.match(cand_url)
if res:
# e.g.: https://pypi.python.org/packages/<dir1>/<dir2>/<hash>/easybuild-<version>.tar.gz#md5=<md5>
alt_pypi_url = res.group(0).split('#sha256')[0].split('#md5')[0]
break
if not alt_pypi_url:
_log.debug("Failed to extract hash using pattern '%s' from list of URLs: %s", regex.pattern, cand_urls)
return alt_pypi_url
def download_file(filename, url, path, forced=False):
"""Download a file from the given URL, to the specified path."""
_log.debug("Trying to download %s from %s to %s", filename, url, path)
timeout = build_option('download_timeout')
if timeout is None:
# default to 10sec timeout if none was specified
# default system timeout (used if nothing is specified) may be infinite (?)
timeout = 10
_log.debug("Using timeout of %s seconds for initiating download" % timeout)
# make sure directory exists
basedir = os.path.dirname(path)
mkdir(basedir, parents=True)
# try downloading, three times max.
downloaded = False
max_attempts = 3
attempt_cnt = 0
# use custom HTTP header
headers = {'User-Agent': 'EasyBuild', 'Accept': '*/*'}
# for backward compatibility, and to avoid relying on 3rd party Python library 'requests'
url_req = std_urllib.Request(url, headers=headers)
used_urllib = std_urllib
switch_to_requests = False
while not downloaded and attempt_cnt < max_attempts:
attempt_cnt += 1
try:
if used_urllib is std_urllib:
# urllib2 (Python 2) / urllib.request (Python 3) does the right thing for http proxy setups,
# urllib does not!
url_fd = std_urllib.urlopen(url_req, timeout=timeout)
status_code = url_fd.getcode()
else:
response = requests.get(url, headers=headers, stream=True, timeout=timeout)
status_code = response.status_code
response.raise_for_status()
url_fd = response.raw
url_fd.decode_content = True
_log.debug('response code for given url %s: %s' % (url, status_code))
write_file(path, url_fd.read(), forced=forced, backup=True)
_log.info("Downloaded file %s from url %s to %s" % (filename, url, path))
downloaded = True
url_fd.close()
except used_urllib.HTTPError as err:
if used_urllib is std_urllib:
status_code = err.code
if status_code == 403 and attempt_cnt == 1:
switch_to_requests = True
elif 400 <= status_code <= 499:
_log.warning("URL %s was not found (HTTP response code %s), not trying again" % (url, status_code))
break
else:
_log.warning("HTTPError occurred while trying to download %s to %s: %s" % (url, path, err))
except IOError as err:
_log.warning("IOError occurred while trying to download %s to %s: %s" % (url, path, err))
error_re = re.compile(r"<urlopen error \[Errno 1\] _ssl.c:.*: error:.*:"
"SSL routines:SSL23_GET_SERVER_HELLO:sslv3 alert handshake failure>")
if error_re.match(str(err)):
switch_to_requests = True
except Exception as err:
raise EasyBuildError("Unexpected error occurred when trying to download %s to %s: %s", url, path, err)
if not downloaded and attempt_cnt < max_attempts:
_log.info("Attempt %d of downloading %s to %s failed, trying again..." % (attempt_cnt, url, path))
if used_urllib is std_urllib and switch_to_requests:
if not HAVE_REQUESTS:
raise EasyBuildError("SSL issues with urllib2. If you are using RHEL/CentOS 6.x please "
"install the python-requests and pyOpenSSL RPM packages and try again.")
_log.info("Downloading using requests package instead of urllib2")
used_urllib = requests
if downloaded:
_log.info("Successful download of file %s from url %s to path %s" % (filename, url, path))
return path
else:
_log.warning("Download of %s to %s failed, done trying" % (url, path))
return None
def find_easyconfigs(path, ignore_dirs=None):
"""
Find .eb easyconfig files in path
"""
if os.path.isfile(path):
return [path]
if ignore_dirs is None:
ignore_dirs = []
# walk through the start directory, retain all files that end in .eb
files = []
path = os.path.abspath(path)
for dirpath, dirnames, filenames in os.walk(path, topdown=True):
for f in filenames:
if not f.endswith('.eb') or f == 'TEMPLATE.eb':
continue
spec = os.path.join(dirpath, f)
_log.debug("Found easyconfig %s" % spec)
files.append(spec)
# ignore subdirs specified to be ignored by replacing items in dirnames list used by os.walk
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
return files
def search_file(paths, query, short=False, ignore_dirs=None, silent=False, filename_only=False, terse=False):
"""
Search for files using in specified paths using specified search query (regular expression)
:param paths: list of paths to search in
:param query: search query to use (regular expression); will be used case-insensitive
:param short: figure out common prefix of hits, use variable to factor it out
:param ignore_dirs: list of directories to ignore (default: ['.git', '.svn'])
:param silent: whether or not to remain silent (don't print anything)
:param filename_only: only return filenames, not file paths
:param terse: stick to terse (machine-readable) output, as opposed to pretty-printing
"""
if ignore_dirs is None:
ignore_dirs = ['.git', '.svn']
if not isinstance(ignore_dirs, list):
raise EasyBuildError("search_file: ignore_dirs (%s) should be of type list, not %s",
ignore_dirs, type(ignore_dirs))
# escape some special characters in query that may also occur in actual software names: +
# do not use re.escape, since that breaks queries with genuine regex characters like ^ or .*
query = re.sub('([+])', r'\\\1', query)
# compile regex, case-insensitive
try:
query = re.compile(query, re.I)
except re.error as err:
raise EasyBuildError("Invalid search query: %s", err)
var_defs = []
hits = []
var_index = 1
var = None
for path in paths:
path_hits = []
if not terse:
print_msg("Searching (case-insensitive) for '%s' in %s " % (query.pattern, path), log=_log, silent=silent)
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
for filename in filenames:
if query.search(filename):
if not path_hits:
var = "CFGS%d" % var_index
var_index += 1
if filename_only:
path_hits.append(filename)
else:
path_hits.append(os.path.join(dirpath, filename))
# do not consider (certain) hidden directories
# note: we still need to consider e.g., .local !
# replace list elements using [:], so os.walk doesn't process deleted directories
# see http://stackoverflow.com/questions/13454164/os-walk-without-hidden-folders
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
path_hits = sorted(path_hits)
if path_hits:
common_prefix = det_common_path_prefix(path_hits)
if not terse and short and common_prefix is not None and len(common_prefix) > len(var) * 2:
var_defs.append((var, common_prefix))
hits.extend([os.path.join('$%s' % var, fn[len(common_prefix) + 1:]) for fn in path_hits])
else:
hits.extend(path_hits)
return var_defs, hits
def find_eb_script(script_name):
"""Find EasyBuild script with given name (in easybuild/scripts subdirectory)."""
filetools, eb_dir = __file__, None
if os.path.isabs(filetools):
eb_dir = os.path.dirname(os.path.dirname(filetools))
else:
# go hunting for absolute path to filetools module via sys.path;
# we can't rely on os.path.abspath or os.path.realpath, since they leverage os.getcwd()...
for path in sys.path:
path = os.path.abspath(path)
if os.path.exists(os.path.join(path, filetools)):
eb_dir = os.path.dirname(os.path.dirname(os.path.join(path, filetools)))
break
if eb_dir is None:
raise EasyBuildError("Failed to find parent directory for 'easybuild/scripts' subdirectory")
script_loc = os.path.join(eb_dir, 'scripts', script_name)
if not os.path.exists(script_loc):
prev_script_loc = script_loc
# fallback mechanism: check in location relative to location of 'eb'
eb_path = os.getenv('EB_SCRIPT_PATH') or which('eb')
if eb_path is None:
_log.warning("'eb' not found in $PATH, failed to determine installation prefix")
else:
install_prefix = os.path.dirname(os.path.dirname(resolve_path(eb_path)))
script_loc = os.path.join(install_prefix, 'easybuild', 'scripts', script_name)
if not os.path.exists(script_loc):
raise EasyBuildError("Script '%s' not found at expected location: %s or %s",
script_name, prev_script_loc, script_loc)
return script_loc
def compute_checksum(path, checksum_type=DEFAULT_CHECKSUM):
"""
Compute checksum of specified file.
:param path: Path of file to compute checksum for
:param checksum_type: type(s) of checksum ('adler32', 'crc32', 'md5' (default), 'sha1', 'sha256', 'sha512', 'size')
"""
if checksum_type not in CHECKSUM_FUNCTIONS:
raise EasyBuildError("Unknown checksum type (%s), supported types are: %s",
checksum_type, CHECKSUM_FUNCTIONS.keys())
try:
checksum = CHECKSUM_FUNCTIONS[checksum_type](path)
except IOError as err:
raise EasyBuildError("Failed to read %s: %s", path, err)
except MemoryError as err:
_log.warning("A memory error occurred when computing the checksum for %s: %s" % (path, err))
checksum = 'dummy_checksum_due_to_memory_error'
return checksum
def calc_block_checksum(path, algorithm):
"""Calculate a checksum of a file by reading it into blocks"""
# We pick a blocksize of 16 MB: it's a multiple of the internal
# blocksize of md5/sha1 (64) and gave the best speed results
try:
# in hashlib, blocksize is a class parameter
blocksize = algorithm.blocksize * 262144 # 2^18
except AttributeError:
blocksize = 16777216 # 2^24
_log.debug("Using blocksize %s for calculating the checksum" % blocksize)
try:
f = open(path, 'rb')
for block in iter(lambda: f.read(blocksize), b''):
algorithm.update(block)
f.close()
except IOError as err:
raise EasyBuildError("Failed to read %s: %s", path, err)
return algorithm.hexdigest()
def verify_checksum(path, checksums):
"""
Verify checksum of specified file.
:param file: path of file to verify checksum of
:param checksum: checksum value (and type, optionally, default is MD5), e.g., 'af314', ('sha', '5ec1b')
"""
filename = os.path.basename(path)
# if no checksum is provided, pretend checksum to be valid, unless presence of checksums to verify is enforced
if checksums is None:
if build_option('enforce_checksums'):
raise EasyBuildError("Missing checksum for %s", filename)
else:
return True
# make sure we have a list of checksums
if not isinstance(checksums, list):
checksums = [checksums]
for checksum in checksums:
if isinstance(checksum, dict):
if filename in checksum:
# Set this to a string-type checksum
checksum = checksum[filename]
elif build_option('enforce_checksums'):
raise EasyBuildError("Missing checksum for %s", filename)
else:
# Set to None and allow to fail elsewhere
checksum = None
if isinstance(checksum, string_type):
# if no checksum type is specified, it is assumed to be MD5 (32 characters) or SHA256 (64 characters)
if len(checksum) == 64:
typ = CHECKSUM_TYPE_SHA256
elif len(checksum) == 32:
typ = CHECKSUM_TYPE_MD5
else:
raise EasyBuildError("Length of checksum '%s' (%d) does not match with either MD5 (32) or SHA256 (64)",
checksum, len(checksum))
elif isinstance(checksum, tuple):
# if checksum is specified as a tuple, it could either be specifying:
# * the type of checksum + the checksum value
# * a set of alternative valid checksums to consider => recursive call
if len(checksum) == 2 and checksum[0] in CHECKSUM_FUNCTIONS:
typ, checksum = checksum
else:
_log.info("Found %d alternative checksums for %s, considering them one-by-one...", len(checksum), path)
for cand_checksum in checksum:
if verify_checksum(path, cand_checksum):
_log.info("Found matching checksum for %s: %s", path, cand_checksum)
return True
else:
_log.info("Ignoring non-matching checksum for %s (%s)...", path, cand_checksum)
else:
raise EasyBuildError("Invalid checksum spec '%s', should be a string (MD5) or 2-tuple (type, value).",
checksum)
actual_checksum = compute_checksum(path, typ)
_log.debug("Computed %s checksum for %s: %s (correct checksum: %s)" % (typ, path, actual_checksum, checksum))
if actual_checksum != checksum:
return False
# if we land here, all checksums have been verified to be correct
return True
def is_sha256_checksum(value):
"""Check whether provided string is a SHA256 checksum."""
res = False
if isinstance(value, string_type):
if re.match('^[0-9a-f]{64}$', value):
res = True
_log.debug("String value '%s' has the correct format to be a SHA256 checksum", value)
else:
_log.debug("String value '%s' does NOT have the correct format to be a SHA256 checksum", value)
else:
_log.debug("Non-string value %s is not a SHA256 checksum", value)
return res
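# Quick illustration (added): only 64-character lowercase hex strings qualify.
def _demo_is_sha256_checksum():
    assert is_sha256_checksum('a' * 64)
    assert not is_sha256_checksum('a' * 32)  # MD5-length string, not SHA256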
def find_base_dir():
"""
Try to locate a possible new base directory
- this is typically a single subdir, e.g. from untarring a tarball
- when extracting multiple tarballs in the same directory,
expect only the first one to give the correct path
"""
def get_local_dirs_purged():
# e.g. always purge the log directory
# and hidden directories
ignoredirs = ["easybuild"]
lst = os.listdir(os.getcwd())
lst = [d for d in lst if not d.startswith('.') and d not in ignoredirs]
return lst
lst = get_local_dirs_purged()
new_dir = os.getcwd()
while len(lst) == 1:
new_dir = os.path.join(os.getcwd(), lst[0])
if not os.path.isdir(new_dir):
break
change_dir(new_dir)
lst = get_local_dirs_purged()
# make sure it's a directory, and not a (single) file that was in a tarball for example
while not os.path.isdir(new_dir):
new_dir = os.path.dirname(new_dir)
_log.debug("Last dir list %s" % lst)
_log.debug("Possible new dir %s found" % new_dir)
return new_dir
def find_extension(filename):
"""Find best match for filename extension."""
# sort by length, so longest file extensions get preference
suffixes = sorted(EXTRACT_CMDS.keys(), key=len, reverse=True)
pat = r'(?P<ext>%s)$' % '|'.join([s.replace('.', '\\.') for s in suffixes])
res = re.search(pat, filename, flags=re.IGNORECASE)
if res:
ext = res.group('ext')
else:
raise EasyBuildError('Unknown file type for file %s', filename)
return ext
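# Worked example (added): extensions are tried longest-first, so a gzipped
# tarball matches '.tar.gz' rather than plain '.gz'.
def _demo_find_extension():
    return find_extension('easybuild-4.0.0.tar.gz')  # -> '.tar.gz'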
def extract_cmd(filepath, overwrite=False):
"""
Determines the file type of file at filepath, returns extract cmd based on file suffix
"""
filename = os.path.basename(filepath)
ext = find_extension(filename)
target = filename[:-len(ext)]  # Strip the suffix; rstrip() would strip characters, not the suffix.
cmd_tmpl = EXTRACT_CMDS[ext.lower()]
if overwrite:
if 'unzip -qq' in cmd_tmpl:
cmd_tmpl = cmd_tmpl.replace('unzip -qq', 'unzip -qq -o')
return cmd_tmpl % {'filepath': filepath, 'target': target}
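# Usage sketch (added, not in the original module): the command template for
# the detected suffix is filled in with the file path (and target, if used).
def _demo_extract_cmd():
    return extract_cmd('/tmp/foo-1.0.tar.gz')  # -> 'tar xzf /tmp/foo-1.0.tar.gz'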
def is_patch_file(path):
"""Determine whether file at specified path is a patch file (based on +++ and --- lines being present)."""
txt = read_file(path)
return bool(re.search(r'^\+{3}\s', txt, re.M) and re.search(r'^-{3}\s', txt, re.M))
def det_patched_files(path=None, txt=None, omit_ab_prefix=False, github=False, filter_deleted=False):
"""
Determine list of patched files from a patch.
It searches for "+++ path/to/patched/file" lines to determine the patched files.
Note: does not correctly handle filepaths with spaces.
:param path: the path to the diff
:param txt: the contents of the diff (either path or txt should be give)
:param omit_ab_prefix: ignore the a/ or b/ prefix of the files
:param github: only consider lines that start with 'diff --git' to determine list of patched files
:param filter_deleted: filter out all files that were deleted by the patch
"""
if github:
patched_regex = r"^diff --git (?:a/)?\S+\s*(?P<ab_prefix>b/)?(?P<file>\S+)"
else:
patched_regex = r"^\s*\+{3}\s+(?P<ab_prefix>[ab]/)?(?P<file>\S+)"
patched_regex = re.compile(patched_regex, re.M)
if path is not None:
txt = read_file(path)
elif txt is None:
raise EasyBuildError("Either a file path or a string representing a patch should be supplied")
patched_files = []
for match in patched_regex.finditer(txt):
patched_file = match.group('file')
if not omit_ab_prefix and match.group('ab_prefix') is not None:
patched_file = match.group('ab_prefix') + patched_file
delete_regex = re.compile(r"%s\ndeleted file" % re.escape(os.path.basename(patched_file)), re.M)
if patched_file in ['/dev/null']:
_log.debug("Ignoring patched file %s", patched_file)
elif filter_deleted and delete_regex.search(txt):
_log.debug("Filtering out deleted file %s", patched_file)
else:
patched_files.append(patched_file)
return patched_files
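# Small worked example (added): determine the patched file from a unified diff.
def _demo_det_patched_files():
    diff = '\n'.join([
        '--- a/src/main.c',
        '+++ b/src/main.c',
        '@@ -1 +1 @@',
    ])
    return det_patched_files(txt=diff)  # -> ['b/src/main.c']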
def guess_patch_level(patched_files, parent_dir):
"""Guess patch level based on list of patched files and specified directory."""
patch_level = None
for patched_file in patched_files:
# locate file by stripping of directories
tf2 = patched_file.split(os.path.sep)
n_paths = len(tf2)
path_found = False
level = None
for level in range(n_paths):
if os.path.isfile(os.path.join(parent_dir, *tf2[level:])):
path_found = True
break
if path_found:
patch_level = level
break
else:
_log.debug('No match found for %s, trying next patched file...' % patched_file)
return patch_level
def apply_patch(patch_file, dest, fn=None, copy=False, level=None, use_git_am=False):
"""
Apply a patch to source code in directory dest
- assume unified diff created with "diff -ru old new"
"""
if build_option('extended_dry_run'):
# skip checking of files in dry run mode
patch_filename = os.path.basename(patch_file)
dry_run_msg("* applying patch file %s" % patch_filename, silent=build_option('silent'))
elif not os.path.isfile(patch_file):
raise EasyBuildError("Can't find patch %s: no such file", patch_file)
elif fn and not os.path.isfile(fn):
raise EasyBuildError("Can't patch file %s: no such file", fn)
elif not os.path.isdir(dest):
raise EasyBuildError("Can't patch directory %s: no such directory", dest)
# copy missing files
if copy:
if build_option('extended_dry_run'):
dry_run_msg(" %s copied to %s" % (patch_file, dest), silent=build_option('silent'))
else:
copy_file(patch_file, dest)
_log.debug("Copied patch %s to dir %s" % (patch_file, dest))
# early exit, work is done after copying
return True
# use absolute paths
apatch = os.path.abspath(patch_file)
adest = os.path.abspath(dest)
# Attempt extracting the patch if it ends in .patch.gz, .patch.bz2, .patch.xz
# split in name + extension
apatch_root, apatch_file = os.path.split(apatch)
apatch_name, apatch_extension = os.path.splitext(apatch_file)
# Supports only bz2, gz and xz. zip can be archives which are not supported.
if apatch_extension in ['.gz', '.bz2', '.xz']:
# split again to get the second extension
apatch_subname, apatch_subextension = os.path.splitext(apatch_name)
if apatch_subextension == ".patch":
workdir = tempfile.mkdtemp(prefix='eb-patch-')
_log.debug("Extracting the patch to: %s", workdir)
# extracting the patch
apatch_dir = extract_file(apatch, workdir)
apatch = os.path.join(apatch_dir, apatch_name)
if level is None and build_option('extended_dry_run'):
level = '<derived>'
elif level is None:
# guess value for -p (patch level)
# - based on +++ lines
# - first +++ line that matches an existing file determines guessed level
# - we will try to match that level from current directory
patched_files = det_patched_files(path=apatch)
if not patched_files:
raise EasyBuildError("Can't guess patchlevel from patch %s: no testfile line found in patch", apatch)
return
level = guess_patch_level(patched_files, adest)
if level is None: # level can also be 0 (zero), so don't use "not level"
# no match
raise EasyBuildError("Can't determine patch level for patch %s from directory %s", patch_file, adest)
else:
_log.debug("Guessed patch level %d for patch %s" % (level, patch_file))
else:
_log.debug("Using specified patch level %d for patch %s" % (level, patch_file))
if use_git_am:
patch_cmd = "git am patch %s" % apatch
else:
patch_cmd = "patch -b -p%s -i %s" % (level, apatch)
out, ec = run.run_cmd(patch_cmd, simple=False, path=adest, log_ok=False, trace=False)
if ec:
raise EasyBuildError("Couldn't apply patch file %s. Process exited with code %s: %s", patch_file, ec, out)
return ec == 0
def apply_regex_substitutions(path, regex_subs, backup='.orig.eb'):
"""
Apply specified list of regex substitutions.
:param path: path to file to patch
:param regex_subs: list of substitutions to apply, specified as (<regexp pattern>, <replacement string>)
:param backup: create backup of original file with specified suffix (no backup if value evaluates to False)
"""
# only report when in 'dry run' mode
if build_option('extended_dry_run'):
dry_run_msg("applying regex substitutions to file %s" % path, silent=build_option('silent'))
for regex, subtxt in regex_subs:
dry_run_msg(" * regex pattern '%s', replacement string '%s'" % (regex, subtxt))
else:
_log.info("Applying following regex substitutions to %s: %s", path, regex_subs)
for i, (regex, subtxt) in enumerate(regex_subs):
regex_subs[i] = (re.compile(regex), subtxt)
if backup:
backup_ext = backup
else:
# no (persistent) backup file is created if empty string value is passed to 'backup' in fileinput.input
backup_ext = ''
try:
for line_id, line in enumerate(fileinput.input(path, inplace=1, backup=backup_ext)):
for regex, subtxt in regex_subs:
match = regex.search(line)
if match:
_log.info("Replacing line %d in %s: '%s' -> '%s'", (line_id + 1), path, match.group(0), subtxt)
line = regex.sub(subtxt, line)
sys.stdout.write(line)
except OSError as err:
raise EasyBuildError("Failed to patch %s: %s", path, err)
def modify_env(old, new):
"""NO LONGER SUPPORTED: use modify_env from easybuild.tools.environment instead"""
_log.nosupport("moved modify_env to easybuild.tools.environment", "2.0")
def convert_name(name, upper=False):
"""
Converts name so it can be used as variable name
"""
# no regexps
charmap = {
'+': 'plus',
'-': 'min',
'.': '',
}
for ch, new in charmap.items():
name = name.replace(ch, new)
if upper:
return name.upper()
else:
return name
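# Illustration (added): make a software name usable as a variable name.
def _demo_convert_name():
    return convert_name('netCDF-C++', upper=True)  # -> 'NETCDFMINCPLUSPLUS'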
def adjust_permissions(name, permissionBits, add=True, onlyfiles=False, onlydirs=False, recursive=True,
group_id=None, relative=True, ignore_errors=False, skip_symlinks=None):
"""
Add or remove (if add is False) permissionBits from all files (if onlydirs is False)
and directories (if onlyfiles is False) in path
"""
if skip_symlinks is not None:
depr_msg = "Use of 'skip_symlinks' argument for 'adjust_permissions' is deprecated "
depr_msg += "(symlinks are never followed anymore)"
_log.deprecated(depr_msg, '4.0')
name = os.path.abspath(name)
if recursive:
_log.info("Adjusting permissions recursively for %s" % name)
allpaths = [name]
for root, dirs, files in os.walk(name):
paths = []
if not onlydirs:
paths.extend(files)
if not onlyfiles:
# os.walk skips symlinked dirs by default, i.e., no special handling needed here
paths.extend(dirs)
for path in paths:
allpaths.append(os.path.join(root, path))
else:
_log.info("Adjusting permissions for %s" % name)
allpaths = [name]
failed_paths = []
fail_cnt = 0
err_msg = None
for path in allpaths:
try:
# don't change permissions if path is a symlink, since we're not checking where the symlink points to
# this is done because of security concerns (symlink may point out of installation directory)
# (note: os.lchmod is not supported on Linux)
if not os.path.islink(path):
if relative:
# relative permissions (add or remove)
perms = os.lstat(path)[stat.ST_MODE]
if add:
os.chmod(path, perms | permissionBits)
else:
os.chmod(path, perms & ~permissionBits)
else:
# hard permissions bits (not relative)
os.chmod(path, permissionBits)
if group_id:
# only change the group id if the current gid is different from what we want
cur_gid = os.lstat(path).st_gid
if not cur_gid == group_id:
_log.debug("Changing group id of %s to %s" % (path, group_id))
os.lchown(path, -1, group_id)
else:
_log.debug("Group id of %s is already OK (%s)" % (path, group_id))
except OSError as err:
if ignore_errors:
# ignore errors while adjusting permissions (for example caused by bad links)
_log.info("Failed to chmod/chown %s (but ignoring it): %s" % (path, err))
fail_cnt += 1
else:
failed_paths.append(path)
err_msg = err
if failed_paths:
raise EasyBuildError("Failed to chmod/chown several paths: %s (last error: %s)", failed_paths, err_msg)
# we ignore some errors, but if there are too many, something is definitely wrong
fail_ratio = fail_cnt / float(len(allpaths))
max_fail_ratio = float(build_option('max_fail_ratio_adjust_permissions'))
if fail_ratio > max_fail_ratio:
raise EasyBuildError("%.2f%% of permissions/owner operations failed (more than %.2f%%), "
"something must be wrong...", 100 * fail_ratio, 100 * max_fail_ratio)
elif fail_cnt > 0:
_log.debug("%.2f%% of permissions/owner operations failed, ignoring that..." % (100 * fail_ratio))
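# Hedged usage sketch (not part of the original module): add user write
# permission to everything below a hypothetical install directory, then
# strip group/other write bits from the directories only; bits are relative
# (OR'ed in or masked out) unless relative=False is passed.
#
#     >>> adjust_permissions('/tmp/example-install', stat.S_IWUSR, add=True)
#     >>> adjust_permissions('/tmp/example-install', stat.S_IWGRP | stat.S_IWOTH,
#     ...                    add=False, onlydirs=True)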
def patch_perl_script_autoflush(path):
"""
Patch Perl script to enable autoflush,
so that e.g. run_cmd_qa receives all output to answer questions.
"""
# only report when in 'dry run' mode
if build_option('extended_dry_run'):
dry_run_msg("Perl script patched: %s" % path, silent=build_option('silent'))
else:
txt = read_file(path)
origpath = "%s.eb.orig" % path
write_file(origpath, txt)
_log.debug("Patching Perl script %s for autoflush, original script copied to %s" % (path, origpath))
# force autoflush for Perl print buffer
lines = txt.split('\n')
newtxt = '\n'.join([
lines[0], # shebang line
"\nuse IO::Handle qw();",
"STDOUT->autoflush(1);\n", # extra newline to separate from actual script
] + lines[1:])
write_file(path, newtxt)
def mkdir(path, parents=False, set_gid=None, sticky=None):
"""
Create a directory
:param path: path of the directory to create
:param parents: create parent directories if needed (mkdir -p)
:param set_gid: set group ID bit, to make subdirectories and files inherit group
:param sticky: set the sticky bit on this directory (a.k.a. the restricted deletion flag),
to prevent users from removing/renaming files in this directory
"""
if set_gid is None:
set_gid = build_option('set_gid_bit')
if sticky is None:
sticky = build_option('sticky_bit')
if not os.path.isabs(path):
path = os.path.abspath(path)
# exit early if path already exists
if not os.path.exists(path):
_log.info("Creating directory %s (parents: %s, set_gid: %s, sticky: %s)", path, parents, set_gid, sticky)
# set_gid and sticky bits are only set on new directories, so we need to determine the existing parent path
existing_parent_path = os.path.dirname(path)
try:
if parents:
# climb up until we hit an existing path or the empty string (for relative paths)
while existing_parent_path and not os.path.exists(existing_parent_path):
existing_parent_path = os.path.dirname(existing_parent_path)
os.makedirs(path)
else:
os.mkdir(path)
except OSError as err:
raise EasyBuildError("Failed to create directory %s: %s", path, err)
# set group ID and sticky bits, if desired
bits = 0
if set_gid:
bits |= stat.S_ISGID
if sticky:
bits |= stat.S_ISVTX
if bits:
try:
new_subdir = path[len(existing_parent_path):].lstrip(os.path.sep)
new_path = os.path.join(existing_parent_path, new_subdir.split(os.path.sep)[0])
adjust_permissions(new_path, bits, add=True, relative=True, recursive=True, onlydirs=True)
except OSError as err:
raise EasyBuildError("Failed to set group ID/sticky bit: %s", err)
else:
_log.debug("Not creating existing path %s" % path)
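# Hedged usage sketch (not part of the original module): create a nested
# (hypothetical) directory with the setgid bit set, so new files inherit the
# group; set_gid/sticky default to the corresponding build options when left None.
#
#     >>> mkdir('/tmp/example/software/1.0', parents=True, set_gid=True)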
def expand_glob_paths(glob_paths):
"""Expand specified glob paths to a list of unique non-glob paths to only files."""
paths = []
for glob_path in glob_paths:
add_paths = [f for f in glob.glob(os.path.expanduser(glob_path)) if os.path.isfile(f)]
if add_paths:
paths.extend(add_paths)
else:
raise EasyBuildError("No files found using glob pattern '%s'", glob_path)
return nub(paths)
def weld_paths(path1, path2):
"""Weld two paths together, taking into account overlap between tail of 1st path with head of 2nd path."""
# strip path1 for use in comparisons
path1s = path1.rstrip(os.path.sep)
# init part2 head/tail/parts
path2_head = path2.rstrip(os.path.sep)
path2_tail = ''
path2_parts = path2.split(os.path.sep)
# if path2 is an absolute path, make sure it stays that way
if path2_parts[0] == '':
path2_parts[0] = os.path.sep
while path2_parts and not path1s.endswith(path2_head):
path2_tail = os.path.join(path2_parts.pop(), path2_tail)
if path2_parts:
# os.path.join requires non-empty list
path2_head = os.path.join(*path2_parts)
else:
path2_head = None
return os.path.join(path1, path2_tail)
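# Hedged usage sketch (not part of the original module): the overlapping
# 'bar' component is only included once; note the result may carry a
# trailing separator when the 2nd path is consumed by the comparison.
#
#     >>> weld_paths('/foo/bar', 'bar/baz')
#     '/foo/bar/baz/'
#     >>> weld_paths('/foo', 'bar/baz')
#     '/foo/bar/baz/'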
def path_matches(path, paths):
"""Check whether given path matches any of the provided paths."""
if not os.path.exists(path):
return False
for somepath in paths:
if os.path.exists(somepath) and os.path.samefile(path, somepath):
return True
return False
def rmtree2(path, n=3):
"""Wrapper around shutil.rmtree to make it more robust when used on NFS mounted file systems."""
ok = False
for i in range(0, n):
try:
shutil.rmtree(path)
ok = True
break
except OSError as err:
_log.debug("Failed to remove path %s with shutil.rmtree at attempt %d: %s" % (path, i, err))
time.sleep(2)
# make sure write permissions are enabled on entire directory
adjust_permissions(path, stat.S_IWUSR, add=True, recursive=True)
if not ok:
raise EasyBuildError("Failed to remove path %s with shutil.rmtree, even after %d attempts.", path, n)
else:
_log.info("Path %s successfully removed." % path)
def find_backup_name_candidate(src_file):
"""Returns a non-existing file to be used as destination for backup files"""
# e.g. 20170817234510 on Aug 17th 2017 at 23:45:10
timestamp = datetime.datetime.now()
dst_file = '%s_%s_%s' % (src_file, timestamp.strftime('%Y%m%d%H%M%S'), os.getpid())
while os.path.exists(dst_file):
_log.debug("Backup of %s at %s already found at %s, trying again in a second...", src_file, dst_file, timestamp)
time.sleep(1)
timestamp = datetime.datetime.now()
dst_file = '%s_%s_%s' % (src_file, timestamp.strftime('%Y%m%d%H%M%S'), os.getpid())
return dst_file
def back_up_file(src_file, backup_extension='bak', hidden=False, strip_fn=None):
"""
Backs up a file, appending a backup extension plus a timestamp and PID to its name.
:param src_file: file to back up
:param backup_extension: extension to use for the backup file (can be empty or None)
:param hidden: make backup hidden (leading dot in filename)
:param strip_fn: strip specified trailing substring from filename of backup
:return: location of backed up file
"""
fn_prefix, fn_suffix = '', ''
if hidden:
fn_prefix = '.'
if backup_extension:
fn_suffix = '.%s' % backup_extension
src_dir, src_fn = os.path.split(src_file)
if strip_fn and src_fn.endswith(strip_fn):
# strip trailing substring (note: str.rstrip() strips a set of characters, not a suffix)
src_fn = src_fn[:-len(strip_fn)]
backup_fp = find_backup_name_candidate(os.path.join(src_dir, fn_prefix + src_fn + fn_suffix))
copy_file(src_file, backup_fp)
_log.info("File %s backed up in %s", src_file, backup_fp)
return backup_fp
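# Hedged usage sketch (not part of the original module): the backup filename
# always includes a timestamp and PID, via find_backup_name_candidate();
# the path and exact suffix shown here are hypothetical.
#
#     >>> back_up_file('/tmp/example.eb')
#     '/tmp/example.eb.bak_20170817234510_12345'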
def move_logs(src_logfile, target_logfile):
"""Move log file(s)."""
zip_log_cmd = build_option('zip_logs')
mkdir(os.path.dirname(target_logfile), parents=True)
src_logfile_len = len(src_logfile)
try:
# there may be multiple log files, due to log rotation
app_logs = glob.glob('%s*' % src_logfile)
for app_log in app_logs:
# retain possible suffix
new_log_path = target_logfile + app_log[src_logfile_len:]
# retain old logs
if os.path.exists(new_log_path):
back_up_file(new_log_path)
# move log to target path
move_file(app_log, new_log_path)
_log.info("Moved log file %s to %s" % (src_logfile, new_log_path))
if zip_log_cmd:
run.run_cmd("%s %s" % (zip_log_cmd, new_log_path))
_log.info("Zipped log %s using '%s'", new_log_path, zip_log_cmd)
except (IOError, OSError) as err:
raise EasyBuildError("Failed to move log file(s) %s* to new log file %s*: %s",
src_logfile, target_logfile, err)
def cleanup(logfile, tempdir, testing, silent=False):
"""
Cleanup the specified log file and the tmp directory, if desired.
:param logfile: path to log file to clean up
:param tempdir: path to temporary directory to clean up
:param testing: are we in testing mode? if so, don't actually clean up anything
:param silent: be silent (don't print anything to stdout)
"""
if build_option('cleanup_tmpdir') and not testing:
if logfile is not None:
try:
for log in [logfile] + glob.glob('%s.[0-9]*' % logfile):
os.remove(log)
except OSError as err:
raise EasyBuildError("Failed to remove log file(s) %s*: %s", logfile, err)
print_msg("Temporary log file(s) %s* have been removed." % (logfile), log=None, silent=testing or silent)
if tempdir is not None:
try:
shutil.rmtree(tempdir, ignore_errors=True)
except OSError as err:
raise EasyBuildError("Failed to remove temporary directory %s: %s", tempdir, err)
print_msg("Temporary directory %s has been removed." % tempdir, log=None, silent=testing or silent)
else:
msg = "Keeping temporary log file(s) %s* and directory %s." % (logfile, tempdir)
print_msg(msg, log=None, silent=testing or silent)
def copytree(src, dst, symlinks=False, ignore=None):
"""
Copied from Lib/shutil.py in Python 2.7, since we need this to work with Python 2.4 as well
(and this code can be improved)...
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
_log.deprecated("Use 'copy_dir' rather than 'copytree'", '4.0')
class Error(EnvironmentError):
pass
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
_log.debug("copytree: skipping copy of %s" % ignored_names)
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
# append a single (src, dst, reason) tuple; extend() would add 3 separate items
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
def encode_string(name):
"""
This encoding function handles funky software names ad infinitum, like:
example: '0_foo+0x0x#-$__'
becomes: '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
The intention is to have a robust escaping mechanism for names like c++, C# et al
It has been inspired by the concepts seen at, but in lowercase style:
* http://fossies.org/dox/netcdf-4.2.1.1/escapes_8c_source.html
* http://celldesigner.org/help/CDH_Species_01.html
* http://research.cs.berkeley.edu/project/sbp/darcsrepo-no-longer-updated/src/edu/berkeley/sbp/misc/ReflectiveWalker.java # noqa
and can be extended freely as per ISO/IEC 10646:2012 / Unicode 6.1 names:
* http://www.unicode.org/versions/Unicode6.1.0/
For readability of >2 words, it is suggested to use _CamelCase_ style.
So, yes, '_GreekSmallLetterEtaWithPsiliAndOxia_' *could* indeed be a fully
valid software name; software "electron" in the original spelling anyone? ;-)
"""
# do the character remapping, return same char by default
result = ''.join(map(lambda x: STRING_ENCODING_CHARMAP.get(x, x), name))
return result
def decode_string(name):
"""Decoding function to revert result of encode_string."""
result = name
for (char, escaped_char) in STRING_ENCODING_CHARMAP.items():
result = re.sub(escaped_char, char, result)
return result
def encode_class_name(name):
"""return encoded version of class name"""
return EASYBLOCK_CLASS_PREFIX + encode_string(name)
def decode_class_name(name):
"""Return decoded version of class name."""
if not name.startswith(EASYBLOCK_CLASS_PREFIX):
# name is not encoded, apparently
return name
else:
name = name[len(EASYBLOCK_CLASS_PREFIX):]
return decode_string(name)
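# Hedged usage sketch (not part of the original module), reusing the example
# from the encode_string() docstring; the actual mapping is defined by
# STRING_ENCODING_CHARMAP elsewhere in this module.
#
#     >>> encoded = encode_string('0_foo+0x0x#-$__')
#     >>> encoded
#     '0_underscore_foo_plus_0x0x_hash__minus__dollar__underscore__underscore_'
#     >>> decode_string(encoded)
#     '0_foo+0x0x#-$__'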
def run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None):
"""NO LONGER SUPPORTED: use run_cmd from easybuild.tools.run instead"""
_log.nosupport("run_cmd was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None):
"""NO LONGER SUPPORTED: use run_cmd_qa from easybuild.tools.run instead"""
_log.nosupport("run_cmd_qa was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def parse_log_for_error(txt, regExp=None, stdout=True, msg=None):
"""NO LONGER SUPPORTED: use parse_log_for_error from easybuild.tools.run instead"""
_log.nosupport("parse_log_for_error was moved from easybuild.tools.filetools to easybuild.tools.run", '2.0')
def det_size(path):
"""
Determine total size of given filepath (in bytes).
"""
installsize = 0
try:
# walk install dir to determine total size
for (dirpath, _, filenames) in os.walk(path):
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
if os.path.exists(fullpath):
installsize += os.path.getsize(fullpath)
except OSError as err:
_log.warning("Could not determine install size: %s" % err)
return installsize
def find_flexlm_license(custom_env_vars=None, lic_specs=None):
"""
Find FlexLM license.
Considers the specified list of environment variables;
checks for path to existing license file or valid license server specification;
duplicate paths are not retained in the returned list of license specs.
If no license is found through environment variables, also consider 'lic_specs'.
:param custom_env_vars: list of environment variables to consider (if None, only consider $LM_LICENSE_FILE)
:param lic_specs: list of license specifications
:return: tuple with list of valid license specs found and name of first valid environment variable
"""
valid_lic_specs = []
lic_env_var = None
# regex for license server spec; format: <port>@<server>
server_port_regex = re.compile(r'^[0-9]+@\S+$')
# always consider $LM_LICENSE_FILE
lic_env_vars = ['LM_LICENSE_FILE']
if isinstance(custom_env_vars, string_type):
lic_env_vars.insert(0, custom_env_vars)
elif custom_env_vars is not None:
lic_env_vars = custom_env_vars + lic_env_vars
# grab values for defined environment variables
cand_lic_specs = {}
for env_var in lic_env_vars:
if env_var in os.environ:
cand_lic_specs[env_var] = nub(os.environ[env_var].split(os.pathsep))
# also consider provided license spec (last)
# use None as key to indicate that these license specs do not have an environment variable associated with them
if lic_specs:
cand_lic_specs[None] = lic_specs
_log.debug("Candidate license specs: %s", cand_lic_specs)
# check for valid license specs
# order matters, so loop over original list of environment variables to consider
valid_lic_specs = []
for env_var in lic_env_vars + [None]:
# obtain list of values to consider
# take into account that some keys may be missing, and that individual values may be None
values = [val for val in cand_lic_specs.get(env_var, None) or [] if val]
_log.info("Considering %s to find FlexLM license specs: %s", env_var, values)
for value in values:
# license files to consider
lic_files = None
if os.path.isfile(value):
lic_files = [value]
elif os.path.isdir(value):
# consider all *.dat and *.lic files in specified directory
lic_files = sorted(glob.glob(os.path.join(value, '*.dat')) + glob.glob(os.path.join(value, '*.lic')))
# valid license server spec
elif server_port_regex.match(value):
valid_lic_specs.append(value)
# check whether license files are readable before retaining them
if lic_files:
for lic_file in lic_files:
try:
# check that the license file can be opened for reading (without leaking the file handle)
with open(lic_file, 'r'):
pass
valid_lic_specs.append(lic_file)
except IOError as err:
_log.warning("License file %s found, but failed to open it for reading: %s", lic_file, err)
# stop after finding valid license specs, filter out duplicates
if valid_lic_specs:
valid_lic_specs = nub(valid_lic_specs)
lic_env_var = env_var
break
if lic_env_var:
via_msg = '$%s' % lic_env_var
else:
via_msg = "provided license spec"
_log.info("Found valid license specs via %s: %s", via_msg, valid_lic_specs)
return (valid_lic_specs, lic_env_var)
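# Hedged usage sketch (not part of the original module): a <port>@<server>
# spec set via a custom environment variable takes precedence over
# $LM_LICENSE_FILE; the variable name and value are hypothetical.
#
#     >>> os.environ['INTEL_LICENSE_FILE'] = '28518@lic1.example.org'
#     >>> find_flexlm_license(custom_env_vars=['INTEL_LICENSE_FILE'])
#     (['28518@lic1.example.org'], 'INTEL_LICENSE_FILE')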
def copy_file(path, target_path, force_in_dry_run=False):
"""
Copy a file from specified location to specified location
:param path: the original filepath
:param target_path: path to copy the file to
:param force_in_dry_run: force running the command during dry run
"""
if not force_in_dry_run and build_option('extended_dry_run'):
dry_run_msg("copied file %s to %s" % (path, target_path))
else:
try:
if os.path.exists(target_path) and os.path.samefile(path, target_path):
_log.debug("Not copying %s to %s since files are identical", path, target_path)
else:
mkdir(os.path.dirname(target_path), parents=True)
shutil.copy2(path, target_path)
_log.info("%s copied to %s", path, target_path)
except (IOError, OSError, shutil.Error) as err:
raise EasyBuildError("Failed to copy file %s to %s: %s", path, target_path, err)
def copy_dir(path, target_path, force_in_dry_run=False, **kwargs):
"""
Copy a directory from specified location to specified location
:param path: the original directory path
:param target_path: path to copy the directory to
:param force_in_dry_run: force running the command during dry run
Additional specified named arguments are passed down to shutil.copytree
"""
if not force_in_dry_run and build_option('extended_dry_run'):
dry_run_msg("copied directory %s to %s" % (path, target_path))
else:
try:
if os.path.exists(target_path):
raise EasyBuildError("Target location %s to copy %s to already exists", target_path, path)
shutil.copytree(path, target_path, **kwargs)
_log.info("%s copied to %s", path, target_path)
except (IOError, OSError) as err:
raise EasyBuildError("Failed to copy directory %s to %s: %s", path, target_path, err)
def copy(paths, target_path, force_in_dry_run=False):
"""
Copy single file/directory or list of files and directories to specified location
:param paths: path(s) to copy
:param target_path: target location
:param force_in_dry_run: force running the command during dry run
"""
if isinstance(paths, string_type):
paths = [paths]
_log.info("Copying %d files & directories to %s", len(paths), target_path)
for path in paths:
full_target_path = os.path.join(target_path, os.path.basename(path))
mkdir(os.path.dirname(full_target_path), parents=True)
if os.path.isfile(path):
copy_file(path, full_target_path, force_in_dry_run=force_in_dry_run)
elif os.path.isdir(path):
copy_dir(path, full_target_path, force_in_dry_run=force_in_dry_run)
else:
raise EasyBuildError("Specified path to copy is not an existing file or directory: %s", path)
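# Hedged usage sketch (not part of the original module): files and
# directories are dispatched to copy_file()/copy_dir() and land under the
# target directory by basename; all paths are hypothetical.
#
#     >>> copy(['/tmp/README', '/tmp/patches'], '/tmp/backup')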
def get_source_tarball_from_git(filename, targetdir, git_config):
"""
Downloads a git repository, at a specific tag or commit, recursively or not, and makes an archive of it
:param filename: name of the archive to save the code to (must be .tar.gz)
:param targetdir: target directory where to save the archive to
:param git_config: dictionary containing url, repo_name, recursive, and one of tag or commit
"""
# sanity check on git_config value being passed
if not isinstance(git_config, dict):
raise EasyBuildError("Found unexpected type of value for 'git_config' argument: %s" % type(git_config))
# Making a copy to avoid modifying the object with pops
git_config = git_config.copy()
tag = git_config.pop('tag', None)
url = git_config.pop('url', None)
repo_name = git_config.pop('repo_name', None)
commit = git_config.pop('commit', None)
recursive = git_config.pop('recursive', False)
# input validation of git_config dict
if git_config:
raise EasyBuildError("Found one or more unexpected keys in 'git_config' specification: %s", git_config)
if not repo_name:
raise EasyBuildError("repo_name not specified in git_config parameter")
if not tag and not commit:
raise EasyBuildError("Neither tag nor commit found in git_config parameter")
if tag and commit:
raise EasyBuildError("Tag and commit are mutually exclusive in git_config parameter")
if not url:
raise EasyBuildError("url not specified in git_config parameter")
if not filename.endswith('.tar.gz'):
raise EasyBuildError("git_config currently only supports filename ending in .tar.gz")
# prepare target directory and clone repository
mkdir(targetdir, parents=True)
targetpath = os.path.join(targetdir, filename)
# compose 'git clone' command, and run it
clone_cmd = ['git', 'clone']
if tag:
clone_cmd.extend(['--branch', tag])
if recursive:
clone_cmd.append('--recursive')
clone_cmd.append('%s/%s.git' % (url, repo_name))
tmpdir = tempfile.mkdtemp()
cwd = change_dir(tmpdir)
run.run_cmd(' '.join(clone_cmd), log_all=True, log_ok=False, simple=False, regexp=False)
# if a specific commit is asked for, check it out
if commit:
checkout_cmd = ['git', 'checkout', commit]
if recursive:
checkout_cmd.extend(['&&', 'git', 'submodule', 'update'])
run.run_cmd(' '.join(checkout_cmd), log_all=True, log_ok=False, simple=False, regexp=False, path=repo_name)
# create an archive and delete the git repo directory
tar_cmd = ['tar', 'cfvz', targetpath, '--exclude', '.git', repo_name]
run.run_cmd(' '.join(tar_cmd), log_all=True, log_ok=False, simple=False, regexp=False)
# cleanup (repo_name dir does not exist in dry run mode)
change_dir(cwd)
remove(tmpdir)
return targetpath
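# Hedged usage sketch (not part of the original module): clone a tagged
# (hypothetical) repository, including submodules, and pack it into a
# .tar.gz source archive.
#
#     >>> git_config = {
#     ...     'url': 'https://github.com/example',
#     ...     'repo_name': 'example-project',
#     ...     'tag': 'v1.2.3',
#     ...     'recursive': True,
#     ... }
#     >>> get_source_tarball_from_git('example-project-1.2.3.tar.gz', '/tmp/sources', git_config)
#     '/tmp/sources/example-project-1.2.3.tar.gz'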
def move_file(path, target_path, force_in_dry_run=False):
"""
Move a file from path to target_path
:param path: the original filepath
:param target_path: path to move the file to
:param force_in_dry_run: force running the command during dry run
"""
if not force_in_dry_run and build_option('extended_dry_run'):
dry_run_msg("moved file %s to %s" % (path, target_path))
else:
# remove first to ensure portability (shutil.move might fail when overwriting files in some systems)
remove_file(target_path)
try:
mkdir(os.path.dirname(target_path), parents=True)
shutil.move(path, target_path)
_log.info("%s moved to %s", path, target_path)
except (IOError, OSError) as err:
raise EasyBuildError("Failed to move %s to %s: %s", path, target_path, err)
def diff_files(path1, path2):
"""
Return unified diff between two files
"""
file1_lines = ['%s\n' % l for l in read_file(path1).split('\n')]
file2_lines = ['%s\n' % l for l in read_file(path2).split('\n')]
return ''.join(difflib.unified_diff(file1_lines, file2_lines, fromfile=path1, tofile=path2))
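# Hedged usage sketch (not part of the original module): the result is a
# plain unified diff string (empty if the hypothetical files are identical).
#
#     >>> print(diff_files('/tmp/foo.txt', '/tmp/bar.txt'))
#     --- /tmp/foo.txt
#     +++ /tmp/bar.txt
#     @@ ... @@ (hunks follow)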
def install_fake_vsc():
"""
Put fake 'vsc' Python package in place, to catch easyblocks/scripts that still import from vsc.* namespace
(vsc-base & vsc-install were ingested into the EasyBuild framework for EasyBuild 4.0,
see https://github.com/easybuilders/easybuild-framework/pull/2708)
"""
# note: install_fake_vsc is called before parsing configuration, so avoid using functions that use build_option,
# like mkdir, write_file, ...
fake_vsc_path = os.path.join(tempfile.mkdtemp(prefix='fake_vsc_'))
fake_vsc_init = '\n'.join([
'import sys',
'import inspect',
'',
'stack = inspect.stack()',
'filename, lineno = "UNKNOWN", "UNKNOWN"',
'',
'for frame in stack[1:]:',
' _, cand_filename, cand_lineno, _, code_context, _ = frame',
' if code_context:',
' filename, lineno = cand_filename, cand_lineno',
' break',
'',
'sys.stderr.write("\\nERROR: Detected import from \'vsc\' namespace in %s (line %s)\\n" % (filename, lineno))',
'sys.stderr.write("vsc-base & vsc-install were ingested into the EasyBuild framework in EasyBuild v4.0\\n")',
'sys.stderr.write("The functionality you need may be available in the \'easybuild.base.*\' namespace.\\n")',
'sys.exit(1)',
])
fake_vsc_init_path = os.path.join(fake_vsc_path, 'vsc', '__init__.py')
if not os.path.exists(os.path.dirname(fake_vsc_init_path)):
os.makedirs(os.path.dirname(fake_vsc_init_path))
with open(fake_vsc_init_path, 'w') as fp:
fp.write(fake_vsc_init)
sys.path.insert(0, fake_vsc_path)
return fake_vsc_path
gppezzi/easybuild-framework | easybuild/tools/filetools.py | Python | gpl-2.0 | 74118 | ["NetCDF"] | d288bb3cdc25a21d24499c6d7a272104a05b2fe5f40709031aba0d66f0a785b2
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP input files. All major VASP input
files.
"""
import os
import re
import itertools
import warnings
import logging
import math
import json
import glob
import subprocess
import numpy as np
from numpy.linalg import det
from collections import OrderedDict, namedtuple
from hashlib import md5
from monty.io import zopen
from monty.os.path import zpath
from monty.json import MontyDecoder
from monty.os import cd
from enum import Enum
from tabulate import tabulate
import scipy.constants as const
from pymatgen import SETTINGS, __version__
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.string import str_delimited
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.typing import PathLike
from monty.json import MSONable
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, Vincent L Chevrier, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
logger = logging.getLogger(__name__)
class Poscar(MSONable):
"""
Object for representing the data in a POSCAR or CONTCAR file.
Please note that, in the current implementation, most attributes can be set
directly.
.. attribute:: structure
Associated Structure.
.. attribute:: comment
Optional comment string.
.. attribute:: true_names
Boolean indication whether Poscar contains actual real names parsed
from either a POTCAR or the POSCAR itself.
.. attribute:: selective_dynamics
Selective dynamics attribute for each site if available. A Nx3 array of
booleans.
.. attribute:: velocities
Velocities for each site (typically read in from a CONTCAR). A Nx3
array of floats.
.. attribute:: predictor_corrector
Predictor corrector coordinates and derivatives for each site; i.e.
a list of three 1x3 arrays for each site (typically read in from a MD
CONTCAR).
.. attribute:: predictor_corrector_preamble
Predictor corrector preamble contains the predictor-corrector key,
POTIM, and thermostat parameters that precede the site-specific predictor
corrector data in an MD CONTCAR
.. attribute:: temperature
Temperature of velocity Maxwell-Boltzmann initialization. Initialized
to -1 (MB hasn't been performed).
"""
def __init__(
self,
structure: Structure,
comment: str = None,
selective_dynamics=None,
true_names: bool = True,
velocities=None,
predictor_corrector=None,
predictor_corrector_preamble=None,
sort_structure: bool = False,
):
"""
:param structure: Structure object.
:param comment: Optional comment line for POSCAR. Defaults to unit
cell formula of structure. Defaults to None.
:param selective_dynamics: Nx3 array of bool values for selective dynamics,
where N is the number of sites. Defaults to None.
:param true_names: Set to False if the names in the POSCAR are not
well-defined and ambiguous. This situation arises commonly in
vasp < 5 where the POSCAR sometimes does not contain element
symbols. Defaults to True.
:param velocities: Velocities for the POSCAR. Typically parsed
in MD runs or can be used to initialize velocities.
:param predictor_corrector: Predictor corrector for the POSCAR.
Typically parsed in MD runs.
:param predictor_corrector_preamble: Preamble to the predictor
corrector.
:param sort_structure: Whether to sort structure. Useful if species
are not grouped properly together.
"""
if structure.is_ordered:
site_properties = {}
if selective_dynamics:
site_properties["selective_dynamics"] = selective_dynamics
if velocities:
site_properties["velocities"] = velocities
if predictor_corrector:
site_properties["predictor_corrector"] = predictor_corrector
structure = Structure.from_sites(structure)
self.structure = structure.copy(site_properties=site_properties)
if sort_structure:
self.structure = self.structure.get_sorted_structure()
self.true_names = true_names
self.comment = structure.formula if comment is None else comment
self.predictor_corrector_preamble = predictor_corrector_preamble
else:
raise ValueError(
"Structure with partial occupancies cannot be converted into POSCAR!"
)
self.temperature = -1
@property
def velocities(self):
"""Velocities in Poscar"""
return self.structure.site_properties.get("velocities")
@property
def selective_dynamics(self):
"""Selective dynamics in Poscar"""
return self.structure.site_properties.get("selective_dynamics")
@property
def predictor_corrector(self):
"""Predictor corrector in Poscar"""
return self.structure.site_properties.get("predictor_corrector")
@velocities.setter # type: ignore
def velocities(self, velocities):
"""Setter for Poscar.velocities"""
self.structure.add_site_property("velocities", velocities)
@selective_dynamics.setter # type: ignore
def selective_dynamics(self, selective_dynamics):
"""Setter for Poscar.selective_dynamics"""
self.structure.add_site_property("selective_dynamics", selective_dynamics)
@predictor_corrector.setter # type: ignore
def predictor_corrector(self, predictor_corrector):
"""Setter for Poscar.predictor_corrector"""
self.structure.add_site_property("predictor_corrector", predictor_corrector)
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Poscar. Similar to 6th line in
vasp 5+ POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.
"""
syms = [site.specie.symbol for site in self.structure]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def __setattr__(self, name, value):
if name in ("selective_dynamics", "velocities"):
if value is not None and len(value) > 0:
value = np.array(value)
dim = value.shape
if dim[1] != 3 or dim[0] != len(self.structure):
raise ValueError(
name + " array must be same length as" + " the structure."
)
value = value.tolist()
super().__setattr__(name, value)
@staticmethod
def from_file(filename, check_for_POTCAR=True, read_velocities=True):
"""
Reads a Poscar from a file.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If check_for_POTCAR is True, the code will try to check if a POTCAR
is in the same directory as the POSCAR and use elements from that by
default. (This is the VASP default sequence of priority).
2. If the input file is Vasp5-like and contains element symbols in the
6th line, the code will use that if check_for_POTCAR is False or there
is no POTCAR found.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
filename (str): File name containing Poscar data.
check_for_POTCAR (bool): Whether to check if a POTCAR is present
in the same directory as the POSCAR. Defaults to True.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
dirname = os.path.dirname(os.path.abspath(filename))
names = None
if check_for_POTCAR:
potcars = glob.glob(os.path.join(dirname, "*POTCAR*"))
if potcars:
try:
potcar = Potcar.from_file(sorted(potcars)[0])
names = [sym.split("_")[0] for sym in potcar.symbols]
[get_el_sp(n) for n in names] # ensure valid names
except Exception:
names = None
with zopen(filename, "rt") as f:
return Poscar.from_string(f.read(), names, read_velocities=read_velocities)
@staticmethod
def from_string(data, default_names=None, read_velocities=True):
"""
Reads a Poscar from a string.
The code will try its best to determine the elements in the POSCAR in
the following order:
1. If default_names are supplied and valid, it will use those. Usually,
default names come from an external source, such as a POTCAR in the
same directory.
2. If there are no valid default names but the input file is Vasp5-like
and contains element symbols in the 6th line, the code will use that.
3. Failing (2), the code will check if a symbol is provided at the end
of each coordinate.
If all else fails, the code will just assign the first n elements in
increasing atomic number, where n is the number of species, to the
Poscar. For example, H, He, Li, .... This will ensure at least a
unique element is assigned to each site and any analysis that does not
require specific elemental properties should work fine.
Args:
data (str): String containing Poscar data.
default_names ([str]): Default symbols for the POSCAR file,
usually coming from a POTCAR in the same directory.
read_velocities (bool): Whether to read or not velocities if they
are present in the POSCAR. Default is True.
Returns:
Poscar object.
"""
# "^\s*$" doesn't match lines with no whitespace
chunks = re.split(r"\n\s*\n", data.rstrip(), flags=re.MULTILINE)
try:
if chunks[0] == "":
chunks.pop(0)
chunks[0] = "\n" + chunks[0]
except IndexError:
raise ValueError("Empty POSCAR")
# Parse positions
lines = tuple(clean_lines(chunks[0].split("\n"), False))
comment = lines[0]
scale = float(lines[1])
lattice = np.array([[float(i) for i in line.split()] for line in lines[2:5]])
if scale < 0:
# In vasp, a negative scale factor is treated as a volume. We need
# to translate this to a proper lattice vector scaling.
vol = abs(det(lattice))
lattice *= (-scale / vol) ** (1 / 3)
else:
lattice *= scale
vasp5_symbols = False
try:
natoms = [int(i) for i in lines[5].split()]
ipos = 6
except ValueError:
vasp5_symbols = True
symbols = lines[5].split()
"""
Atoms and number of atoms in POSCAR written with vasp appear on
multiple lines when atoms of the same type are not grouped together
and more than 20 groups are then defined ...
Example :
Cr16 Fe35 Ni2
1.00000000000000
8.5415010000000002 -0.0077670000000000 -0.0007960000000000
-0.0077730000000000 8.5224019999999996 0.0105580000000000
-0.0007970000000000 0.0105720000000000 8.5356889999999996
Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr
Fe Ni Fe Cr Fe
1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1
2 1 3 2 5
Direct
...
"""
nlines_symbols = 1
for nlines_symbols in range(1, 11):
try:
int(lines[5 + nlines_symbols].split()[0])
break
except ValueError:
pass
for iline_symbols in range(6, 5 + nlines_symbols):
symbols.extend(lines[iline_symbols].split())
natoms = []
iline_natoms_start = 5 + nlines_symbols
for iline_natoms in range(
iline_natoms_start, iline_natoms_start + nlines_symbols
):
natoms.extend([int(i) for i in lines[iline_natoms].split()])
atomic_symbols = list()
for i in range(len(natoms)):
atomic_symbols.extend([symbols[i]] * natoms[i])
ipos = 5 + 2 * nlines_symbols
postype = lines[ipos].split()[0]
sdynamics = False
# Selective dynamics
if postype[0] in "sS":
sdynamics = True
ipos += 1
postype = lines[ipos].split()[0]
cart = postype[0] in "cCkK"
nsites = sum(natoms)
# If default_names is specified (usually coming from a POTCAR), use
# them. This is in line with Vasp's parsing order that the POTCAR
# specified is the default used.
if default_names:
try:
atomic_symbols = []
for i in range(len(natoms)):
atomic_symbols.extend([default_names[i]] * natoms[i])
vasp5_symbols = True
except IndexError:
pass
if not vasp5_symbols:
ind = 3 if not sdynamics else 6
try:
# Check if names are appended at the end of the coordinates.
atomic_symbols = [
l.split()[ind] for l in lines[ipos + 1: ipos + 1 + nsites]
]
# Ensure symbols are valid elements
if not all([Element.is_valid_symbol(sym) for sym in atomic_symbols]):
raise ValueError("Non-valid symbols detected.")
vasp5_symbols = True
except (ValueError, IndexError):
# Defaulting to false names.
atomic_symbols = []
for i in range(len(natoms)):
sym = Element.from_Z(i + 1).symbol
atomic_symbols.extend([sym] * natoms[i])
warnings.warn(
"Elements in POSCAR cannot be determined. "
"Defaulting to false names %s." % " ".join(atomic_symbols)
)
# read the atomic coordinates
coords = []
selective_dynamics = list() if sdynamics else None
for i in range(nsites):
toks = lines[ipos + 1 + i].split()
crd_scale = scale if cart else 1
coords.append([float(j) * crd_scale for j in toks[:3]])
if sdynamics:
selective_dynamics.append([tok.upper()[0] == "T" for tok in toks[3:6]])
struct = Structure(
lattice,
atomic_symbols,
coords,
to_unit_cell=False,
validate_proximity=False,
coords_are_cartesian=cart,
)
if read_velocities:
# Parse velocities if any
velocities = []
if len(chunks) > 1:
for line in chunks[1].strip().split("\n"):
velocities.append([float(tok) for tok in line.split()])
# Parse the predictor-corrector data
predictor_corrector = []
predictor_corrector_preamble = None
if len(chunks) > 2:
lines = chunks[2].strip().split("\n")
# There are 3 sets of 3xN Predictor corrector parameters
# So can't be stored as a single set of "site_property"
# First line in chunk is a key in CONTCAR
# Second line is POTIM
# Third line is the thermostat parameters
predictor_corrector_preamble = (
lines[0] + "\n" + lines[1] + "\n" + lines[2]
)
# Rest is three sets of parameters, each set contains
# x, y, z predictor-corrector parameters for every atom in order
lines = lines[3:]
for st in range(nsites):
d1 = [float(tok) for tok in lines[st].split()]
d2 = [float(tok) for tok in lines[st + nsites].split()]
d3 = [float(tok) for tok in lines[st + 2 * nsites].split()]
predictor_corrector.append([d1, d2, d3])
else:
velocities = None
predictor_corrector = None
predictor_corrector_preamble = None
return Poscar(
struct,
comment,
selective_dynamics,
vasp5_symbols,
velocities=velocities,
predictor_corrector=predictor_corrector,
predictor_corrector_preamble=predictor_corrector_preamble,
)
def get_string(self, direct=True, vasp4_compatible=False, significant_figures=6):
"""
Returns a string to be written as a POSCAR file. By default, site
symbols are written, which means compatibility is for vasp >= 5.
Args:
direct (bool): Whether coordinates are output in direct or
cartesian. Defaults to True.
vasp4_compatible (bool): Set to True to omit site symbols on 6th
line to maintain backward vasp 4.x compatibility. Defaults
to False.
significant_figures (int): No. of significant figures to
output all quantities. Defaults to 6. Note that positions are
output in fixed point, while velocities are output in
scientific format.
Returns:
String representation of POSCAR.
"""
# This corrects for VASP's really annoying bug of crashing on lattices
# which have triple product < 0. We will just invert the lattice
# vectors.
latt = self.structure.lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
format_str = "{{:.{0}f}}".format(significant_figures)
lines = [self.comment, "1.0"]
for v in latt.matrix:
lines.append(" ".join([format_str.format(c) for c in v]))
if self.true_names and not vasp4_compatible:
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
if self.selective_dynamics:
lines.append("Selective dynamics")
lines.append("direct" if direct else "cartesian")
selective_dynamics = self.selective_dynamics
for (i, site) in enumerate(self.structure):
coords = site.frac_coords if direct else site.coords
line = " ".join([format_str.format(c) for c in coords])
if selective_dynamics is not None:
sd = ["T" if j else "F" for j in selective_dynamics[i]]
line += " %s %s %s" % (sd[0], sd[1], sd[2])
line += " " + site.species_string
lines.append(line)
if self.velocities:
try:
lines.append("")
for v in self.velocities:
lines.append(" ".join([format_str.format(i) for i in v]))
except Exception:
warnings.warn("Velocities are missing or corrupted.")
if self.predictor_corrector:
lines.append("")
if self.predictor_corrector_preamble:
lines.append(self.predictor_corrector_preamble)
pred = np.array(self.predictor_corrector)
for col in range(3):
for z in pred[:, col]:
lines.append(" ".join([format_str.format(i) for i in z]))
else:
warnings.warn(
"Preamble information missing or corrupt. "
"Writing Poscar with no predictor corrector data."
)
return "\n".join(lines) + "\n"
def __repr__(self):
return self.get_string()
def __str__(self):
"""
String representation of Poscar file.
"""
return self.get_string()
def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def as_dict(self):
"""
:return: MSONable dict.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"true_names": self.true_names,
"selective_dynamics": np.array(self.selective_dynamics).tolist(),
"velocities": self.velocities,
"predictor_corrector": self.predictor_corrector,
"comment": self.comment,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Poscar
"""
return Poscar(
Structure.from_dict(d["structure"]),
comment=d["comment"],
selective_dynamics=d["selective_dynamics"],
true_names=d["true_names"],
velocities=d.get("velocities", None),
predictor_corrector=d.get("predictor_corrector", None),
)
def set_temperature(self, temperature):
"""
Initializes the velocities based on Maxwell-Boltzmann distribution.
Removes linear, but not angular drift (same as VASP)
Scales the energies to the exact temperature (microcanonical ensemble)
Velocities are given in A/fs. This is the vasp default when
direct/cartesian is not specified (even when positions are given in
direct coordinates)
Overwrites imported velocities, if any.
Args:
temperature (float): Temperature in Kelvin.
"""
# mean 0 variance 1
velocities = np.random.randn(len(self.structure), 3)
# in AMU, (N,1) array
atomic_masses = np.array(
[site.specie.atomic_mass.to("kg") for site in self.structure]
)
dof = 3 * len(self.structure) - 3
# scale velocities due to atomic masses
# mean 0 std proportional to sqrt(1/m)
velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)
# remove linear drift (net momentum)
velocities -= np.average(
atomic_masses[:, np.newaxis] * velocities, axis=0
) / np.average(atomic_masses)
# scale velocities to get correct temperature
energy = np.sum(1 / 2 * atomic_masses * np.sum(velocities ** 2, axis=1))
scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)
velocities *= scale * 1e-5 # these are in A/fs
self.temperature = temperature
try:
del self.structure.site_properties["selective_dynamics"]
except KeyError:
pass
try:
del self.structure.site_properties["predictor_corrector"]
except KeyError:
pass
# returns as a list of lists to be consistent with the other
# initializations
self.structure.add_site_property("velocities", velocities.tolist())
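# Hedged usage sketch (not part of the original file): parse a minimal,
# made-up VASP 5 POSCAR for a 2-atom Si cell and inspect the result.
#
#     >>> poscar = Poscar.from_string("""Si2
#     ... 1.0
#     ... 3.84 0.00 0.00
#     ... 0.00 3.84 0.00
#     ... 0.00 0.00 3.84
#     ... Si
#     ... 2
#     ... direct
#     ... 0.00 0.00 0.00
#     ... 0.50 0.50 0.50""")
#     >>> poscar.site_symbols, poscar.natoms
#     (['Si'], [2])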
cwd = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(cwd, "incar_parameters.json")) as incar_params:
incar_params = json.loads(incar_params.read())
class BadIncarWarning(UserWarning):
"""
Warning class for bad Incar parameters.
"""
pass
class Incar(dict, MSONable):
"""
INCAR object for reading and writing INCAR files. Essentially consists of
a dictionary with some helper functions
"""
def __init__(self, params=None):
"""
Creates an Incar object.
Args:
params (dict): A set of input parameters as a dictionary.
"""
super().__init__()
if params:
# if Incar contains vector-like magmoms given as a list
# of floats, convert to a list of lists
if (
params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))
) and (params.get("LSORBIT") or params.get("LNONCOLLINEAR")):
val = []
for i in range(len(params["MAGMOM"]) // 3):
val.append(params["MAGMOM"][i * 3: (i + 1) * 3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair to Incar. Warns if parameter is not in list of
valid INCAR tags. Also cleans the parameter and val by stripping
leading and trailing white spaces.
"""
super().__setitem__(
key.strip(),
Incar.proc_val(key.strip(), val.strip()) if isinstance(val, str) else val,
)
def as_dict(self):
"""
:return: MSONable dict.
"""
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Incar
"""
if d.get("MAGMOM") and isinstance(d["MAGMOM"][0], dict):
d["MAGMOM"] = [Magmom.from_dict(m) for m in d["MAGMOM"]]
return Incar({k: v for k, v in d.items() if k not in ("@module", "@class")})
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the INCAR. The reason why this
method is different from the __str__ method is to provide options for
pretty printing.
Args:
sort_keys (bool): Set to True to sort the INCAR parameters
alphabetically. Defaults to False.
pretty (bool): Set to True for pretty aligned output. Defaults
to False.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if (
isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)
) and (self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines], tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename):
"""
Write Incar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an Incar object from a file.
Args:
filename (str): Filename for file
Returns:
Incar object
"""
with zopen(filename, "rt") as f:
return Incar.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
"""
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(";"):
m = re.match(r"(\w+)\s*=\s*(.*)", sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = (
"LDAUU",
"LDAUL",
"LDAUJ",
"MAGMOM",
"DIPOL",
"LANGEVIN_GAMMA",
"QUAD_EFG",
"EINT",
)
bool_keys = (
"LDAU",
"LWAVE",
"LSCALU",
"LCHARG",
"LPLANE",
"LUSE_VDW",
"LHFCALC",
"ADDGRID",
"LSORBIT",
"LNONCOLLINEAR",
)
float_keys = (
"EDIFF",
"SIGMA",
"TIME",
"ENCUTFOCK",
"HFSCREEN",
"POTIM",
"EDIFFG",
"AGGAC",
"PARAM1",
"PARAM2",
)
int_keys = (
"NSW",
"NBANDS",
"NELMIN",
"ISIF",
"IBRION",
"ISPIN",
"ICHARG",
"NELM",
"ISMEAR",
"NPAR",
"LDAUPRINT",
"LMAXMIX",
"ENCUT",
"NSIM",
"NKRED",
"NUPDOWN",
"ISPIND",
"LDAUTYPE",
"IVDW",
)
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(
r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val
)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend(
[smart_int_or_float(tok[2])] * int(tok[0]) * int(tok[1])
)
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] * int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
return val.strip().capitalize()
def diff(self, other):
"""
Diff function for Incar. Compares two Incars and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other (Incar): The other Incar object to compare to.
Returns:
Dict of the following format:
{"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different}
Note that the parameters are returned as full dictionaries of values.
E.g. {"ISIF":3}
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another INCAR object to this object.
Facilitates the use of "standard" INCARs.
"""
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
def check_params(self):
"""
Raises a warning for nonsensical or non-existent INCAR tags and
parameters. If a keyword doesn't exist (e.g. there's a typo in a
keyword), your calculation will still run, but VASP will ignore the
parameter without letting you know, hence this Incar method.
"""
for k in self.keys():
# First check if this parameter even exists
if k not in incar_params.keys():
warnings.warn(
"Cannot find %s in the list of INCAR flags" % (k),
BadIncarWarning,
stacklevel=2,
)
if k in incar_params.keys():
if type(incar_params[k]).__name__ == "str":
# Now we check if this is an appropriate parameter type
if incar_params[k] == "float":
# warn unless the value is numeric (float or int)
if type(self[k]).__name__ not in ["float", "int"]:
warnings.warn(
"%s: %s is not real" % (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
elif type(self[k]).__name__ != incar_params[k]:
warnings.warn(
"%s: %s is not a %s" % (k, self[k], incar_params[k]),
BadIncarWarning,
stacklevel=2,
)
# if we have a list of possible parameters, check
# if the user given parameter is in this list
elif type(incar_params[k]).__name__ == "list":
if self[k] not in incar_params[k]:
warnings.warn(
"%s: Cannot find %s in the list of parameters"
% (k, self[k]),
BadIncarWarning,
stacklevel=2,
)
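# Hedged usage sketch (not part of the original file): values are coerced by
# Incar.proc_val(), so numeric, boolean and '3*5'-style list tags come back
# as proper Python types.
#
#     >>> incar = Incar.from_string("ENCUT = 520\nLDAU = .TRUE.\nMAGMOM = 3*5")
#     >>> incar['ENCUT'], incar['LDAU'], incar['MAGMOM']
#     (520, True, [5, 5, 5])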
class Kpoints_supported_modes(Enum):
"""
Enum type of all supported modes for Kpoint generation.
"""
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return self.name
@staticmethod
def from_string(s: str) -> "Kpoints_supported_modes":
"""
:param s: String
:return: Kpoints_supported_modes
"""
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interpret Kpoint mode %s" % s)
class Kpoints(MSONable):
"""
KPOINT reader/writer.
"""
supported_modes = Kpoints_supported_modes
def __init__(
self,
comment="Default gamma",
num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),),
kpts_shift=(0, 0, 0),
kpts_weights=None,
coord_type=None,
labels=None,
tet_number=0,
tet_weight=0,
tet_connections=None,
):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
require the flexibility. For most usage cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError(
"For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified."
)
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
"""
:return: Style for kpoint generation. One of Kpoints_supported_modes
enum.
"""
return self._style
@style.setter
def style(self, style):
"""
:param style: Style
:return: Sets the style for the Kpoints. One of Kpoints_supported_modes
enum.
"""
if isinstance(style, str):
style = Kpoints.supported_modes.from_string(style)
if (
style
in (
Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst,
)
and len(self.kpts) > 1
):
raise ValueError(
"For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed."
)
self._style = style
@staticmethod
def automatic(subdivisions):
"""
Convenient static constructor for a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
Returns:
Kpoints object
"""
return Kpoints(
"Fully automatic kpoint scheme",
0,
style=Kpoints.supported_modes.Automatic,
kpts=[[subdivisions]],
)
@staticmethod
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Gamma,
kpts=[kpts],
kpts_shift=shift,
)
@staticmethod
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints(
"Automatic kpoint scheme",
0,
Kpoints.supported_modes.Monkhorst,
kpts=[kpts],
kpts_shift=shift,
)
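    # The three automatic schemes side by side (hedged sketch; the arguments
    # are illustrative, not recommended defaults):
    #
    #   >>> Kpoints.automatic(20)                    # length-based, one line
    #   >>> Kpoints.gamma_automatic((4, 4, 4))
    #   >>> Kpoints.monkhorst_automatic((4, 4, 4), shift=(0.5, 0.5, 0.5))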
@staticmethod
def automatic_density(structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
Returns:
Kpoints
"""
comment = "pymatgen v%s with grid density = %.0f / atom" % (__version__, kppa)
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
is_hexagonal = latt.is_hexagonal()
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
return Kpoints(comment, 0, style, [num_div], [0, 0, 0])
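    # Hedged sketch of the density heuristic; `si` is an assumed two-site
    # silicon Structure, not defined in this module. For kppa=1000 this yields
    # roughly 7-8 divisions along each reciprocal lattice vector, with Gamma
    # centering chosen automatically for odd or hexagonal meshes.
    #
    #   >>> kp = Kpoints.automatic_density(si, 1000)
    #   >>> kp.style, kp.kpts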
@staticmethod
def automatic_gamma_density(structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
"""
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
        # VASP documentation recommends using even grids for n <= 8 and odd
        # grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = (
"pymatgen 4.7.6+ generated KPOINTS with grid density = "
+ "{} / atom".format(kppa)
)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
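    # Worked sketch of the parity rule used above (pure arithmetic, no
    # structure required): n <= 8 rounds up to even, n > 8 rounds to odd.
    #
    #   >>> [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in [3, 8, 9, 12]]
    #   [4, 8, 9, 13]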
@staticmethod
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints
"""
vol = structure.lattice.reciprocal_lattice.volume
kppa = kppvol * vol * structure.num_sites
return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
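    # Hedged equivalence sketch: with `si` an assumed Structure, the call
    # below reduces to automatic_density with
    # kppa = kppvol * reciprocal_volume * num_sites.
    #
    #   >>> Kpoints.automatic_density_by_vol(si, 100)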
@staticmethod
def automatic_linemode(divisions, ibz):
"""
        Convenient static constructor for a KPOINTS object in line mode,
        typically used for band structure calculations. k-points are generated
        along the high symmetry lines provided by the ibz argument.
        Args:
            divisions: Parameter determining the number of k-points along each
                high symmetry line.
ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)
Returns:
Kpoints object
"""
kpoints = list()
labels = list()
for path in ibz.kpath["path"]:
kpoints.append(ibz.kpath["kpoints"][path[0]])
labels.append(path[0])
for i in range(1, len(path) - 1):
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[i]])
labels.append(path[i])
kpoints.append(ibz.kpath["kpoints"][path[-1]])
labels.append(path[-1])
return Kpoints(
"Line_mode KPOINTS file",
style=Kpoints.supported_modes.Line_mode,
coord_type="Reciprocal",
kpts=kpoints,
labels=labels,
num_kpts=int(divisions),
)
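    # Hedged usage sketch; HighSymmKpath is the pymatgen class mentioned in
    # the docstring and `structure` is an assumed input Structure:
    #
    #   >>> from pymatgen.symmetry.bandstructure import HighSymmKpath
    #   >>> kp = Kpoints.automatic_linemode(20, HighSymmKpath(structure))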
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with zopen(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(
r"^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+" r"([\d+.\-Ee]+)"
)
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
return (
Kpoints.gamma_automatic(kpts, kpts_shift)
if style == "g"
else Kpoints.monkhorst_automatic(kpts, kpts_shift)
)
# Automatic kpoints with basis
if num_kpts <= 0:
style = (
Kpoints.supported_modes.Cartesian
if style in "ck"
else Kpoints.supported_modes.Reciprocal
)
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
kpts_shift=kpts_shift,
)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(
r"([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)" r"\s*!*\s*(.*)"
)
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append(
[float(m.group(1)), float(m.group(2)), float(m.group(3))]
)
labels.append(m.group(4).strip())
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=style,
kpts=kpts,
coord_type=coord_type,
labels=labels,
)
# Assume explicit KPOINTS if all else fails.
style = (
Kpoints.supported_modes.Cartesian
if style in "ck"
else Kpoints.supported_modes.Reciprocal
)
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append(
(int(toks[0]), [int(toks[j]) for j in range(1, 5)])
)
except IndexError:
pass
return Kpoints(
comment=comment,
num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts,
kpts_weights=kpts_weights,
tet_number=tet_number,
tet_weight=tet_weight,
tet_connections=tet_connections,
labels=labels,
)
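    # Minimal parsing sketch; the 4-line KPOINTS string below is illustrative:
    #
    #   >>> s = "Automatic mesh\n0\nGamma\n4 4 4"
    #   >>> kp = Kpoints.from_string(s)
    #   >>> str(kp.style), kp.kpts
    #   ('Gamma', [[4, 4, 4]])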
def write_file(self, filename):
"""
Write Kpoints to a file.
Args:
filename (str): Filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i], self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append(
"%d %d %d %d %d"
% (sym_weight, vertices[0], vertices[1], vertices[2], vertices[3])
)
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {
"comment": self.comment,
"nkpoints": self.num_kpts,
"generation_style": self.style.name,
"kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights,
"coord_type": self.coord_type,
"labels": self.labels,
"tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections,
}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: Kpoints
"""
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(
comment=comment,
kpts=kpts,
style=generation_style,
kpts_shift=kpts_shift,
num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"),
tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"),
)
def _parse_string(s):
    return s.strip()
def _parse_bool(s):
    m = re.match(r"^\.?([TFtf])[A-Za-z]*\.?", s)
    if m:
        return m.group(1) in "Tt"
    raise ValueError(s + " should be a boolean type!")
def _parse_float(s):
return float(re.search(r"^-?\d*\.?\d*[eE]?-?\d*", s).group(0))
def _parse_int(s):
return int(re.match(r"^-?[0-9]+", s).group(0))
def _parse_list(s):
return [float(y) for y in re.split(r"\s+", s.strip()) if not y.isalpha()]
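# Quick sanity sketch of the parsing helpers above (pure functions; the
# inputs are illustrative POTCAR-style fragments):
#
#   >>> _parse_bool(".TRUE."), _parse_float("1.5E-02"), _parse_int("8 NMAX")
#   (True, 0.015, 8)
#   >>> _parse_list("  1.0   2.0  gauss")
#   [1.0, 2.0]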
Orbital = namedtuple("Orbital", ["n", "l", "j", "E", "occ"])
OrbitalDescription = namedtuple(
"OrbitalDescription", ["l", "E", "Type", "Rcut", "Type2", "Rcut2"]
)
class PotcarSingle:
"""
Object for a **single** POTCAR. The builder assumes the POTCAR contains
the complete untouched data in "data" as a string and a dict of keywords.
.. attribute:: data
POTCAR data as a string.
.. attribute:: keywords
Keywords parsed from the POTCAR as a dict. All keywords are also
accessible as attributes in themselves. E.g., potcar.enmax,
potcar.encut, etc.
md5 hashes of the entire POTCAR file and the actual data are validated
against a database of known good hashes. Appropriate warnings or errors
are raised if a POTCAR hash fails validation.
"""
functional_dir = {
"PBE": "POT_GGA_PAW_PBE",
"PBE_52": "POT_GGA_PAW_PBE_52",
"PBE_54": "POT_GGA_PAW_PBE_54",
"LDA": "POT_LDA_PAW",
"LDA_52": "POT_LDA_PAW_52",
"LDA_54": "POT_LDA_PAW_54",
"PW91": "POT_GGA_PAW_PW91",
"LDA_US": "POT_LDA_US",
"PW91_US": "POT_GGA_US_PW91",
"Perdew-Zunger81": "POT_LDA_PAW",
}
functional_tags = {
"pe": {"name": "PBE", "class": "GGA"},
"91": {"name": "PW91", "class": "GGA"},
"rp": {"name": "revPBE", "class": "GGA"},
"am": {"name": "AM05", "class": "GGA"},
"ps": {"name": "PBEsol", "class": "GGA"},
"pw": {"name": "PW86", "class": "GGA"},
"lm": {"name": "Langreth-Mehl-Hu", "class": "GGA"},
"pb": {"name": "Perdew-Becke", "class": "GGA"},
"ca": {"name": "Perdew-Zunger81", "class": "LDA"},
"hl": {"name": "Hedin-Lundquist", "class": "LDA"},
"wi": {"name": "Wigner Interpoloation", "class": "LDA"},
}
parse_functions = {
"LULTRA": _parse_bool,
"LUNSCR": _parse_bool,
"LCOR": _parse_bool,
"LPAW": _parse_bool,
"EATOM": _parse_float,
"RPACOR": _parse_float,
"POMASS": _parse_float,
"ZVAL": _parse_float,
"RCORE": _parse_float,
"RWIGS": _parse_float,
"ENMAX": _parse_float,
"ENMIN": _parse_float,
"EMMIN": _parse_float,
"EAUG": _parse_float,
"DEXC": _parse_float,
"RMAX": _parse_float,
"RAUG": _parse_float,
"RDEP": _parse_float,
"RDEPT": _parse_float,
"QCUT": _parse_float,
"QGAM": _parse_float,
"RCLOC": _parse_float,
"IUNSCR": _parse_int,
"ICORE": _parse_int,
"NDATA": _parse_int,
"VRHFIN": _parse_string,
"LEXCH": _parse_string,
"TITEL": _parse_string,
"STEP": _parse_list,
"RRKJ": _parse_list,
"GGA": _parse_list,
}
def __init__(self, data, symbol=None):
"""
Args:
data:
                Complete and single POTCAR file as a string.
            symbol:
                POTCAR symbol corresponding to the filename suffix,
                e.g. "Tm_3" for "POTCAR.Tm_3". If not given, pymatgen
will attempt to extract the symbol from the file itself.
However, this is not always reliable!
"""
self.data = data # raw POTCAR as a string
        # VASP parses the header in vasprun.xml, and this differs from the TITEL
self.header = data.split("\n")[0].strip()
search_lines = re.search(
r"(?s)(parameters from PSCTR are:" r".*?END of PSCTR-controll parameters)",
data,
).group(1)
self.keywords = {}
for key, val in re.findall(
r"(\S+)\s*=\s*(.*?)(?=;|$)", search_lines, flags=re.MULTILINE
):
try:
self.keywords[key] = self.parse_functions[key](val)
except KeyError:
warnings.warn("Ignoring unknown variable type %s" % key)
PSCTR = OrderedDict()
array_search = re.compile(r"(-*[0-9.]+)")
orbitals = []
descriptions = []
atomic_configuration = re.search(
r"Atomic configuration\s*\n?" r"(.*?)Description", search_lines
)
if atomic_configuration:
lines = atomic_configuration.group(1).splitlines()
num_entries = re.search(r"([0-9]+)", lines[0]).group(1)
num_entries = int(num_entries)
PSCTR["nentries"] = num_entries
for line in lines[1:]:
orbit = array_search.findall(line)
if orbit:
orbitals.append(
                        Orbital(
int(orbit[0]),
int(orbit[1]),
float(orbit[2]),
float(orbit[3]),
float(orbit[4]),
)
)
PSCTR["Orbitals"] = tuple(orbitals)
description_string = re.search(
r"(?s)Description\s*\n"
r"(.*?)Error from kinetic"
r" energy argument \(eV\)",
search_lines,
)
if description_string:
for line in description_string.group(1).splitlines():
description = array_search.findall(line)
if description:
descriptions.append(
OrbitalDescription(
int(description[0]),
float(description[1]),
int(description[2]),
float(description[3]),
int(description[4]) if len(description) > 4 else None,
float(description[5]) if len(description) > 4 else None,
)
)
if descriptions:
PSCTR["OrbitalDescriptions"] = tuple(descriptions)
rrkj_kinetic_energy_string = re.search(
r"(?s)Error from kinetic energy argument \(eV\)\s*\n"
r"(.*?)END of PSCTR-controll parameters",
search_lines,
)
rrkj_array = []
if rrkj_kinetic_energy_string:
for line in rrkj_kinetic_energy_string.group(1).splitlines():
if "=" not in line:
rrkj_array += _parse_list(line.strip("\n"))
if rrkj_array:
PSCTR["RRKJ"] = tuple(rrkj_array)
PSCTR.update(self.keywords)
self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))
self.hash = self.get_potcar_hash()
if symbol:
self._symbol = symbol
else:
try:
self._symbol = self.keywords["TITEL"].split(" ")[1].strip()
except IndexError:
self._symbol = self.keywords["TITEL"].strip()
def __str__(self):
return self.data + "\n"
@property
def electron_configuration(self):
"""
:return: Electronic configuration of the PotcarSingle.
"""
if not self.nelectrons.is_integer():
warnings.warn(
"POTCAR has non-integer charge, "
"electron configuration not well-defined."
)
return None
el = Element.from_Z(self.atomic_no)
full_config = el.full_electronic_structure
nelect = self.nelectrons
config = []
while nelect > 0:
e = full_config.pop(-1)
config.append(e)
nelect -= e[-1]
return config
def write_file(self, filename: str) -> None:
"""
Writes PotcarSingle to a file.
:param filename: Filename
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename: str) -> "PotcarSingle":
"""
Reads PotcarSingle from file.
:param filename: Filename.
:return: PotcarSingle.
"""
        match = re.search(r"(?<=POTCAR\.)(.*)(?=\.gz)", str(filename))
if match:
symbol = match.group(0)
else:
symbol = ""
try:
with zopen(filename, "rt") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
except UnicodeDecodeError:
warnings.warn(
"POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors."
)
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
return PotcarSingle(f.read(), symbol=symbol or None)
@staticmethod
def from_symbol_and_functional(symbol: str, functional: str = None):
"""
Makes a PotcarSingle from a symbol and functional.
:param symbol: Symbol, e.g., Li_sv
:param functional: E.g., PBE
:return: PotcarSingle
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
funcdir = PotcarSingle.functional_dir[functional]
d = SETTINGS.get("PMG_VASP_PSP_DIR")
if d is None:
raise ValueError(
"No POTCAR for %s with functional %s found. "
"Please set the PMG_VASP_PSP_DIR environment in "
".pmgrc.yaml, or you may need to set "
"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you "
"are using newer psps from VASP." % (symbol, functional)
)
paths_to_try = [
os.path.join(d, funcdir, "POTCAR.{}".format(symbol)),
os.path.join(d, funcdir, symbol, "POTCAR"),
]
for p in paths_to_try:
p = os.path.expanduser(p)
p = zpath(p)
if os.path.exists(p):
psingle = PotcarSingle.from_file(p)
return psingle
        raise IOError(
            "You do not have the right POTCAR with functional "
            + "{} and label {} in your PMG_VASP_PSP_DIR".format(functional, symbol)
        )
@property
def element(self):
"""
Attempt to return the atomic symbol based on the VRHFIN keyword.
"""
element = self.keywords["VRHFIN"].split(":")[0].strip()
try:
return Element(element).symbol
except ValueError:
# VASP incorrectly gives the element symbol for Xe as "X"
            # Some potentials, e.g., Zr_sv, give the symbol as r.
if element == "X":
return "Xe"
return Element(self.symbol.split("_")[0]).symbol
@property
def atomic_no(self) -> int:
"""
Attempt to return the atomic number based on the VRHFIN keyword.
"""
return Element(self.element).Z
@property
def nelectrons(self):
"""
:return: Number of electrons
"""
return self.zval
@property
def symbol(self):
"""
:return: The POTCAR symbol, e.g. W_pv
"""
return self._symbol
@property
def potential_type(self) -> str:
"""
:return: Type of PSP. E.g., US, PAW, etc.
"""
if self.lultra:
return "US"
elif self.lpaw:
return "PAW"
else:
return "NC"
@property
def functional(self):
"""
:return: Functional associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("name")
@property
def functional_class(self):
"""
:return: Functional class associated with PotcarSingle.
"""
return self.functional_tags.get(self.LEXCH.lower(), {}).get("class")
def get_potcar_hash(self):
"""
Computes a hash for the PotcarSingle.
:return: Hash value.
"""
hash_str = ""
for k, v in self.PSCTR.items():
hash_str += "{}".format(k)
            if isinstance(v, int):
                # bools are also caught here since bool is a subclass of int
                hash_str += "{}".format(v)
            elif isinstance(v, float):
                hash_str += "{:.3f}".format(v)
            elif isinstance(v, bool):
                hash_str += "{}".format(v)
elif isinstance(v, (tuple, list)):
for item in v:
if isinstance(item, float):
hash_str += "{:.3f}".format(item)
elif isinstance(item, (Orbital, OrbitalDescription)):
for item_v in item:
if isinstance(item_v, (int, str)):
hash_str += "{}".format(item_v)
elif isinstance(item_v, float):
hash_str += "{:.3f}".format(item_v)
else:
hash_str += "{}".format(item_v) if item_v else ""
else:
hash_str += v.replace(" ", "")
self.hash_str = hash_str
return md5(hash_str.lower().encode("utf-8")).hexdigest()
def __getattr__(self, a):
"""
Delegates attributes to keywords. For example, you can use
potcarsingle.enmax to get the ENMAX of the POTCAR.
For float type properties, they are converted to the correct float. By
default, all energies in eV and all length scales are in Angstroms.
"""
try:
return self.keywords[a.upper()]
except Exception:
raise AttributeError(a)
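    # Hedged delegation sketch; the path below is an assumed example file:
    #
    #   >>> ps = PotcarSingle.from_file("POTCAR.Fe_pv.gz")
    #   >>> ps.enmax   # equivalent to ps.keywords["ENMAX"], parsed as float
    #   >>> ps.titel   # equivalent to ps.keywords["TITEL"]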
class Potcar(list, MSONable):
"""
Object for reading and writing POTCAR files for calculations. Consists of a
list of PotcarSingle.
"""
FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())
def __init__(self, symbols=None, functional=None, sym_potcar_map=None):
"""
Args:
symbols ([str]): Element symbols for POTCAR. This should correspond
to the symbols used by VASP. E.g., "Mg", "Fe_pv", etc.
functional (str): Functional used. To know what functional options
there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has
different versions of the same functional. By default, the old
PBE functional is used. If you want the newer ones, use PBE_52 or
PBE_54. Note that if you intend to compare your results with the
Materials Project, you should use the default setting. You can also
override the default by setting PMG_DEFAULT_FUNCTIONAL in your
.pmgrc.yaml.
sym_potcar_map (dict): Allows a user to specify a specific element
symbol to raw POTCAR mapping.
"""
if functional is None:
functional = SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE")
super().__init__()
self.functional = functional
if symbols is not None:
self.set_symbols(symbols, functional, sym_potcar_map)
def as_dict(self):
"""
:return: MSONable dict representation
"""
return {
"functional": self.functional,
"symbols": self.symbols,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
}
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Potcar
"""
return Potcar(symbols=d["symbols"], functional=d["functional"])
@staticmethod
def from_file(filename: str):
"""
Reads Potcar from file.
:param filename: Filename
:return: Potcar
"""
try:
with zopen(filename, "rt") as f:
fdata = f.read()
except UnicodeDecodeError:
warnings.warn(
"POTCAR contains invalid unicode errors. "
"We will attempt to read it by ignoring errors."
)
import codecs
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
fdata = f.read()
potcar = Potcar()
potcar_strings = re.compile(r"\n?(\s*.*?End of Dataset)", re.S).findall(fdata)
functionals = []
for p in potcar_strings:
single = PotcarSingle(p)
potcar.append(single)
functionals.append(single.functional)
if len(set(functionals)) != 1:
raise ValueError("File contains incompatible functionals!")
else:
potcar.functional = functionals[0]
return potcar
def __str__(self):
return "\n".join([str(potcar).strip("\n") for potcar in self]) + "\n"
def write_file(self, filename):
"""
Write Potcar to a file.
Args:
filename (str): filename to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
@property
def symbols(self):
"""
Get the atomic symbols of all the atoms in the POTCAR file.
"""
return [p.symbol for p in self]
@symbols.setter
def symbols(self, symbols):
self.set_symbols(symbols, functional=self.functional)
@property
def spec(self):
"""
Get the atomic symbols and hash of all the atoms in the POTCAR file.
"""
return [{"symbol": p.symbol, "hash": p.get_potcar_hash()} for p in self]
def set_symbols(self, symbols, functional=None, sym_potcar_map=None):
"""
Initialize the POTCAR from a set of symbols. Currently, the POTCARs can
be fetched from a location specified in .pmgrc.yaml. Use pmg config
to add this setting.
Args:
symbols ([str]): A list of element symbols
functional (str): The functional to use. If None, the setting
PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
not set, it will default to PBE.
sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
sym_potcar_map is specified, POTCARs will be generated from
the given map data rather than the config file location.
"""
del self[:]
if sym_potcar_map:
for el in symbols:
self.append(PotcarSingle(sym_potcar_map[el]))
else:
for el in symbols:
p = PotcarSingle.from_symbol_and_functional(el, functional)
self.append(p)
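    # Minimal usage sketch (requires PMG_VASP_PSP_DIR to be configured; the
    # labels below are ordinary VASP PAW symbols):
    #
    #   >>> potcar = Potcar(["Fe_pv", "O"], functional="PBE")
    #   >>> potcar.symbols
    #   ['Fe_pv', 'O']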
class VaspInput(dict, MSONable):
"""
Class to contain a set of vasp input objects corresponding to a run.
"""
def __init__(self, incar, kpoints, poscar, potcar, optional_files=None, **kwargs):
"""
Args:
incar: Incar object.
kpoints: Kpoints object.
poscar: Poscar object.
potcar: Potcar object.
            optional_files: Other input files supplied as a dict of {
                filename: object}. The object should follow standard pymatgen
                conventions in implementing an as_dict() and from_dict() method.
"""
super().__init__(**kwargs)
self.update(
{"INCAR": incar, "KPOINTS": kpoints, "POSCAR": poscar, "POTCAR": potcar}
)
if optional_files is not None:
self.update(optional_files)
def __str__(self):
output = []
for k, v in self.items():
output.append(k)
output.append(str(v))
output.append("")
return "\n".join(output)
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {k: v.as_dict() for k, v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation.
:return: VaspInput
"""
dec = MontyDecoder()
sub_d = {"optional_files": {}}
for k, v in d.items():
if k in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
sub_d[k.lower()] = dec.process_decoded(v)
elif k not in ["@module", "@class"]:
sub_d["optional_files"][k] = dec.process_decoded(v)
return cls(**sub_d)
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Write VASP input to a directory.
Args:
output_dir (str): Directory to write to. Defaults to current
directory (".").
make_dir_if_not_present (bool): Create the directory if not
present. Defaults to True.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.items():
if v is not None:
with zopen(os.path.join(output_dir, k), "wt") as f:
f.write(v.__str__())
@staticmethod
def from_directory(input_dir, optional_files=None):
"""
        Read in a set of VASP inputs from a directory. Note that only the
        standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless
        optional_files is specified.
Args:
input_dir (str): Directory to read VASP input from.
optional_files (dict): Optional files to read in as well as a
dict of {filename: Object type}. Object type must have a
static method from_file.
"""
sub_d = {}
for fname, ftype in [
("INCAR", Incar),
("KPOINTS", Kpoints),
("POSCAR", Poscar),
("POTCAR", Potcar),
]:
try:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
            except FileNotFoundError:  # handle the case where there is no KPOINTS file
                sub_d[fname.lower()] = None
sub_d["optional_files"] = {}
if optional_files is not None:
for fname, ftype in optional_files.items():
sub_d["optional_files"][fname] = ftype.from_file(
os.path.join(input_dir, fname)
)
return VaspInput(**sub_d)
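    # Hedged end-to-end sketch; "./my_run" is an assumed directory already
    # holding INCAR/KPOINTS/POSCAR/POTCAR, and ENCUT is assumed to be set in
    # that INCAR:
    #
    #   >>> vi = VaspInput.from_directory("./my_run")
    #   >>> vi["INCAR"]["ENCUT"]
    #   >>> vi.write_input(output_dir="./my_run_copy")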
def run_vasp(
self,
run_dir: PathLike = ".",
vasp_cmd: list = None,
output_file: PathLike = "vasp.out",
err_file: PathLike = "vasp.err",
):
"""
Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err.
"""
        self.write_input(output_dir=run_dir)
        vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE")
        if not vasp_cmd:
            raise RuntimeError(
                "You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP."
            )
        # expand ~ and environment variables only after verifying we have a command
        vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
with cd(run_dir):
with open(output_file, "w") as f_std, open(
err_file, "w", buffering=1
) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err)
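    # Hedged usage sketch; the MPI command below is an assumed example, not a
    # pymatgen default:
    #
    #   >>> vi = VaspInput.from_directory(".")
    #   >>> vi.run_vasp(run_dir="scratch",
    #   ...             vasp_cmd=["mpirun", "-np", "16", "vasp_std"])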
| fraricci/pymatgen | pymatgen/io/vasp/inputs.py | Python | mit | 80938 | ["VASP", "pymatgen"] | 79393a95102f1057f0e58e62e7514eadc9edcc141ecd996a786041787b93e50b |