repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
nerevu/riko | riko/modules/csv.py | 1 | 6880 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.csv
~~~~~~~~~~~~~~~~
Provides functions for fetching csv files.
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.modules.csv import pipe
>>>
>>> url = get_path('spreadsheet.csv')
>>> next(pipe(conf={'url': url}))['mileage'] == '7213'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from meza.io import read_csv
from meza.process import merge
from . import processor
from riko import ENCODING
from riko.bado import coroutine, return_value, io
from riko.utils import fetch, auto_close, get_abspath
# 'ftype': 'none' marks this as a source pipe: it takes no input feed item
OPTS = {'ftype': 'none'}

# Parser options applied whenever the corresponding key is absent from conf.
# Note: 'sanitize' and 'dedupe' default to True here.
DEFAULTS = {
    'delimiter': ',', 'quotechar': '"', 'encoding': ENCODING, 'skip_rows': 0,
    'sanitize': True, 'dedupe': True, 'col_names': None, 'has_header': True}

# Module-level logger (monolog: single handler)
logger = gogo.Gogo(__name__, monolog=True).logger
@coroutine
def async_parser(_, objconf, skip=False, **kwargs):
    """ Asynchronously parses the pipe content

    Args:
        _ (None): Ignored
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        stream (dict): The original item

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from riko import get_path
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>> from meza.fntools import Objectify
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['mileage'])
        ...     url = get_path('spreadsheet.csv')
        ...     conf = {
        ...         'url': url, 'sanitize': True, 'skip_rows': 0,
        ...         'encoding': ENCODING}
        ...     objconf = Objectify(conf)
        ...     d = async_parser(None, objconf, stream={})
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        7213
    """
    if skip:
        # An upstream wrapper already produced the stream; pass it through.
        stream = kwargs['stream']
    else:
        url = get_abspath(objconf.url)
        # Non-blocking open; the coroutine resumes once the url is readable.
        r = yield io.async_url_open(url)
        # meza's read_csv uses 'first_row'/'custom_header' instead of the
        # riko conf keys 'skip_rows'/'col_names', so map them over.
        first_row, custom_header = objconf.skip_rows, objconf.col_names
        renamed = {'first_row': first_row, 'custom_header': custom_header}
        rkwargs = merge([objconf, renamed])
        # auto_close closes the handle once the row generator is exhausted.
        stream = auto_close(read_csv(r, **rkwargs), r)
    return_value(stream)
def parser(_, objconf, skip=False, **kwargs):
    """ Parses the pipe content

    Args:
        _ (None): Ignored
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content

    Returns:
        Iter[dict]: The stream of items

    Examples:
        >>> from riko import get_path
        >>> from meza.fntools import Objectify
        >>>
        >>> url = get_path('spreadsheet.csv')
        >>> conf = {
        ...     'url': url, 'sanitize': True, 'skip_rows': 0,
        ...     'encoding': ENCODING}
        >>> objconf = Objectify(conf)
        >>> result = parser(None, objconf, stream={})
        >>> next(result)['mileage'] == '7213'
        True
    """
    if skip:
        # An upstream wrapper already produced the stream; pass it through.
        return kwargs['stream']

    # meza's read_csv uses 'first_row'/'custom_header' instead of the riko
    # conf keys 'skip_rows'/'col_names', so map them over.
    renamed = {
        'first_row': objconf.skip_rows, 'custom_header': objconf.col_names}

    handle = fetch(decode=True, **objconf)
    read_kwargs = merge([objconf, renamed])

    # auto_close closes the handle once the row generator is exhausted
    return auto_close(read_csv(handle, **read_kwargs), handle)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A source that asynchronously fetches and parses a csv file to yield
    items.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. Must contain the key 'url'. May
            contain the keys 'delimiter', 'quotechar', 'encoding', 'skip_rows',
            'sanitize', 'dedupe', 'col_names', or 'has_header'.

            url (str): The csv file to fetch
            delimiter (str): Field delimiter (default: ',').
            quotechar (str): Quote character (default: '"').
            encoding (str): File encoding (default: 'utf-8').
            has_header (bool): Has header row (default: True).
            skip_rows (int): Number of initial rows to skip (zero based,
                default: 0).

            sanitize (bool): Underscorify and lowercase field names
                (default: True).

            dedupe (bool): Deduplicate column names (default: True).
            col_names (List[str]): Custom column names (default: None).

    Returns:
        dict: twisted.internet.defer.Deferred item

    Examples:
        >>> from riko import get_path
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['mileage'])
        ...     d = async_pipe(conf={'url': get_path('spreadsheet.csv')})
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        7213
    """
    return async_parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
    """A source that fetches and parses a csv file to yield items.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. Must contain the key 'url'. May
            contain the keys 'delimiter', 'quotechar', 'encoding', 'skip_rows',
            'sanitize', 'dedupe', 'col_names', or 'has_header'.

            url (str): The csv file to fetch
            delimiter (str): Field delimiter (default: ',').
            quotechar (str): Quote character (default: '"').
            encoding (str): File encoding (default: 'utf-8').
            has_header (bool): Has header row (default: True).
            skip_rows (int): Number of initial rows to skip (zero based,
                default: 0).

            sanitize (bool): Underscorify and lowercase field names
                (default: True).

            dedupe (bool): Deduplicate column names (default: True).
            col_names (List[str]): Custom column names (default: None).

    Yields:
        dict: item

    Examples:
        >>> from riko import get_path
        >>> url = get_path('spreadsheet.csv')
        >>> next(pipe(conf={'url': url}))['mileage'] == '7213'
        True
    """
    return parser(*args, **kwargs)
| mit | 9e1db01a15bb83248521312ce851fcb0 | 30.851852 | 79 | 0.563808 | 3.922463 | false | false | false | false |
nerevu/riko | riko/bado/microdom.py | 1 | 33177 | # -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Micro Document Object Model: a partial DOM implementation with SUX.
This is an implementation of what we consider to be the useful subset of the
DOM. The chief advantage of this library is that, not being burdened with
standards compliance, it can remain very stable between versions. We can also
implement utility 'pythonic' ways to access and mutate the XML tree.
Since this has not subjected to a serious trial by fire, it is not recommended
to use this outside of Twisted applications. However, it seems to work just
fine for the documentation generator, which parses a fairly representative
sample of XML.
Microdom mainly focuses on working with HTML and XHTML.
"""
import re
import itertools as it
from io import open, BytesIO, StringIO
from functools import partial
from meza.compat import encode, decode
try:
from twisted.python.util import InsensitiveDict
except ImportError:
pass
from .sux import XMLParser, ParseError
from riko.utils import invert_dict
from riko.parsers import ESCAPE, entity2text, text2entity
from meza.process import merge
# Characters that have well-known XML/HTML entity forms
HTML_ESCAPE_CHARS = {'&', '<', '>', '"'}

# Matches one complete (non-greedy) entity reference such as '&amp;'
entity_prog = re.compile('&(.*?);')

# NOTE(review): the single quotes sit *inside* the character class, so a
# literal apostrophe is matched in addition to the ESCAPE characters --
# confirm "[%s]" wasn't intended.
escape_prog = re.compile("['%s']" % ''.join(ESCAPE))
def unescape(text):
    """Replace XML entity references in *text* with their literal characters.

    NOTE(review): ``match`` is the full entity (e.g. ``'&amp;'``) while
    ``HTML_ESCAPE_CHARS`` holds single characters (``'&'``, ``'<'``, ...),
    so the condition below never appears to hold and entities pass through
    unchanged -- confirm the intended check against
    ``riko.parsers.entity2text`` before relying on this.
    """
    def repl(matchobj):
        match = matchobj.group(0)
        return entity2text(match) if match in HTML_ESCAPE_CHARS else match
    return entity_prog.sub(repl, text)
def escape(text):
    """Replace every character matched by ``escape_prog`` with its entity
    form via ``text2entity``.
    """
    def repl(matchobj):
        return text2entity(matchobj.group(0))

    return escape_prog.sub(repl, text)
def unescape_dict(d):
    """Return a copy of *d* with every value passed through ``unescape``."""
    return dict((key, unescape(value)) for key, value in d.items())
def get_elements_by_tag_name(iNode, path, icase=False):
    """
    Yield all child elements of C{iNode} with a name matching C{path}.

    @param iNode: An element (or a list of nodes) at which to begin
        searching. If C{iNode} has a name matching C{path}, it will be
        included in the result.

    @param path: A C{str} giving the name of the elements to return. When
        C{iNode} is a list, the name may carry a 1-based position suffix,
        e.g. C{'item[2]'}, selecting a single match among siblings.

    @param icase: Compare tag names case-insensitively when true.

    @return: A generator of direct or indirect child elements of C{iNode}
        with the name C{path}. This may include C{iNode}.
    """
    same = lambda x, y: x.lower() == y.lower() if icase else x == y
    is_node = hasattr(iNode, 'nodeName')
    has_bracket = '[' in path

    # Single node: yield it on a name match, then recurse into children
    if is_node and not has_bracket and same(iNode.nodeName, path):
        yield iNode

    if is_node and iNode.hasChildNodes():
        for c in get_elements_by_tag_name(iNode.childNodes, path, icase):
            yield c

    # List of nodes: apply the optional '[n]' position filter among the
    # matching siblings, then recurse into every child
    if not is_node:
        name = path[:path.find('[')] if has_bracket else path
        pos = int(path[path.find('[') + 1:-1]) - 1 if has_bracket else 0
        nodes = [n for n in iNode if same(n.nodeName, name)]

        if pos < len(nodes):
            yield nodes[pos]

        for child in iNode:
            for c in get_elements_by_tag_name(child, path, icase):
                yield c
def get_element_by_id(nodes, node_id):
    """Depth-first search of *nodes* (and all of their descendants) for the
    first node whose 'id' attribute equals *node_id*.

    Args:
        nodes: An iterable of DOM nodes exposing ``getAttribute`` and
            ``childNodes``.
        node_id (str): The id value to look for.

    Returns:
        The first matching node, or ``None`` when no node matches.
    """
    # Check this level first (breadth before depth, as before)
    for node in nodes:
        if node.getAttribute('id') == node_id:
            return node

    # Fixed: the original returned after recursing into only the FIRST
    # sibling's subtree, so matches under later siblings were never found.
    for node in nodes:
        found = get_element_by_id(node.childNodes, node_id)

        if found is not None:
            return found

    return None
class MismatchedTags(Exception):
    """Raised when a closing tag does not match the element being closed.

    Carries the filename, the expected and actual tag names, and the
    begin/end line and column positions involved.
    """

    def __init__(self, *args):
        (self.filename, self.expect, self.got, self.begLine,
         self.begCol, self.endLine, self.endCol) = args

    def __str__(self):
        template = (
            'expected </%(expect)s>, got </%(got)s> line: %(endLine)s '
            'col: %(endCol)s, began line: %(begLine)s, col: %(begCol)s')

        return template % self.__dict__
class Node(object):
    """Base class for all nodes in the document tree.

    Tracks a parent reference plus an ordered list of children and
    implements the child-manipulation half of the DOM interface.
    Serialization (``writexml``/``cloneNode``) is left to subclasses.
    """

    nodeName = "Node"

    def __init__(self, parentNode=None):
        self.parentNode = parentNode
        self.childNodes = []

    def isEqualToNode(self, other):
        """
        Compare this node to C{other}. If the nodes have the same number of
        children and corresponding children are equal to each other, return
        C{True}, otherwise return C{False}.

        @type other: L{Node}
        @rtype: C{bool}
        """
        if len(self.childNodes) != len(other.childNodes):
            return False

        for a, b in zip(self.childNodes, other.childNodes):
            if not a.isEqualToNode(b):
                return False

        return True

    def writexml(self, *args, **kwargs):
        raise NotImplementedError()

    def toxml(self, *args, **kwargs):
        """Serialize this node (via ``writexml``) and return the string."""
        s = StringIO()
        self.writexml(s, *args, **kwargs)
        return s.getvalue()

    def writeprettyxml(self, stream, *args, **kwargs):
        return self.writexml(stream, *args, **kwargs)

    def toprettyxml(self, **kwargs):
        return self.toxml(**kwargs)

    def cloneNode(self, deep=0, parent=None):
        raise NotImplementedError()

    def hasChildNodes(self):
        # Note: returns the (truthy/falsy) child list itself, not a bool
        return self.childNodes

    def appendChild(self, child):
        """
        Make the given L{Node} the last child of this node.

        @param child: The L{Node} which will become a child of this node.

        @raise TypeError: If C{child} is not a C{Node} instance.
        """
        if not isinstance(child, Node):
            raise TypeError("expected Node instance")

        self.childNodes.append(child)
        child.parentNode = self

    def insertBefore(self, new, ref):
        """
        Make the given L{Node} C{new} a child of this node which comes before
        the L{Node} C{ref}.

        @param new: A L{Node} which will become a child of this node.

        @param ref: A L{Node} which is already a child of this node which
            C{new} will be inserted before.

        @raise TypeError: If C{new} or C{ref} is not a C{Node} instance.

        @return: C{new}
        """
        if not isinstance(new, Node) or not isinstance(ref, Node):
            raise TypeError("expected Node instance")

        i = self.childNodes.index(ref)
        new.parentNode = self
        self.childNodes.insert(i, new)
        return new

    def removeChild(self, child):
        """
        Remove the given L{Node} from this node's children.

        @param child: A L{Node} which is a child of this node which will no
            longer be a child of this node after this method is called.

        @raise TypeError: If C{child} is not a C{Node} instance.

        @return: C{child}
        """
        if not isinstance(child, Node):
            raise TypeError("expected Node instance")

        if child in self.childNodes:
            self.childNodes.remove(child)
            child.parentNode = None

        return child

    def replaceChild(self, newChild, oldChild):
        """
        Replace a L{Node} which is already a child of this node with a
        different node.

        @param newChild: A L{Node} which will be made a child of this node.

        @param oldChild: A L{Node} which is a child of this node which will
            give up its position to C{newChild}.

        @raise TypeError: If C{newChild} or C{oldChild} is not a C{Node}
            instance.

        @raise ValueError: If C{oldChild} is not a child of this C{Node}.
        """
        # Fixed: the original used `or`, which only raised when *both*
        # arguments were not Nodes
        if not (isinstance(newChild, Node) and isinstance(oldChild, Node)):
            raise TypeError("expected Node instance")

        if oldChild.parentNode is not self:
            raise ValueError("oldChild is not a child of this node")

        self.childNodes[self.childNodes.index(oldChild)] = newChild

        # Fixed: the original assigned to a misspelled `parentNodem`
        # attribute, leaving oldChild.parentNode pointing at this node
        oldChild.parentNode, newChild.parentNode = None, self

    def lastChild(self):
        return self.childNodes[-1]

    def firstChild(self):
        return self.childNodes[0]
class Document(Node):
    """The root of a DOM tree; holds at most one document element."""

    doctype = None

    def __init__(self, documentElement=None):
        Node.__init__(self)

        if documentElement:
            self.appendChild(documentElement)

    def cloneNode(self, deep=0, parent=None):
        clone = Document()
        clone.doctype = self.doctype

        if deep:
            root = self.documentElement.cloneNode(1, self)
        else:
            root = self.documentElement

        clone.appendChild(root)
        return clone

    def isEqualToDocument(self, n):
        """Equal when doctypes match and children compare equal."""
        return (self.doctype == n.doctype) and Node.isEqualToNode(self, n)

    isEqualToNode = isEqualToDocument

    def get_documentElement(self):
        return self.childNodes[0]

    documentElement = property(get_documentElement)

    def appendChild(self, child):
        """
        Make the given L{Node} the I{document element} of this L{Document}.

        @param child: The L{Node} to make into this L{Document}'s document
            element.

        @raise ValueError: If this document already has a document element.
        """
        if self.childNodes:
            raise ValueError("Only one element per document.")

        Node.appendChild(self, child)

    def writexml(self, stream, *args, **kwargs):
        newl = kwargs['newl']
        stream.write('<?xml version="1.0"?>' + newl)

        if self.doctype:
            stream.write("<!DOCTYPE " + self.doctype + ">" + newl)

        self.documentElement.writexml(stream, *args, **kwargs)

    # factory helpers of dubious utility (?)
    def createElement(self, name, **kw):
        return Element(name, **kw)

    def createTextNode(self, text):
        return Text(text)

    def createComment(self, text):
        return Comment(text)

    def getElementsByTagName(self, name):
        icase = self.documentElement.case_insensitive
        return get_elements_by_tag_name(self.childNodes, name, icase)

    def getElementById(self, node_id):
        return get_element_by_id(self.childNodes, node_id)
class EntityReference(Node):
    """A reference to a named entity, e.g. ``&amp;`` for ``amp``."""

    def __init__(self, eref, parentNode=None):
        Node.__init__(self, parentNode)
        self.eref = eref
        # data/nodeValue hold the literal '&name;' text
        self.nodeValue = self.data = "&" + eref + ";"

    def isEqualToEntityReference(self, n):
        if not isinstance(n, EntityReference):
            return 0

        same_name = self.eref == n.eref
        same_value = self.nodeValue == n.nodeValue
        return same_name and same_value

    isEqualToNode = isEqualToEntityReference

    def writexml(self, stream, *args, **kwargs):
        # Entities are written through verbatim
        stream.write(self.nodeValue)

    def cloneNode(self, deep=0, parent=None):
        return EntityReference(self.eref, parent)
class CharacterData(Node):
    """Common base for text-like nodes; stores raw character data."""

    def __init__(self, data, parentNode=None):
        Node.__init__(self, parentNode)
        # keep the three historical aliases pointing at the same object
        self.value = self.data = self.nodeValue = data

    def isEqualToCharacterData(self, n):
        """Equal when the underlying data compares equal."""
        return self.value == n.value

    isEqualToNode = isEqualToCharacterData
class Comment(CharacterData):
    """A comment node (``<!-- ... -->``)."""

    def writexml(self, stream, *args, **kwargs):
        stream.write("<!--%s-->" % encode(self.data))

    def cloneNode(self, deep=0, parent=None):
        return Comment(self.nodeValue, parent)
class Text(CharacterData):
    """Character data; escaped on output unless flagged ``raw``."""

    def __init__(self, data, parentNode=None, raw=0):
        CharacterData.__init__(self, data, parentNode)
        self.raw = raw

    def isEqualToNode(self, other):
        """
        Compare this text to C{text}. If the underlying values and the C{raw}
        flag are the same, return C{True}, otherwise return C{False}.
        """
        same_data = CharacterData.isEqualToNode(self, other)
        return same_data and self.raw == other.raw

    def cloneNode(self, deep=0, parent=None):
        return Text(self.nodeValue, parent, self.raw)

    def writexml(self, stream, *args, **kwargs):
        decoded = decode(self.nodeValue)

        if self.raw:
            # raw text is written as-is, without entity escaping
            val = decoded
        else:
            if kwargs.get('strip'):
                # collapse all runs of whitespace to single spaces
                decoded = ' '.join(decoded.split())

            val = escape(decoded)

        stream.write(encode(val))

    def __repr__(self):
        return "Text(%s)" % repr(self.nodeValue)
class CDATASection(CharacterData):
    """A ``<![CDATA[ ... ]]>`` section; content is written verbatim."""

    def cloneNode(self, deep=0, parent=None):
        return CDATASection(self.nodeValue, parent)

    def writexml(self, stream, *args, **kwargs):
        stream.write("<![CDATA[" + self.nodeValue + "]]>")
class _Attr(CharacterData):
    """Support class for getAttributeNode."""
class Element(Node):
    """A tag element: name, attributes, namespace data and children.

    Serialization (``writexml``) knows about HTML block elements and
    singleton tags so it can pretty-print and emit ``<br />``-style tags.
    """

    nsprefixes = None

    # Accessed through ``self``, so the first parameter receives the bound
    # instance; call sites pass only (key, value). Fixed: the original
    # lambda took only (k, v), so the instance displaced the key argument
    # and every call raised TypeError.
    create_attr = lambda _, k, v: (' ', k, '="', escape(v), '"')

    # Tags serialized in '<tag />' form when they have no children
    SINGLETONS = (
        'img', 'br', 'hr', 'base', 'meta', 'link', 'param',
        'area', 'input', 'col', 'basefont', 'isindex', 'frame')

    # Tags surrounded by newline/indent when pretty-printing
    BLOCKELEMENTS = (
        'html', 'head', 'body', 'noscript', 'ins', 'del',
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'script',
        'ul', 'ol', 'dl', 'pre', 'hr', 'blockquote',
        'address', 'p', 'div', 'fieldset', 'table', 'tr',
        'form', 'object', 'fieldset', 'applet', 'map')

    # Block elements whose children each go on their own line
    NICEFORMATS = ('tr', 'ul', 'ol', 'head')

    def __init__(
            self, tagName, attributes=None, parentNode=None, filename=None,
            markpos=None, case_insensitive=1, namespace=None
    ):
        Node.__init__(self, parentNode)
        preserve_case = not case_insensitive
        tagName = tagName if preserve_case else tagName.lower()
        unescaped = unescape_dict(attributes or {})

        if case_insensitive:
            self.attributes = InsensitiveDict(unescaped, preserve=preserve_case)
        else:
            self.attributes = unescaped

        self.preserve_case = not case_insensitive
        self.case_insensitive = case_insensitive
        self.endTagName = self.nodeName = self.tagName = tagName
        self._filename = filename
        self._markpos = markpos
        self.namespace = namespace
        self.tag_is_blockelement = tagName in self.BLOCKELEMENTS
        self.tag_is_nice_format = tagName in self.NICEFORMATS
        self.tag_is_singleton = tagName.lower() in self.SINGLETONS

    def addPrefixes(self, pfxs):
        """Record namespace prefix -> URI mappings declared on this tag."""
        if self.nsprefixes is None:
            self.nsprefixes = pfxs
        else:
            self.nsprefixes.update(pfxs)

    def endTag(self, endTagName):
        if self.case_insensitive:
            endTagName = endTagName.lower()

        self.endTagName = endTagName

    def isEqualToElement(self, n):
        """Equal when attributes match and names match (honoring case)."""
        same_attrs = self.attributes == n.attributes

        if self.case_insensitive:
            eq = same_attrs and (self.nodeName.lower() == n.nodeName.lower())
        else:
            eq = same_attrs and (self.nodeName == n.nodeName)

        return eq

    def isEqualToNode(self, other):
        """
        Compare this element to C{other}. If the C{nodeName}, C{namespace},
        C{attributes}, and C{childNodes} are all the same, return C{True},
        otherwise return C{False}.
        """
        is_lower = self.nodeName.lower() == other.nodeName.lower()
        same_name = self.namespace == other.namespace
        same_attrs = self.attributes == other.attributes
        is_equal = Node.isEqualToNode(self, other)
        return all([is_lower, same_name, same_attrs, is_equal])

    def cloneNode(self, deep=0, parent=None):
        clone = Element(
            self.tagName, parentNode=parent, namespace=self.namespace,
            case_insensitive=self.case_insensitive)

        clone.attributes.update(self.attributes)

        if deep:
            clone.childNodes = [
                child.cloneNode(1, clone) for child in self.childNodes]
        else:
            clone.childNodes = []

        return clone

    def getElementsByTagName(self, name):
        icase = self.case_insensitive
        return get_elements_by_tag_name(self.childNodes, name, icase)

    def hasAttributes(self):
        return 1

    def getAttribute(self, name, default=None):
        return self.attributes.get(name, default)

    def getAttributeNS(self, ns, name, default=None):
        nsk = (ns, name)

        if nsk in self.attributes:
            return self.attributes[nsk]

        if ns == self.namespace:
            return self.attributes.get(name, default)

        return default

    def getAttributeNode(self, name):
        return _Attr(self.getAttribute(name), self)

    def setAttribute(self, name, attr):
        self.attributes[name] = attr

    def removeAttribute(self, name):
        if name in self.attributes:
            del self.attributes[name]

    def hasAttribute(self, name):
        return name in self.attributes

    def gen_prefixes(self, nsprefixes):
        """Yield this tag's prefix mappings not already in scope."""
        # Fixed: nsprefixes may still be None if addPrefixes was never
        # called (e.g. a manually constructed Element)
        for k, v in (self.nsprefixes or {}).items():
            if k not in nsprefixes:
                yield (k, v)

    def _writexml(self, namespace, nsprefixes, newl, indent):
        """Build the opening-tag fragments and resolve namespace prefixes."""
        newprefixes = dict(self.gen_prefixes(nsprefixes))
        begin = [newl, indent, '<'] if self.tag_is_blockelement else ['<']
        is_same_namespace = self.namespace and namespace == self.namespace

        # Make a local for tracking what end tag will be used. If namespace
        # prefixes are involved, this will be changed to account for that
        # before it's actually used.
        endTagName = self.endTagName

        if not is_same_namespace and self.namespace in nsprefixes:
            # This tag's namespace already has a prefix bound to it. Use
            # that prefix.
            prefix = nsprefixes[self.namespace]
            begin.extend(prefix + ':' + self.tagName)
            # Also make sure we use it for the end tag.
            endTagName = prefix + ':' + self.endTagName
        elif not is_same_namespace:
            # This tag's namespace has no prefix bound to it. Change the
            # default namespace to this tag's namespace so we don't need
            # prefixes. Alternatively, we could add a new prefix binding.
            # I'm not sure why the code was written one way rather than the
            # other. -exarkun
            begin.extend(self.tagName)
            begin.extend(self.create_attr("xmlns", self.namespace))
            # The default namespace just changed. Make sure any children
            # know about this.
            namespace = self.namespace
        else:
            # This tag has no namespace or its namespace is already the default
            # namespace. Nothing extra to do here.
            begin.extend(self.tagName)

        prefixes = ('p%s' % str(i) for i in it.count())

        for attr, val in sorted(self.attributes.items()):
            if val and isinstance(attr, tuple):
                ns, key = attr

                if ns in nsprefixes:
                    prefix = nsprefixes[ns]
                else:
                    newprefixes[ns] = prefix = next(prefixes)

                begin.extend(self.create_attr(prefix + ':' + key, val))
            elif val:
                begin.extend(self.create_attr(attr, val))

        return begin, namespace, endTagName, newprefixes

    def _write_child(self, stream, newl, newindent, **kwargs):
        """Serialize children, one per line for 'nice format' blocks."""
        for child in self.childNodes:
            if self.tag_is_blockelement and self.tag_is_nice_format:
                stream.write(''.join((newl, newindent)))

            # The accumulated indent becomes the child's base 'indent' and
            # the prefixes currently in scope become its 'nsprefixes'.
            child.writexml(stream, newl=newl, indent=newindent, **kwargs)

    def writexml(self, stream, *args, **kwargs):
        """
        Serialize this L{Element} to the given stream.

        @param stream: A file-like object to which this L{Element} will be
            written.

        @param nsprefixes: A C{dict} mapping namespace URIs as C{str} to
            prefixes as C{str}. This defines the prefixes which are already in
            scope in the document at the point at which this L{Element} exists.
            This is essentially an implementation detail for namespace support.
            Applications should not try to use it.

        @param namespace: The namespace URI as a C{str} which is the default at
            the point in the document at which this L{Element} exists. This is
            essentially an implementation detail for namespace support.
            Applications should not try to use it.
        """
        indent = kwargs.get('indent', '')
        addindent = kwargs.get('addindent', '')
        newl = kwargs.get('newl', '')
        strip = kwargs.get('strip', 0)
        nsprefixes = kwargs.get('nsprefixes', {})
        namespace = kwargs.get('namespace', '')

        # this should never be necessary unless people start
        # changing .tagName on the fly(?)
        if self.case_insensitive:
            self.endTagName = self.tagName

        _args = (namespace, nsprefixes, newl, indent)
        begin, namespace, endTagName, newprefixes = self._writexml(*_args)

        for ns, prefix in newprefixes.items():
            if prefix:
                begin.extend(self.create_attr('xmlns:' + prefix, ns))

        newprefixes.update(nsprefixes)
        downprefixes = newprefixes
        stream.write(''.join(begin))

        if self.childNodes:
            stream.write(">")
            newindent = indent + addindent

            # Fixed: 'newl'/'newindent' were previously passed both
            # positionally and inside **kwargs (a TypeError), and the
            # prefix map traveled under 'downprefixes', a key no child's
            # writexml ever read.
            kwargs = {
                'addindent': addindent,
                'strip': strip,
                'nsprefixes': downprefixes,
                'namespace': namespace}

            self._write_child(stream, newl, newindent, **kwargs)

            if self.tag_is_blockelement:
                stream.write(''.join((newl, indent)))

            stream.write(''.join(('</', endTagName, '>')))
        elif not self.tag_is_singleton:
            stream.write(''.join(('></', endTagName, '>')))
        else:
            stream.write(" />")

    def __repr__(self):
        rep = "Element(%s" % repr(self.nodeName)

        if self.attributes:
            rep += ", attributes=%r" % (self.attributes,)

        if self._filename:
            rep += ", filename=%r" % (self._filename,)

        if self._markpos:
            rep += ", markpos=%r" % (self._markpos,)

        return rep + ')'

    def __str__(self):
        rep = "<" + self.nodeName

        if self._filename or self._markpos:
            rep += " ("

        if self._filename:
            rep += repr(self._filename)

        if self._markpos:
            rep += " line %s column %s" % self._markpos

        if self._filename or self._markpos:
            rep += ")"

        for item in self.attributes.items():
            rep += " %s=%r" % item

        if self.hasChildNodes():
            rep += " >...</%s>" % self.nodeName
        else:
            rep += " />"

        return rep
class MicroDOMParser(XMLParser):
    """SUX-driven event handler that builds a microdom tree.

    Maintains a stack of open elements (``elementstack``) and a stack of
    namespace scopes (``nsstack``); completed top-level nodes accumulate
    in ``documents``. In lenient mode, HTML tags with optional closers are
    auto-closed via ``soonClosers``/``laterClosers``.
    """

    # <dash> glyph: a quick scan thru the DTD says BODY, AREA, LINK, IMG, HR,
    # P, DT, DD, LI, INPUT, OPTION, THEAD, TFOOT, TBODY, COLGROUP, COL, TR, TH,
    # TD, HEAD, BASE, META, HTML all have optional closing tags
    def_soon_closers = 'area link br img hr input base meta'.split()
    def_later_closers = {
        'p': ['p', 'dt'],
        'dt': ['dt', 'dd'],
        'dd': ['dt', 'dd'],
        'li': ['li'],
        'tbody': ['thead', 'tfoot', 'tbody'],
        'thead': ['thead', 'tfoot', 'tbody'],
        'tfoot': ['thead', 'tfoot', 'tbody'],
        'colgroup': ['colgroup'],
        'col': ['col'],
        'tr': ['tr'],
        'td': ['td'],
        'th': ['th'],
        'head': ['body'],
        'title': ['head', 'body'],  # this looks wrong...
        'option': ['option'],
    }

    def __init__(self, case_insensitive=True, **kwargs):
        # Protocol is an old style class so we can't use super
        XMLParser.__init__(self, **kwargs)
        self.elementstack = []
        # nsstack entries are (prefix->uri map, owning element, uri->prefix map)
        d = {'xmlns': 'xmlns', '': None}
        dr = invert_dict(d)
        self.nsstack = [(d, None, dr)]
        self.documents = []
        self._mddoctype = None
        self.case_insensitive = case_insensitive
        # NOTE(review): Element sets preserve_case to `not case_insensitive`;
        # the missing negation here looks inconsistent -- confirm intended.
        self.preserve_case = case_insensitive
        self.soonClosers = kwargs.get('soonClosers', self.def_soon_closers)
        self.laterClosers = kwargs.get('laterClosers', self.def_later_closers)
        self.indentlevel = 0

    def shouldPreserveSpace(self):
        # Walks every open element (in an unusual order: index 0 first, then
        # from the top of the stack down) checking for <pre> or xml:space
        for idx, _ in enumerate(self.elementstack):
            el = self.elementstack[-idx]
            preserve = el.getAttribute("xml:space", '') == 'preserve'

            if (el.tagName == 'pre') or preserve:
                return 1

        return 0

    def _getparent(self):
        # The innermost open element, or None at the document top level
        if self.elementstack:
            return self.elementstack[-1]
        else:
            return None

    COMMENT = re.compile(r"\s*/[/*]\s*")

    def _fixScriptElement(self, el):
        # this deals with case where there is comment or CDATA inside
        # <script> tag and we want to do the right thing with it
        if self.strict or not len(el.childNodes) == 1:
            return

        c = el.firstChild()

        if isinstance(c, Text):
            # deal with nasty people who do stuff like:
            #   <script> // <!--
            #      x = 1;
            #   // --></script>
            # tidy does this, for example.
            prefix = ""
            oldvalue = c.value
            match = self.COMMENT.match(oldvalue)

            if match:
                prefix = match.group()
                oldvalue = oldvalue[len(prefix):]

            # now see if contents are actual node and comment or CDATA
            try:
                e = parseString("<a>%s</a>" % oldvalue).childNodes[0]
            except (ParseError, MismatchedTags):
                return

            if len(e.childNodes) != 1:
                return

            e = e.firstChild()

            if isinstance(e, (CDATASection, Comment)):
                el.childNodes = [e] + ([Text(prefix)] if prefix else [])

    def gotDoctype(self, doctype):
        self._mddoctype = doctype

    def _check_parent(self, parent, name):
        # Implicitly close `parent` when `name` is one of its registered
        # "later closers" (e.g. a second <li> closes the first)
        if (self.lenient and isinstance(parent, Element)):
            parentName = parent.tagName
            myName = name

            if self.case_insensitive:
                parentName = parentName.lower()
                myName = myName.lower()

            if myName in self.laterClosers.get(parentName, []):
                self.gotTagEnd(parent.tagName)
                parent = self._getparent()

        return parent

    def _gen_attrs(self, attributes, namespaces):
        # Rewrite 'pfx:name' attribute keys to (uri, name) tuples when the
        # prefix is bound in the current scope
        for k, v in attributes.items():
            ksplit = k.split(':', 1)

            if len(ksplit) == 2:
                pfx, tv = ksplit

                if pfx != 'xml' and pfx in namespaces:
                    yield ((namespaces[pfx], tv), v)
                else:
                    yield (k, v)
            else:
                yield (k, v)

    def _gen_newspaces(self, unesc_attributes):
        # Extract xmlns / xmlns:pfx declarations as (prefix, uri) pairs;
        # the bare 'xmlns' default namespace gets the '' prefix
        for k, v in unesc_attributes.items():
            if k.startswith('xmlns'):
                spacenames = k.split(':', 1)

                if len(spacenames) == 2:
                    yield (spacenames[1], v)
                else:
                    yield ('', v)

    def _gen_new_attrs(self, unesc_attributes):
        # Everything that is not an xmlns declaration
        for k, v in unesc_attributes.items():
            if not k.startswith('xmlns'):
                yield (k, v)

    def gotTagStart(self, name, attributes):
        # logger.debug('%s<%s>', ' ' * self.indentlevel, name)
        self.indentlevel += 2
        parent = self._getparent()
        parent = self._check_parent(parent, name)
        unesc_attributes = unescape_dict(attributes)
        namespaces = self.nsstack[-1][0]
        newspaces = dict(self._gen_newspaces(unesc_attributes))
        new_unesc_attributes = dict(self._gen_new_attrs(unesc_attributes))
        new_namespaces = merge([namespaces, newspaces])
        gen_attr_args = (new_unesc_attributes, new_namespaces)
        new_attributes = dict(self._gen_attrs(*gen_attr_args))
        el_args = (name, new_attributes, parent, self.filename, self.saveMark())
        kwargs = {
            'case_insensitive': self.case_insensitive,
            'namespace': new_namespaces.get('')}

        el = Element(*el_args, **kwargs)
        revspaces = invert_dict(newspaces)
        el.addPrefixes(revspaces)

        # Only push a new namespace scope when this tag declared namespaces;
        # the scope is popped in gotTagEnd when its owning element closes
        if newspaces:
            rscopy = merge([self.nsstack[-1][2], revspaces])
            self.nsstack.append((new_namespaces, el, rscopy))

        self.elementstack.append(el)

        if parent:
            parent.appendChild(el)

        # Tags like <br>/<img> are closed immediately in lenient mode
        if (self.lenient and el.tagName in self.soonClosers):
            self.gotTagEnd(name)

    def _gotStandalone(self, factory, data):
        # Shared path for text/comment/entity/CDATA nodes: attach to the
        # current parent, or treat as a top-level document in lenient mode
        parent = self._getparent()
        te = factory(data, parent)

        if parent:
            parent.appendChild(te)
        elif self.lenient:
            self.documents.append(te)

    def gotText(self, data):
        # Whitespace-only text is dropped unless space must be preserved
        if data.strip() or self.shouldPreserveSpace():
            self._gotStandalone(Text, data)

    def gotComment(self, data):
        self._gotStandalone(Comment, data)

    def gotEntityReference(self, entityRef):
        self._gotStandalone(EntityReference, entityRef)

    def gotCData(self, cdata):
        self._gotStandalone(CDATASection, cdata)

    def _check_name(self, name, el):
        # In strict mode, a prefixed close tag must resolve to the same
        # namespace the element was opened in
        pfxdix = self.nsstack[-1][2]
        nsplit = name.split(':', 1)

        if len(nsplit) == 2:
            pfx, newname = nsplit
            ns = pfxdix.get(pfx, None)

            if (el.namespace != ns) and ns and self.strict:
                first = (self.filename, el.tagName, name)
                args = first + self.saveMark() + el._markpos
                raise MismatchedTags(*args)

    def _update_stacks(self, lastEl, nstuple, el, name, cname):
        # Close tag didn't match the innermost element: either close the
        # nearest matching ancestor (dropping everything inside it) or, if
        # nothing matches, put the element back and keep waiting
        updated = False

        for idx, element in enumerate(reversed(self.elementstack)):
            if element.tagName == cname:
                element.endTag(name)
                break
        else:
            # this was a garbage close tag; wait for a real one
            self.elementstack.append(el)
            self.nsstack.append(nstuple) if nstuple else None
            updated = True

        if not updated:
            del self.elementstack[-(idx + 1):]

        if not (updated or self.elementstack):
            self.documents.append(lastEl)
            updated = True

        return updated

    def _update_el(self, updated, name, el):
        if not updated:
            el.endTag(name)

        if not (updated or self.elementstack):
            self.documents.append(el)

        if not updated and self.lenient and el.tagName == "script":
            self._fixScriptElement(el)

    def gotTagEnd(self, name):
        self.indentlevel -= 2
        # logger.debug('%s</%s>', ' ' * self.indentlevel, name)
        if self.lenient and not self.elementstack:
            return
        elif not self.elementstack:
            args = (self.filename, "NOTHING", name) + self.saveMark() + (0, 0)
            raise MismatchedTags(*args)

        el = self.elementstack.pop()
        # Pop the namespace scope only if this element owns it
        nstuple = self.nsstack.pop() if self.nsstack[-1][1] is el else None
        tn, cname = el.tagName, name

        if self.case_insensitive:
            tn, cname = tn.lower(), cname.lower()

        self._check_name(name, el)
        tn_is_cname = tn == cname
        lenient_stack = self.lenient and self.elementstack

        if not tn_is_cname and lenient_stack:
            lastEl = self.elementstack[0]
            updated = self._update_stacks(lastEl, nstuple, el, name, cname)
        elif not tn_is_cname:
            first = (self.filename, el.tagName, name)
            raise MismatchedTags(*(first + self.saveMark() + el._markpos))
        else:
            updated = False

        self._update_el(updated, name, el)

    def connectionLost(self, reason):
        XMLParser.connectionLost(self, reason)  # This can cause more events!

        if self.elementstack:
            if self.lenient:
                self.documents.append(self.elementstack[0])
            else:
                first = (self.filename, self.elementstack[-1], "END_OF_FILE")
                args = first + self.saveMark() + self.elementstack[-1]._markpos
                raise MismatchedTags(*args)
def parse(f, *args, **kwargs):
    """Parse HTML or XML from *f* and return a Document.

    *f* may be a path, a readable file object, or an object with an ``fp``
    attribute.  Extra keyword arguments are forwarded to MicroDOMParser.

    Fixes over the previous version:
    - a file opened here is now always closed (was leaked);
    - the in-memory fast path checks ``hasattr(readable, 'getvalue')``
      instead of catching AttributeError around ``dataReceived`` (which
      could re-feed data if dataReceived itself raised AttributeError);
    - the side-effect list comprehension is a plain loop.
    """
    fp = f.fp if hasattr(f, 'fp') else f
    opened_here = not hasattr(fp, 'read')
    readable = open(f, 'rb') if opened_here else fp
    try:
        filename = getattr(readable, 'name', 'unnamed')
        mdp = MicroDOMParser(filename=filename, **kwargs)
        mdp.makeConnection(None)
        if hasattr(readable, 'getvalue'):
            # In-memory buffer (StringIO/BytesIO): feed everything at once.
            mdp.dataReceived(readable.getvalue())
        else:
            # Stream in chunks; the sentinel type must match the stream mode
            # (bytes for binary readers, str for text).
            sentinel = b'' if 'BufferedReader' in str(type(readable)) else ''
            for chunk in iter(partial(readable.read, 1024), sentinel):
                mdp.dataReceived(chunk)
        mdp.connectionLost(None)
    finally:
        if opened_here:
            readable.close()

    if not mdp.documents:
        raise ParseError(mdp.filename, 0, 0, "No top-level Nodes in document")

    d = mdp.documents[0]
    is_element = isinstance(d, Element)
    if mdp.lenient and len(mdp.documents) == 1 and not is_element:
        # Single non-element top node: wrap it in an <html> element.
        el = Element("html")
        el.appendChild(d)
        d = el
    elif mdp.lenient:
        # Multiple top-level nodes: collect them under one <html> root.
        d = Element("html")
        for child in mdp.documents:
            d.appendChild(child)
    doc = Document(d)
    doc.doctype = mdp._mddoctype
    return doc
def parseString(content, *args, **kwargs):
    """Parse HTML/XML held in a string or bytes object."""
    buffered = BytesIO(encode(content))
    return parse(buffered, *args, **kwargs)
def parseXML(readable):
    """Parse an XML readable object."""
    # NOTE(review): XML tag names are case-sensitive per spec, yet this
    # passes case_insensitive=True -- confirm the flag's intended meaning.
    return parse(readable, case_insensitive=True)
def parseXMLString(content):
    """Parse an XML document held in a string or bytes object."""
    return parseString(content, case_insensitive=True)
| mit | c31252e9ca53c3dd6a255dfdb045c43a | 31.086074 | 80 | 0.584652 | 3.927667 | false | false | false | false |
eerimoq/asn1tools | asn1tools/codecs/ber.py | 1 | 57074 | """Basic Encoding Rules (BER) codec.
"""
import time
import math
import binascii
from copy import copy
import datetime
from ..parser import EXTENSION_MARKER
from . import BaseType, format_bytes, DecodeError, ErrorWithLocation
from . import EncodeError
from . import DecodeError
from . import format_or
from . import compiler
from . import utc_time_to_datetime
from . import utc_time_from_datetime
from . import generalized_time_to_datetime
from . import generalized_time_from_datetime
from .compiler import enum_values_as_dict
from .compiler import clean_bit_string_value
class Class(object):
    """X.690 tag class values (the two high bits of the identifier octet)."""

    UNIVERSAL = 0x00
    APPLICATION = 0x40
    CONTEXT_SPECIFIC = 0x80
    PRIVATE = 0xc0
class Encoding(object):
    """X.690 primitive/constructed bit of the identifier octet."""

    PRIMITIVE = 0x00
    CONSTRUCTED = 0x20
class Tag(object):
    """Universal-class tag numbers (X.680/X.690)."""

    END_OF_CONTENTS = 0x00
    BOOLEAN = 0x01
    INTEGER = 0x02
    BIT_STRING = 0x03
    OCTET_STRING = 0x04
    NULL = 0x05
    OBJECT_IDENTIFIER = 0x06
    OBJECT_DESCRIPTOR = 0x07
    EXTERNAL = 0x08
    REAL = 0x09
    ENUMERATED = 0x0a
    EMBEDDED_PDV = 0x0b
    UTF8_STRING = 0x0c
    RELATIVE_OID = 0x0d
    SEQUENCE = 0x10
    SET = 0x11
    NUMERIC_STRING = 0x12
    PRINTABLE_STRING = 0x13
    T61_STRING = 0x14
    VIDEOTEX_STRING = 0x15
    IA5_STRING = 0x16
    UTC_TIME = 0x17
    GENERALIZED_TIME = 0x18
    GRAPHIC_STRING = 0x19
    VISIBLE_STRING = 0x1a
    GENERAL_STRING = 0x1b
    UNIVERSAL_STRING = 0x1c
    CHARACTER_STRING = 0x1d
    BMP_STRING = 0x1e
    DATE = 0x1f
    TIME_OF_DAY = 0x20
    DATE_TIME = 0x21
# Two zero octets terminating an indefinite-length encoding (X.690).
END_OF_CONTENTS_OCTETS = b'\x00\x00'

# Sentinel returned by decode() on tag mismatch; cheaper than raising
# DecodeTagError for every absent OPTIONAL member.
TAG_MISMATCH = object()
def flatten(l):
    """Return a flat list of the leaf values in the (possibly irregularly)
    nested list or tuple *l*.  A non-list/tuple value yields a one-item list.
    """
    if not isinstance(l, (list, tuple)):
        return [l]
    result = []
    for item in l:
        result.extend(flatten(item))
    return result
def is_end_of_data(data, offset, end_offset):
    """Return (reached_end, new_offset) for a field being decoded.

    Definite-length fields (*end_offset* given) end at a plain offset
    comparison; indefinite-length fields end at the two end-of-contents
    octets, which are consumed when found.
    """
    if end_offset is None:
        if detect_end_of_contents_tag(data, offset):
            return True, offset + 2
    elif offset >= end_offset:
        return True, offset
    return False, offset
def detect_end_of_contents_tag(data, offset):
    """Return True if the end-of-contents octets sit at *offset* in *data*.

    :param bytes data:
    :param int offset:
    :raises OutOfByteDataError: fewer than two bytes remain at *offset*.
    """
    window = data[offset:offset + 2]
    if window == END_OF_CONTENTS_OCTETS:
        return True
    if len(window) != 2:
        # Fewer than two bytes left: cannot even hold the terminator.
        raise OutOfByteDataError(
            'Ran out of data when trying to find End of Contents tag for indefinite length field',
            offset=offset)
    return False
def check_decode_error(asn_type, decoded_value, data, offset):
    """Raise DecodeTagError if *decoded_value* is the TAG_MISMATCH sentinel.

    Uses an identity test (``is``): TAG_MISMATCH is a unique ``object()``
    sentinel, so ``==`` was both slower and could misfire on decoded values
    with permissive ``__eq__`` implementations.
    """
    if decoded_value is TAG_MISMATCH:
        raise DecodeTagError(asn_type, data, offset, location=asn_type)
class MissingMandatoryFieldError(DecodeError):
    """
    Error for when there is no data for a mandatory field member
    (i.e. the member is neither OPTIONAL nor has a DEFAULT).
    """

    def __init__(self, member, offset):
        # *member* is the Type instance; it is used both in the message
        # and as the error location.
        super().__init__('{} is missing and has no default value'.format(member),
                         offset=offset,
                         location=member)
class OutOfByteDataError(DecodeError):
    """
    Error for when running out of / missing data missing when attempting
    to decode.
    """

    pass
class MissingDataError(OutOfByteDataError):
    """
    Special variant of OutOfByteDataError for when remaining data length
    is less than decoded element length.
    """

    def __init__(self, message, offset, expected_length, location=None):
        super().__init__(message, offset, location=location)
        # Number of contents bytes the length octets promised.
        self.expected_length = expected_length
class DecodeTagError(DecodeError):
    """
    ASN.1 tag decode error for BER and DER codecs.
    """

    def __init__(self, asn_type, data, offset, location=None):
        """
        :param StandardDecodeMixin, Type asn_type: ASN type instance error occurred for
        :param bytes data: ASN data
        :param int offset:
        :param str location: Name of ASN1 element error occurred in
        """
        # When the expected tag length is unknown (e.g. CHOICE), read a
        # whole tag from the data; otherwise slice exactly tag_len bytes.
        self.actual_tag = format_bytes(read_tag(data, offset)
                                       if asn_type.tag_len is None
                                       else data[offset:offset + asn_type.tag_len])
        self.asn_type = asn_type
        # format_tag() returns a list of tags for CHOICE types.
        tag = asn_type.format_tag()
        message = "Expected {} with {}, but got '{}'.".format(
            asn_type.type_label(),
            'tags {}'.format(tag) if isinstance(tag, list) else "tag '{}'".format(tag),
            self.actual_tag)
        super(DecodeTagError, self).__init__(message, offset=offset, location=location)
class NoEndOfContentsTagError(DecodeError):
    """
    Exception for when end-of-contents tag (00) could not be found for
    indefinite-length field.
    """

    pass
def encode_length_definite(length):
    """Encode *length* as X.690 definite-length octets.

    Short form (one octet) for lengths up to 127; long form otherwise:
    a count octet with bit 8 set, followed by the big-endian length.
    """
    if length <= 127:
        return bytearray([length])
    octets = bytearray()
    remaining = length
    while remaining > 0:
        octets.append(remaining & 0xff)
        remaining >>= 8
    octets.reverse()
    return bytearray([0x80 | len(octets)]) + octets
def decode_length(encoded, offset, enforce_definite=True):
    """
    Decode definite or indefinite length of an ASN.1 node.

    :param bytes encoded: Complete encoded buffer.
    :param int offset: Offset of the first length octet.
    :param bool enforce_definite: Whether to raise error if length is indefinite.
    :return: Tuple of (length, offset past the length octets); length is
        ``None`` for an indefinite-length field.
    :raises OutOfByteDataError: length octets themselves are truncated.
    :raises MissingDataError: fewer contents bytes remain than promised.
    """
    try:
        length = encoded[offset]
    except IndexError:
        raise OutOfByteDataError('Ran out of data when trying to read length',
                                 offset=offset)
    offset += 1

    if length & 0x80:  # Faster than > 127
        # Handle indefinite length
        if length == 128:
            if enforce_definite:
                raise DecodeError('Expected definite length, but got indefinite.',
                                  offset=offset - 1)
            return None, offset
        else:
            # Handle long length: low 7 bits give the byte count of the
            # big-endian length that follows.
            number_of_bytes = (length & 0x7f)
            encoded_length = encoded[offset:number_of_bytes + offset]

            # Verify all the length bytes exist
            if len(encoded_length) != number_of_bytes:
                raise OutOfByteDataError('Expected {} length byte(s) at offset {}, but got {}.'.format(
                    number_of_bytes, offset, len(encoded_length)),
                    offset=offset)

            length = int(binascii.hexlify(encoded_length), 16)
            offset += number_of_bytes

    # Detect missing data
    data_length = len(encoded)

    if offset + length > data_length:
        raise MissingDataError(
            'Expected at least {} contents byte(s), but got {}.'.format(length, data_length - offset),
            offset,
            length
        )

    return length, offset
def encode_signed_integer(number):
    """Encode *number* as the minimal big-endian two's-complement bytes."""
    # +8 guarantees room for the sign bit before the floor division.
    nbits = (number + (number < 0)).bit_length() + 8
    return number.to_bytes(length=nbits // 8, byteorder='big', signed=True)
def encode_tag(number, flags):
    """Encode a BER identifier octet sequence for tag *number*.

    *flags* carries the class and primitive/constructed bits.  Numbers
    below 31 fit in one octet; larger numbers use the long form with
    big-endian base-128 groups, continuation bit set on all but the last.
    """
    if number < 31:
        return bytearray([flags | number])
    groups = [number & 0x7f]
    number >>= 7
    while number > 0:
        groups.append(0x80 | (number & 0x7f))
        number >>= 7
    return bytearray([flags | 0x1f]) + bytearray(reversed(groups))
def skip_tag(data, offset):
    """Return the offset just past the identifier octets at *offset*.

    Raises OutOfByteDataError if the tag is truncated or if no byte
    follows the tag (a length octet is always expected next).
    """
    end = offset
    try:
        leading = data[end]
        end += 1
        if leading & 0x1f == 0x1f:
            # Long-form tag: octets continue while the high bit is set.
            while data[end] & 0x80:
                end += 1
            end += 1
    except IndexError:
        raise OutOfByteDataError('Ran out of data when reading tag',
                                 offset=end)
    if end >= len(data):
        raise OutOfByteDataError('Ran out of data when reading tag',
                                 offset=end)
    return end
def read_tag(data, offset):
    """Return the raw identifier octets of the tag starting at *offset*."""
    end = skip_tag(data, offset)
    return data[offset:end]
def skip_tag_length_contents(data, offset):
    """Return the offset just past an entire node (tag, length, contents).

    :param data:
    :param offset:
    :return:
    """
    offset = skip_tag(data, offset)
    length, offset = decode_length(data, offset)
    return offset + length
def encode_real(data):
    """Encode a Python float as X.690 REAL contents octets.

    Specials (inf/-inf/NaN) use single control octets; 0.0 encodes as
    empty contents; everything else uses the base-2 binary form with a
    normalized mantissa and one- or two-octet exponent.
    """
    if data == float('inf'):
        data = b'\x40'
    elif data == float('-inf'):
        data = b'\x41'
    elif math.isnan(data):
        data = b'\x42'
    elif data == 0.0:
        data = b''
    else:
        if data >= 0:
            negative_bit = 0
        else:
            negative_bit = 0x40
            data *= -1

        # frexp() gives mantissa in [0.5, 1); scale to a 53-bit integer
        # and strip trailing zero bits so the mantissa is minimal.
        mantissa, exponent = math.frexp(abs(data))
        mantissa = int(mantissa * 2 ** 53)
        lowest_set_bit = compiler.lowest_set_bit(mantissa)
        mantissa >>= lowest_set_bit
        # Prepend a marker byte so hex() keeps leading zeros, then drop it.
        mantissa |= (0x80 << (8 * ((mantissa.bit_length() // 8) + 1)))
        mantissa = binascii.unhexlify(hex(mantissa)[4:].rstrip('L'))
        exponent = (52 - lowest_set_bit - exponent)

        if -129 < exponent < 128:
            # One-octet exponent (control 0x80 | sign).
            exponent = [0x80 | negative_bit, ((0xff - exponent) & 0xff)]
        elif -32769 < exponent < 32768:
            # Two-octet exponent (control 0x81 | sign).
            exponent = ((0xffff - exponent) & 0xffff)
            exponent = [0x81 | negative_bit, (exponent >> 8), exponent & 0xff]
        else:
            raise NotImplementedError(
                'REAL exponent {} out of range.'.format(exponent))

        data = bytearray(exponent) + mantissa

    return data
def decode_real_binary(control, data):
    """Decode a base-2 binary REAL given its control octet and full contents.

    Supports one-octet (0x80/0xc0) and two-octet (0x81/0xc1) exponents;
    bit 0x40 of the control octet selects a negative value.
    """
    if control in (0x80, 0xc0):
        # One-octet two's-complement exponent.
        exponent = data[1]
        if exponent & 0x80:
            exponent -= 0x100
        mantissa_start = 2
    elif control in (0x81, 0xc1):
        # Two-octet two's-complement exponent.
        exponent = (data[1] << 8) | data[2]
        if exponent & 0x8000:
            exponent -= 0x10000
        mantissa_start = 3
    else:
        raise DecodeError(
            'Unsupported binary REAL control word 0x{:02x}.'.format(control))
    mantissa = int(binascii.hexlify(data[mantissa_start:]), 16)
    value = float(mantissa * 2 ** exponent)
    return -value if control & 0x40 else value
def decode_real_special(control):
    """Map a special REAL control octet to its float value."""
    if control == 0x40:
        return float('inf')
    if control == 0x41:
        return float('-inf')
    if control == 0x42:
        return float('nan')
    if control == 0x43:
        return -0.0
    raise DecodeError(
        'Unsupported special REAL control word 0x{:02x}.'.format(control))
def decode_real_decimal(data):
    """Decode a decimal-form REAL: skip the control octet and accept a
    decimal comma as well as a point."""
    text = data[1:].replace(b',', b'.')
    return float(text)
def decode_real(data):
    """Decode REAL contents octets (binary, special or decimal form).

    Empty contents decode to 0.0; otherwise the control octet selects
    the form.
    """
    if not data:
        return 0.0
    control = data[0]
    if control & 0x80:
        return decode_real_binary(control, data)
    if control & 0x40:
        return decode_real_special(control)
    return decode_real_decimal(data)
def encode_object_identifier(data):
    """Encode a dotted OID string as a list of contents octet values.

    The first two components are folded into one subidentifier
    (40 * first + second); each subidentifier is then emitted as
    big-endian base-128 with continuation bits.
    """
    parts = [int(part) for part in data.split('.')]
    subidentifiers = [40 * parts[0] + parts[1]] + parts[2:]
    encoded = []
    for value in subidentifiers:
        group = [value & 0x7f]
        value >>= 7
        while value > 0:
            group.append(0x80 | (value & 0x7f))
            value >>= 7
        encoded.extend(group[::-1])
    return encoded
def encode_object_identifier_subidentifier(subidentifier):
    """Encode one subidentifier as big-endian base-128 octet values with
    the continuation bit set on all but the final octet."""
    groups = [subidentifier & 0x7f]
    subidentifier >>= 7
    while subidentifier > 0:
        groups.append(0x80 | (subidentifier & 0x7f))
        subidentifier >>= 7
    groups.reverse()
    return groups
def decode_object_identifier(data, offset, end_offset):
    """Decode OID contents octets into a dotted string.

    The first subidentifier is split back into the first two components
    (value // 40 and value % 40); the rest map one-to-one.
    """
    def read_subidentifier(pos):
        # One big-endian base-128 value; high bit marks continuation.
        value = 0
        while data[pos] & 0x80:
            value = (value + (data[pos] & 0x7f)) << 7
            pos += 1
        return value + data[pos], pos + 1

    subidentifier, offset = read_subidentifier(offset)
    components = [subidentifier // 40, subidentifier % 40]
    while offset < end_offset:
        subidentifier, offset = read_subidentifier(offset)
        components.append(subidentifier)
    return '.'.join(str(component) for component in components)
def decode_object_identifier_subidentifier(data, offset):
    """Decode one base-128 subidentifier; return (value, new_offset)."""
    value = 0
    byte = data[offset]
    while byte & 0x80:
        value = (value + (byte & 0x7f)) << 7
        offset += 1
        byte = data[offset]
    return value + byte, offset + 1
class Type(BaseType):
    """
    Base type class for BER types.
    """

    def __init__(self, name, type_name, number, flags=0):
        """
        :param str name: Name of type instance
        :param str type_name: ASN1 Type name
        :param int number: Tag number (None for untagged types, e.g. CHOICE)
        :param int flags: Class/encoding bits OR'ed into the identifier octet
        """
        super().__init__(name, type_name)

        if number is None:
            self.tag = None
            self.tag_len = None
        else:
            self.tag = encode_tag(number, flags)
            self.tag_len = len(self.tag)

    def decode(self, data, offset, values=None):
        """
        Decode type value from byte data

        :param bytearray data: Binary ASN1 data to decode
        :param int offset: Current byte offset
        :param dict values: Sibling values decoded so far (passed by MembersType)
        :return: Tuple of (decoded_value, end_offset)
        """
        raise NotImplementedError()

    def encode(self, data, encoded, values=None):
        """
        Encode value into byte data

        :param data: Value to be encoded
        :param bytearray encoded: Existing byte data to add encoded data to
        :param values:
        :return: None (extend 'encoded' bytearray)
        """
        raise NotImplementedError()

    def set_tag(self, number, flags):
        # Re-tag this type (used for explicit/implicit tagging).
        self.tag = encode_tag(number, flags)
        self.tag_len = len(self.tag)

    def format_tag(self):
        """
        Get formatted hex string representation of this type's tag

        :return:
        """
        return format_bytes(self.tag) if self.tag is not None else '(Unknown)'

    def set_size_range(self, minimum, maximum, has_extension_marker):
        # Size constraints do not affect BER encoding; ignored.
        pass
class StandardDecodeMixin(object):
    """
    Type class mixin for standard decoding logic
    (single predefined tag to be matched against, then decode length and contents)
    """

    # Whether indefinite length encoding is allowed for this type
    indefinite_allowed = False

    def decode(self, data, offset, values=None):
        """
        Implements standard decode logic (single predefined tag to be matched
        against, then decode length and contents)

        :param bytearray data: Binary ASN1 data to decode
        :param int offset: Current byte offset
        :param dict values:
        :return: Tuple of (decoded_value, end_offset); decoded_value is the
            TAG_MISMATCH sentinel when the tag at *offset* does not match.
        """
        start_offset = offset
        offset += self.tag_len

        # Validate tag
        tag_data = data[start_offset:offset]
        if tag_data != self.tag:
            # Check for missing data
            if len(tag_data) != self.tag_len:
                raise OutOfByteDataError('Ran out of data when reading tag',
                                         offset=start_offset)
            # return TAG_MISMATCH Instead of raising DecodeTagError for better performance so that MembersType does
            # not have to catch exception for every missing optional type
            # CompiledType.decode_with_length() will detect TAG_MISMATCH returned value and raise appropriate exception
            return TAG_MISMATCH, start_offset

        # Decode length
        length, offset = decode_length(data, offset, enforce_definite=not self.indefinite_allowed)

        return self.decode_content(data, offset, length)

    def decode_content(self, data, offset, length):
        """
        Type-specific logic to decode content

        :param bytearray data: Binary data to decode
        :param int offset: Offset for start of content bytes
        :param int length: Length of content bytes (None if indefinite)
        :return: Tuple of (decoded_value, end_offset)
        """
        raise NotImplementedError('Type {} does not implement decode_content() method'.format(type(self).__name__))
class StandardEncodeMixin(object):
    """
    Type class mixin for standard encoding logic (append tag + length(content) + content)
    """

    def encode(self, data, encoded, values=None):
        """
        Encode value into byte data

        :param data: Value to be encoded
        :param bytearray encoded: Existing byte data to add encoded data to
        :param values:
        :return:
        """
        # Content is rendered first so its definite length can be emitted.
        encoded_data = bytearray(self.encode_content(data, values=values))
        encoded.extend(self.tag + encode_length_definite(len(encoded_data)) + encoded_data)

    def encode_content(self, data, values=None):
        """
        Encode data value into bytearray

        :param data:
        :param values:
        :return:
        """
        raise NotImplementedError()
class PrimitiveOrConstructedType(Type):
    """
    Base type class for types which can be either primitive or constructed
    (BitString, OctetString, String).

    Keeps two tags: the primitive one and its constructed variant (same
    number with the constructed bit set).
    """

    def __init__(self, name, type_name, number, segment, flags=0):
        # *segment* is the type used to decode each piece of a constructed
        # encoding (strings use an OctetString; BitString uses itself).
        super(PrimitiveOrConstructedType, self).__init__(name,
                                                         type_name,
                                                         number,
                                                         flags)
        self.segment = segment

        self.constructed_tag = copy(self.tag)
        self.constructed_tag[0] |= Encoding.CONSTRUCTED

    def set_tag(self, number, flags):
        self.tag = encode_tag(number, flags)
        self.constructed_tag = copy(self.tag)
        self.constructed_tag[0] |= Encoding.CONSTRUCTED
        self.tag_len = len(self.tag)

    def decode(self, data, start_offset, values=None):
        """
        Custom decode logic to handle primitive or constructed types.
        Return decoded value and new offset.
        """
        offset = start_offset + self.tag_len
        tag = data[start_offset:offset]
        # Validate tag
        if tag == self.tag:
            is_primitive = True
        elif tag == self.constructed_tag:
            is_primitive = False
        elif len(tag) != self.tag_len:
            # Detect out of data
            raise OutOfByteDataError('Ran out of data when reading tag',
                                     offset=start_offset)
        else:
            # Tag mismatch. Return DECODE_FAILED instead of raising DecodeError for performance
            return TAG_MISMATCH, start_offset

        length, offset = decode_length(data, offset, enforce_definite=False)
        if is_primitive:
            end_offset = offset + length
            return self.decode_primitive_contents(data, offset, length), end_offset
        else:
            return self.decode_constructed_contents(data, offset, length)

    def decode_constructed_contents(self, data, offset, length):
        # Decode a sequence of segments until the definite end offset or the
        # end-of-contents octets, then merge them.
        segments = []
        end_offset = None if length is None else offset + length
        while True:
            end_of_data, offset = is_end_of_data(data, offset, end_offset)
            if end_of_data:
                break
            decoded, offset = self.segment.decode(data, offset)
            check_decode_error(self.segment, decoded, data, offset)
            segments.append(decoded)

        return self.decode_constructed_segments(segments), offset

    def decode_primitive_contents(self, data, offset, length):
        raise NotImplementedError('To be implemented by subclasses.')

    def decode_constructed_segments(self, segments):
        raise NotImplementedError('To be implemented by subclasses.')
class StringType(StandardEncodeMixin, PrimitiveOrConstructedType):
    """Base class for character string types.

    Subclasses set TAG (universal tag number) and ENCODING (Python codec).
    Constructed encodings are decoded as OctetString segments and joined
    before decoding to text.
    """

    TAG = None
    ENCODING = None

    def __init__(self, name):
        super(StringType, self).__init__(name,
                                         self.__class__.__name__,
                                         self.TAG,
                                         OctetString(name))

    def encode_content(self, data, values=None):
        return data.encode(self.ENCODING)

    def decode_primitive_contents(self, data, offset, length):
        return data[offset:offset + length].decode(self.ENCODING)

    def decode_constructed_segments(self, segments):
        return bytearray().join(segments).decode(self.ENCODING)
class MembersType(StandardEncodeMixin, StandardDecodeMixin, Type):
    """Base class for SEQUENCE and SET: a constructed type with named
    members (root members plus optional extension additions)."""

    indefinite_allowed = True

    def __init__(self, name, tag_name, tag, root_members, additions):
        super(MembersType, self).__init__(name,
                                          tag_name,
                                          tag,
                                          Encoding.CONSTRUCTED)
        self.root_members = root_members
        # Extension additions; each entry is a member or a list (version group).
        self.additions = additions

    def set_tag(self, number, flags):
        # Members types are always constructed.
        super(MembersType, self).set_tag(number,
                                         flags | Encoding.CONSTRUCTED)

    def encode_content(self, data, values=None):
        """Encode all members of dict *data* into contents octets."""
        encoded_members = bytearray()

        for member in self.root_members:
            self.encode_member(member, data, encoded_members)

        if self.additions:
            self.encode_additions(data, encoded_members)

        return encoded_members

    def encode_additions(self, data, encoded_members):
        # Encode addition groups atomically; a missing addition member
        # aborts the remaining additions (best effort, EncodeError swallowed).
        try:
            for addition in self.additions:
                encoded_addition = bytearray()
                if isinstance(addition, list):
                    for member in addition:
                        self.encode_member(member, data, encoded_addition)
                else:
                    self.encode_member(addition,
                                       data,
                                       encoded_addition)
                encoded_members.extend(encoded_addition)
        except EncodeError:
            pass

    def encode_member(self, member, data, encoded_members):
        """Encode a single member, skipping values equal to their DEFAULT."""
        name = member.name

        if name in data:
            value = data[name]
            try:
                if isinstance(member, AnyDefinedBy):
                    # AnyDefinedBy needs the full value dict to resolve its type.
                    member.encode(value, encoded_members, data)
                elif not member.is_default(value):
                    member.encode(value, encoded_members)
            except ErrorWithLocation as e:
                # Add member location
                e.add_location(member)
                raise e
        elif member.optional:
            pass
        elif not member.has_default():
            raise EncodeError("{} member '{}' not found in {}.".format(
                self.__class__.__name__,
                name,
                data))

    def decode_content(self, data, offset, length):
        """Decode contents octets into a dict of member values."""
        end_offset = None if length is None else offset + length
        values = {}
        offset, out_of_data = self.decode_members(self.root_members, data, values, offset, end_offset)
        # Decode additions (even if out of data already, so defaults can be added)
        if self.additions:
            offset, out_of_data = self.decode_members(flatten(self.additions), data, values, offset, end_offset,
                                                      ignore_missing=True)
        if out_of_data:
            return values, offset
        if end_offset is None:
            raise NoEndOfContentsTagError('Could not find end-of-contents tag for indefinite length field.',
                                          offset=offset)
        else:
            # Extra data is allowed in cases of versioned additions
            return values, end_offset

    def decode_members(self, members, data, values, offset, end_offset, ignore_missing=False):
        """
        Decode values for members from data starting from offset.
        Supports member data encoded in different order than members specified.

        :param list members: List of member types
        :param bytearray data:
        :param dict values: Decoded values, updated in place
        :param int offset:
        :param int end_offset: End offset of member data (None if indefinite length field)
        :param bool ignore_missing: Whether to not raise DecodeError for missing mandatory fields with no defaults
        :return: Tuple of (offset, out_of_data flag)
        """
        # Decode member values from data
        remaining_members = members
        # Outer loop to enable decoding members out of order
        while True:
            undecoded_members = []
            decode_success = False  # Whether at least one member was successfully decoded
            out_of_data, offset = is_end_of_data(data, offset, end_offset)
            # Attempt to decode remaining members. If they are encoded in same order, should decode all in one loop
            # Otherwise will require multiple iterations of outer loop
            for member in remaining_members:
                # Dont attempt decode if already out of data, just add member to list of undecoded
                if out_of_data:
                    undecoded_members.append(member)
                    continue
                # Attempt decode
                try:
                    value, offset = member.decode(data, offset, values=values)
                except ErrorWithLocation as e:
                    # Add member location
                    e.add_location(member)
                    raise e
                if value == TAG_MISMATCH:
                    undecoded_members.append(member)
                else:
                    decode_success = True
                    values[member.name] = value
                    # Detect end of data
                    out_of_data, offset = is_end_of_data(data, offset, end_offset)
            remaining_members = undecoded_members
            if out_of_data:
                break
            if not decode_success:
                # No members are able to decode data, exit loop
                break

        # Handle remaining members that there is no data for
        # (will raise error if member is not optional and has no default)
        for member in remaining_members:
            if member.optional:
                continue
            if member.has_default():
                values[member.name] = member.get_default()
            elif ignore_missing:
                break
            elif out_of_data:
                raise MissingMandatoryFieldError(member, offset)
            else:
                raise DecodeTagError(member, data, offset, location=member)
        return offset, out_of_data

    def __repr__(self):
        return '{}({}, [{}])'.format(
            self.__class__.__name__,
            self.name,
            ', '.join([repr(member) for member in self.root_members]))
class ArrayType(StandardEncodeMixin, StandardDecodeMixin, Type):
    """Base class for SEQUENCE OF and SET OF: a constructed type holding
    repeated values of a single element type."""

    indefinite_allowed = True

    def __init__(self, name, tag_name, tag, element_type):
        super(ArrayType, self).__init__(name,
                                        tag_name,
                                        tag,
                                        Encoding.CONSTRUCTED)
        self.element_type = element_type

    def set_tag(self, number, flags):
        super(ArrayType, self).set_tag(number,
                                       flags | Encoding.CONSTRUCTED)

    def encode_content(self, data, values=None):
        encoded_elements = bytearray()
        for entry in data:
            self.element_type.encode(entry, encoded_elements)

        return encoded_elements

    def decode_content(self, data, offset, length):
        """Decode repeated elements into a list; *length* None means
        indefinite (terminated by end-of-contents octets)."""
        decoded = []
        start_offset = offset
        # Loop through data until length exceeded or end-of-contents tag reached.
        while True:
            if length is None:
                # Find end of indefinite sequence.
                if detect_end_of_contents_tag(data, offset):
                    offset += 2
                    break
            elif (offset - start_offset) >= length:
                # End of definite length sequence.
                break
            decoded_element, offset = self.element_type.decode(data, offset)
            # Invalid Tag
            check_decode_error(self.element_type, decoded_element, data, offset)
            decoded.append(decoded_element)

        return decoded, offset

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
                                   self.name,
                                   self.element_type)
class Boolean(StandardEncodeMixin, StandardDecodeMixin, Type):
    """The BOOLEAN type: one contents octet, 0x00 for False, 0xff for True."""

    def __init__(self, name):
        super(Boolean, self).__init__(name,
                                      'BOOLEAN',
                                      Tag.BOOLEAN)

    def encode_content(self, data, values=None):
        # bool multiplies to 0x00 or 0xff.
        return bytearray([0xff * data])

    def decode_content(self, data, offset, length):
        if length != 1:
            raise DecodeError(
                'Expected BOOLEAN contents length 1, but '
                'got {}.'.format(length), offset=offset-1)

        # Any non-zero octet decodes as True (BER).
        return bool(data[offset]), offset + length
class Integer(StandardEncodeMixin, StandardDecodeMixin, Type):
    """The INTEGER type: big-endian two's-complement contents octets."""

    def __init__(self, name):
        super(Integer, self).__init__(name,
                                      'INTEGER',
                                      Tag.INTEGER)

    def encode_content(self, data, values=None):
        return encode_signed_integer(data)

    def decode_content(self, data, offset, length):
        end_offset = offset + length
        return int.from_bytes(data[offset:end_offset], byteorder='big', signed=True), end_offset
class Real(StandardEncodeMixin, StandardDecodeMixin, Type):
    """The REAL type; delegates to the module-level encode_real/decode_real."""

    def __init__(self, name):
        super(Real, self).__init__(name, 'REAL', Tag.REAL)

    def encode_content(self, data, values=None):
        return encode_real(data)

    def decode_content(self, data, offset, length):
        end_offset = offset + length
        decoded = decode_real(data[offset:end_offset])
        return decoded, end_offset
class Null(StandardDecodeMixin, Type):
    """The NULL type: no contents octets; decodes to None."""

    def __init__(self, name):
        super(Null, self).__init__(name, 'NULL', Tag.NULL)

    def is_default(self, value):
        # NULL carries no value, so it never matches a DEFAULT.
        return False

    def encode(self, data, encoded, values=None):
        """Append the tag and a zero length octet; *data* is ignored.

        Accepts the ``values`` keyword like every other Type.encode()
        (the previous signature omitted it and would raise TypeError if
        encode() were invoked uniformly with values=...).
        """
        encoded.extend(self.tag)
        encoded.append(0)

    def decode_content(self, data, offset, length):
        # No contents octets to consume.
        return None, offset
class BitString(StandardEncodeMixin, PrimitiveOrConstructedType):
    """The BIT STRING type.

    Values are (bytes, number_of_bits) tuples.  The first contents octet
    holds the count of unused bits in the final data octet.
    """

    def __init__(self, name, has_named_bits):
        super(BitString, self).__init__(name,
                                        'BIT STRING',
                                        Tag.BIT_STRING,
                                        self)
        # Whether the ASN.1 definition names individual bits (affects how
        # DEFAULT comparison normalizes trailing zero bits).
        self.has_named_bits = has_named_bits

    def is_default(self, value):
        if self.default is None:
            return False
        # Compare against DEFAULT after normalizing both values.
        clean_value = clean_bit_string_value(value,
                                             self.has_named_bits)
        clean_default = clean_bit_string_value(self.default,
                                               self.has_named_bits)

        return clean_value == clean_default

    def encode_content(self, data, values=None):
        number_of_bytes, number_of_rest_bits = divmod(data[1], 8)
        data = bytearray(data[0])

        if number_of_rest_bits == 0:
            data = data[:number_of_bytes]
            number_of_unused_bits = 0
        else:
            # Mask off the unused low bits of the final octet.
            last_byte = data[number_of_bytes]
            last_byte &= ((0xff >> number_of_rest_bits) ^ 0xff)
            data = data[:number_of_bytes]
            data.append(last_byte)
            number_of_unused_bits = (8 - number_of_rest_bits)

        return bytearray([number_of_unused_bits]) + data

    def decode_primitive_contents(self, data, offset, length):
        # First octet is the unused-bit count.
        length -= 1
        number_of_bits = 8 * length - data[offset]
        offset += 1

        return (data[offset:offset + length], number_of_bits)

    def decode_constructed_segments(self, segments):
        # Concatenate the (bytes, bits) pairs of each segment.
        decoded = bytearray()
        number_of_bits = 0
        for data, length in segments:
            decoded.extend(data)
            number_of_bits += length
        return (bytes(decoded), number_of_bits)
class OctetString(StandardEncodeMixin, PrimitiveOrConstructedType):
    """The OCTET STRING type; acts as its own constructed segment type."""

    def __init__(self, name):
        super(OctetString, self).__init__(name,
                                          'OCTET STRING',
                                          Tag.OCTET_STRING,
                                          self)

    def encode_content(self, data, values=None):
        return data

    def decode_primitive_contents(self, data, offset, length):
        return bytes(data[offset:offset + length])

    def decode_constructed_segments(self, segments):
        return bytes().join(segments)
class ObjectIdentifier(StandardEncodeMixin, StandardDecodeMixin, Type):
    """The OBJECT IDENTIFIER type; values are dotted strings ('1.2.840...')."""

    def __init__(self, name):
        super(ObjectIdentifier, self).__init__(name,
                                               'OBJECT IDENTIFIER',
                                               Tag.OBJECT_IDENTIFIER)

    def encode_content(self, data, values=None):
        return encode_object_identifier(data)

    def decode_content(self, data, offset, length):
        end_offset = offset + length
        decoded = decode_object_identifier(data, offset, end_offset)
        return decoded, end_offset
class Enumerated(StandardEncodeMixin, StandardDecodeMixin, Type):
    """The ENUMERATED type: contents are the signed integer enum value.

    With *numeric* True the Python-side values are the raw integers;
    otherwise they are the enumeration names.
    """

    def __init__(self, name, values, numeric):
        super(Enumerated, self).__init__(name,
                                         'ENUMERATED',
                                         Tag.ENUMERATED)

        if numeric:
            # Identity mapping: ints on both sides.
            self.value_to_data = {k: k for k in enum_values_as_dict(values)}
            self.data_to_value = self.value_to_data
        else:
            self.value_to_data = enum_values_as_dict(values)
            self.data_to_value = {v: k for k, v in self.value_to_data.items()}

        # Unknown values decode to None when the enum is extensible.
        self.has_extension_marker = (EXTENSION_MARKER in values)

    def format_names(self):
        return format_or(sorted(list(self.value_to_data.values())))

    def format_values(self):
        return format_or(sorted(list(self.value_to_data)))

    def encode_content(self, data, values=None):
        try:
            value = self.data_to_value[data]
        except KeyError:
            raise EncodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_names(),
                    data))

        return encode_signed_integer(value)

    def decode_content(self, data, offset, length):
        end_offset = offset + length
        value = int.from_bytes(data[offset:end_offset], byteorder='big', signed=True)

        if value in self.value_to_data:
            return self.value_to_data[value], end_offset
        elif self.has_extension_marker:
            return None, end_offset
        else:
            raise DecodeError(
                'Expected enumeration value {}, but got {}.'.format(
                    self.format_values(),
                    value), offset=offset)
class Sequence(MembersType):
    """SEQUENCE: constructed type with ordered named members."""

    def __init__(self, name, root_members, additions):
        super(Sequence, self).__init__(name,
                                       'SEQUENCE',
                                       Tag.SEQUENCE,
                                       root_members,
                                       additions)
class SequenceOf(ArrayType):
    """SEQUENCE OF: repeated values of one element type."""

    def __init__(self, name, element_type):
        super(SequenceOf, self).__init__(name,
                                         'SEQUENCE OF',
                                         Tag.SEQUENCE,
                                         element_type)
class Set(MembersType):
    """SET: constructed type with named members (member order not significant)."""

    def __init__(self, name, root_members, additions):
        super(Set, self).__init__(name,
                                  'SET',
                                  Tag.SET,
                                  root_members,
                                  additions)
class SetOf(ArrayType):
    """SET OF: repeated values of one element type."""

    def __init__(self, name, element_type):
        super(SetOf, self).__init__(name,
                                    'SET OF',
                                    Tag.SET,
                                    element_type)
class Choice(Type):
    """The CHOICE type: untagged; dispatches on the member tags.

    Values are (member_name, member_value) tuples.  Decoding an unknown
    tag yields (None, None) when the choice has an extension marker.
    """

    def __init__(self, name, root_members, additions):
        super(Choice, self).__init__(name, 'CHOICE', None)
        members = root_members

        if additions is not None:
            for addition in additions:
                if isinstance(addition, list):
                    members += addition
                else:
                    members.append(addition)
            self.has_extension_marker = True
        else:
            self.has_extension_marker = False
        self.members = members
        self.name_to_member = {member.name: member for member in self.members}
        # Maps every possible leading tag (as bytes) to its member.
        self.tag_to_member = {}
        self.add_tags(self.members)

    def add_tags(self, members):
        for member in members:
            tags = self.get_member_tags(member)
            for tag in tags:
                self.tag_to_member[tag] = member

    def get_member_tags(self, member):
        """Collect all tags a member may start with (nested CHOICEs are
        flattened; constructed variants included; unresolved Recursive
        members register this choice for later back-patching)."""
        tags = []
        if isinstance(member, Choice):
            tags = self.get_choice_tags(member)
        elif isinstance(member, Recursive):
            if member.inner is None:
                # Not resolved yet; this choice is patched when it is.
                member.choice_parents.append(self)
            else:
                tags = self.get_member_tags(member.inner)
        else:
            tags.append(bytes(member.tag))
            if hasattr(member, 'constructed_tag'):
                tags.append(bytes(member.constructed_tag))
        return tags

    def get_choice_tags(self, choice):
        tags = []
        for member in choice.members:
            tags.extend(self.get_member_tags(member))
        return tags

    def format_tag(self):
        # A CHOICE matches any of its members' tags.
        return [format_bytes(tag) for tag in self.tag_to_member]

    def format_names(self):
        return format_or(sorted([member.name for member in self.members]))

    def encode(self, data, encoded, values=None):
        try:
            member = self.name_to_member[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))
        try:
            member.encode(data[1], encoded)
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(member)
            raise e

    def decode(self, data, offset, values=None):
        tag = bytes(read_tag(data, offset))
        if tag in self.tag_to_member:
            member = self.tag_to_member[tag]
        elif self.has_extension_marker:
            # Unknown alternative in an extensible CHOICE: skip the node.
            offset = skip_tag_length_contents(data, offset)
            return (None, None), offset
        else:
            return TAG_MISMATCH, offset

        try:
            decoded, offset = member.decode(data, offset)
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(member)
            raise e
        return (member.name, decoded), offset

    def __repr__(self):
        return 'Choice({}, [{}])'.format(
            self.name,
            ', '.join([repr(member) for member in self.members]))
# Concrete character string types: each pairs a universal tag number with
# the Python codec used for its contents octets.


class UTF8String(StringType):
    TAG = Tag.UTF8_STRING
    ENCODING = 'utf-8'


class NumericString(StringType):
    TAG = Tag.NUMERIC_STRING
    ENCODING = 'ascii'


class PrintableString(StringType):
    TAG = Tag.PRINTABLE_STRING
    ENCODING = 'ascii'


class IA5String(StringType):
    TAG = Tag.IA5_STRING
    ENCODING = 'ascii'


class VisibleString(StringType):
    TAG = Tag.VISIBLE_STRING
    ENCODING = 'ascii'


class GeneralString(StringType):
    TAG = Tag.GENERAL_STRING
    ENCODING = 'latin-1'


class BMPString(StringType):
    TAG = Tag.BMP_STRING
    ENCODING = 'utf-16-be'


class GraphicString(StringType):
    TAG = Tag.GRAPHIC_STRING
    ENCODING = 'latin-1'


class UniversalString(StringType):
    TAG = Tag.UNIVERSAL_STRING
    ENCODING = 'utf-32-be'


class TeletexString(StringType):
    TAG = Tag.T61_STRING
    ENCODING = 'iso-8859-1'


class ObjectDescriptor(GraphicString):
    # Same contents encoding as GraphicString, distinct universal tag.
    TAG = Tag.OBJECT_DESCRIPTOR
class UTCTime(StandardEncodeMixin, StandardDecodeMixin, Type):
    """UTCTime: ASCII contents converted via the utc_time_{to,from}_datetime
    helpers; Python-side values are datetime objects."""

    def __init__(self, name):
        super(UTCTime, self).__init__(name,
                                      'UTCTime',
                                      Tag.UTC_TIME)

    def encode_content(self, data, values=None):
        return utc_time_from_datetime(data).encode('ascii')

    def decode_content(self, data, offset, length):
        end_offset = offset + length
        decoded = data[offset:end_offset].decode('ascii')
        return utc_time_to_datetime(decoded), end_offset
class GeneralizedTime(StandardEncodeMixin, StandardDecodeMixin, Type):
    """ASN.1 GeneralizedTime: converts between ``datetime`` objects and
    the ASCII timestamp representation via the module's time helpers.
    """
    def __init__(self, name):
        super(GeneralizedTime, self).__init__(name,
                                              'GeneralizedTime',
                                              Tag.GENERALIZED_TIME)
    def encode_content(self, data, values=None):
        return generalized_time_from_datetime(data).encode('ascii')
    def decode_content(self, data, offset, length):
        end_offset = offset + length
        decoded = data[offset:end_offset].decode('ascii')
        return generalized_time_to_datetime(decoded), end_offset
class Date(StandardEncodeMixin, StandardDecodeMixin, Type):
    """ASN.1 DATE, encoded as an eight-character YYYYMMDD ASCII string."""

    def __init__(self, name):
        super(Date, self).__init__(name, 'DATE', Tag.DATE)

    def encode_content(self, data, values=None):
        # str(date) is ISO 'YYYY-MM-DD'; drop the dashes.
        text = str(data).replace('-', '')
        return text.encode('ascii')

    def decode_content(self, data, offset, length):
        end = offset + length
        text = data[offset:end].decode('ascii')
        parsed = time.strptime(text, '%Y%m%d')
        return datetime.date(*parsed[:3]), end
class TimeOfDay(StandardEncodeMixin, StandardDecodeMixin, Type):
    """ASN.1 TIME-OF-DAY, encoded as a six-character HHMMSS ASCII string."""

    def __init__(self, name):
        super(TimeOfDay, self).__init__(name,
                                        'TIME-OF-DAY',
                                        Tag.TIME_OF_DAY)

    def encode_content(self, data, values=None):
        # str(time) is 'HH:MM:SS'; drop the colons.
        text = str(data).replace(':', '')
        return text.encode('ascii')

    def decode_content(self, data, offset, length):
        end = offset + length
        text = data[offset:end].decode('ascii')
        parsed = time.strptime(text, '%H%M%S')
        return datetime.time(*parsed[3:6]), end
class DateTime(StandardEncodeMixin, StandardDecodeMixin, Type):
    """ASN.1 DATE-TIME, encoded as a YYYYMMDDHHMMSS ASCII string."""

    def __init__(self, name):
        super(DateTime, self).__init__(name,
                                       'DATE-TIME',
                                       Tag.DATE_TIME)

    def encode_content(self, data, values=None):
        # The first six timetuple fields are year..second; str.format
        # ignores the trailing extra fields.
        fields = data.timetuple()
        text = '{:04d}{:02d}{:02d}{:02d}{:02d}{:02d}'.format(*fields)
        return text.encode('ascii')

    def decode_content(self, data, offset, length):
        end = offset + length
        text = data[offset:end].decode('ascii')
        parsed = time.strptime(text, '%Y%m%d%H%M%S')
        return datetime.datetime(*parsed[:6]), end
class Any(Type):
    """ASN.1 ANY: an opaque value handled as raw encoded bytes."""
    def __init__(self, name):
        super(Any, self).__init__(name, 'ANY', None)
    def encode(self, data, encoded):
        # Data is expected to already be encoded bytes.
        encoded.extend(data)
    def decode(self, data, offset, values=None):
        # Return the complete TLV (tag + length + contents) unparsed.
        start = offset
        offset = skip_tag(data, offset)
        length, offset = decode_length(data, offset)
        end_offset = offset + length
        return data[start:end_offset], end_offset
class AnyDefinedBy(Type):
    """ASN.1 ANY DEFINED BY: the concrete type is chosen at runtime by the
    already decoded value of another member (``type_member``).  Without a
    choice table the value is treated as raw encoded bytes.
    """
    def __init__(self, name, type_member, choices):
        super(AnyDefinedBy, self).__init__(name,
                                           'ANY DEFINED BY',
                                           None,
                                           None)
        self.type_member = type_member
        self.choices = choices
    def encode(self, data, encoded, values):
        if self.choices:
            try:
                self.choices[values[self.type_member]].encode(data, encoded)
            except KeyError:
                raise EncodeError('Bad AnyDefinedBy choice {}.'.format(
                    values[self.type_member]))
        else:
            # No choice table; data must already be encoded bytes.
            encoded.extend(data)
    def decode(self, data, offset, values):
        """
        :param data:
        :param int offset: Byte offset in ASN1 data
        :param dict values: Dictionary of already decoded values in containing type
        :return:
        """
        if self.choices:
            try:
                return self.choices[values[self.type_member]].decode(data,
                                                                     offset)
            except KeyError:
                raise DecodeError('Bad AnyDefinedBy choice {}.'.format(
                    values[self.type_member]),
                    offset=offset)
        else:
            # No choice table; return the raw TLV bytes unparsed.
            start = offset
            offset = skip_tag(data, offset)
            length, offset = decode_length(data, offset)
            end_offset = offset + length
            return data[start:end_offset], end_offset
class ExplicitTag(StandardEncodeMixin, StandardDecodeMixin, Type):
    """Wraps an inner type in an additional, explicitly tagged TLV.

    Default handling is delegated to the inner type.
    """
    indefinite_allowed = True
    def __init__(self, name, inner):
        super(ExplicitTag, self).__init__(name, 'ExplicitTag', None)
        self.inner = inner
    def set_default(self, value):
        self.inner.set_default(value)
    def get_default(self):
        return self.inner.get_default()
    def is_default(self, value):
        return self.inner.is_default(value)
    def has_default(self):
        return self.inner.has_default()
    def set_tag(self, number, flags):
        # An explicit tag always encodes as a constructed TLV.
        super(ExplicitTag, self).set_tag(number,
                                         flags | Encoding.CONSTRUCTED)
    def encode_content(self, data, values=None):
        encoded_inner = bytearray()
        self.inner.encode(data, encoded_inner)
        return encoded_inner
    def decode_content(self, data, offset, length):
        values, end_offset = self.inner.decode(data, offset)
        check_decode_error(self.inner, values, data, offset)
        # Verify End of Contents tag exists for Indefinite field
        if length is None:
            if not detect_end_of_contents_tag(data, end_offset):
                raise NoEndOfContentsTagError('Expected end-of-contents tag.',
                                              offset=end_offset,
                                              location=self)
            end_offset += 2
        return values, end_offset
class Recursive(compiler.Recursive, Type):
    """Placeholder for a self-referencing type.

    The real inner type is attached with ``set_inner_type()`` after the
    whole specification has been compiled; until then any tag set on this
    placeholder is remembered and re-applied to the inner copy.
    """
    def __init__(self, name, type_name, module_name):
        super(Recursive, self).__init__(name, 'RECURSIVE', None)
        self.type_name = type_name
        self.module_name = module_name
        self.tag_number = None
        self.tag_flags = None
        self.inner = None
        self.choice_parents = []
    def set_tag(self, number, flags):
        # Defer; applied in set_inner_type() once the inner type exists.
        self.tag_number = number
        self.tag_flags = flags
    def set_inner_type(self, inner):
        # Copy so the re-applied tag does not mutate the shared type.
        self.inner = copy(inner)
        if self.tag_number is not None:
            self.inner.set_tag(self.tag_number, self.tag_flags)
        for choice_parent in self.choice_parents:
            choice_parent.add_tags([self])
    def encode(self, data, encoded, values=None):
        self.inner.encode(data, encoded)
    def decode(self, data, offset, values=None):
        return self.inner.decode(data, offset)
class CompiledType(compiler.CompiledType):
    """A compiled type exposing this codec's encode/decode entry points."""
    def encode(self, data):
        """Encode *data* and return the result as a bytearray."""
        encoded = bytearray()
        try:
            self._type.encode(data, encoded)
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(self._type)
            raise e
        return encoded
    def decode(self, data):
        """Decode *data* and return only the decoded value."""
        return self.decode_with_length(data)[0]
    def decode_with_length(self, data):
        """
        Decode and return decoded values as well as length of binary data decoded
        :param data:
        :return:
        """
        try:
            decoded, offset = self._type.decode(bytearray(data), 0)
            # Raise DecodeError
            check_decode_error(self._type, decoded, data, offset)
        except ErrorWithLocation as e:
            # Add member location
            e.add_location(self._type)
            raise e
        return decoded, offset
def get_tag_no_encoding(member):
    """Return the member's tag bytes with the constructed bit cleared.

    Used as a sort key so SET members are ordered by tag regardless of
    primitive/constructed encoding.
    """
    first_octet = member.tag[0] & ~Encoding.CONSTRUCTED
    return bytearray([first_octet]) + member.tag[1:]
class Compiler(compiler.Compiler):
    """Translates parsed type descriptors into the codec ``Type``
    instances defined in this module.
    """

    def process_type(self, type_name, type_descriptor, module_name):
        """Compile one top-level type and wrap it for encode/decode use."""
        compiled_type = self.compile_type(type_name,
                                          type_descriptor,
                                          module_name)
        return CompiledType(compiled_type)

    def compile_implicit_type(self, name, type_descriptor, module_name):
        """Map an ASN.1 built-in type name to its codec class, falling
        back to a user-defined (possibly recursive) type.
        """
        type_name = type_descriptor['type']
        if type_name == 'SEQUENCE':
            compiled = Sequence(
                name,
                *self.compile_members(type_descriptor['members'],
                                      module_name))
        elif type_name == 'SEQUENCE OF':
            compiled = SequenceOf(name,
                                  self.compile_type('',
                                                    type_descriptor['element'],
                                                    module_name))
        elif type_name == 'SET':
            # SET members are sorted by tag (canonical order).
            compiled = Set(
                name,
                *self.compile_members(type_descriptor['members'],
                                      module_name,
                                      sort_by_tag=True))
        elif type_name == 'SET OF':
            compiled = SetOf(name,
                             self.compile_type('',
                                               type_descriptor['element'],
                                               module_name))
        elif type_name == 'CHOICE':
            compiled = Choice(
                name,
                *self.compile_members(type_descriptor['members'],
                                      module_name))
        elif type_name == 'INTEGER':
            compiled = Integer(name)
        elif type_name == 'REAL':
            compiled = Real(name)
        elif type_name == 'ENUMERATED':
            compiled = Enumerated(name,
                                  self.get_enum_values(type_descriptor,
                                                       module_name),
                                  self._numeric_enums)
        elif type_name == 'BOOLEAN':
            compiled = Boolean(name)
        elif type_name == 'OBJECT IDENTIFIER':
            compiled = ObjectIdentifier(name)
        elif type_name == 'OCTET STRING':
            compiled = OctetString(name)
        elif type_name == 'TeletexString':
            compiled = TeletexString(name)
        elif type_name == 'NumericString':
            compiled = NumericString(name)
        elif type_name == 'PrintableString':
            compiled = PrintableString(name)
        elif type_name == 'IA5String':
            compiled = IA5String(name)
        elif type_name == 'VisibleString':
            compiled = VisibleString(name)
        elif type_name == 'GeneralString':
            compiled = GeneralString(name)
        elif type_name == 'UTF8String':
            compiled = UTF8String(name)
        elif type_name == 'BMPString':
            compiled = BMPString(name)
        elif type_name == 'GraphicString':
            compiled = GraphicString(name)
        elif type_name == 'UTCTime':
            compiled = UTCTime(name)
        elif type_name == 'UniversalString':
            compiled = UniversalString(name)
        elif type_name == 'GeneralizedTime':
            compiled = GeneralizedTime(name)
        elif type_name == 'DATE':
            compiled = Date(name)
        elif type_name == 'TIME-OF-DAY':
            compiled = TimeOfDay(name)
        elif type_name == 'DATE-TIME':
            compiled = DateTime(name)
        elif type_name == 'BIT STRING':
            has_named_bits = ('named-bits' in type_descriptor)
            compiled = BitString(name, has_named_bits)
        elif type_name == 'ANY':
            compiled = Any(name)
        elif type_name == 'ANY DEFINED BY':
            choices = {}
            for key, value in type_descriptor['choices'].items():
                choices[key] = self.compile_type(key,
                                                 value,
                                                 module_name)
            compiled = AnyDefinedBy(name,
                                    type_descriptor['value'],
                                    choices)
        elif type_name == 'NULL':
            compiled = Null(name)
        elif type_name == 'EXTERNAL':
            compiled = Sequence(
                name,
                *self.compile_members(self.external_type_descriptor()['members'],
                                      module_name))
            compiled.set_tag(Tag.EXTERNAL, 0)
        elif type_name == 'ObjectDescriptor':
            compiled = ObjectDescriptor(name)
        else:
            if type_name in self.types_backtrace:
                # The type references itself (directly or indirectly);
                # use a placeholder resolved after all types compile.
                compiled = Recursive(name,
                                     type_name,
                                     module_name)
                self.recursive_types.append(compiled)
            else:
                compiled = self.compile_user_type(name,
                                                  type_name,
                                                  module_name)
        return compiled

    def compile_type(self, name, type_descriptor, module_name):
        """Compile a type descriptor, applying explicit tagging and any
        tag number/class given in the descriptor.
        """
        module_name = self.get_module_name(type_descriptor, module_name)
        compiled = self.compile_implicit_type(name,
                                              type_descriptor,
                                              module_name)
        if self.is_explicit_tag(type_descriptor):
            compiled = ExplicitTag(name, compiled)
        # Set any given tag.
        if 'tag' in type_descriptor:
            # Copy so the tag does not leak onto the cached base type.
            compiled = self.copy(compiled)
            tag = type_descriptor['tag']
            class_ = tag.get('class', None)
            if class_ == 'APPLICATION':
                flags = Class.APPLICATION
            elif class_ == 'PRIVATE':
                flags = Class.PRIVATE
            elif class_ == 'UNIVERSAL':
                flags = 0
            else:
                flags = Class.CONTEXT_SPECIFIC
            compiled.set_tag(tag['number'], flags)
        return compiled

    def compile_members(self,
                        members,
                        module_name,
                        sort_by_tag=False):
        """Compile member descriptors into ``(root_members, additions)``
        where ``additions`` holds members behind the extension marker
        (``None`` if there is no marker).
        """
        compiled_members = []
        in_extension = False
        additions = None
        for member in members:
            if member == EXTENSION_MARKER:
                in_extension = not in_extension
                if in_extension:
                    additions = []
            elif in_extension:
                self.compile_extension_member(member,
                                              module_name,
                                              additions)
            else:
                self.compile_root_member(member,
                                         module_name,
                                         compiled_members)
        if sort_by_tag:
            compiled_members = sorted(compiled_members, key=get_tag_no_encoding)
        return compiled_members, additions

    def compile_extension_member(self,
                                 member,
                                 module_name,
                                 additions):
        """Compile one extension addition; a list is a version group and
        is compiled as a nested list.
        """
        if isinstance(member, list):
            compiled_group = []
            for memb in member:
                compiled_member = self.compile_member(memb,
                                                      module_name)
                compiled_group.append(compiled_member)
            additions.append(compiled_group)
        else:
            compiled_member = self.compile_member(member,
                                                  module_name)
            additions.append(compiled_member)
def compile_dict(specification, numeric_enums=False):
    """Compile a parsed ASN.1 specification dictionary with this codec."""
    codec_compiler = Compiler(specification, numeric_enums)
    return codec_compiler.process()
def decode_full_length(data):
    """
    Get total byte length of ASN1 element (tag + contents length)

    Returns the expected total length when ``data`` is truncated but the
    tag and length octets are readable, and ``None`` when not even those
    can be read.
    :param data:
    :return:
    """
    try:
        return skip_tag_length_contents(bytearray(data), 0)
    except MissingDataError as e:
        return e.offset + e.expected_length
    except OutOfByteDataError:
        return None
| mit | 001243dddd09ddb16cee2b6c71f40ad4 | 30.902739 | 119 | 0.550286 | 4.408961 | false | false | false | false |
eerimoq/asn1tools | asn1tools/codecs/compiler.py | 1 | 41188 | """Base Compiler class used by all codecs.
"""
import binascii
import sys
from operator import attrgetter
import bitstruct
from copy import copy
from copy import deepcopy
from ..errors import CompileError
from ..parser import EXTENSION_MARKER
def flatten(dlist):
    """Return *dlist* with one level of nested lists expanded in place
    order; non-list items are kept as-is.
    """
    result = []
    for entry in dlist:
        if isinstance(entry, list):
            result.extend(entry)
        else:
            result.append(entry)
    return result
def is_object_class_type_name(type_name):
    """True if the name refers into an information object class, i.e.
    contains an ``&`` field reference.
    """
    return type_name.find('&') >= 0
def is_open_type(type_name):
    """True if the ``&`` field reference names a type field (upper-case
    initial after the ``&``); False for value fields or plain names.
    """
    _, separator, field = type_name.partition('&')
    return bool(separator) and field[0].isupper()
def is_type_name(type_name):
    """True if the name starts with an upper-case character, i.e. looks
    like a type reference.  Does not handle keywords.
    """
    initial = type_name[0]
    return initial.isupper()
def lowest_set_bit(value):
    """Return the bit position of the least significant set bit of
    *value*, or 0 when no bit is set.
    """
    isolated = value & -value
    return max(isolated.bit_length() - 1, 0)
def rstrip_bit_string_zeros(data):
    """Strip trailing zero bytes from *data* and return
    ``(stripped_data, number_of_bits)`` where the bit count excludes the
    trailing zero bits of the last remaining byte.
    """
    stripped = data.rstrip(b'\x00')
    if not stripped:
        return (stripped, 0)
    bits = 8 * len(stripped) - lowest_set_bit(stripped[-1])
    return (stripped, bits)
def clean_bit_string_value(value, has_named_bits):
    """Normalize a BIT STRING ``(data, number_of_bits)`` tuple.

    Truncates ``data`` to the stated bit count, zeroes the unused
    low-order bits of the final byte and, for types with named bits,
    additionally strips trailing zero bits.
    """
    data = bytearray(value[0])
    number_of_bits = value[1]
    number_of_bytes, number_of_rest_bits = divmod(number_of_bits, 8)
    if number_of_rest_bits == 0:
        data = data[:number_of_bytes]
    else:
        data = data[:number_of_bytes + 1]
        # Keep only the top `number_of_rest_bits` bits of the last byte.
        data[number_of_bytes] &= ((0xff >> number_of_rest_bits) ^ 0xff)
    if has_named_bits:
        return rstrip_bit_string_zeros(data)
    else:
        return (data, number_of_bits)
class CompiledType(object):
    """Base class for a compiled type.  Codec implementations subclass
    this and override encode()/decode().
    """
    def __init__(self, type_):
        # Checkers are attached later by the codec setup code.
        self.constraints_checker = None
        self.type_checker = None
        self._type = type_
    @property
    def type(self):
        # The underlying codec type instance.
        return self._type
    @property
    def name(self):
        return self._type.name
    def check_types(self, data):
        """Check that *data* uses the expected Python types."""
        return self.type_checker.encode(data)
    def check_constraints(self, data):
        """Check *data* against the ASN.1 constraints."""
        return self.constraints_checker.encode(data)
    def encode(self, data):
        raise NotImplementedError('This codec does not support encode().')
    def decode(self, data):
        raise NotImplementedError('This codec does not support decode().')
    def decode_with_length(self, data):
        raise NotImplementedError('This codec does not support decode_with_length().')
    def __repr__(self):
        return repr(self._type)
class Recursive(object):
    """Marker mixin identifying recursively defined types."""

    def __repr__(self):
        class_name = type(self).__name__
        return '{}({})'.format(class_name, self.type_name)
class OpenType(object):
    """An open type member together with its object set lookup table.

    ``table`` is ``(object_set_name, item_names)``; each item's leading
    dots are turned into a negative parent offset.
    """

    def __init__(self, name, table):
        self._name = name
        self._object_set_name = table[0]
        self._table = [(-item.count('.'), item.lstrip('.'))
                       for item in table[1]]

    def __repr__(self):
        return 'OpenType({}, {}, {})'.format(self._name,
                                             self._object_set_name,
                                             self._table)
class OpenTypeSequence(object):
    """A SEQUENCE/SET wrapper holding its open type members."""

    def __init__(self, name, members):
        self._name = name
        self._members = members

    def __repr__(self):
        template = 'OpenTypeSequence({}, {})'
        return template.format(self._name, self._members)
class OpenTypeSequenceOf(object):
    """A SEQUENCE OF/SET OF wrapper whose element contains open types."""

    def __init__(self, name, element_type):
        self._name = name
        self._element_type = element_type

    def __repr__(self):
        template = 'OpenTypeSequenceOf({}, {})'
        return template.format(self._name, self._element_type)
class CompiledOpenTypes(CompiledType):
    """Wraps a compiled type that contains open type members.

    Encoding/decoding currently delegates straight to the wrapped type;
    the open type table is kept for lookup purposes.
    """
    def __init__(self, compiled_open_types, compiled_type):
        super(CompiledOpenTypes, self).__init__(compiled_type)
        self._compiled_open_types = compiled_open_types
    @property
    def type(self):
        return self._type.type
    def encode(self, data, **kwargs):
        return self._type.encode(data, **kwargs)
    def decode(self, data):
        return self._type.decode(data)
class Compiler(object):
    def __init__(self, specification, numeric_enums=False):
        """Create a compiler for a parsed *specification* dictionary.

        With *numeric_enums*, ENUMERATED values compile to integers
        instead of names.
        """
        self._specification = specification
        self._numeric_enums = numeric_enums
        # Stack of type names currently being compiled (cycle detection).
        self._types_backtrace = []
        self.recursive_types = []
        self.compiled = {}
        self.current_type_descriptor = None
    def types_backtrace_push(self, type_name):
        # Enter a type: record it on the compilation stack.
        self._types_backtrace.append(type_name)
    def types_backtrace_pop(self):
        # Leave a type: drop it from the compilation stack.
        self._types_backtrace.pop()
    @property
    def types_backtrace(self):
        # Type names currently being compiled, outermost first.
        return self._types_backtrace
    def process(self):
        """Pre-process and compile every type of every module and return
        the nested ``{module_name: {type_name: CompiledType}}`` mapping.
        """
        self.pre_process()
        compiled = {}
        for module_name in self._specification:
            items = self._specification[module_name]['types'].items()
            for type_name, type_descriptor in items:
                self.types_backtrace_push(type_name)
                compiled_type = self.process_type(type_name,
                                                  type_descriptor,
                                                  module_name)
                compiled_type.type_name = type_name
                compiled_type.module_name = module_name
                compiled_open_types = self.compile_open_types(type_name,
                                                              type_descriptor,
                                                              module_name)
                if compiled_open_types:
                    compiled_type = CompiledOpenTypes(compiled_open_types,
                                                      compiled_type)
                self.types_backtrace_pop()
                if module_name not in compiled:
                    compiled[module_name] = {}
                compiled[module_name][type_name] = compiled_type
        # Resolve recursive placeholders now that everything is compiled.
        for recursive_type in self.recursive_types:
            compiled_module = compiled[recursive_type.module_name]
            inner_type = compiled_module[recursive_type.type_name].type
            recursive_type.set_inner_type(inner_type)
        return compiled
    def pre_process(self):
        """Run all pre-processing passes over the parsed specification
        and return it (modified in place).
        """
        for module_name, module in self._specification.items():
            types = module['types']
            type_descriptors = types.values()
            self.pre_process_components_of(type_descriptors, module_name)
            self.pre_process_extensibility_implied(module, type_descriptors)
            self.pre_process_tags(module, module_name)
            self.pre_process_default_value(type_descriptors, module_name)
        # Parameterization needs two separate passes over all modules.
        for module_name, module in self._specification.items():
            self.pre_process_parameterization_step_1(module['types'],
                                                     module_name)
        for module_name, module in self._specification.items():
            types = module['types']
            types = self.pre_process_parameterization_step_2(types)
            module['types'] = types
        return self._specification
    def pre_process_components_of(self, type_descriptors, module_name):
        """COMPONENTS OF expansion.
        """
        for type_descriptor in type_descriptors:
            self.pre_process_components_of_type(type_descriptor,
                                                module_name)
    def pre_process_components_of_type(self, type_descriptor, module_name):
        """Expand COMPONENTS OF within one descriptor's member list."""
        if 'members' not in type_descriptor:
            return
        type_descriptor['members'] = self.pre_process_components_of_expand_members(
            type_descriptor['members'],
            module_name)
    def pre_process_components_of_expand_members(self, members, module_name):
        """Return *members* with every COMPONENTS OF reference replaced by
        deep copies of the referenced type's root members (recursively
        expanded).
        """
        expanded_members = []
        for member in members:
            if member != EXTENSION_MARKER and 'components-of' in member:
                type_descriptor, inner_module_name = self.lookup_type_descriptor(
                    member['components-of'],
                    module_name)
                inner_members = self.pre_process_components_of_expand_members(
                    type_descriptor['members'],
                    inner_module_name)
                # Only root members are pulled in; stop at the extension
                # marker of the referenced type.
                for inner_member in inner_members:
                    if inner_member == EXTENSION_MARKER:
                        break
                    expanded_members.append(deepcopy(inner_member))
            else:
                expanded_members.append(member)
        return expanded_members
    def pre_process_extensibility_implied(self, module, type_descriptors):
        """Make all types extensible.
        """
        # Only applies when the module declares EXTENSIBILITY IMPLIED.
        if not module['extensibility-implied']:
            return
        for type_descriptor in type_descriptors:
            self.pre_process_extensibility_implied_type(type_descriptor)
    def pre_process_extensibility_implied_type(self, type_descriptor):
        """Recursively append an extension marker to every member list
        that does not already end with one.
        """
        if 'members' not in type_descriptor:
            return
        members = type_descriptor['members']
        for member in members:
            if member == EXTENSION_MARKER:
                continue
            if isinstance(member, list):
                # Version group: recurse into each grouped member.
                for type_descriptor in member:
                    self.pre_process_extensibility_implied_type(type_descriptor)
            else:
                self.pre_process_extensibility_implied_type(member)
        if EXTENSION_MARKER not in members:
            members.append(EXTENSION_MARKER)
    def pre_process_tags(self, module, module_name):
        """Add tags where missing.
        """
        # EXPLICIT is the default tagging mode when the module declares
        # none.
        module_tags = module.get('tags', 'EXPLICIT')
        for type_descriptor in module['types'].values():
            # Remembered for is_dummy_reference() during the walk.
            self.current_type_descriptor = type_descriptor
            self.pre_process_tags_type(type_descriptor,
                                       module_tags,
                                       module_name)
def is_dummy_reference(self, type_name):
if 'parameters' not in self.current_type_descriptor:
return False
if type_name in self.current_type_descriptor['parameters']:
return True
return False
    def pre_process_tags_type(self,
                              type_descriptor,
                              module_tags,
                              module_name):
        """Fill in the tag 'kind' (IMPLICIT/EXPLICIT) of one descriptor
        and recurse into its members/element.
        """
        type_name = type_descriptor['type']
        if 'tag' in type_descriptor:
            tag = type_descriptor['tag']
            resolved_type_name = self.resolve_type_name(type_name, module_name)
            if 'kind' not in tag:
                if resolved_type_name == 'CHOICE':
                    # A tagged CHOICE is always tagged explicitly.
                    tag['kind'] = 'EXPLICIT'
                elif self.is_dummy_reference(type_name):
                    tag['kind'] = 'EXPLICIT'
                elif module_tags in ['IMPLICIT', 'EXPLICIT']:
                    tag['kind'] = module_tags
                else:
                    # AUTOMATIC tagging implies implicit member tags.
                    tag['kind'] = 'IMPLICIT'
        # SEQUENCE, SET and CHOICE.
        if 'members' in type_descriptor:
            self.pre_process_tags_type_members(type_descriptor,
                                               module_tags,
                                               module_name)
        # SEQUENCE OF and SET OF.
        if 'element' in type_descriptor:
            self.pre_process_tags_type(type_descriptor['element'],
                                       module_tags,
                                       module_name)
    def pre_process_tags_type_members(self,
                                      type_descriptor,
                                      module_tags,
                                      module_name):
        """Tag the members of a SEQUENCE/SET/CHOICE, numbering them
        sequentially under AUTOMATIC TAGS when no member is hand-tagged.
        """
        def is_any_member_tagged(members):
            for member in members:
                if member == EXTENSION_MARKER:
                    continue
                if 'tag' in member:
                    return True
            return False
        number = None
        members = flatten(type_descriptor['members'])
        # Add tag number to all members if AUTOMATIC TAGS are
        # selected and no member is tagged.
        if module_tags == 'AUTOMATIC' and not is_any_member_tagged(members):
            number = 0
        for member in members:
            if member == EXTENSION_MARKER:
                continue
            if number is not None:
                if 'tag' not in member:
                    member['tag'] = {}
                member['tag']['number'] = number
                number += 1
            self.pre_process_tags_type(member,
                                       module_tags,
                                       module_name)
    def pre_process_default_value(self, type_descriptors, module_name):
        """SEQUENCE and SET default member value cleanup.
        """
        sequences_and_sets = self.get_type_descriptors(
            type_descriptors,
            ['SEQUENCE', 'SET'])
        for type_descriptor in sequences_and_sets:
            for member in type_descriptor['members']:
                if member == EXTENSION_MARKER:
                    continue
                if 'default' not in member:
                    continue
                resolved_member = self.resolve_type_descriptor(member,
                                                               module_name)
                if resolved_member['type'] == 'BIT STRING':
                    self.pre_process_default_value_bit_string(member,
                                                              resolved_member)
                if resolved_member['type'] == 'OCTET STRING':
                    self.pre_process_default_value_octet_string(member)
                if resolved_member['type'] == 'ENUMERATED' and self._numeric_enums:
                    # Map the symbolic default to its numeric value.
                    for key, value in resolved_member['values']:
                        if key == member['default']:
                            member['default'] = value
                            break
    def pre_process_default_value_bit_string(self, member, resolved_member):
        """Normalize a BIT STRING default to a ``(bytes, number_of_bits)``
        tuple.  Accepts a list of named bits, a '0x...' hex literal or a
        '0b...' binary literal.
        """
        default = member['default']
        if isinstance(default, tuple):
            # Already pre-processed.
            return
        if isinstance(default, list):
            # List of named bits; bit numbers count from the MSB.
            named_bits = dict(resolved_member['named-bits'])
            reversed_mask = 0
            for name in default:
                reversed_mask |= (1 << int(named_bits[name]))
            mask = int(bin(reversed_mask)[2:][::-1], 2)
            number_of_bits = reversed_mask.bit_length()
        elif default.startswith('0x'):
            if len(default) % 2 == 1:
                default += '0'
            # The '01' marker keeps leading zero digits significant; the
            # xor below removes the marker bit again.
            default = '01' + default[2:]
            mask = int(default, 16)
            mask >>= lowest_set_bit(mask)
            number_of_bits = mask.bit_length() - 1
            mask ^= (1 << number_of_bits)
        elif default == '0b':
            # Empty bit string.
            number_of_bits = 0
        else:
            mask = int(default, 2)
            mask >>= lowest_set_bit(mask)
            number_of_bits = len(default) - 2
        if number_of_bits > 0:
            mask = bitstruct.pack('u{}'.format(number_of_bits), mask)
        else:
            mask = b''
        member['default'] = (mask, number_of_bits)
    def pre_process_default_value_octet_string(self, member):
        """Normalize an OCTET STRING default given as a '0b...' or
        '0x...' literal into raw bytes.
        """
        default = member['default']
        if sys.version_info[0] > 2 and isinstance(default, bytes):
            # Already pre-processed.
            return
        if default.startswith('0b'):
            default = default[2:]
            # Pad to whole bytes.
            if len(default) % 8 != 0:
                default += '0' * (-len(default) % 8)
            # The '11111111' prefix preserves leading zero bits through
            # int(); hex()[4:] drops '0x' plus that marker byte.
            member['default'] = binascii.unhexlify(
                hex(int('11111111' + default, 2))[4:]
            )
        elif default.startswith('0x'):
            default = default[2:]
            if len(default) % 2 == 1:
                default += '0'
            member['default'] = binascii.unhexlify(default)
    def pre_process_parameterization_step_1(self, types, module_name):
        """X.683 parameterization pre processing - step 1.
        """
        for type_name, type_descriptor in types.items():
            # Skip if the type is parameterized.
            if 'parameters' in type_descriptor:
                continue
            self.pre_process_parameterization_step_1_type(type_descriptor,
                                                          type_name,
                                                          module_name)
    def pre_process_parameterization_step_1_type(self,
                                                 type_descriptor,
                                                 type_name,
                                                 module_name):
        """Recursively find and replace all parameterized types with their
        actual types.
        """
        # SEQUENCE, SET and CHOICE.
        if 'members' in type_descriptor:
            for member in type_descriptor['members']:
                if member == EXTENSION_MARKER:
                    continue
                self.pre_process_parameterization_step_1_type(member,
                                                              type_name,
                                                              module_name)
        # SEQUENCE OF and SET OF.
        if 'element' in type_descriptor:
            self.pre_process_parameterization_step_1_type(
                type_descriptor['element'],
                type_name,
                module_name)
        # Just return if the type is not using a parameterized type.
        if 'actual-parameters' not in type_descriptor:
            return
        (parameterized_type_descriptor,
         parameterized_module_name) = self.lookup_type_descriptor(
             type_descriptor['type'],
             module_name)
        if 'parameters' not in parameterized_type_descriptor:
            raise CompileError(
                "Type '{}' in module '{}' is not parameterized.".format(
                    type_descriptor['type'],
                    parameterized_module_name))
        dummy_parameters = parameterized_type_descriptor['parameters']
        actual_parameters = type_descriptor['actual-parameters']
        if len(dummy_parameters) != len(actual_parameters):
            raise CompileError(
                "Parameterized type '{}' in module '{}' takes {} "
                "parameters, but {} are given in type '{}' in "
                "module '{}'.".format(type_descriptor['type'],
                                      parameterized_module_name,
                                      len(dummy_parameters),
                                      len(actual_parameters),
                                      type_name,
                                      module_name))
        # Work on a copy of the template so it can be reused elsewhere.
        parameterized_type_descriptor = deepcopy(parameterized_type_descriptor)
        self.pre_process_parameterization_step_1_dummy_to_actual_type(
            parameterized_type_descriptor,
            dummy_parameters,
            actual_parameters,
            parameterized_module_name)
        self.pre_process_parameterization_step_1_type(
            parameterized_type_descriptor,
            type_name,
            parameterized_module_name)
        type_descriptor.update(parameterized_type_descriptor)
        if 'module-name' not in type_descriptor:
            if module_name != parameterized_module_name:
                type_descriptor['module-name'] = parameterized_module_name
        if 'parameters' in type_descriptor:
            del type_descriptor['parameters']
        del type_descriptor['actual-parameters']
    def pre_process_parameterization_step_1_dummy_to_actual_type(
            self,
            type_descriptor,
            dummy_parameters,
            actual_parameters,
            module_name):
        """Recursively substitute dummy parameter references in
        *type_descriptor* with the corresponding actual parameters,
        including SIZE and value-range constraints.
        """
        if 'members' in type_descriptor:
            for member in type_descriptor['members']:
                if member == EXTENSION_MARKER:
                    continue
                self.pre_process_parameterization_step_1_dummy_to_actual_type(
                    member,
                    dummy_parameters,
                    actual_parameters,
                    module_name)
        elif 'element' in type_descriptor:
            self.pre_process_parameterization_step_1_dummy_to_actual_type(
                type_descriptor['element'],
                dummy_parameters,
                actual_parameters,
                module_name)
        # Replace dummy with actual in current type descriptor.
        for dummy_parameter, actual_parameter in zip(dummy_parameters,
                                                     actual_parameters):
            if type_descriptor['type'] == dummy_parameter:
                type_descriptor.update(actual_parameter)
            if 'actual-parameters' in type_descriptor:
                # Forward substitution into nested parameterized uses.
                for i, parameter in enumerate(type_descriptor['actual-parameters']):
                    if parameter['type'] == dummy_parameter:
                        type_descriptor['actual-parameters'][i] = actual_parameter
            if 'size' in type_descriptor:
                actual_size = []
                for item in type_descriptor['size']:
                    if isinstance(item, tuple):
                        minimum, maximum = item
                        if minimum == dummy_parameter:
                            minimum = actual_parameter
                        if maximum == dummy_parameter:
                            maximum = actual_parameter
                        item = (minimum, maximum)
                    elif item == dummy_parameter:
                        item = actual_parameter
                    actual_size.append(item)
                type_descriptor['size'] = actual_size
            if 'restricted-to' in type_descriptor:
                actual_restricted_to = []
                for minimum, maximum in type_descriptor['restricted-to']:
                    if minimum == dummy_parameter:
                        minimum = actual_parameter
                    if maximum == dummy_parameter:
                        maximum = actual_parameter
                    actual_restricted_to.append((minimum, maximum))
                type_descriptor['restricted-to'] = actual_restricted_to
def pre_process_parameterization_step_2(self, types):
"""X.683 parameterization pre processing - step 2.
"""
# Remove parameterized types as they are no longer needed.
return {
type_name: type_descriptor
for type_name, type_descriptor in types.items()
if 'parameters' not in type_descriptor
}
    def resolve_type_name(self, type_name, module_name):
        """Returns the ASN.1 type name of given type.
        """
        try:
            # Follow references until a lookup fails, which means a
            # built-in type name has been reached.
            while True:
                if is_object_class_type_name(type_name):
                    type_name, module_name = self.lookup_object_class_type_name(
                        type_name,
                        module_name)
                else:
                    type_descriptor, module_name = self.lookup_type_descriptor(
                        type_name,
                        module_name)
                    type_name = type_descriptor['type']
        except CompileError:
            pass
        return type_name
    def resolve_type_descriptor(self, type_descriptor, module_name):
        """Return the descriptor of the underlying built-in type that
        *type_descriptor* (transitively) refers to.
        """
        type_name = type_descriptor['type']
        try:
            # Follow references until a lookup fails, which means a
            # built-in type has been reached.
            while True:
                if is_object_class_type_name(type_name):
                    type_name, module_name = self.lookup_object_class_type_name(
                        type_name,
                        module_name)
                else:
                    type_descriptor, module_name = self.lookup_type_descriptor(
                        type_name,
                        module_name)
                    type_name = type_descriptor['type']
        except CompileError:
            pass
        return type_descriptor
def get_type_descriptors(self, type_descriptors, type_names):
result = []
for type_descriptor in type_descriptors:
result += self.get_type_descriptors_type(type_descriptor,
type_names)
return result
    def get_type_descriptors_type(self, type_descriptor, type_names):
        """Return *type_descriptor* and all of its nested descriptors
        whose type is in *type_names*.
        """
        type_descriptors = []
        type_name = type_descriptor['type']
        if type_name in type_names:
            type_descriptors.append(type_descriptor)
        if 'members' in type_descriptor:
            for member in type_descriptor['members']:
                if member == EXTENSION_MARKER:
                    continue
                if isinstance(member, list):
                    # Version group: recurse into the grouped members.
                    type_descriptors.extend(self.get_type_descriptors(member,
                                                                      type_names))
                else:
                    type_descriptors += self.get_type_descriptors_type(member,
                                                                       type_names)
        if 'element' in type_descriptor:
            type_descriptors += self.get_type_descriptors_type(
                type_descriptor['element'],
                type_names)
        return type_descriptors
def process_type(self, type_name, type_descriptor, module_name):
return NotImplementedError('To be implemented by subclasses.')
def compile_type(self, name, type_descriptor, module_name):
return NotImplementedError('To be implemented by subclasses.')
    def compile_open_types(self, name, type_descriptor, module_name):
        """Compile the open types wrapper for given type. Returns ``None`` if
        given type does not have any open types.
        """
        compiled = None
        type_name = type_descriptor['type']
        if type_name in ['SEQUENCE', 'SET']:
            compiled_members = []
            for member in type_descriptor['members']:
                if member == EXTENSION_MARKER:
                    continue
                if isinstance(member, list):
                    # ToDo: Handle groups.
                    continue
                if is_open_type(member['type']):
                    if 'table' in member:
                        table = member['table']
                        if isinstance(table, list):
                            compiled_members.append(OpenType(member['name'],
                                                             table))
                else:
                    # Recurse; nested members may contain open types.
                    compiled_member = self.compile_open_types(member['name'],
                                                              member,
                                                              module_name)
                    if compiled_member is not None:
                        compiled_members.append(compiled_member)
            if compiled_members:
                compiled = OpenTypeSequence(name, compiled_members)
        elif type_name in ['SEQUENCE OF', 'SET OF']:
            compiled_element = self.compile_open_types('',
                                                       type_descriptor['element'],
                                                       module_name)
            if compiled_element:
                compiled = OpenTypeSequenceOf(name, compiled_element)
        elif type_name == 'CHOICE':
            # ToDo: Handle CHOICE.
            pass
        else:
            pass
        return compiled
    def compile_user_type(self, name, type_name, module_name):
        """Compile a reference to a user-defined type, caching the result
        so each (name, type, module) combination compiles only once.
        """
        compiled = self.get_compiled_type(name,
                                          type_name,
                                          module_name)
        if compiled is None:
            self.types_backtrace_push(type_name)
            compiled = self.compile_type(
                name,
                *self.lookup_type_descriptor(
                    type_name,
                    module_name))
            compiled.type_name = type_name
            compiled.module_name = module_name
            self.types_backtrace_pop()
            self.set_compiled_type(name,
                                   type_name,
                                   module_name,
                                   compiled)
        return compiled
    def compile_members(self,
                        members,
                        module_name,
                        sort_by_tag=False):
        """Compile member descriptors into
        ``(compiled_members, has_extension_marker)``.  Version groups
        (lists) are flattened into the member list.
        """
        compiled_members = []
        has_extension_marker = False
        for member in members:
            if member == EXTENSION_MARKER:
                has_extension_marker = True
                continue
            if isinstance(member, list):
                group_members, _ = self.compile_members(member,
                                                        module_name)
                compiled_members.extend(group_members)
                continue
            compiled_member = self.compile_member(member, module_name)
            compiled_members.append(compiled_member)
        if sort_by_tag:
            compiled_members = sorted(compiled_members, key=attrgetter('tag'))
        return compiled_members, has_extension_marker
def compile_root_member(self, member, module_name, compiled_members):
compiled_member = self.compile_member(member,
module_name)
compiled_members.append(compiled_member)
    def compile_member(self, member, module_name):
        """Compile one member descriptor, applying OPTIONAL, DEFAULT and
        SIZE decorations on a copy so the cached base type stays clean.
        """
        if is_object_class_type_name(member['type']):
            member, class_module_name = self.convert_object_class_type_descriptor(
                member,
                module_name)
            compiled_member = self.compile_type(member['name'],
                                                member,
                                                class_module_name)
        else:
            compiled_member = self.compile_type(member['name'],
                                                member,
                                                module_name)
        if 'optional' in member:
            compiled_member = self.copy(compiled_member)
            compiled_member.optional = member['optional']
        if 'default' in member:
            compiled_member = self.copy(compiled_member)
            compiled_member.set_default(member['default'])
        if 'size' in member:
            compiled_member = self.copy(compiled_member)
            compiled_member.set_size_range(*self.get_size_range(member,
                                                                module_name))
        return compiled_member
def get_size_range(self, type_descriptor, module_name):
"""Returns a tuple of the minimum and maximum values allowed according
the the ASN.1 specification SIZE parameter. Returns (None,
None, None) if the type does not have a SIZE parameter.
"""
size = type_descriptor.get('size', None)
if size is None:
minimum = None
maximum = None
has_extension_marker = None
else:
if isinstance(size[0], tuple):
minimum, maximum = size[0]
else:
minimum = size[0]
maximum = size[0]
has_extension_marker = (EXTENSION_MARKER in size)
if isinstance(minimum, str):
if minimum != 'MIN':
minimum = self.lookup_value(minimum, module_name)[0]['value']
if isinstance(maximum, str):
if maximum != 'MAX':
maximum = self.lookup_value(maximum, module_name)[0]['value']
return minimum, maximum, has_extension_marker
def get_enum_values(self, type_descriptor, module_name):
"""Converts the enum values to ints if they are value references.
"""
enum_values = []
for value in type_descriptor['values']:
if value != EXTENSION_MARKER and not isinstance(value[1], int):
lookup = self.lookup_value(value[1], module_name)
enum_values.append((value[0], lookup[0]['value']))
else:
enum_values.append(value)
return enum_values
def get_restricted_to_range(self, type_descriptor, module_name):
def convert_value(value):
try:
value = float(value)
except ValueError:
if not is_type_name(value):
try:
resolved_type_descriptor = self.resolve_type_descriptor(
type_descriptor,
module_name)
value = resolved_type_descriptor['named-numbers'][value]
except KeyError:
value = self.lookup_value(value,
module_name)[0]['value']
return value
restricted_to = type_descriptor['restricted-to']
if isinstance(restricted_to[0], tuple):
minimum, maximum = restricted_to[0]
else:
minimum = restricted_to[0]
maximum = restricted_to[0]
if isinstance(minimum, str):
minimum = convert_value(minimum)
if isinstance(maximum, str):
maximum = convert_value(maximum)
has_extension_marker = (EXTENSION_MARKER in restricted_to)
return minimum, maximum, has_extension_marker
def get_with_components(self, type_descriptor):
return type_descriptor.get('with-components', None)
def get_named_bits(self, type_descriptor, module_name):
named_bits = type_descriptor.get('named-bits', None)
if named_bits is not None:
named_bit_values = []
for value in named_bits:
if value != EXTENSION_MARKER and not value[1].isdigit():
lookup = self.lookup_value(value[1], module_name)
named_bit_values.append((value[0], lookup[0]['value']))
else:
named_bit_values.append((value[0], int(value[1])))
return named_bit_values
def is_explicit_tag(self, type_descriptor):
try:
return type_descriptor['tag']['kind'] == 'EXPLICIT'
except KeyError:
pass
return False
def get_module_name(self, type_descriptor, module_name):
module_name = type_descriptor.get('module-name', module_name)
try:
_, module_name = self.lookup_type_descriptor(type_descriptor['type'],
module_name)
except CompileError:
pass
return module_name
def lookup_in_modules(self, section, debug_string, name, module_name):
begin_debug_string = debug_string[:1].upper() + debug_string[1:]
module = self._specification[module_name]
if name in module[section]:
return module[section][name], module_name
else:
for from_module_name, imports in module['imports'].items():
if name not in imports:
continue
if from_module_name not in self._specification:
raise CompileError(
"Module '{}' cannot import {} '{}' from missing "
"module '{}'.".format(module_name,
debug_string,
name,
from_module_name))
try:
return self.lookup_in_modules(section,
debug_string,
name,
from_module_name)
except CompileError:
raise CompileError(
"{} '{}' imported by module '{}' not found in "
"module '{}'.".format(
begin_debug_string,
name,
module_name,
from_module_name))
raise CompileError("{} '{}' not found in module '{}'.".format(
begin_debug_string,
name,
module_name))
def lookup_type_descriptor(self, type_name, module_name):
return self.lookup_in_modules('types', 'type', type_name, module_name)
def lookup_value(self, value_name, module_name):
return self.lookup_in_modules('values', 'value', value_name, module_name)
def lookup_object_class_descriptor(self, object_class_name, module_name):
return self.lookup_in_modules('object-classes',
'object class',
object_class_name,
module_name)
def lookup_object_class_type_name(self, type_name, module_name):
class_name, member_name = type_name.split('.')
result = self.lookup_object_class_descriptor(class_name,
module_name)
object_class_descriptor, module_name = result
for member in object_class_descriptor['members']:
if member['name'] == member_name:
return member['type'], module_name
def get_compiled_type(self, name, type_name, module_name):
try:
return self.compiled[module_name][type_name][name]
except KeyError:
return None
def set_compiled_type(self, name, type_name, module_name, compiled):
if module_name not in self.compiled:
self.compiled[module_name] = {}
if type_name not in self.compiled[module_name]:
self.compiled[module_name][type_name] = {}
self.compiled[module_name][type_name][name] = compiled
def convert_object_class_type_descriptor(self, type_descriptor, module_name):
type_name, module_name = self.lookup_object_class_type_name(
type_descriptor['type'],
module_name)
type_descriptor = deepcopy(type_descriptor)
type_descriptor['type'] = type_name
return type_descriptor, module_name
def copy(self, compiled_type):
if not isinstance(compiled_type, Recursive):
compiled_type = copy(compiled_type)
return compiled_type
def set_compiled_restricted_to(self, compiled, type_descriptor, module_name):
compiled = self.copy(compiled)
compiled.set_restricted_to_range(
*self.get_restricted_to_range(type_descriptor,
module_name))
return compiled
def external_type_descriptor(self):
return {
'type': 'SEQUENCE',
'tag': {
'class': 'UNIVERSAL',
'number': 8,
'kind': 'IMPLICIT'
},
'members': [
{
'name': 'direct-reference',
'type': 'OBJECT IDENTIFIER',
'optional': True
},
{
'name': 'indirect-reference',
'type': 'INTEGER',
'optional': True
},
{
'name': 'data-value-descriptor',
'type': 'ObjectDescriptor',
'optional': True
},
{
'name': 'encoding',
'type': 'CHOICE',
'members': [
{
'name': 'single-ASN1-type',
'type': 'NULL', # ToDo: Should be ABSTRACT-SYNTAX.&Type
'tag': {
'number': 0
}
},
{
'name': 'octet-aligned',
'type': 'OCTET STRING',
'tag': {
'number': 1,
'kind': 'IMPLICIT'
}
},
{
'name': 'arbitrary',
'type': 'BIT STRING',
'tag': {
'number': 2,
'kind': 'IMPLICIT'
}
}
]
}
]
}
def enum_values_as_dict(values):
return {
value[1]: value[0]
for value in values
if value != EXTENSION_MARKER
}
def enum_values_split(values):
if EXTENSION_MARKER in values:
index = values.index(EXTENSION_MARKER)
return values[:index], values[index + 1:]
else:
return values, None
def pre_process(specification):
return Compiler(specification).pre_process()
| mit | 83e27553e63a2cb3e4dd263f8592e4d0 | 33.409357 | 86 | 0.503763 | 4.865682 | false | false | false | false |
beproud/beproudbot | src/haro/plugins/kudo.py | 1 | 2335 | from slackbot.bot import respond_to, listen_to
from sqlalchemy import func
from db import Session
from haro.botmessage import botsend
from haro.decorators import call_when_sls_haro_not_installed
from haro.plugins.kudo_models import KudoHistory
from haro.slack import get_user_name
HELP = """
- `<name>++`: 指定された名称に対して++します
- `$kudo help`: kudoコマンドの使い方を返す
"""
@listen_to(r'^(.*)\s*(?<!\+)\+\+$')
@call_when_sls_haro_not_installed
def update_kudo(message, names):
""" 指定された名前に対して ++ する
OK:
name++、name ++、name ++、@name++、name1 name2++
NG:
name+ +、name++hoge、 name1,name2++
:param message: slackbot.dispatcher.Message
:param name str: ++する対象の名前
"""
slack_id = message.body['user']
name_list = []
for name in [x for x in names.split(' ') if x]:
# slackのsuggest機能でユーザーを++した場合(例: @wan++)、name引数は
# `<@{slack_id}>` というstr型で渡ってくるので対応
if get_user_name(name.lstrip('<@').rstrip('>')):
name = get_user_name(name.lstrip('<@').rstrip('>'))
s = Session()
kudo = (s.query(KudoHistory)
.filter(KudoHistory.name == name)
.filter(KudoHistory.from_user_id == slack_id)
.one_or_none())
if kudo is None:
# name ×from_user_id の組み合わせが存在していない -> 新規登録
s.add(KudoHistory(name=name, from_user_id=slack_id, delta=1))
s.commit()
else:
# name ×from_user_id の組み合わせが存在 -> 更新
kudo.delta = kudo.delta + 1
s.commit()
q = (s.query(
func.sum(KudoHistory.delta).label('total_count'))
.filter(KudoHistory.name == name))
total_count = q.one().total_count
name_list.append((name, total_count))
msg = ['({}: 通算 {})'.format(n, tc) for n, tc in name_list]
botsend(message, '\n'.join(msg))
@respond_to(r'^kudo\s+help$')
@call_when_sls_haro_not_installed
def show_help_alias_commands(message):
"""Kudoコマンドのhelpを表示
:param message: slackbotの各種パラメータを保持したclass
"""
botsend(message, HELP)
| mit | 260a274421718f2d6b2a69bcdd13227f | 28.471429 | 73 | 0.588463 | 2.553218 | false | false | false | false |
scikit-learn-contrib/imbalanced-learn | imblearn/metrics/pairwise.py | 1 | 7881 | """Metrics to perform pairwise computation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
from scipy.spatial import distance_matrix
from sklearn.base import BaseEstimator
from sklearn.utils import check_consistent_length
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
class ValueDifferenceMetric(BaseEstimator):
r"""Class implementing the Value Difference Metric.
This metric computes the distance between samples containing only
categorical features. The distance between feature values of two samples is
defined as:
.. math::
\delta(x, y) = \sum_{c=1}^{C} |p(c|x_{f}) - p(c|y_{f})|^{k} \ ,
where :math:`x` and :math:`y` are two samples and :math:`f` a given
feature, :math:`C` is the number of classes, :math:`p(c|x_{f})` is the
conditional probability that the output class is :math:`c` given that
the feature value :math:`f` has the value :math:`x` and :math:`k` an
exponent usually defined to 1 or 2.
The distance for the feature vectors :math:`X` and :math:`Y` is
subsequently defined as:
.. math::
\Delta(X, Y) = \sum_{f=1}^{F} \delta(X_{f}, Y_{f})^{r} \ ,
where :math:`F` is the number of feature and :math:`r` an exponent usually
defined equal to 1 or 2.
The definition of this distance was propoed in [1]_.
Read more in the :ref:`User Guide <vdm>`.
.. versionadded:: 0.8
Parameters
----------
n_categories : "auto" or array-like of shape (n_features,), default="auto"
The number of unique categories per features. If `"auto"`, the number
of categories will be computed from `X` at `fit`. Otherwise, you can
provide an array-like of such counts to avoid computation. You can use
the fitted attribute `categories_` of the
:class:`~sklearn.preprocesssing.OrdinalEncoder` to deduce these counts.
k : int, default=1
Exponent used to compute the distance between feature value.
r : int, default=2
Exponent used to compute the distance between the feature vector.
Attributes
----------
n_categories_ : ndarray of shape (n_features,)
The number of categories per features.
proba_per_class_ : list of ndarray of shape (n_categories, n_classes)
List of length `n_features` containing the conditional probabilities
for each category given a class.
See Also
--------
sklearn.neighbors.DistanceMetric : Interface for fast metric computation.
Notes
-----
The input data `X` are expected to be encoded by an
:class:`~sklearn.preprocessing.OrdinalEncoder` and the data type is used
should be `np.int32`. If other data types are given, `X` will be converted
to `np.int32`.
References
----------
.. [1] Stanfill, Craig, and David Waltz. "Toward memory-based reasoning."
Communications of the ACM 29.12 (1986): 1213-1228.
Examples
--------
>>> import numpy as np
>>> X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
>>> y = [1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1]
>>> from sklearn.preprocessing import OrdinalEncoder
>>> encoder = OrdinalEncoder(dtype=np.int32)
>>> X_encoded = encoder.fit_transform(X)
>>> from imblearn.metrics.pairwise import ValueDifferenceMetric
>>> vdm = ValueDifferenceMetric().fit(X_encoded, y)
>>> pairwise_distance = vdm.pairwise(X_encoded)
>>> pairwise_distance.shape
(30, 30)
>>> X_test = np.array(["green", "red", "blue"]).reshape(-1, 1)
>>> X_test_encoded = encoder.transform(X_test)
>>> vdm.pairwise(X_test_encoded)
array([[0. , 0.04, 1.96],
[0.04, 0. , 1.44],
[1.96, 1.44, 0. ]])
"""
def __init__(self, *, n_categories="auto", k=1, r=2):
self.n_categories = n_categories
self.k = k
self.r = r
def fit(self, X, y):
"""Compute the necessary statistics from the training set.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
y : ndarray of shape (n_features,)
The target.
Returns
-------
self : object
Return the instance itself.
"""
check_consistent_length(X, y)
X, y = self._validate_data(X, y, reset=True, dtype=np.int32)
if isinstance(self.n_categories, str) and self.n_categories == "auto":
# categories are expected to be encoded from 0 to n_categories - 1
self.n_categories_ = X.max(axis=0) + 1
else:
if len(self.n_categories) != self.n_features_in_:
raise ValueError(
f"The length of n_categories is not consistent with the "
f"number of feature in X. Got {len(self.n_categories)} "
f"elements in n_categories and {self.n_features_in_} in "
f"X."
)
self.n_categories_ = np.array(self.n_categories, copy=False)
classes = unique_labels(y)
# list of length n_features of ndarray (n_categories, n_classes)
# compute the counts
self.proba_per_class_ = [
np.empty(shape=(n_cat, len(classes)), dtype=np.float64)
for n_cat in self.n_categories_
]
for feature_idx in range(self.n_features_in_):
for klass_idx, klass in enumerate(classes):
self.proba_per_class_[feature_idx][:, klass_idx] = np.bincount(
X[y == klass, feature_idx],
minlength=self.n_categories_[feature_idx],
)
# normalize by the summing over the classes
with np.errstate(invalid="ignore"):
# silence potential warning due to in-place division by zero
for feature_idx in range(self.n_features_in_):
self.proba_per_class_[feature_idx] /= (
self.proba_per_class_[feature_idx].sum(axis=1).reshape(-1, 1)
)
np.nan_to_num(self.proba_per_class_[feature_idx], copy=False)
return self
def pairwise(self, X, Y=None):
"""Compute the VDM distance pairwise.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Y : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Returns
-------
distance_matrix : ndarray of shape (n_samples, n_samples)
The VDM pairwise distance.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, dtype=np.int32)
n_samples_X = X.shape[0]
if Y is not None:
Y = self._validate_data(Y, reset=False, dtype=np.int32)
n_samples_Y = Y.shape[0]
else:
n_samples_Y = n_samples_X
distance = np.zeros(shape=(n_samples_X, n_samples_Y), dtype=np.float64)
for feature_idx in range(self.n_features_in_):
proba_feature_X = self.proba_per_class_[feature_idx][X[:, feature_idx]]
if Y is not None:
proba_feature_Y = self.proba_per_class_[feature_idx][Y[:, feature_idx]]
else:
proba_feature_Y = proba_feature_X
distance += (
distance_matrix(proba_feature_X, proba_feature_Y, p=self.k) ** self.r
)
return distance
| mit | 5f6653799defeaf61496ff0782cd5e28 | 37.072464 | 87 | 0.592184 | 3.765409 | false | false | false | false |
scikit-learn-contrib/imbalanced-learn | imblearn/over_sampling/tests/test_common.py | 1 | 4049 | from collections import Counter
import pytest
import numpy as np
from imblearn.over_sampling import (
ADASYN,
BorderlineSMOTE,
KMeansSMOTE,
SMOTE,
SMOTEN,
SMOTENC,
SVMSMOTE,
)
from imblearn.utils.testing import _CustomNearestNeighbors
@pytest.fixture
def numerical_data():
rng = np.random.RandomState(0)
X = rng.randn(100, 2)
y = np.repeat([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0], 5)
return X, y
@pytest.fixture
def categorical_data():
rng = np.random.RandomState(0)
feature_1 = ["A"] * 10 + ["B"] * 20 + ["C"] * 30
feature_2 = ["A"] * 40 + ["B"] * 20
feature_3 = ["A"] * 20 + ["B"] * 20 + ["C"] * 10 + ["D"] * 10
X = np.array([feature_1, feature_2, feature_3], dtype=object).T
rng.shuffle(X)
y = np.array([0] * 20 + [1] * 40, dtype=np.int32)
y_labels = np.array(["not apple", "apple"], dtype=object)
y = y_labels[y]
return X, y
@pytest.fixture
def heterogeneous_data():
rng = np.random.RandomState(42)
X = np.empty((30, 4), dtype=object)
X[:, :2] = rng.randn(30, 2)
X[:, 2] = rng.choice(["a", "b", "c"], size=30).astype(object)
X[:, 3] = rng.randint(3, size=30)
y = np.array([0] * 10 + [1] * 20)
return X, y, [2, 3]
@pytest.mark.parametrize(
"smote", [BorderlineSMOTE(), SVMSMOTE()], ids=["borderline", "svm"]
)
def test_smote_m_neighbors(numerical_data, smote):
# check that m_neighbors is properly set. Regression test for:
# https://github.com/scikit-learn-contrib/imbalanced-learn/issues/568
X, y = numerical_data
_ = smote.fit_resample(X, y)
assert smote.nn_k_.n_neighbors == 6
assert smote.nn_m_.n_neighbors == 11
@pytest.mark.parametrize(
"smote, neighbor_estimator_name",
[
(ADASYN(random_state=0), "n_neighbors"),
(BorderlineSMOTE(random_state=0), "k_neighbors"),
(KMeansSMOTE(random_state=1), "k_neighbors"),
(SMOTE(random_state=0), "k_neighbors"),
(SVMSMOTE(random_state=0), "k_neighbors"),
],
ids=["adasyn", "borderline", "kmeans", "smote", "svm"],
)
def test_numerical_smote_custom_nn(numerical_data, smote, neighbor_estimator_name):
X, y = numerical_data
params = {
neighbor_estimator_name: _CustomNearestNeighbors(n_neighbors=5),
}
smote.set_params(**params)
X_res, _ = smote.fit_resample(X, y)
assert X_res.shape[0] >= 120
def test_categorical_smote_k_custom_nn(categorical_data):
X, y = categorical_data
smote = SMOTEN(k_neighbors=_CustomNearestNeighbors(n_neighbors=5))
X_res, y_res = smote.fit_resample(X, y)
assert X_res.shape == (80, 3)
assert Counter(y_res) == {"apple": 40, "not apple": 40}
def test_heterogeneous_smote_k_custom_nn(heterogeneous_data):
X, y, categorical_features = heterogeneous_data
smote = SMOTENC(
categorical_features, k_neighbors=_CustomNearestNeighbors(n_neighbors=5)
)
X_res, y_res = smote.fit_resample(X, y)
assert X_res.shape == (40, 4)
assert Counter(y_res) == {0: 20, 1: 20}
@pytest.mark.parametrize(
"smote",
[BorderlineSMOTE(random_state=0), SVMSMOTE(random_state=0)],
ids=["borderline", "svm"],
)
def test_numerical_smote_extra_custom_nn(numerical_data, smote):
X, y = numerical_data
smote.set_params(m_neighbors=_CustomNearestNeighbors(n_neighbors=5))
X_res, y_res = smote.fit_resample(X, y)
assert X_res.shape == (120, 2)
assert Counter(y_res) == {0: 60, 1: 60}
# FIXME: to be removed in 0.12
@pytest.mark.parametrize(
"sampler",
[
ADASYN(random_state=0),
BorderlineSMOTE(random_state=0),
SMOTE(random_state=0),
SMOTEN(random_state=0),
SMOTENC([0], random_state=0),
SVMSMOTE(random_state=0),
],
)
def test_n_jobs_deprecation_warning(numerical_data, sampler):
X, y = numerical_data
sampler.set_params(n_jobs=2)
warning_msg = "The parameter `n_jobs` has been deprecated"
with pytest.warns(FutureWarning, match=warning_msg):
sampler.fit_resample(X, y)
| mit | 3edbfdd7f1b47929f7689ada96c23e38 | 28.34058 | 83 | 0.619659 | 2.847398 | false | true | false | false |
scikit-learn-contrib/imbalanced-learn | imblearn/metrics/tests/test_pairwise.py | 2 | 6396 | """Test for the metrics that perform pairwise distance computation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from sklearn.utils._testing import _convert_container
from imblearn.metrics.pairwise import ValueDifferenceMetric
@pytest.fixture
def data():
rng = np.random.RandomState(0)
feature_1 = ["A"] * 10 + ["B"] * 20 + ["C"] * 30
feature_2 = ["A"] * 40 + ["B"] * 20
feature_3 = ["A"] * 20 + ["B"] * 20 + ["C"] * 10 + ["D"] * 10
X = np.array([feature_1, feature_2, feature_3], dtype=object).T
rng.shuffle(X)
y = rng.randint(low=0, high=2, size=X.shape[0])
y_labels = np.array(["not apple", "apple"], dtype=object)
y = y_labels[y]
return X, y
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("k, r", [(1, 1), (1, 2), (2, 1), (2, 2)])
@pytest.mark.parametrize("y_type", ["list", "array"])
@pytest.mark.parametrize("encode_label", [True, False])
def test_value_difference_metric(data, dtype, k, r, y_type, encode_label):
# Check basic feature of the metric:
# * the shape of the distance matrix is (n_samples, n_samples)
# * computing pairwise distance of X is the same than explicitely between
# X and X.
X, y = data
y = _convert_container(y, y_type)
if encode_label:
y = LabelEncoder().fit_transform(y)
encoder = OrdinalEncoder(dtype=dtype)
X_encoded = encoder.fit_transform(X)
vdm = ValueDifferenceMetric(k=k, r=r)
vdm.fit(X_encoded, y)
dist_1 = vdm.pairwise(X_encoded)
dist_2 = vdm.pairwise(X_encoded, X_encoded)
np.testing.assert_allclose(dist_1, dist_2)
assert dist_1.shape == (X.shape[0], X.shape[0])
assert dist_2.shape == (X.shape[0], X.shape[0])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("k, r", [(1, 1), (1, 2), (2, 1), (2, 2)])
@pytest.mark.parametrize("y_type", ["list", "array"])
@pytest.mark.parametrize("encode_label", [True, False])
def test_value_difference_metric_property(dtype, k, r, y_type, encode_label):
# Check the property of the vdm distance. Let's check the property
# described in "Improved Heterogeneous Distance Functions", D.R. Wilson and
# T.R. Martinez, Journal of Artificial Intelligence Research 6 (1997) 1-34
# https://arxiv.org/pdf/cs/9701101.pdf
#
# "if an attribute color has three values red, green and blue, and the
# application is to identify whether or not an object is an apple, red and
# green would be considered closer than red and blue because the former two
# both have similar correlations with the output class apple."
# defined our feature
X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
# 0 - not an apple / 1 - an apple
y = np.array([1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1])
y_labels = np.array(["not apple", "apple"], dtype=object)
y = y_labels[y]
y = _convert_container(y, y_type)
if encode_label:
y = LabelEncoder().fit_transform(y)
encoder = OrdinalEncoder(dtype=dtype)
X_encoded = encoder.fit_transform(X)
vdm = ValueDifferenceMetric(k=k, r=r)
vdm.fit(X_encoded, y)
sample_green = encoder.transform([["green"]])
sample_red = encoder.transform([["red"]])
sample_blue = encoder.transform([["blue"]])
for sample in (sample_green, sample_red, sample_blue):
# computing the distance between a sample of the same category should
# give a null distance
dist = vdm.pairwise(sample).squeeze()
assert dist == pytest.approx(0)
# check the property explained in the introduction example
dist_1 = vdm.pairwise(sample_green, sample_red).squeeze()
dist_2 = vdm.pairwise(sample_blue, sample_red).squeeze()
dist_3 = vdm.pairwise(sample_blue, sample_green).squeeze()
# green and red are very close
# blue is closer to red than green
assert dist_1 < dist_2
assert dist_1 < dist_3
assert dist_2 < dist_3
def test_value_difference_metric_categories(data):
# Check that "auto" is equivalent to provide the number categories
# beforehand
X, y = data
encoder = OrdinalEncoder(dtype=np.int32)
X_encoded = encoder.fit_transform(X)
n_categories = np.array([len(cat) for cat in encoder.categories_])
vdm_auto = ValueDifferenceMetric().fit(X_encoded, y)
vdm_categories = ValueDifferenceMetric(n_categories=n_categories)
vdm_categories.fit(X_encoded, y)
np.testing.assert_array_equal(vdm_auto.n_categories_, n_categories)
np.testing.assert_array_equal(vdm_auto.n_categories_, vdm_categories.n_categories_)
def test_value_difference_metric_categories_error(data):
# Check that we raise an error if n_categories is inconsistent with the
# number of features in X
X, y = data
encoder = OrdinalEncoder(dtype=np.int32)
X_encoded = encoder.fit_transform(X)
n_categories = [1, 2]
vdm = ValueDifferenceMetric(n_categories=n_categories)
err_msg = "The length of n_categories is not consistent with the number"
with pytest.raises(ValueError, match=err_msg):
vdm.fit(X_encoded, y)
def test_value_difference_metric_missing_categories(data):
# Check that we don't get issue when a category is missing between 0
# n_categories - 1
X, y = data
encoder = OrdinalEncoder(dtype=np.int32)
X_encoded = encoder.fit_transform(X)
n_categories = np.array([len(cat) for cat in encoder.categories_])
# remove a categories that could be between 0 and n_categories
X_encoded[X_encoded[:, -1] == 1] = 0
np.testing.assert_array_equal(np.unique(X_encoded[:, -1]), [0, 2, 3])
vdm = ValueDifferenceMetric(n_categories=n_categories)
vdm.fit(X_encoded, y)
for n_cats, proba in zip(n_categories, vdm.proba_per_class_):
assert proba.shape == (n_cats, len(np.unique(y)))
def test_value_difference_value_unfitted(data):
# Check that we raise a NotFittedError when `fit` is not not called before
# pairwise.
X, y = data
encoder = OrdinalEncoder(dtype=np.int32)
X_encoded = encoder.fit_transform(X)
with pytest.raises(NotFittedError):
ValueDifferenceMetric().pairwise(X_encoded)
| mit | e1c3d3986a4aed5d357059152ea2d158 | 35.971098 | 87 | 0.665572 | 3.233569 | false | true | false | false |
scikit-learn-contrib/imbalanced-learn | imblearn/over_sampling/_smote/filter.py | 1 | 19979 | """SMOTE variant applying some filtering before the generation process."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# Dzianis Dudnik
# License: MIT
import warnings
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.svm import SVC
from sklearn.utils import check_random_state
from sklearn.utils import _safe_indexing
from ..base import BaseOverSampler
from ...utils import check_neighbors_object
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring
from ...utils._docstring import _random_state_docstring
from ...utils._validation import _deprecate_positional_args
from .base import BaseSMOTE
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class BorderlineSMOTE(BaseSMOTE):
"""Over-sampling using Borderline SMOTE.
This algorithm is a variant of the original SMOTE algorithm proposed in
[2]_. Borderline samples will be detected and used to generate new
synthetic samples.
Read more in the :ref:`User Guide <smote_adasyn>`.
.. versionadded:: 0.4
Parameters
----------
{sampling_strategy}
{random_state}
k_neighbors : int or object, default=5
The nearest neighbors used to define the neighborhood of samples to use
to generate the synthetic samples. You can pass:
- an `int` corresponding to the number of neighbors to use. A
`~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
case.
- an instance of a compatible nearest neighbors algorithm that should
implement both methods `kneighbors` and `kneighbors_graph`. For
instance, it could correspond to a
:class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
any compatible class.
{n_jobs}
.. deprecated:: 0.10
`n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
It was previously used to set `n_jobs` of nearest neighbors
algorithm. From now on, you can pass an estimator where `n_jobs` is
already set instead.
m_neighbors : int or object, default=10
The nearest neighbors used to determine if a minority sample is in
"danger". You can pass:
- an `int` corresponding to the number of neighbors to use. A
`~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
case.
- an instance of a compatible nearest neighbors algorithm that should
implement both methods `kneighbors` and `kneighbors_graph`. For
instance, it could correspond to a
:class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
any compatible class.
kind : {{"borderline-1", "borderline-2"}}, default='borderline-1'
The type of SMOTE algorithm to use one of the following options:
``'borderline-1'``, ``'borderline-2'``.
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
corresponds to the class labels from which to sample and the values
are the number of samples to sample.
nn_k_ : estimator object
Validated k-nearest neighbours created from the `k_neighbors` parameter.
nn_m_ : estimator object
Validated m-nearest neighbours created from the `m_neighbors` parameter.
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
See the original papers: [2]_ for more details.
Supports multi-class resampling. A one-vs.-rest scheme is used as
originally proposed in [1]_.
References
----------
.. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
synthetic minority over-sampling technique," Journal of artificial
intelligence research, 321-357, 2002.
.. [2] H. Han, W. Wen-Yuan, M. Bing-Huan, "Borderline-SMOTE: a new
over-sampling method in imbalanced data sets learning," Advances in
intelligent computing, 878-887, 2005.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
BorderlineSMOTE # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> sm = BorderlineSMOTE(random_state=42)
>>> X_res, y_res = sm.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
k_neighbors=5,
n_jobs=None,
m_neighbors=10,
kind="borderline-1",
):
super().__init__(
sampling_strategy=sampling_strategy,
random_state=random_state,
k_neighbors=k_neighbors,
n_jobs=n_jobs,
)
self.m_neighbors = m_neighbors
self.kind = kind
def _validate_estimator(self):
super()._validate_estimator()
self.nn_m_ = check_neighbors_object(
"m_neighbors", self.m_neighbors, additional_neighbor=1
)
if self.kind not in ("borderline-1", "borderline-2"):
raise ValueError(
f'The possible "kind" of algorithm are '
f'"borderline-1" and "borderline-2".'
f"Got {self.kind} instead."
)
    def _fit_resample(self, X, y):
        """Generate synthetic minority samples along the class borderline.

        Only minority samples flagged as "in danger" (whose m-neighborhood
        is dominated by other classes) seed new samples.
        """
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()

        X_resampled = X.copy()
        y_resampled = y.copy()

        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)

            # Flag minority samples whose m-neighborhood (fitted on the full
            # dataset) is majority-dominated: the "danger" set.
            self.nn_m_.fit(X)
            danger_index = self._in_danger_noise(
                self.nn_m_, X_class, class_sample, y, kind="danger"
            )
            if not any(danger_index):
                # Nothing borderline in this class: skip it.
                continue

            # k-NN within the minority class only; column 0 is each sample
            # itself, so it is dropped from the neighbor lists.
            self.nn_k_.fit(X_class)
            nns = self.nn_k_.kneighbors(
                _safe_indexing(X_class, danger_index), return_distance=False
            )[:, 1:]

            # divergence between borderline-1 and borderline-2
            if self.kind == "borderline-1":
                # Create synthetic samples for borderline points.
                X_new, y_new = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_samples,
                )
                if sparse.issparse(X_new):
                    X_resampled = sparse.vstack([X_resampled, X_new])
                else:
                    X_resampled = np.vstack((X_resampled, X_new))
                y_resampled = np.hstack((y_resampled, y_new))

            elif self.kind == "borderline-2":
                random_state = check_random_state(self.random_state)
                # Random split of the budget between the two sample sources.
                fractions = random_state.beta(10, 10)

                # only minority
                X_new_1, y_new_1 = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    int(fractions * (n_samples + 1)),
                    step_size=1.0,
                )

                # we use a one-vs-rest policy to handle the multiclass in which
                # new samples will be created considering not only the majority
                # class but all over classes.
                X_new_2, y_new_2 = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    _safe_indexing(X, np.flatnonzero(y != class_sample)),
                    nns,
                    int((1 - fractions) * n_samples),
                    step_size=0.5,
                )

                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1, X_new_2))
                y_resampled = np.hstack((y_resampled, y_new_1, y_new_2))

        return X_resampled, y_resampled
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SVMSMOTE(BaseSMOTE):
    """Over-sampling using SVM-SMOTE.

    Variant of SMOTE algorithm which use an SVM algorithm to detect sample to
    use for generating new synthetic samples as proposed in [2]_.

    Read more in the :ref:`User Guide <smote_adasyn>`.

    .. versionadded:: 0.4

    Parameters
    ----------
    {sampling_strategy}

    {random_state}

    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:

        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.

    {n_jobs}

        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.

    m_neighbors : int or object, default=10
        The nearest neighbors used to determine if a minority sample is in
        "danger". You can pass:

        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.

    svm_estimator : estimator object, default=SVC()
        A parametrized :class:`~sklearn.svm.SVC` classifier can be passed.
        A scikit-learn compatible estimator can be passed but it is required
        to expose a `support_` fitted attribute.

    out_step : float, default=0.5
        Step size when extrapolating.

    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.

    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.

    nn_m_ : estimator object
        Validated m-nearest neighbours created from the `m_neighbors` parameter.

    svm_estimator_ : estimator object
        The validated SVM classifier used to detect samples from which to
        generate new synthetic samples.

    n_features_in_ : int
        Number of features in the input dataset.

        .. versionadded:: 0.9

    See Also
    --------
    SMOTE : Over-sample using SMOTE.

    SMOTENC : Over-sample using SMOTE for continuous and categorical features.

    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.

    BorderlineSMOTE : Over-sample using Borderline-SMOTE.

    ADASYN : Over-sample using ADASYN.

    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.

    Notes
    -----
    See the original papers: [2]_ for more details.

    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.

    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.

    .. [2] H. M. Nguyen, E. W. Cooper, K. Kamei, "Borderline over-sampling for
       imbalanced data classification," International Journal of Knowledge
       Engineering and Soft Data Paradigms, 3(1), pp.4-21, 2009.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import \
SVMSMOTE # doctest: +NORMALIZE_WHITESPACE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = SVMSMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """

    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
        m_neighbors=10,
        svm_estimator=None,
        out_step=0.5,
    ):
        # scikit-learn convention: __init__ only stores parameters.
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.m_neighbors = m_neighbors
        self.svm_estimator = svm_estimator
        self.out_step = out_step

    def _validate_estimator(self):
        # Validate the shared SMOTE parameters, then the SVM-specific ones.
        super()._validate_estimator()
        self.nn_m_ = check_neighbors_object(
            "m_neighbors", self.m_neighbors, additional_neighbor=1
        )

        if self.svm_estimator is None:
            # Default classifier, seeded with the sampler's random_state.
            self.svm_estimator_ = SVC(gamma="scale", random_state=self.random_state)
        else:
            self.svm_estimator_ = clone(self.svm_estimator)

    def _fit_resample(self, X, y):
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()
        random_state = check_random_state(self.random_state)
        X_resampled = X.copy()
        y_resampled = y.copy()

        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)

            # Fit the SVM on the full data and keep only the support vectors
            # belonging to the class currently being over-sampled.
            self.svm_estimator_.fit(X, y)
            if not hasattr(self.svm_estimator_, "support_"):
                raise RuntimeError(
                    "`svm_estimator` is required to exposed a `support_` fitted "
                    "attribute. Such estimator belongs to the familly of Support "
                    "Vector Machine."
                )
            support_index = self.svm_estimator_.support_[
                y[self.svm_estimator_.support_] == class_sample
            ]
            support_vector = _safe_indexing(X, support_index)

            # Drop support vectors flagged as noise, then split the rest into
            # "danger" (borderline) and "safe" groups.
            self.nn_m_.fit(X)
            noise_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="noise"
            )
            support_vector = _safe_indexing(
                support_vector, np.flatnonzero(np.logical_not(noise_bool))
            )
            danger_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="danger"
            )
            safety_bool = np.logical_not(danger_bool)

            self.nn_k_.fit(X_class)
            # Randomly split the sample budget between interpolation
            # (danger group) and extrapolation (safe group).
            fractions = random_state.beta(10, 10)
            n_generated_samples = int(fractions * (n_samples + 1))

            if np.count_nonzero(danger_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    return_distance=False,
                )[:, 1:]
                X_new_1, y_new_1 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_generated_samples,
                    step_size=1.0,
                )

            if np.count_nonzero(safety_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    return_distance=False,
                )[:, 1:]
                # Negative step size: extrapolate away from the border.
                X_new_2, y_new_2 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_samples - n_generated_samples,
                    step_size=-self.out_step,
                )

            # NOTE(review): if `danger_bool` and `safety_bool` were both
            # all-False, the `danger == 0` branch below would reference
            # X_new_2 before assignment (UnboundLocalError) — confirm
            # whether that case can occur upstream.
            if np.count_nonzero(danger_bool) > 0 and np.count_nonzero(safety_bool) > 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1, X_new_2))
                y_resampled = np.concatenate((y_resampled, y_new_1, y_new_2), axis=0)
            elif np.count_nonzero(danger_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_2))
                y_resampled = np.concatenate((y_resampled, y_new_2), axis=0)
            elif np.count_nonzero(safety_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1))
                y_resampled = np.concatenate((y_resampled, y_new_1), axis=0)

        return X_resampled, y_resampled
| mit | 47fbd00867b7be5625cb485e1ec7d173 | 35.994444 | 87 | 0.583121 | 3.996999 | false | false | false | false |
scikit-learn-contrib/imbalanced-learn | imblearn/over_sampling/_random_over_sampler.py | 1 | 9497 | """Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import check_array, check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils.sparsefuncs import mean_variance_axis
from .base import BaseOverSampler
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
    """Class to perform random over-sampling.

    Object to over-sample the minority class(es) by picking samples at random
    with replacement. The bootstrap can be generated in a smoothed manner.

    Read more in the :ref:`User Guide <random_over_sampler>`.

    Parameters
    ----------
    {sampling_strategy}

    {random_state}

    shrinkage : float or dict, default=None
        Parameter controlling the shrinkage applied to the covariance matrix.
        when a smoothed bootstrap is generated. The options are:

        - if `None`, a normal bootstrap will be generated without perturbation.
          It is equivalent to `shrinkage=0` as well;
        - if a `float` is given, the shrinkage factor will be used for all
          classes to generate the smoothed bootstrap;
        - if a `dict` is given, the shrinkage factor will specific for each
          class. The key correspond to the targeted class and the value is
          the shrinkage factor.

        The value needs of the shrinkage parameter needs to be higher or equal
        to 0.

        .. versionadded:: 0.8

    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.

    sample_indices_ : ndarray of shape (n_new_samples,)
        Indices of the samples selected.

        .. versionadded:: 0.4

    shrinkage_ : dict or None
        The per-class shrinkage factor used to generate the smoothed bootstrap
        sample. When `shrinkage=None` a normal bootstrap will be generated.

        .. versionadded:: 0.8

    n_features_in_ : int
        Number of features in the input dataset.

        .. versionadded:: 0.9

    See Also
    --------
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.

    SMOTE : Over-sample using SMOTE.

    SMOTENC : Over-sample using SMOTE for continuous and categorical features.

    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.

    SVMSMOTE : Over-sample using SVM-SMOTE variant.

    ADASYN : Over-sample using ADASYN.

    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.

    Notes
    -----
    Supports multi-class resampling by sampling each class independently.
    Supports heterogeneous data as object array containing string and numeric
    data.

    When generating a smoothed bootstrap, this method is also known as Random
    Over-Sampling Examples (ROSE) [1]_.

    .. warning::
       Since smoothed bootstrap are generated by adding a small perturbation
       to the drawn samples, this method is not adequate when working with
       sparse matrices.

    References
    ----------
    .. [1] G Menardi, N. Torelli, "Training and assessing classification
       rules with imbalanced data," Data Mining and Knowledge
       Discovery, 28(1), pp.92-122, 2014.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import \
RandomOverSampler # doctest: +NORMALIZE_WHITESPACE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> ros = RandomOverSampler(random_state=42)
    >>> X_res, y_res = ros.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """

    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        shrinkage=None,
    ):
        # scikit-learn convention: __init__ only stores parameters.
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.shrinkage = shrinkage

    def _check_X_y(self, X, y):
        # A plain bootstrap only copies rows, so no dtype/finiteness
        # constraint is imposed here (dtype=None, force_all_finite=False);
        # numeric checks for the smoothed case happen in _fit_resample.
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        X, y = self._validate_data(
            X,
            y,
            reset=True,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
        )
        return X, y, binarize_y

    def _fit_resample(self, X, y):
        random_state = check_random_state(self.random_state)

        # Normalize `shrinkage` into a per-class mapping (or None).
        if isinstance(self.shrinkage, Real):
            self.shrinkage_ = {
                klass: self.shrinkage for klass in self.sampling_strategy_
            }
        elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
            self.shrinkage_ = self.shrinkage
        else:
            raise ValueError(
                f"`shrinkage` should either be a positive floating number or "
                f"a dictionary mapping a class to a positive floating number. "
                f"Got {repr(self.shrinkage)} instead."
            )

        if self.shrinkage_ is not None:
            # Every class to be resampled must have a shrinkage factor.
            missing_shrinkage_keys = (
                self.sampling_strategy_.keys() - self.shrinkage_.keys()
            )
            if missing_shrinkage_keys:
                raise ValueError(
                    f"`shrinkage` should contain a shrinkage factor for "
                    f"each class that will be resampled. The missing "
                    f"classes are: {repr(missing_shrinkage_keys)}"
                )

            for klass, shrink_factor in self.shrinkage_.items():
                if shrink_factor < 0:
                    raise ValueError(
                        f"The shrinkage factor needs to be >= 0. "
                        f"Got {shrink_factor} for class {klass}."
                    )

            # smoothed bootstrap imposes to make numerical operation; we need
            # to be sure to have only numerical data in X
            try:
                X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
            except ValueError as exc:
                raise ValueError(
                    "When shrinkage is not None, X needs to contain only "
                    "numerical data to later generate a smoothed bootstrap "
                    "sample."
                ) from exc

        X_resampled = [X.copy()]
        y_resampled = [y.copy()]

        sample_indices = range(X.shape[0])
        for class_sample, num_samples in self.sampling_strategy_.items():
            target_class_indices = np.flatnonzero(y == class_sample)
            bootstrap_indices = random_state.choice(
                target_class_indices,
                size=num_samples,
                replace=True,
            )
            # np.append coerces the initial `range` into an ndarray.
            sample_indices = np.append(sample_indices, bootstrap_indices)
            if self.shrinkage_ is not None:
                # generate a smoothed bootstrap with a perturbation
                n_samples, n_features = X.shape
                smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
                    1 / (n_features + 4)
                )
                # Per-feature scale of the current class, computed without
                # densifying sparse input.
                if sparse.issparse(X):
                    _, X_class_variance = mean_variance_axis(
                        X[target_class_indices, :],
                        axis=0,
                    )
                    X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
                else:
                    X_class_scale = np.std(X[target_class_indices, :], axis=0)
                smoothing_matrix = np.diagflat(
                    self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
                )
                # Gaussian noise around the drawn rows.
                X_new = random_state.randn(num_samples, n_features)
                X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
                if sparse.issparse(X):
                    X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
                X_resampled.append(X_new)
            else:
                # generate a bootstrap
                X_resampled.append(_safe_indexing(X, bootstrap_indices))

            y_resampled.append(_safe_indexing(y, bootstrap_indices))

        self.sample_indices_ = np.array(sample_indices)

        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)

        return X_resampled, y_resampled

    def _more_tags(self):
        # Estimator tags consumed by scikit-learn / imbalanced-learn checks.
        return {
            "X_types": ["2darray", "string", "sparse", "dataframe"],
            "sample_indices": True,
            "allow_nan": True,
        }
| mit | 059d0e65de20f001a2ded9dabaf3a97e | 35.519231 | 86 | 0.598104 | 4.186508 | false | false | false | false |
probml/pyprobml | deprecated/scripts/pcaStandardization.py | 2 | 1556 | import superimport
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from pathlib import Path
import os
import scipy.io
# Load the height/weight dataset from the repo's data directory and give
# the raw MATLAB array named columns.
data_dir = Path('.').absolute().parent / 'data' / 'heightWeight'
data = scipy.io.loadmat(data_dir / "heightWeight.mat")['heightWeightData']
data = pd.DataFrame(data).rename(columns = {0:'gender', 1: 'height', 2: 'weight'})
# Categorical scatter plot with the 1D PCA line overlaid.
def make_pca_plot(data):
    """Scatter-plot the two gender groups and overlay the first principal
    component, reconstructed back into (height, weight) space."""
    pca = PCA(1)
    # Project onto the first PC and map back to the original 2-D space.
    X_reconstr = pca.inverse_transform(pca.fit_transform(data[['height','weight']].values))
    # Sorting makes the reconstructed points a monotone polyline for plotting.
    X_reconstr = np.sort(X_reconstr,axis=0)
    fig, ax = plt.subplots(figsize=(6, 6))
    for i, (name, group) in enumerate(data.groupby('gender')):
        # Second group: hollow red circles; first group: blue crosses.
        color = 'red' if i==1 else 'blue'
        marker = 'o' if i==1 else 'x'
        fc = 'none' if i==1 else 'blue'
        ax.scatter(x=group['height'], y=group['weight'], color=color, marker=marker, facecolor=fc, s=100)
    ax.set_ylabel('weight')
    ax.set_xlabel('height')
    ax.plot(X_reconstr[:,0], X_reconstr[:,1], color='black',linewidth=2)
    return fig, ax
# Output directory for saved figures (relative to the scripts directory).
figdir = "../figures"
def save_fig(fname):
    """Write the current matplotlib figure into `figdir` under `fname`."""
    destination = os.path.join(figdir, fname)
    plt.savefig(destination)
# Create and save figures: raw data first, then standardized data.
fig, ax = make_pca_plot(data)
ax.set_title('heightWeightPCA')
save_fig('heightWeightPCA.pdf')
# Standardize (z-score) each column and redo the PCA plot.
data_std = (data - data.mean())/data.std()
fig, ax = make_pca_plot(data_std)
ax.set_title('heightWeightPCAstnd')
save_fig('heightWeightPCAstnd.pdf') | mit | ffdf2c7b7681f3e9acd73037b3ff9ad3 | 30.14 | 105 | 0.665167 | 3.130785 | false | false | false | false |
probml/pyprobml | deprecated/scripts/parzen_window_demo2.py | 1 | 2303 | # Demonstrate a non-parametric (parzen) density estimator in 1D
# Author: Gerardo Durán Martín
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import norm
# Hide the top and right spines on all plots in this script.
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def K(u, axis=0):
    """Boxcar kernel: True where every |u| along `axis` is at most 1/2."""
    return np.all(np.abs(u) <= 0.5, axis=axis)
def p1(x, X, h):
    """
    Kernel density estimate with a uniform (unit-hypercube) kernel.

    Parameters
    ----------
    x : ndarray of shape (n_eval, D)
        Points at which to evaluate the density.
    X : ndarray of shape (N, D)
        Data sample.
    h : float
        Bandwidth (side length of the hypercube kernel).

    Returns
    -------
    ndarray of shape (n_eval,)
        Estimated density at each evaluation point.
    """
    N, D = X.shape
    n_eval, _ = x.shape
    # Scaled query/data differences, arranged as (D, n_eval, N).
    u = ((x - X.T) / h).reshape(D, n_eval, N)
    # A data point contributes iff the query lies inside the hypercube of
    # side h centred on it.  The kernel test is inlined here instead of
    # calling the module-level helper K, making the function self-contained.
    inside = np.all(np.abs(u) <= 0.5, axis=0)
    # Average the counts and normalize by the kernel volume h**D.
    return inside.sum(axis=1) / (N * h ** D)
def kdeg(x, X, h, return_components=False):
    """
    Kernel density estimate with a Gaussian kernel of bandwidth h.

    When `return_components` is True, the per-datapoint (unsummed but
    normalized) kernel responses of shape (n_eval, N) are returned
    instead of their sum.
    """
    N, D = X.shape
    n_eval, _ = x.shape
    # Pairwise query/data differences arranged as (D, n_eval, N).
    diff = x.reshape(D, n_eval, 1) - X.reshape(D, 1, N)
    # Squared Euclidean distances scaled by the bandwidth.
    scaled_sq = norm(diff, ord=2, axis=0) ** 2 / (2 * h ** 2)
    dens = np.exp(-scaled_sq)
    if not return_components:
        dens = dens.sum(axis=1)
    return dens / (N * h * np.sqrt(2 * np.pi))
def main():
    """Plot uniform- and Gaussian-kernel KDEs of a toy 1-D dataset for
    two bandwidths (h=1 and h=2) on a 2x2 grid of axes."""
    data = np.array([-2.1, -1.3, -0.4, 1.9, 5.1, 6.2])[:, None]
    yvals = np.zeros_like(data)
    # Evaluation grid for the density curves.
    xv = np.linspace(-5, 10, 100)[:, None]
    fig, ax = plt.subplots(2, 2)
    # Uniform h=1
    ax[0,0].scatter(data, yvals, marker="x", c="tab:gray")
    ax[0,0].step(xv, p1(xv, data, 1), c="tab:blue", alpha=0.7)
    ax[0,0].set_title("unif, h=1.0")
    # Uniform h=2
    ax[0,1].scatter(data, yvals, marker="x", c="tab:gray")
    ax[0,1].step(xv, p1(xv, data, 2), c="tab:blue", alpha=0.7)
    ax[0,1].set_title("unif, h=2.0")
    # Gaussian h=1: solid curve is the KDE, dashed red curves are the
    # individual per-datapoint kernel components.
    ax[1,0].scatter(data, yvals, marker="x", c="tab:gray", zorder=3)
    ax[1,0].plot(xv, kdeg(xv, data, 1), c="tab:blue", alpha=0.7, zorder=2)
    ax[1,0].plot(xv, kdeg(xv, data, 1, True), c="tab:red", alpha=0.7,
                 linestyle="--", zorder=1, linewidth=1)
    ax[1,0].set_title("gauss, h=1.0")
    # Gaussian h=2
    ax[1,1].scatter(data, yvals, marker="x", c="tab:gray", zorder=3)
    ax[1,1].plot(xv, kdeg(xv, data, 2), c="tab:blue", alpha=0.7, zorder=2)
    ax[1,1].plot(xv, kdeg(xv, data, 2, True), c="tab:red", alpha=0.7,
                 linestyle="--", zorder=1, linewidth=1)
    ax[1,1].set_title("gauss, h=2.0")
    plt.tight_layout()
    plt.savefig("../figures/parzen_window2.pdf", dpi=300)
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| mit | f80b6bd2deb4ed9ebdb3231f0325ba70 | 28.126582 | 74 | 0.552369 | 2.432347 | false | false | false | false |
probml/pyprobml | deprecated/scripts/dirichlet_3d_spiky_plot.py | 1 | 2056 | import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from mpl_toolkits.mplot3d import proj3d
from scipy.stats import dirichlet
grain = 100 # 20 #how many points along each axis to plot
edgedist = 0.005 # 0.008 #How close to an extreme value of say [1,0,0] are we willing to plot.
weight = np.linspace(0, 1, grain)
#Most extreme corners of the sample space
Corner1 = np.array([1.0 - edgedist*2, edgedist, edgedist])
Corner2 = np.array([edgedist, 1.0 - edgedist*2, edgedist])
Corner3 = np.array([edgedist, edgedist, 1.0 - edgedist*2])
#Probability density function that accepts 2D coordiantes
def dpdf(v1, v2, alphavec):
    """Dirichlet density at the simplex point parameterized by (v1, v2).

    The point is the convex combination v1*Corner1 + v2*Corner2 +
    (1 - v1 - v2)*Corner3; NaN is returned outside the simplex.
    """
    if v1 + v2 > 1:
        return np.nan
    point = v1 * Corner1 + v2 * Corner2 + (1.0 - v1 - v2) * Corner3
    return dirichlet.pdf(point, alphavec)
# Dirichlet parameter settings to visualize: symmetric-dense, skewed,
# and sparse (alpha < 1) cases.
alphas = [ [20,20,20], [3,3,20], [0.1,0.1,0.1] ]
for i in range(len(alphas)):
    alphavec = np.array(alphas[i])
    azim = 20
    # Evaluate the density on the (v1, v2) grid; NaN outside the simplex
    # leaves holes in the surface plot.
    probs = np.array([dpdf(v1, v2, alphavec) for v1 in weight for v2 in weight]).reshape(-1,grain)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X,Y = np.meshgrid(weight, weight)
    ax.plot_surface(Y, X, probs, cmap = 'jet', vmin=0, vmax=3,rstride=1,cstride=1, linewidth=0)
    ax.view_init(elev=25, azim=azim)
    ax.set_zlabel('p')
    # Title shows the alpha vector with two decimals.
    ttl = ','.join(['{:0.2f}'.format(d) for d in alphavec])
    ax.set_title(ttl, fontsize=14)
    # Encode 10*alpha[0] into the output filename.
    alpha = int(np.round(alphavec[0]*10))
    plt.tight_layout()
    pml.savefig('dirSimplexAlpha{}Legible.png'.format(alpha))
    plt.show()
# Disabled variant (larger figure, different azimuth); kept for reference.
if 0:
    fig = plt.figure(figsize=(20,15))
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(Y, X, probs, cmap = 'jet', vmin=0, vmax=3,rstride=1,cstride=1, linewidth=0)
    ax.view_init(elev=25, azim=200)
    ax.set_zlabel('p')
    ttl = ','.join(['{:0.2f}'.format(d) for d in alphavec])
    ax.set_title(ttl)
    alpha = np.round(alphavec[0]*10)
    pml.savefig('alpha.pdf')
    plt.show()
| mit | 48f38f49bf2be28cdf2e1ae49b560a28 | 32.704918 | 98 | 0.645428 | 2.666667 | false | false | false | false |
probml/pyprobml | deprecated/scripts/mcmc_utils.py | 1 | 1764 | '''
Author : Ang Ming Liang
'''
import superimport
import numpy as np
#from tqdm.notebook import tqdm
from tqdm import tqdm
def slice_sample(init, dist, iters, sigma, burnin, step_out=True, rng=None):
    """Coordinate-wise slice sampler.

    based on http://homepages.inf.ed.ac.uk/imurray2/teaching/09mlss/

    Parameters
    ----------
    init : ndarray of shape (D,)
        Initial state of the chain.
    dist : callable
        Log-density (up to an additive constant) of the target, evaluated
        at a full state vector.
    iters : int
        Total number of iterations, including burn-in.
    sigma : float or ndarray of shape (D,)
        Initial slice width per dimension (a scalar is broadcast).
    burnin : int
        Number of leading samples to discard.
    step_out : bool, default=True
        Whether to apply the stepping-out procedure to grow the bracket.
    rng : numpy random Generator or RandomState, optional
        Source of randomness; a fresh default Generator is used if None.

    Returns
    -------
    ndarray of shape (D, iters - burnin)
        Post burn-in samples, one column per iteration.
    """
    # set up empty sample holder
    D = len(init)
    samples = np.zeros((D, iters))

    # BUG FIX: the given `sigma` used to be silently overwritten with
    # 5 * np.ones(D), so the parameter had no effect.  It is now honored,
    # with scalars broadcast to one width per dimension.
    sigma = np.asarray(sigma, dtype=float)
    if sigma.ndim == 0:
        sigma = np.full(D, float(sigma))

    # BUG FIX: rng=None used to crash on rng.shuffle; default to a fresh
    # NumPy Generator instead.
    if rng is None:
        rng = np.random.default_rng()

    # initialize
    xx = init.copy()

    for i in tqdm(range(iters)):
        # Update the coordinates in a random order each sweep.
        perm = list(range(D))
        rng.shuffle(perm)
        last_llh = dist(xx)
        for d in perm:
            # Draw the slice level in log space.
            llh0 = last_llh + np.log(rng.random())
            # Randomly position the initial bracket of width sigma[d].
            rr = rng.random(1)
            x_l = xx.copy()
            x_l[d] = x_l[d] - rr * sigma[d]
            x_r = xx.copy()
            x_r[d] = x_r[d] + (1 - rr) * sigma[d]
            if step_out:
                # Step out: grow the bracket until both ends fall below
                # the slice level.
                llh_l = dist(x_l)
                while llh_l > llh0:
                    x_l[d] = x_l[d] - sigma[d]
                    llh_l = dist(x_l)
                llh_r = dist(x_r)
                while llh_r > llh0:
                    x_r[d] = x_r[d] + sigma[d]
                    llh_r = dist(x_r)
            x_cur = xx.copy()
            while True:
                # Shrink: sample uniformly in the bracket; accept if the
                # point is on the slice, otherwise shrink toward xx[d].
                xd = rng.random() * (x_r[d] - x_l[d]) + x_l[d]
                x_cur[d] = xd.copy()
                last_llh = dist(x_cur)
                if last_llh > llh0:
                    xx[d] = xd.copy()
                    break
                elif xd > xx[d]:
                    x_r[d] = xd
                elif xd < xx[d]:
                    x_l[d] = xd
                else:
                    raise RuntimeError('Slice sampler shrank too far.')
        samples[:, i] = xx.copy().ravel()
    return samples[:, burnin:]
| mit | 4aba05b930bdf8504de3148fb0d5a3a3 | 26.5625 | 76 | 0.424036 | 3.089317 | false | false | false | false |
winpython/winpython | diff.py | 2 | 11721 | # -*- coding: utf-8 -*-
#
# Copyright © 2013 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see winpython/__init__.py for details)
"""
WinPython diff script
Created on Tue Jan 29 11:56:54 2013
"""
from __future__ import print_function, with_statement
import os
# import os.path as osp
from pathlib import Path
import re
import shutil
# Local imports
from winpython import utils
# pep503 defines normalized package names: www.python.org/dev/peps/pep-0503
def normalize(name):
    """Normalize a package name per PEP 503: runs of '-', '_' and '.'
    collapse to a single '-', and the result is lower-cased."""
    collapsed = re.sub(r"[-_.]+", "-", name)
    return collapsed.lower()
# Directory holding the per-release changelog markdown pages, kept next
# to this module in "changelogs/".
CHANGELOGS_DIR = str(Path(__file__).parent / 'changelogs')
# Fail fast if the checkout is missing the changelog directory.
assert Path(CHANGELOGS_DIR).is_dir()
class Package(object):
    """One package entry parsed from a WinPython changelog table row.

    Holds the package name, version, one-line description and project
    URL, and knows how to (de)serialize itself to the Markdown table
    syntax used by the changelog pages.
    """

    # SourceForge Wiki syntax: "[name](url) | version | description"
    PATTERN = r'\[([a-zA-Z\-\:\/\.\_0-9]*)\]\(([^\]\ ]*)\) \| ([^\|]*) \| ([^\|]*)'
    # Google Code Wiki syntax: "[url name] | version | description"
    PATTERN_OLD = r'\[([a-zA-Z\-\:\/\.\_0-9]*) ([^\]\ ]*)\] \| ([^\|]*) \| ([^\|]*)'

    def __init__(self):
        # All fields stay None until from_text() populates them.
        self.name = None
        self.version = None
        self.description = None
        self.url = None

    def __str__(self):
        """Return a short human-readable summary of the package."""
        # f-strings instead of the old %-formatting (the rest of the file
        # already uses f-strings); output is byte-identical.
        return (
            f"{self.name} {self.version}"
            f"\r\n{self.description}\r\nWebsite: {self.url}"
        )

    def from_text(self, text):
        """Parse one changelog table row, trying the legacy syntax first."""
        try:
            # Legacy (Google Code) syntax puts the URL before the name.
            self.url, self.name, self.version, self.description = re.match(
                self.PATTERN_OLD, text
            ).groups()
        except AttributeError:
            # re.match returned None -> fall back to the current syntax.
            self.name, self.url, self.version, self.description = re.match(
                self.PATTERN, text
            ).groups()

    def to_wiki(self):
        """Render this package as a changelog bullet line."""
        return f" * [{self.name}]({self.url}) {self.version} ({self.description})\r\n"

    def upgrade_wiki(self, other):
        """Render an upgrade line "old → new" relative to `other` (older entry)."""
        # Wheels replace '-' with '_' in package names, so compare loosely.
        assert (
            self.name.replace('-', '_').lower()
            == other.name.replace('-', '_').lower()
        )
        return (
            f" * [{self.name}]({self.url}) "
            f"{other.version} → {self.version} ({self.description})\r\n"
        )
class PackageIndex(object):
    """Parsed contents of one WinPython changelog Markdown page.

    Exposes the tools and Python packages listed on the page as two
    name -> Package mappings (`other_packages` and `python_packages`).
    """

    # Matches the "## WinPython-..." heading and captures the version.
    WINPYTHON_PATTERN = (
        r'\#\# WinPython\-*[0-9b-t]* ([0-9\.a-zA-Z]*)'
    )
    # Section headings and table-header lines recognized by the parser.
    TOOLS_LINE = '### Tools'
    PYTHON_PACKAGES_LINE = '### Python packages'
    HEADER_LINE1 = 'Name | Version | Description'
    HEADER_LINE2 = '-----|---------|------------'

    def __init__(
        self,
        version,
        basedir=None,
        flavor='',
        architecture=64,
    ):
        self.version = version
        self.other_packages = {}
        self.python_packages = {}
        self.flavor = flavor
        self.basedir = basedir
        self.architecture = architecture
        # Populate the two package dicts from the changelog file on disk.
        self.from_file(basedir)

    def from_file(self, basedir):
        """Read and parse the changelog page for this version/flavor.

        Pages live in CHANGELOGS_DIR and are named e.g.
        "WinPython-64bit-3.7.4.0.md".
        """
        fname = str(Path(CHANGELOGS_DIR) /
                    f'WinPython{self.flavor}-{self.architecture}bit-{self.version}.md')
        with open(
            fname, 'r'
        ) as fdesc:  # text mode: python3 doesn't like 'rb' here
            text = fdesc.read()
        self.from_text(text)

    def from_text(self, text):
        """Parse the page body, filling other_packages and python_packages."""
        # The heading's version must match what this index was built for.
        version = re.match(
            self.WINPYTHON_PATTERN + self.flavor, text
        ).groups()[0]
        assert version == self.version
        # Flags track which table section the line scanner is inside.
        tools_flag = False
        python_flag = False
        for line in text.splitlines():
            if line:
                # Section headings toggle the active table.
                if line == self.TOOLS_LINE:
                    tools_flag = True
                    continue
                elif line == self.PYTHON_PACKAGES_LINE:
                    tools_flag = False
                    python_flag = True
                    continue
                elif line in (
                    self.HEADER_LINE1,
                    self.HEADER_LINE2,
                    '<details>',
                    '</details>'
                ):
                    # Table headers / fold markers carry no package data.
                    continue
                if tools_flag or python_flag:
                    package = Package()
                    package.from_text(line)
                    if tools_flag:
                        self.other_packages[
                            package.name
                        ] = package
                    else:
                        self.python_packages[
                            package.name
                        ] = package
def diff_package_dicts(dict1_in, dict2_in):
    """Return difference between package dict1 and package dict2"""
    # Wheels may use '-' or '_' interchangeably in names, so re-key both
    # mappings on a normalized (underscore, lower-case) form first.
    dict1 = {key.replace('-', '_').lower(): pkg for key, pkg in dict1_in.items()}
    dict2 = {key.replace('-', '_').lower(): pkg for key, pkg in dict2_in.items()}
    names1 = set(dict1)
    names2 = set(dict2)
    chunks = []
    # New packages: present only in dict2.
    added = sorted(names2 - names1)
    if added:
        chunks.append("New packages:\r\n\r\n")
        for name in added:
            chunks.append(dict2[name].to_wiki())
        chunks.append('\r\n')
    # Upgraded packages: present in both, version changed.
    upgraded_lines = [
        dict2[name].upgrade_wiki(dict1[name])
        for name in sorted(names1 & names2)
        if dict1[name].version != dict2[name].version
    ]
    if upgraded_lines:
        chunks.append(
            "Upgraded packages:\r\n\r\n%s\r\n" % "".join(upgraded_lines)
        )
    # Removed packages: present only in dict1.
    dropped = sorted(names1 - names2)
    if dropped:
        chunks.append("Removed packages:\r\n\r\n")
        for name in dropped:
            chunks.append(dict1[name].to_wiki())
        chunks.append('\r\n')
    return "".join(chunks)
def find_closer_version(
    version1, basedir=None, flavor='', architecture=64
):
    """Return the changelog version immediately preceding `version1`.

    Scans `basedir/bu<flavor>` for files named
    "WinPython<flavor>-<architecture>bit-<version>.(txt|md)" and returns
    the version listed just before `version1` in sorted order (or
    `version1` itself if it is the first one).

    Raises
    ------
    ValueError
        If `version1` has no changelog file in the build directory.
    """
    builddir = str(Path(basedir) / f'bu{flavor}')
    # Compiled pattern instead of a lambda bound to a name; matched once
    # per file instead of twice.
    pattern = re.compile(
        r'WinPython%s-%sbit-([0-9\.]*)\.(txt|md)'
        % (flavor, architecture)
    )
    # Sort the listing: os.listdir order is platform-dependent, and the
    # "previous version" lookup below relies on a stable order.
    versions = [
        match.groups()[0]
        for match in map(pattern.match, sorted(os.listdir(builddir)))
        if match
    ]
    try:
        index = versions.index(version1)
    except ValueError:
        raise ValueError("Unknown version %s" % version1)
    if index == 0:
        print("No version prior to %s" % version1)
        index += 1  # we don't want to fail on this
    return versions[index - 1]
def compare_package_indexes(
    version2,
    version1=None,
    basedir=None,
    flavor='',
    flavor1=None,
    architecture=64,
):
    """Compare two package index Wiki pages.

    Returns the "History of changes" Markdown section describing the
    differences between `version1` (defaults to the closest preceding
    version) and `version2`.
    """
    if version1 is None:
        version1 = find_closer_version(
            version2,
            basedir=basedir,
            flavor=flavor,
            architecture=architecture,
        )
    # The old page may belong to a different flavor.
    flavor1 = flavor1 if flavor1 is not None else flavor
    # Section header, using \r\n line endings throughout.
    text = '\r\n'.join(
        [
            "## History of changes for WinPython-%sbit %s"
            % (architecture, version2 + flavor),
            "",
            "The following changes were made to WinPython-%sbit"
            " distribution since version %s."
            % (architecture, version1 + flavor1),
            "",
            "<details>",
            "",
        ]
    )
    # Parse both changelog pages.
    pi1 = PackageIndex(
        version1,
        basedir=basedir,
        flavor=flavor1,
        architecture=architecture,
    )
    pi2 = PackageIndex(
        version2,
        basedir=basedir,
        flavor=flavor,
        architecture=architecture,
    )
    # Diff the tools table, then the Python packages table; empty diffs
    # omit their section entirely.
    tools_text = diff_package_dicts(
        pi1.other_packages, pi2.other_packages
    )
    if tools_text:
        text += (
            PackageIndex.TOOLS_LINE
            + '\r\n\r\n'
            + tools_text
        )
    py_text = diff_package_dicts(
        pi1.python_packages, pi2.python_packages
    )
    if py_text:
        text += (
            PackageIndex.PYTHON_PACKAGES_LINE
            + '\r\n\r\n'
            + py_text
        )
    text += '\r\n</details>\r\n* * *\r\n'
    return text
def _copy_all_changelogs(
    version, basedir, flavor='', architecture=64
):
    """Copy every changelog of the same major.minor series from
    CHANGELOGS_DIR into the build directory `basedir/bu<flavor>`."""
    # Match on the major.minor prefix only (e.g. "3.7" from "3.7.4.0").
    basever = '.'.join(version.split('.')[:2])
    for name in os.listdir(CHANGELOGS_DIR):
        if re.match(
            r'WinPython%s-%sbit-%s([0-9\.]*)\.(txt|md)'
            % (flavor, architecture, basever),
            name,
        ):
            shutil.copyfile(
                str(Path(CHANGELOGS_DIR) / name),
                str(Path(basedir) / f'bu{flavor}' / name),
            )
def write_changelog(
    version2,
    version1=None,
    basedir=None,
    flavor='',
    release_level='',
    architecture=64,
):
    """Write changelog between version1 and version2 of WinPython.

    The Markdown changelog is generated with
    :func:`compare_package_indexes`, written into the build directory
    ``<basedir>/bu<flavor>`` and then copied into CHANGELOGS_DIR.

    NOTE(review): ``release_level`` is currently unused; it is kept for
    backward compatibility with existing callers.
    """
    _copy_all_changelogs(
        version2,
        basedir,
        flavor=flavor,
        architecture=architecture,
    )
    print(
        'comparing_package_indexes',
        version2,
        basedir,
        flavor,
        architecture,
    )
    text = compare_package_indexes(
        version2,
        version1,
        basedir=basedir,
        flavor=flavor,
        architecture=architecture,
    )
    fname = str(
        Path(basedir)
        / f'bu{flavor}'
        / f'WinPython{flavor}-{architecture}bit-{version2}_History.md'
    )
    # utf-8-sig writes a BOM, which Python 3 needs here for downstream tools.
    with open(
        fname, 'w', encoding='utf-8-sig'
    ) as fdesc:
        fdesc.write(text)
    # Keep a copy of the generated changelog in winpython/changelogs.
    shutil.copyfile(
        fname, str(Path(CHANGELOGS_DIR) / Path(fname).name)
    )
def test_parse_package_index_wiki(
    version, basedir=None, flavor='', architecture=64
):
    """Parse the package index Wiki page and print its contents."""
    index = PackageIndex(
        version,
        basedir=basedir,
        flavor=flavor,
        architecture=architecture,
    )
    utils.print_box("WinPython %s:" % index.version)
    # Dump the non-Python tools first, then the Python packages.
    utils.print_box("Tools:")
    for pkg in index.other_packages.values():
        print(pkg)
    print('')
    utils.print_box("Python packages:")
    for pkg in index.python_packages.values():
        print(pkg)
    print('')
def test_compare(
    basedir, version2, version1, architecture=64
):
    """Print the package-index comparison between two versions.

    Bug fix: the arguments were previously forwarded positionally as
    ``compare_package_indexes(basedir, version2, version1, ...)``,
    which put *basedir* into the ``version2`` slot of
    ``compare_package_indexes(version2, version1=None, basedir=None,
    ...)``.  They are now passed by keyword in the correct order.
    """
    print(
        compare_package_indexes(
            version2,
            version1,
            basedir=basedir,
            architecture=architecture,
        )
    )
if __name__ == '__main__':
    # Example invocations with hard-coded local build paths; adjust the
    # versions, basedir and flavor to match the distribution being built.
    #
    # Print the changelog between two WinPython 'Zero' 32-bit releases.
    print(
        compare_package_indexes(
            version2='3.7.4.0',
            version1='3.7.2.0',
            basedir=r'C:\WinP\bd37',
            flavor='Zero',
            flavor1='Zero',
            architecture=32
        ))
    # Generate and store the changelog for the 64-bit 'Ps2' flavor.
    write_changelog(
        version2='3.7.4.0',
        version1='3.7.2.0',
        basedir=r'C:\WinP\bd37',
        flavor='Ps2',
        architecture=64
    )
    # test_parse_package_index_wiki('2.7.3.3')
    # print(compare_package_indexes('2.7.3.3', '2.7.3.1'))
    # write_changelog('2.7.4.1', '2.7.4.0')
    # write_changelog('3.3.0.0beta2', '3.3.0.0beta1')
| mit | c951e5d677580f1cc3172526d6c09504 | 26.507042 | 84 | 0.509217 | 3.709402 | false | false | false | false |
probml/pyprobml | deprecated/scripts/broadcasting_fig.py | 1 | 8003 | # Modified from Jake VanderPlas' code
# https://jakevdp.github.io/PythonDataScienceHandbook/06.00-figure-code.html#Broadcasting
# Adapted from astroML: see http://www.astroml.org/book_figures/appendix/fig_broadcast_visual.html
import superimport
#import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
#------------------------------------------------------------
# Draw a figure and axis with no boundary
fig = plt.figure(figsize=(6, 4.5), facecolor='w')
ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
def draw_cube(ax, xy, size, depth=0.4,
edges=None, label=None, label_kwargs=None, **kwargs):
"""draw and label a cube. edges is a list of numbers between
1 and 12, specifying which of the 12 cube edges to draw"""
if edges is None:
edges = range(1, 13)
x, y = xy
if 1 in edges:
ax.plot([x, x + size],
[y + size, y + size], **kwargs)
if 2 in edges:
ax.plot([x + size, x + size],
[y, y + size], **kwargs)
if 3 in edges:
ax.plot([x, x + size],
[y, y], **kwargs)
if 4 in edges:
ax.plot([x, x],
[y, y + size], **kwargs)
if 5 in edges:
ax.plot([x, x + depth],
[y + size, y + depth + size], **kwargs)
if 6 in edges:
ax.plot([x + size, x + size + depth],
[y + size, y + depth + size], **kwargs)
if 7 in edges:
ax.plot([x + size, x + size + depth],
[y, y + depth], **kwargs)
if 8 in edges:
ax.plot([x, x + depth],
[y, y + depth], **kwargs)
if 9 in edges:
ax.plot([x + depth, x + depth + size],
[y + depth + size, y + depth + size], **kwargs)
if 10 in edges:
ax.plot([x + depth + size, x + depth + size],
[y + depth, y + depth + size], **kwargs)
if 11 in edges:
ax.plot([x + depth, x + depth + size],
[y + depth, y + depth], **kwargs)
if 12 in edges:
ax.plot([x + depth, x + depth],
[y + depth, y + depth + size], **kwargs)
if label:
if label_kwargs is None:
label_kwargs = {}
ax.text(x + 0.5 * size, y + 0.5 * size, label,
ha='center', va='center', **label_kwargs)
solid = dict(c='black', ls='-', lw=1,
label_kwargs=dict(color='k'))
dotted = dict(c='black', ls='-', lw=0.5, alpha=0.5,
label_kwargs=dict(color='gray'))
depth = 0.3
#------------------------------------------------------------
# Draw top operation: vector plus scalar
draw_cube(ax, (1, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (2, 10), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (3, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 10), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '5', **solid)
draw_cube(ax, (7, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (8, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '5', **dotted)
draw_cube(ax, (12, 10), 1, depth, [1, 2, 3, 4, 5, 6, 9], '5', **solid)
draw_cube(ax, (13, 10), 1, depth, [1, 2, 3, 6, 9], '6', **solid)
draw_cube(ax, (14, 10), 1, depth, [1, 2, 3, 6, 7, 9, 10], '7', **solid)
ax.text(5, 10.5, '+', size=12, ha='center', va='center')
ax.text(10.5, 10.5, '=', size=12, ha='center', va='center')
ax.text(1, 11.5, r'${\tt np.arange(3) + 5}$',
size=12, ha='left', va='bottom')
#------------------------------------------------------------
# Draw middle operation: matrix plus vector
# first block
draw_cube(ax, (1, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
draw_cube(ax, (2, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (3, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '1', **solid)
draw_cube(ax, (1, 6.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (2, 6.5), 1, depth, [2, 3], '1', **solid)
draw_cube(ax, (3, 6.5), 1, depth, [2, 3, 7, 10], '1', **solid)
draw_cube(ax, (1, 5.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (2, 5.5), 1, depth, [2, 3], '1', **solid)
draw_cube(ax, (3, 5.5), 1, depth, [2, 3, 7, 10], '1', **solid)
# second block
draw_cube(ax, (6, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (7, 7.5), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (8, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 6.5), 1, depth, range(2, 13), '0', **dotted)
draw_cube(ax, (7, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
draw_cube(ax, (8, 6.5), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
draw_cube(ax, (6, 5.5), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
draw_cube(ax, (7, 5.5), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (8, 5.5), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# third block
draw_cube(ax, (12, 7.5), 1, depth, [1, 2, 3, 4, 5, 6, 9], '1', **solid)
draw_cube(ax, (13, 7.5), 1, depth, [1, 2, 3, 6, 9], '2', **solid)
draw_cube(ax, (14, 7.5), 1, depth, [1, 2, 3, 6, 7, 9, 10], '3', **solid)
draw_cube(ax, (12, 6.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 6.5), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 6.5), 1, depth, [2, 3, 7, 10], '3', **solid)
draw_cube(ax, (12, 5.5), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 5.5), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 5.5), 1, depth, [2, 3, 7, 10], '3', **solid)
ax.text(5, 7.0, '+', size=12, ha='center', va='center')
ax.text(10.5, 7.0, '=', size=12, ha='center', va='center')
ax.text(1, 9.0, r'${\tt np.ones((3,\, 3)) + np.arange(3)}$',
size=12, ha='left', va='bottom')
#------------------------------------------------------------
# Draw bottom operation: vector plus vector, double broadcast
# first block
draw_cube(ax, (1, 3), 1, depth, [1, 2, 3, 4, 5, 6, 7, 9, 10], '0', **solid)
draw_cube(ax, (1, 2), 1, depth, [2, 3, 4, 7, 10], '1', **solid)
draw_cube(ax, (1, 1), 1, depth, [2, 3, 4, 7, 10], '2', **solid)
draw_cube(ax, (2, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
draw_cube(ax, (2, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (2, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
draw_cube(ax, (3, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10, 11], '0', **dotted)
draw_cube(ax, (3, 2), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (3, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# second block
draw_cube(ax, (6, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (7, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (8, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (6, 2), 1, depth, range(2, 13), '0', **dotted)
draw_cube(ax, (7, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '1', **dotted)
draw_cube(ax, (8, 2), 1, depth, [2, 3, 6, 7, 9, 10, 11], '2', **dotted)
draw_cube(ax, (6, 1), 1, depth, [2, 3, 4, 7, 8, 10, 11, 12], '0', **dotted)
draw_cube(ax, (7, 1), 1, depth, [2, 3, 7, 10, 11], '1', **dotted)
draw_cube(ax, (8, 1), 1, depth, [2, 3, 7, 10, 11], '2', **dotted)
# third block
draw_cube(ax, (12, 3), 1, depth, [1, 2, 3, 4, 5, 6, 9], '0', **solid)
draw_cube(ax, (13, 3), 1, depth, [1, 2, 3, 6, 9], '1', **solid)
draw_cube(ax, (14, 3), 1, depth, [1, 2, 3, 6, 7, 9, 10], '2', **solid)
draw_cube(ax, (12, 2), 1, depth, [2, 3, 4], '1', **solid)
draw_cube(ax, (13, 2), 1, depth, [2, 3], '2', **solid)
draw_cube(ax, (14, 2), 1, depth, [2, 3, 7, 10], '3', **solid)
draw_cube(ax, (12, 1), 1, depth, [2, 3, 4], '2', **solid)
draw_cube(ax, (13, 1), 1, depth, [2, 3], '3', **solid)
draw_cube(ax, (14, 1), 1, depth, [2, 3, 7, 10], '4', **solid)
ax.text(5, 2.5, '+', size=12, ha='center', va='center')
ax.text(10.5, 2.5, '=', size=12, ha='center', va='center')
ax.text(1, 4.5, r'${\tt np.arange(3).reshape((3,\, 1)) + np.arange(3)}$',
ha='left', size=12, va='bottom')
ax.set_xlim(0, 16)
ax.set_ylim(0.5, 12.5)
save_fig('broadcasting.pdf')
save_fig('broadcasting.png')
plt.show()
| mit | a052f77a58ccc307dc07db3665cde384 | 39.21608 | 98 | 0.488192 | 2.360767 | false | false | false | false |
probml/pyprobml | deprecated/scripts/logreg_poly_demo.py | 1 | 3533 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 10:08:08 2020
@author: kpmurphy
"""
# Fit logistic regression models to 2d data using polynomial features
import superimport
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression
import matplotlib.colors as mcol
import os
import pyprobml_utils as pml
def plot_data(ax, X, y, is_train=True):
X0 = X[:,0]; X1 = X[:,1]
colors = [ 'red', 'blue']
if is_train:
markers = ['x', '*']
else:
markers = ['o', 's']
for x0, x1, cls in zip(X0, X1, y):
color = colors[int(cls)-1]
marker = markers[int(cls)-1]
ax.scatter(x0, x1, marker=marker, color=color)
ax.set_ylim(-2.75,2.75)
def plot_predictions(ax, xx, yy, transformer, model):
grid = np.c_[xx.ravel(), yy.ravel()]
grid2 = transformer.transform(grid)[:, 1:]
Z = model.predict(grid2).reshape(xx.shape)
ax.pcolormesh(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.1)
#plt.axis('off')
def make_data(ntrain, ntest):
n = ntrain + ntest
X, y = make_classification(
n_samples=n, n_features=2, n_redundant=0,
n_classes=2, n_clusters_per_class=2,
class_sep=0.1, random_state=1)
X0, y0 = make_blobs(n_samples=[n, n], n_features=2,
cluster_std=2, random_state=1)
Xtrain = X[:ntrain, :]; ytrain = y[:ntrain]
Xtest = X[ntrain:, :]; ytest = y[ntrain:]
xmin = np.min(X[:,0]); xmax = np.max(X[:,0]);
ymin = np.min(X[:,1]); ymax = np.max(X[:,1]);
xx, yy = np.meshgrid(np.linspace(xmin, xmax, n), np.linspace(ymin, ymax, 200))
return Xtrain, ytrain, Xtest, ytest, xx, yy
ntrain = 50; ntest = 1000;
Xtrain, ytrain, Xtest, ytest, xx, yy = make_data(ntrain, ntest)
### Try different strngth regularizers
degree = 4
# C =1/lambda, so large C is large variance is small regularization
C_list = np.logspace(0, 5, 7)
#C_list = [1, 10, 100, 200, 500, 1000]
plot_list = C_list
err_train_list = []
err_test_list = []
w_list = []
for i, C in enumerate(C_list):
transformer = PolynomialFeatures(degree)
name = 'Reg{:d}-Degree{}'.format(int(C), degree)
XXtrain = transformer.fit_transform(Xtrain)[:, 1:] # skip the first column of 1s
model = LogisticRegression(C=C)
model = model.fit(XXtrain, ytrain)
w = model.coef_[0]
w_list.append(w)
ytrain_pred = model.predict(XXtrain)
nerrors_train = np.sum(ytrain_pred != ytrain)
err_train_list.append(nerrors_train / ntrain)
XXtest = transformer.fit_transform(Xtest)[:, 1:] # skip the first column of 1s
ytest_pred = model.predict(XXtest)
nerrors_test = np.sum(ytest_pred != ytest)
err_test_list.append(nerrors_test / ntest)
if C in plot_list:
fig, ax = plt.subplots()
plot_predictions(ax, xx, yy, transformer, model)
plot_data(ax, Xtrain, ytrain, is_train=True)
#plot_data(ax, Xtest, ytest, is_train=False)
ax.set_title(name)
fname = 'logreg_poly_surface-{}.png'.format(name)
pml.save_fig(fname)
plt.draw()
plt.figure()
plt.plot(C_list, err_train_list, 'x-', label='train')
plt.plot(C_list, err_test_list, 'o-', label='test')
plt.legend()
plt.xscale('log')
plt.xlabel('Inverse regularization')
plt.ylabel('error rate')
pml.save_fig('logreg_poly_vs_reg-Degree{}.pdf'.format(degree))
plt.show()
| mit | 578f7b3be9bd77bb60546b6424e4510a | 31.412844 | 85 | 0.626946 | 2.93195 | false | true | false | false |
probml/pyprobml | deprecated/scripts/gpc_demo_2d_sklearn.py | 1 | 1888 | # Gaussian Process Classifier demo
# Author: Drishtii@
# Based on
# https://github.com/probml/pmtk3/blob/master/demos/gpcDemo2d.m
# See also gpc_demo_2d_pytorch for a Gpytorch version of this demo.
import superimport
import pyprobml_utils as pml
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# make synthetic data
np.random.seed(9)
n1=80
n2=40
S1 = np.eye(2)
S2 = np.array([[1, 0.95], [0.95, 1]])
m1 = np.array([0.75, 0]).reshape(-1, 1)
m2 = np.array([-0.75, 0])
xx = np.repeat(m1, n1).reshape(2, n1)
yy = np.repeat(m2, n2).reshape(2, n2)
x1 = np.linalg.cholesky(S1).T @ np.random.randn(2,n1) + xx
x2 = np.linalg.cholesky(S2).T @ np.random.randn(2,n2) + yy
x = np.concatenate([x1.T, x2.T])
y1 = -np.ones(n1).reshape(-1, 1)
y2 = np.ones(n2).reshape(-1, 1)
y = np.concatenate([y1, y2])
q = np.linspace(-4, 4, 81)
r = np.linspace(-4, 4, 81)
t1, t2 = np.meshgrid(q, r)
t = np.hstack([t1.reshape(-1, 1), t2.reshape(-1, 1)])
def g(x):
return 5. - x[:, 1] - .5 * x[:, 0] ** 2
y_true = g(t)
y_true = y_true.reshape(81, 81)
def make_plot(gp):
plt.figure()
y_prob = gp.predict_proba(t)[:, 1]
y_prob = y_prob.reshape(81, 81)
plt.scatter(x1[0, :], x1[1, :], marker='o')
plt.scatter(x2[0, :], x2[1, :], marker='+')
plt.contour(t1, t2, y_prob, levels = np.linspace(0.1, 0.9, 9))
plt.contour(t1, t2, y_prob, [0.5], colors=['red'])
plt.title(gp.kernel_)
# GP without fitting the kernel hyper-parameters
# Note that 10.0 ~- 3.16**2
kernel = 10.0 * RBF(length_scale=0.5)
gp1 = GaussianProcessClassifier(kernel=kernel, optimizer=None)
gp1.fit(x, y)
make_plot(gp1)
pml.savefig('gpc2d_init_params.pdf')
# GP where we optimize the kernel parameters
gp2 = GaussianProcessClassifier(kernel=kernel)
gp2.fit(x, y)
make_plot(gp2)
pml.savefig('gpc2d_learned_params.pdf') | mit | b4fdac704032fb361069c249a69226b4 | 28.061538 | 67 | 0.666314 | 2.371859 | false | false | false | false |
probml/pyprobml | deprecated/scripts/colormap_turbo.py | 1 | 14343 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 12:22:42 2020
@author: kpmurphy
"""
# https://gist.github.com/FedeMiorelli/640bbc66b2038a14802729e609abfe89
# This script registers the "turbo" colormap to matplotlib, and the reversed version as "turbo_r"
# Reference: https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html
import superimport
import numpy as np
import matplotlib.pyplot as plt
turbo_colormap_data = np.array(
[[0.18995,0.07176,0.23217],
[0.19483,0.08339,0.26149],
[0.19956,0.09498,0.29024],
[0.20415,0.10652,0.31844],
[0.20860,0.11802,0.34607],
[0.21291,0.12947,0.37314],
[0.21708,0.14087,0.39964],
[0.22111,0.15223,0.42558],
[0.22500,0.16354,0.45096],
[0.22875,0.17481,0.47578],
[0.23236,0.18603,0.50004],
[0.23582,0.19720,0.52373],
[0.23915,0.20833,0.54686],
[0.24234,0.21941,0.56942],
[0.24539,0.23044,0.59142],
[0.24830,0.24143,0.61286],
[0.25107,0.25237,0.63374],
[0.25369,0.26327,0.65406],
[0.25618,0.27412,0.67381],
[0.25853,0.28492,0.69300],
[0.26074,0.29568,0.71162],
[0.26280,0.30639,0.72968],
[0.26473,0.31706,0.74718],
[0.26652,0.32768,0.76412],
[0.26816,0.33825,0.78050],
[0.26967,0.34878,0.79631],
[0.27103,0.35926,0.81156],
[0.27226,0.36970,0.82624],
[0.27334,0.38008,0.84037],
[0.27429,0.39043,0.85393],
[0.27509,0.40072,0.86692],
[0.27576,0.41097,0.87936],
[0.27628,0.42118,0.89123],
[0.27667,0.43134,0.90254],
[0.27691,0.44145,0.91328],
[0.27701,0.45152,0.92347],
[0.27698,0.46153,0.93309],
[0.27680,0.47151,0.94214],
[0.27648,0.48144,0.95064],
[0.27603,0.49132,0.95857],
[0.27543,0.50115,0.96594],
[0.27469,0.51094,0.97275],
[0.27381,0.52069,0.97899],
[0.27273,0.53040,0.98461],
[0.27106,0.54015,0.98930],
[0.26878,0.54995,0.99303],
[0.26592,0.55979,0.99583],
[0.26252,0.56967,0.99773],
[0.25862,0.57958,0.99876],
[0.25425,0.58950,0.99896],
[0.24946,0.59943,0.99835],
[0.24427,0.60937,0.99697],
[0.23874,0.61931,0.99485],
[0.23288,0.62923,0.99202],
[0.22676,0.63913,0.98851],
[0.22039,0.64901,0.98436],
[0.21382,0.65886,0.97959],
[0.20708,0.66866,0.97423],
[0.20021,0.67842,0.96833],
[0.19326,0.68812,0.96190],
[0.18625,0.69775,0.95498],
[0.17923,0.70732,0.94761],
[0.17223,0.71680,0.93981],
[0.16529,0.72620,0.93161],
[0.15844,0.73551,0.92305],
[0.15173,0.74472,0.91416],
[0.14519,0.75381,0.90496],
[0.13886,0.76279,0.89550],
[0.13278,0.77165,0.88580],
[0.12698,0.78037,0.87590],
[0.12151,0.78896,0.86581],
[0.11639,0.79740,0.85559],
[0.11167,0.80569,0.84525],
[0.10738,0.81381,0.83484],
[0.10357,0.82177,0.82437],
[0.10026,0.82955,0.81389],
[0.09750,0.83714,0.80342],
[0.09532,0.84455,0.79299],
[0.09377,0.85175,0.78264],
[0.09287,0.85875,0.77240],
[0.09267,0.86554,0.76230],
[0.09320,0.87211,0.75237],
[0.09451,0.87844,0.74265],
[0.09662,0.88454,0.73316],
[0.09958,0.89040,0.72393],
[0.10342,0.89600,0.71500],
[0.10815,0.90142,0.70599],
[0.11374,0.90673,0.69651],
[0.12014,0.91193,0.68660],
[0.12733,0.91701,0.67627],
[0.13526,0.92197,0.66556],
[0.14391,0.92680,0.65448],
[0.15323,0.93151,0.64308],
[0.16319,0.93609,0.63137],
[0.17377,0.94053,0.61938],
[0.18491,0.94484,0.60713],
[0.19659,0.94901,0.59466],
[0.20877,0.95304,0.58199],
[0.22142,0.95692,0.56914],
[0.23449,0.96065,0.55614],
[0.24797,0.96423,0.54303],
[0.26180,0.96765,0.52981],
[0.27597,0.97092,0.51653],
[0.29042,0.97403,0.50321],
[0.30513,0.97697,0.48987],
[0.32006,0.97974,0.47654],
[0.33517,0.98234,0.46325],
[0.35043,0.98477,0.45002],
[0.36581,0.98702,0.43688],
[0.38127,0.98909,0.42386],
[0.39678,0.99098,0.41098],
[0.41229,0.99268,0.39826],
[0.42778,0.99419,0.38575],
[0.44321,0.99551,0.37345],
[0.45854,0.99663,0.36140],
[0.47375,0.99755,0.34963],
[0.48879,0.99828,0.33816],
[0.50362,0.99879,0.32701],
[0.51822,0.99910,0.31622],
[0.53255,0.99919,0.30581],
[0.54658,0.99907,0.29581],
[0.56026,0.99873,0.28623],
[0.57357,0.99817,0.27712],
[0.58646,0.99739,0.26849],
[0.59891,0.99638,0.26038],
[0.61088,0.99514,0.25280],
[0.62233,0.99366,0.24579],
[0.63323,0.99195,0.23937],
[0.64362,0.98999,0.23356],
[0.65394,0.98775,0.22835],
[0.66428,0.98524,0.22370],
[0.67462,0.98246,0.21960],
[0.68494,0.97941,0.21602],
[0.69525,0.97610,0.21294],
[0.70553,0.97255,0.21032],
[0.71577,0.96875,0.20815],
[0.72596,0.96470,0.20640],
[0.73610,0.96043,0.20504],
[0.74617,0.95593,0.20406],
[0.75617,0.95121,0.20343],
[0.76608,0.94627,0.20311],
[0.77591,0.94113,0.20310],
[0.78563,0.93579,0.20336],
[0.79524,0.93025,0.20386],
[0.80473,0.92452,0.20459],
[0.81410,0.91861,0.20552],
[0.82333,0.91253,0.20663],
[0.83241,0.90627,0.20788],
[0.84133,0.89986,0.20926],
[0.85010,0.89328,0.21074],
[0.85868,0.88655,0.21230],
[0.86709,0.87968,0.21391],
[0.87530,0.87267,0.21555],
[0.88331,0.86553,0.21719],
[0.89112,0.85826,0.21880],
[0.89870,0.85087,0.22038],
[0.90605,0.84337,0.22188],
[0.91317,0.83576,0.22328],
[0.92004,0.82806,0.22456],
[0.92666,0.82025,0.22570],
[0.93301,0.81236,0.22667],
[0.93909,0.80439,0.22744],
[0.94489,0.79634,0.22800],
[0.95039,0.78823,0.22831],
[0.95560,0.78005,0.22836],
[0.96049,0.77181,0.22811],
[0.96507,0.76352,0.22754],
[0.96931,0.75519,0.22663],
[0.97323,0.74682,0.22536],
[0.97679,0.73842,0.22369],
[0.98000,0.73000,0.22161],
[0.98289,0.72140,0.21918],
[0.98549,0.71250,0.21650],
[0.98781,0.70330,0.21358],
[0.98986,0.69382,0.21043],
[0.99163,0.68408,0.20706],
[0.99314,0.67408,0.20348],
[0.99438,0.66386,0.19971],
[0.99535,0.65341,0.19577],
[0.99607,0.64277,0.19165],
[0.99654,0.63193,0.18738],
[0.99675,0.62093,0.18297],
[0.99672,0.60977,0.17842],
[0.99644,0.59846,0.17376],
[0.99593,0.58703,0.16899],
[0.99517,0.57549,0.16412],
[0.99419,0.56386,0.15918],
[0.99297,0.55214,0.15417],
[0.99153,0.54036,0.14910],
[0.98987,0.52854,0.14398],
[0.98799,0.51667,0.13883],
[0.98590,0.50479,0.13367],
[0.98360,0.49291,0.12849],
[0.98108,0.48104,0.12332],
[0.97837,0.46920,0.11817],
[0.97545,0.45740,0.11305],
[0.97234,0.44565,0.10797],
[0.96904,0.43399,0.10294],
[0.96555,0.42241,0.09798],
[0.96187,0.41093,0.09310],
[0.95801,0.39958,0.08831],
[0.95398,0.38836,0.08362],
[0.94977,0.37729,0.07905],
[0.94538,0.36638,0.07461],
[0.94084,0.35566,0.07031],
[0.93612,0.34513,0.06616],
[0.93125,0.33482,0.06218],
[0.92623,0.32473,0.05837],
[0.92105,0.31489,0.05475],
[0.91572,0.30530,0.05134],
[0.91024,0.29599,0.04814],
[0.90463,0.28696,0.04516],
[0.89888,0.27824,0.04243],
[0.89298,0.26981,0.03993],
[0.88691,0.26152,0.03753],
[0.88066,0.25334,0.03521],
[0.87422,0.24526,0.03297],
[0.86760,0.23730,0.03082],
[0.86079,0.22945,0.02875],
[0.85380,0.22170,0.02677],
[0.84662,0.21407,0.02487],
[0.83926,0.20654,0.02305],
[0.83172,0.19912,0.02131],
[0.82399,0.19182,0.01966],
[0.81608,0.18462,0.01809],
[0.80799,0.17753,0.01660],
[0.79971,0.17055,0.01520],
[0.79125,0.16368,0.01387],
[0.78260,0.15693,0.01264],
[0.77377,0.15028,0.01148],
[0.76476,0.14374,0.01041],
[0.75556,0.13731,0.00942],
[0.74617,0.13098,0.00851],
[0.73661,0.12477,0.00769],
[0.72686,0.11867,0.00695],
[0.71692,0.11268,0.00629],
[0.70680,0.10680,0.00571],
[0.69650,0.10102,0.00522],
[0.68602,0.09536,0.00481],
[0.67535,0.08980,0.00449],
[0.66449,0.08436,0.00424],
[0.65345,0.07902,0.00408],
[0.64223,0.07380,0.00401],
[0.63082,0.06868,0.00401],
[0.61923,0.06367,0.00410],
[0.60746,0.05878,0.00427],
[0.59550,0.05399,0.00453],
[0.58336,0.04931,0.00486],
[0.57103,0.04474,0.00529],
[0.55852,0.04028,0.00579],
[0.54583,0.03593,0.00638],
[0.53295,0.03169,0.00705],
[0.51989,0.02756,0.00780],
[0.50664,0.02354,0.00863],
[0.49321,0.01963,0.00955],
[0.47960,0.01583,0.01055]])
def RGBToPyCmap(rgbdata):
nsteps = rgbdata.shape[0]
stepaxis = np.linspace(0, 1, nsteps)
rdata=[]; gdata=[]; bdata=[]
for istep in range(nsteps):
r = rgbdata[istep,0]
g = rgbdata[istep,1]
b = rgbdata[istep,2]
rdata.append((stepaxis[istep], r, r))
gdata.append((stepaxis[istep], g, g))
bdata.append((stepaxis[istep], b, b))
mpl_data = {'red': rdata,
'green': gdata,
'blue': bdata}
return mpl_data
mpl_data = RGBToPyCmap(turbo_colormap_data)
plt.register_cmap(name='turbo', data=mpl_data, lut=turbo_colormap_data.shape[0])
mpl_data_r = RGBToPyCmap(turbo_colormap_data[::-1,:])
plt.register_cmap(name='turbo_r', data=mpl_data_r, lut=turbo_colormap_data.shape[0])
def demo():
XX, YY = np.meshgrid(np.linspace(0,1,100), np.linspace(0,1,100))
ZZ = np.sqrt(XX**2 + YY**2)
plt.figure()
plt.imshow(ZZ, cmap='turbo')
plt.colorbar()
plt.figure()
plt.imshow(ZZ, cmap='turbo_r')
plt.colorbar()
plt.show()
demo() | mit | 9dad1854549ec8fcc791ac6367dc38f4 | 43.685358 | 98 | 0.389528 | 2.902854 | false | false | false | false |
probml/pyprobml | deprecated/scripts/kmeans_yeast_demo.py | 1 | 1414 | import superimport
from scipy.io import loadmat
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import requests
from io import BytesIO
url = 'https://github.com/probml/probml-data/blob/main/data/yeastData310.mat?raw=true'
response = requests.get(url)
rawdata = BytesIO(response.content)
data = loadmat(rawdata) # dictionary containing 'X', 'genes', 'times'
X = data['X']
# Cluster yeast data using Kmeans
kmeans = KMeans(n_clusters=16, random_state=0, algorithm='full').fit(X)
times = data['times']
X = X.transpose()
labels = kmeans.labels_
clu_cen = kmeans.cluster_centers_
clusters = [[] for i in range(0, 16)]
for (i, l) in enumerate(labels):
clusters[l].append(i)
times = times.reshape((7,))
# Visualizing all the time series assigned to each cluster
plt.figure()
for l in range(0, 16):
plt.subplot(4, 4, l + 1)
if clusters[l] != []:
plt.plot(times, X[:, clusters[l]])
plt.suptitle("K-Means Clustering of Profiles")
pml.savefig("yeastKmeans16.pdf")
plt.show()
# Visualizing the 16 cluster centers as prototypical time series.
plt.figure()
for l in range(0, 16):
plt.subplot(4, 4, l + 1).axis('off')
plt.plot(times, clu_cen[l, :])
plt.suptitle("K-Means centroids")
pml.savefig("clusterYeastKmeansCentroids16.pdf")
plt.show()
| mit | 75303f1c0c5ee1e2cd6cad351fce62f4 | 24.709091 | 86 | 0.720651 | 3.034335 | false | false | false | false |
probml/pyprobml | deprecated/scripts/gpr_demo_marglik.py | 1 | 4830 | # Example of a Gaussian Process Regression with multiple local minima
# in the marginal log-likelihood as a function of the hyperparameters
# Based on: https://github.com/probml/pmtk3/blob/master/demos/gprDemoMarglik.m
# Authors: Drishti Patel & Gerardo Durán-Martín
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from numpy.linalg import inv, slogdet
from scipy.optimize import minimize
def k(u, v, sigma_f, l=1):
return sigma_f ** 2 * np.exp(-(u - v) ** 2 / (2 * l ** 2))
def gp_predictive_post(xstar, x, y, k, sigma_y, *args, **kwargs):
"""
Compute predictive distribution of a 1D-Gaussian Process for
regression
Parameters
----------
xstar: array(nt, 1)
Values to perform inference on
x: array(n, 1)
Training independent variables
y: array(n, 1)
Training dependent variables
k: function
Kernel function to evaluate the GP
sigma_y: float
data-noise term
*args: additional arguments of k
**kwargs: additional keyword-arguments of k
Returns
-------
* array(nt, 1):
Array of predicted (mean) values
* array(nt, nt):
Posterior covariance matrix
"""
n, _ = x.shape
kstar = k(x, xstar.T, *args, **kwargs)
Kxx = k(x, x.T, *args) + sigma_y ** 2 * np.eye(n)
kxx_star = k(xstar, xstar.T, *args, **kwargs)
Kxx_inv = inv(Kxx)
ystar = kstar.T @ Kxx_inv @ y
Sigma_post = kxx_star - kstar.T @ Kxx_inv @ kstar
return ystar, Sigma_post
def log_likelihood(x, y, sigma_f, l, sigma_y):
"""
Compute marginal log-likelihood of a regression GP
with rbf kernel
Parameters
----------
x: array(n, 1)
Training independent variables
y: array(n, 1)
Training dependent variables
sigma_f: float
Vertical-scale parameter
l: float
Horizontal-scale parameter
sigma_y: float
data noise
Returns
-------
* float:
Marginal log-likelihood as the specified hyperparameters
"""
n, _ = x.shape
x = x / np.exp(l)
Kxx = k(x, x.T, sigma_f) + np.exp(2 * sigma_y) * np.eye(n)
_, DKxx = slogdet(Kxx)
l = -1/2 * (y.T @ inv(Kxx) @ y + DKxx + n * np.log(2 * np.pi))
return l.item()
def plot_gp_pred(x, y, xstar, k, sigma_f, l, sigma_y, ax):
ystar, Sigma_post = gp_predictive_post(xstar, x, y, k, sigma_y, sigma_f, l)
upper_bound = ystar.ravel() + 2 * np.sqrt(np.diag(Sigma_post))
lower_bound = ystar.ravel() - 2 * np.sqrt(np.diag(Sigma_post))
ax.scatter(x, y, marker="+", s=100, c="black")
ax.plot(xstar, ystar, c="black")
ax.fill_between(xstar.ravel(), lower_bound, upper_bound, color="tab:gray", alpha=0.3, edgecolor="none")
ax.set_xlim(-7.5, 7.5)
ax.set_ylim(-2, 2.5)
def plot_marginal_likelihood_surface(x, y, sigma_f, l_space, sigma_y_space, ax, levels=None):
P = np.stack(np.meshgrid(l_space, sigma_y_space), axis=0)
Z = np.apply_along_axis(lambda p: log_likelihood(x, y, sigma_f, *p), 0, P)
ax.contour(*np.exp(P), Z, levels=levels)
ax.set_xlabel("characteristic length scale")
ax.set_ylabel("noise standard deviation")
ax.set_xscale("log")
ax.set_yscale("log")
if __name__ == "__main__":
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
sigma_f=1.0
x = np.array([-1.3089, 6.7612, 1.0553, -1.1734, -2.9339, 7.2530, -6.5843])[:, None]
y = np.array([1.6218, 1.8558, 0.4102, 1.2526, -0.0133, 1.6380, 0.2189])[:, None]
xstar = np.linspace(-7.5, 7.5, 201)
ngrid = 41
l_space = np.linspace(np.log(0.5), np.log(80), ngrid)
sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)
P = np.stack(np.meshgrid(l_space, sigma_y_space), axis=0)
configs = [(1.0, 0.2), (10, 0.8)]
fig, ax = plt.subplots()
plot_gp_pred(x, y, xstar, k, sigma_f, *configs[0], ax)
pml.savefig("gpr_config0.pdf")
fig, ax = plt.subplots()
plot_gp_pred(x, y, xstar, k, sigma_f, *configs[1], ax)
pml.savefig("gpr_config1.pdf")
ngrid = 41
w01 = np.array([np.log(1), np.log(0.1)])
w02 = np.array([np.log(10), np.log(0.8)])
s0 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w01)
s1 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w02)
levels = -np.array([8.3, 8.5, 8.9, 9.3, 9.8, 11.5, 15])[::-1]
l_space = np.linspace(np.log(0.5), np.log(80), ngrid)
sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)
fig, ax = plt.subplots()
plot_marginal_likelihood_surface(x, y, sigma_f, l_space, sigma_y_space, ax, levels=levels)
plt.scatter(*np.exp(s0.x), marker="+", s=100, c="tab:blue")
plt.scatter(*np.exp(s1.x), marker="+", s=100, c="tab:blue")
pml.savefig("gpr_marginal_likelihood.pdf")
plt.show()
| mit | 5da4b8ffd62a623af68fefb7432f8649 | 32.068493 | 107 | 0.603355 | 2.741624 | false | false | false | false |
probml/pyprobml | deprecated/scripts/mix_gauss_mle_vs_map.py | 1 | 2735 | # Demonstrate failure of MLE for GMMs in high-D case, whereas MAP works
# Based on: https://github.com/probml/pmtk3/blob/master/demos/mixGaussMLvsMAP.m
# Author: Gerardo Durán-Martín
import superimport
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randn, seed
from numpy.linalg import cholesky, LinAlgError
import pyprobml_utils as pml
import gmm_lib
def fill_cov(S, dim):
m, m = S.shape
S_eye = np.identity(dim - m)
S_fill = np.zeros((m, dim - m))
S_fill_left = np.r_[S_fill, S_eye]
S_final = np.r_[S, S_fill.T]
S_final = np.c_[S_final, S_fill_left]
return S_final
def attempt_em_fit(X, k, pi, Sigma, n_attempts=5):
N, M = X.shape
eta = M + 2
n_success_ml = 0
n_success_map = 0
S = X.std(axis=0)
S = np.diag(S ** 2) / (k ** (1 / M))
for n in range(n_attempts):
mu = randn(k, dim)
try:
gmm_lib.apply_em(X, pi, mu, Sigma)
n_success_ml += 1
except LinAlgError:
pass
try:
gmm_lib.apply_em(X, pi, mu, Sigma, S=S, eta=eta)
n_success_map += 1
except LinAlgError:
pass
pct_ml = n_success_ml / n_attempts
pct_map = n_success_map / n_attempts
return pct_ml, pct_map
if __name__ == "__main__":
seed(314)
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
pi = np.ones(3) / 3
hist_ml, hist_map = [], []
test_dims = np.arange(10, 110, 10)
n_samples = 150
for dim in test_dims:
mu_base = np.array([[-1, 1], [1, -1], [3, -1]])
Sigma1_base = np.array([[1, -0.7], [-0.7, 1]])
Sigma2_base = np.array([[1, 0.7], [0.7, 1]])
Sigma3_base = np.array([[1, 0.9], [0.9, 1]])
mu = np.c_[mu_base, np.zeros((3, dim - 2))]
Sigma1 = fill_cov(Sigma1_base, dim)
Sigma2 = fill_cov(Sigma2_base, dim)
Sigma3 = fill_cov(Sigma3_base, dim)
Sigma = np.stack((Sigma1, Sigma2, Sigma3), axis=0)
R = cholesky(Sigma)
samples = np.ones((n_samples, 1, 1)) * mu[None, ...]
noise = randn(n_samples, dim)
noise = np.einsum("kjm,nj->nkm", R, noise)
samples = samples + noise
samples = samples.reshape(-1, dim)
ml, map = attempt_em_fit(samples, 3, pi, Sigma)
hist_ml.append(1 - ml)
hist_map.append(1 - map)
fig, ax = plt.subplots()
ax.plot(test_dims, hist_ml, c="tab:red", marker="o", label="MLE")
ax.plot(test_dims, hist_map, c="black", marker="o", linestyle="--", label="MAP")
ax.set_xlabel("dimensionality")
ax.set_ylabel("fraction of times EM for GMM fails")
ax.legend()
pml.savefig("gmm_mle_vs_map.pdf")
plt.show()
| mit | ab4f80ae1e2439c8944d7b31f46ab10b | 29.366667 | 84 | 0.562752 | 2.763397 | false | false | false | false |
probml/pyprobml | deprecated/scripts/dirichlet_3d_simplex_plots.py | 1 | 3886 | # Plot 3-dim dirichlet on 2d simplex as heatmap and samples
# Author: Thomas Boggs
# https://gist.github.com/tboggs/8778945
# http://blog.bogatron.net/blog/2014/02/02/visualizing-dirichlet-distributions/
import superimport
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import matplotlib.tri as tri
# Vertices of the equilateral 2-simplex (triangle) in Cartesian coordinates.
_corners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])
_triangle = tri.Triangulation(_corners[:, 0], _corners[:, 1])
# Midpoint of the edge opposite each corner; used by xy2bc for the
# Cartesian -> barycentric conversion.
_midpoints = [(_corners[(i + 1) % 3] + _corners[(i + 2) % 3]) / 2.0 \
              for i in range(3)]
def xy2bc(xy, tol=1.e-3):
    """Convert 2D Cartesian coordinates to barycentric coordinates.

    Arguments:
        `xy`: A length-2 sequence containing the x and y value.
        `tol`: Coordinates are clipped to [tol, 1 - tol] so downstream
            density evaluations stay finite at the simplex boundary.
    """
    coords = []
    for corner, midpoint in zip(_corners, _midpoints):
        # Project onto the altitude through this corner; 0.75 is the
        # squared altitude length of the unit equilateral triangle.
        coords.append((corner - midpoint).dot(xy - midpoint) / 0.75)
    return np.clip(coords, tol, 1.0 - tol)
class Dirichlet(object):
    """Dirichlet distribution over the probability simplex."""

    def __init__(self, alpha):
        """Create a Dirichlet distribution with concentration vector `alpha`."""
        from math import gamma
        self._alpha = np.array(alpha)
        # Normalizing constant: Gamma(sum(alpha)) / prod_i Gamma(alpha_i).
        denom = 1.0
        for a in self._alpha:
            denom *= gamma(a)
        self._coef = gamma(np.sum(self._alpha)) / denom

    def pdf(self, x):
        """Return the density at point `x` on the simplex."""
        density = self._coef
        for xx, aa in zip(x, self._alpha):
            density = density * xx ** (aa - 1)
        return density

    def sample(self, N):
        """Draw `N` samples, returned as an (N, len(alpha)) array."""
        return np.random.dirichlet(self._alpha, N)
def draw_pdf_contours(dist, border=False, nlevels=200, subdiv=8, **kwargs):
    '''Draws pdf contours over an equilateral triangle (2-simplex).
    Arguments:
        `dist`: A distribution instance with a `pdf` method.
        `border` (bool): If True, the simplex border is drawn.
        `nlevels` (int): Number of contours to draw.
        `subdiv` (int): Number of recursive mesh subdivisions to create.
        kwargs: Keyword args passed on to `plt.triplot`.
    '''
    from matplotlib import cm
    # Refine the simplex into a fine triangular mesh and evaluate the pdf
    # at every mesh vertex (converted to barycentric coordinates).
    mesh = tri.UniformTriRefiner(_triangle).refine_triangulation(subdiv=subdiv)
    densities = []
    for point in zip(mesh.x, mesh.y):
        densities.append(dist.pdf(xy2bc(point)))
    plt.tricontourf(mesh, densities, nlevels, cmap=cm.jet, **kwargs)
    plt.axis('equal')
    plt.xlim(0, 1)
    plt.ylim(0, 0.75**0.5)
    plt.axis('off')
    if border is True:
        plt.triplot(_triangle, linewidth=1)
def plot_points(X, barycentric=True, border=True, **kwargs):
    '''Plots a set of points in the simplex.
    Arguments:
        `X` (ndarray): A 2xN array (if in Cartesian coords) or 3xN array
            (if in barycentric coords) of points to plot.
        `barycentric` (bool): Indicates if `X` is in barycentric coords.
        `border` (bool): If True, the simplex border is drawn.
        kwargs: Keyword args passed on to `plt.plot`.
    '''
    # Map barycentric coordinates to Cartesian via the simplex corners.
    points = X.dot(_corners) if barycentric is True else X
    plt.plot(points[:, 0], points[:, 1], 'k.', ms=1, **kwargs)
    plt.axis('equal')
    plt.xlim(0, 1)
    plt.ylim(0, 0.75**0.5)
    plt.axis('off')
    if border is True:
        plt.triplot(_triangle, linewidth=1)
f = plt.figure(figsize=(8, 6))
# One heatmap + one sample scatter per concentration vector.
alphas = [[0.1] * 3,
          [1] * 3,
          [5] * 3,
          [50] * 3,
          [2, 5, 15]]
for (i, alpha) in enumerate(alphas):
    plt.figure(figsize=(8,8))
    dist = Dirichlet(alpha)
    draw_pdf_contours(dist)
    title = r'$\alpha$ = (%.3f, %.3f, %.3f)' % tuple(alpha)
    plt.title(title, fontdict={'fontsize': 12})
    # File-name suffix derived from the first concentration parameter.
    a0 = int(np.round(alpha[0]*10))
    pml.savefig('dirichletHeatmap{}.jpg'.format(a0))
    plt.show()
    plt.figure(figsize=(8,8))
    plot_points(dist.sample(5000))
    pml.savefig('dirichletSample{}.jpg'.format(a0))
    plt.show()
| mit | 6d791c6ddc44969b5927a5d464c75cc3 | 33.39823 | 79 | 0.595214 | 3.1364 | false | false | false | false |
probml/pyprobml | deprecated/scripts/anscombes_quartet.py | 1 | 1610 | # Anscombe's quartet
# Author: Drishtii
import superimport
import seaborn as sns
import matplotlib.pyplot as plt
import pyprobml_utils as pml
sns.set_theme(style="ticks")
# Anscombe's quartet ships with seaborn as a built-in example dataset:
# four (x, y) datasets with very similar summary statistics.
df = sns.load_dataset("anscombe")
# One regression panel per dataset, linear fits without confidence bands.
g = sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df, col_wrap=4, ci=None, palette="muted",
               height=4, scatter_kws={"s": 50, "alpha": 1}, legend_out=True, truncate=False)
g.set(xlim=(2.5, 20.5 ))
pml.savefig("anscombes_quartet.pdf")
# One standalone regression plot per dataset, annotated with the sample
# means of x and y, plus printed count/mean/var summaries.
names = df['dataset'].unique()
for name in names:
    print(name)
    ndx = df['dataset']==name
    df2 = df[ndx]
    lm = sns.lmplot(x="x", y="y", data=df2, ci=None, truncate=False)
    ax = plt.gca()
    ax.set_xlim(0, 20)
    ax.set_ylim(0, 14)
    mx = df2['x'].to_numpy().mean();
    my = df2['y'].to_numpy().mean()
    ax.set_title(f'{name}, mx={mx:0.3f}, my={my:0.3f}', fontsize=12)
    print(df2[['x', 'y']].agg(['count', 'mean', 'var']))
    pml.savefig(f"anscombes_quartet_{name}.pdf")
# Compare the two different estimators for the variance
# https://github.com/probml/pml-book/issues/264
for dataset_name in ['I', 'II', 'III', 'IV']:
    print('dataset ', dataset_name)
    subset = df[df['dataset'] == dataset_name]
    # Variance of x: explicit MLE (divide by N), numpy default (ddof=0),
    # and the unbiased estimator (ddof=1).
    x = subset['x'].to_numpy()
    centered_x = x - x.mean()
    print('var x, MLE = {:.2f}'.format((centered_x ** 2).mean()))
    print('var x, numpy: {:.2f}'.format(x.var()))
    print('var x, unbiased estimator: {:.2f}\n'.format(x.var(ddof=1)))
    # Same three estimators for y.
    y = subset['y'].to_numpy()
    centered_y = y - y.mean()
    print('var y, MLE = {:.2f}'.format((centered_y ** 2).mean()))
    print('var y, numpy: {:.2f}'.format(y.var()))
    print('var y, unbiased estimator: {:.2f}\n'.format(y.var(ddof=1)))
| mit | 25c3ab9c6f04448bcc7de9cb7c6f052f | 33.255319 | 105 | 0.587578 | 2.58427 | false | false | false | false |
probml/pyprobml | deprecated/scripts/ais_demo.py | 1 | 1503 | # https://agustinus.kristia.de/techblog/2017/12/23/annealed-importance-sampling/
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
def f_0(x):
    """Unnormalized target density: \propto N(-5, 2)."""
    return np.exp(-(x+5)**2/2/2)


def f_n(x):
    """Unnormalized proposal density: \propto N(0, 1).

    FIX: this function was referenced by `f_j` below but never defined,
    so the script crashed with a NameError. It is the unnormalized
    density of the standard-normal proposal `p_n` used by the sampler.
    """
    return np.exp(-x**2/2)


def f_j(x, beta):
    """Intermediate unnormalized density: geometric interpolation
    f_0(x)**beta * f_n(x)**(1-beta) between proposal (beta=0) and
    target (beta=1)."""
    return f_0(x)**beta * f_n(x)**(1-beta)
# Proposal distribution: 1/Z * f_n
p_n = st.norm(0, 1)


def T(x, f, n_steps=10):
    """Metropolis transition kernel T(x'|x): run `n_steps` random-walk
    Metropolis updates targeting the unnormalized density `f` and return
    the final state."""
    state = x
    for _ in range(n_steps):
        proposal = state + np.random.randn()   # symmetric random-walk proposal
        accept_prob = f(proposal) / f(state)   # Metropolis acceptance ratio
        if np.random.rand() < accept_prob:
            state = proposal
    return state
# Grid for visualizing the densities (note: the sampling loop below
# reuses and overwrites the name `x`).
x = np.arange(-10, 5, 0.1)
n_inter = 50  # num of intermediate dists
betas = np.linspace(0, 1, n_inter)  # annealing schedule: beta_0=0 ... beta_last=1
# Sampling
n_samples = 100
samples = np.zeros(n_samples)
weights = np.zeros(n_samples)
for t in range(n_samples):
    # Sample initial point from q(x)
    x = p_n.rvs()
    # FIX: the weight is accumulated additively in log space and
    # exponentiated below, so the log-weight must start at 0 (weight 1),
    # not 1. The old `w = 1` multiplied every weight by e; the constant
    # cancelled in the self-normalized estimate but the raw weights
    # were wrong.
    w = 0.0
    for n in range(1, len(betas)):
        # Transition
        x = T(x, lambda x: f_j(x, betas[n]), n_steps=5)
        # Accumulate the incremental importance weight in log space:
        # w *= f_n(x) / f_{n-1}(x)
        # NOTE(review): standard AIS evaluates this ratio at the state
        # *before* the transition under beta_n; verify against Neal (2001).
        w += np.log(f_j(x, betas[n])) - np.log(f_j(x, betas[n-1]))
    samples[t] = x
    weights[t] = np.exp(w)  # Transform back using exp
# Self-normalized importance-sampling estimate of E[x] under the target.
a = 1/np.sum(weights) * np.sum(weights * samples)
probml/pyprobml | deprecated/scripts/ngram_character_demo.py | 1 | 2970 | '''
This script consists of functions that allow users to fit ngram model, sample from an ngram model and calculate the
log likelihood of the given sequence given an ngram model.
Author : Aleyna Kara(@karalleyna)
'''
import superimport
from nltk.util import ngrams
from nltk import FreqDist, LidstoneProbDist
from dataclasses import dataclass
from collections import defaultdict
import re
import string
import numpy as np
import requests
@dataclass
class NGram:
    """A fitted character n-gram model."""
    freq_dist: FreqDist          # frequencies of all observed n-grams
    prob_dists: defaultdict      # (n-1)-char prefix -> smoothed dist over next char
    N: int                       # the n in n-gram
def preprocessing(text, case_folding=False):
    """Normalize raw text: optionally lower-case it, then strip digits and
    punctuation, and collapse whitespace runs into single spaces."""
    result = text
    if case_folding:
        result = result.lower()
    result = re.sub(r'\d+', '', result)
    # Remove ASCII punctuation plus a few Unicode characters seen in the corpus.
    strip_table = str.maketrans('', '', string.punctuation + "’↵·")
    result = result.translate(strip_table)
    return re.sub(r'\s+', ' ', result)
def read_file(filepath):
    """Return the entire contents of the text file at `filepath`.

    FIX: uses a context manager so the file handle is always closed;
    the original opened the file and never closed it.
    """
    with open(filepath, 'r') as f:
        return f.read()
def ngram_model_fit(n, data, smoothing=1):
    """Fit an order-`n` character n-gram model on the string `data`,
    using Lidstone smoothing with parameter `smoothing`.

    Returns an NGram instance bundling the raw n-gram counts, the
    per-prefix smoothed distributions, and n itself.
    """
    freq_dist = FreqDist(ngrams(data, n))
    return NGram(freq_dist, get_probs_dist(freq_dist, smoothing), n)
def get_probs_dist(freq_dist, smoothing=1):
    """Group n-gram counts by their (n-1)-character prefix and build one
    Lidstone-smoothed distribution over the next character per prefix."""
    counts_by_prefix = defaultdict(FreqDist)
    for ngram, count in freq_dist.items():
        key = ''.join(ngram[:-1])
        counts_by_prefix[key].update({ngram[-1]: count})
    smoothed = defaultdict(LidstoneProbDist)
    for key, counts in counts_by_prefix.items():
        smoothed[key] = LidstoneProbDist(counts, gamma=smoothing)
    return smoothed
def ngram_model_sample(model, text_length, prefix, seed=0):
    """Generate up to `text_length` new characters from `model`, starting
    from `prefix`. Stops early if the current (n-1)-character context was
    never seen during training."""
    assert len(prefix) >= model.N - 1
    np.random.seed(seed)
    generated = prefix
    for _ in range(text_length):
        context = generated[1 - model.N:]  # last N-1 characters
        dist = model.prob_dists.get(context)
        if dist is None:
            return generated
        generated += dist.generate()
    return generated
def ngram_loglikelihood(model, seq):
    """Log-likelihood of `seq` under `model`: log p(initial prefix) plus
    the sum of smoothed log p(char | previous N-1 chars). Returns -inf
    whenever a required context was never seen during training."""
    assert len(seq) >= model.N - 1
    context_len = model.N - 1
    first = seq[:context_len]
    if first not in model.prob_dists:
        return float("-inf")
    # Empirical probability of the initial prefix.
    ll = np.log(model.prob_dists[first].freqdist().N() / model.freq_dist.N())
    for i in range(context_len, len(seq)):
        context = seq[i - context_len: i]
        if context not in model.prob_dists:
            return float("-inf")
        ll += model.prob_dists[context].logprob(seq[i])
    return ll
# Download the training corpus (King James bible), fit a 10-gram model,
# and sample 500 characters seeded with the prefix 'christian'.
url = 'https://raw.githubusercontent.com/probml/probml-data/main/data/bible.txt'
response = requests.get(url)
text = response.content.decode("utf-8")
data = preprocessing(text, case_folding=True)
n = 10
model = ngram_model_fit(n, data, smoothing=1)
sample = ngram_model_sample(model, text_length=500, prefix='christian', seed=0)
print(sample)
# ngram statistics
#print(f'Most common {n}-grams\n', model.freq_dist.most_common(10))
| mit | e1168bd225e78fbf8bc3bb2976d01e46 | 26.201835 | 115 | 0.667116 | 3.229847 | false | false | false | false |
probml/pyprobml | deprecated/scripts/vb_logreg_2d_demo.py | 1 | 1811 | # Variational Bayes for binary logistic regression
# Written by Amazasp Shaumyan
#https://github.com/AmazaspShumik/sklearn-bayes/blob/master/ipython_notebooks_tutorials/linear_models/bayesian_logistic_regression_demo.ipynb
import superimport
#from skbayes.linear_models import EBLogisticRegression,VBLogisticRegression
from bayes_logistic import EBLogisticRegression, VBLogisticRegression
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from scipy import stats
from matplotlib import cm
# create data set: two Gaussian blobs in 2-d; the first 250 points are
# shifted to (+3, -3) and labeled +1, the rest labeled -1.
np.random.seed(0)
n_samples = 500
x = np.random.randn(n_samples,2)
x[0:250,0] = x[0:250,0] + 3
x[0:250,1] = x[0:250,1] - 3
y = -1*np.ones(500)
y[0:250] = 1
# Fit both approximations: type-II (empirical Bayes) and variational.
eblr = EBLogisticRegression(tol_solver = 1e-3)
vblr = VBLogisticRegression()
eblr.fit(x,y)
vblr.fit(x,y)
# create grid for heatmap: n_grid x n_grid points covering the data range,
# flattened into an (n_grid**2, 2) design matrix.
n_grid = 500
max_x = np.max(x,axis = 0)
min_x = np.min(x,axis = 0)
X1 = np.linspace(min_x[0],max_x[0],n_grid)
X2 = np.linspace(min_x[1],max_x[1],n_grid)
x1,x2 = np.meshgrid(X1,X2)
Xgrid = np.zeros([n_grid**2,2])
Xgrid[:,0] = np.reshape(x1,(n_grid**2,))
Xgrid[:,1] = np.reshape(x2,(n_grid**2,))
# Predicted probability of the positive class on the grid, per model.
eblr_grid = eblr.predict_proba(Xgrid)[:,1]
vblr_grid = vblr.predict_proba(Xgrid)[:,1]
grids = [eblr_grid, vblr_grid]
lev = np.linspace(0,1,11)
titles = ['Type II Bayesian Logistic Regression', 'Variational Logistic Regression']
# One filled-contour probability heatmap per model, with the raw data
# overlaid (blue = class -1, red = class +1).
for title, grid in zip(titles, grids):
    plt.figure(figsize=(8,6))
    plt.contourf(X1,X2,np.reshape(grid,(n_grid,n_grid)),
                 levels = lev,cmap=cm.coolwarm)
    plt.plot(x[y==-1,0],x[y==-1,1],"bo", markersize = 3)
    plt.plot(x[y==1,0],x[y==1,1],"ro", markersize = 3)
    plt.colorbar()
    plt.title(title)
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()
| mit | 0f7181b0b86b2dd75c9466cb613a8d1b | 30.241379 | 141 | 0.66593 | 2.522284 | false | false | false | false |
probml/pyprobml | deprecated/scripts/beta_binom_approx_post_pymc3.py | 1 | 4344 | # 1d approixmation to beta binomial model
# https://github.com/aloctavodia/BAP
import superimport
import pymc3 as pm
import numpy as np
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt
import arviz as az
import math
import pyprobml_utils as pml
#data = np.repeat([0, 1], (10, 3))
# Bernoulli data: 10 tails (0) and 1 head (1).
data = np.repeat([0, 1], (10, 1))
h = data.sum()        # number of heads
t = len(data) - h     # number of tails
# Exact posterior under a uniform Beta(1,1) prior is Beta(h+1, t+1),
# evaluated on a grid and normalized to sum to one.
plt.figure()
x = np.linspace(0, 1, 100)
xs = x #grid
dx_exact = xs[1]-xs[0]
post_exact = stats.beta.pdf(xs, h+1, t+1)
post_exact = post_exact / np.sum(post_exact)
plt.plot(xs, post_exact)
plt.yticks([])
plt.title('exact posterior')
pml.savefig('bb_exact.pdf')
# Grid
def posterior_grid(heads, tails, grid_points=100):
    """Grid approximation to the posterior over a coin's head-probability.

    Evaluates a uniform prior times the binomial likelihood of `heads`
    successes in `heads + tails` trials at `grid_points` evenly spaced
    values in [0, 1], then normalizes to sum to one.
    Returns (grid, posterior).
    """
    grid = np.linspace(0, 1, grid_points)
    uniform_prior = np.full(grid_points, 1 / grid_points)
    unnormalized = stats.binom.pmf(heads, heads + tails, grid) * uniform_prior
    return grid, unnormalized / unnormalized.sum()
# Plot the 20-point grid approximation as bars, with the exact posterior
# rescaled by the ratio of bin widths so the two curves are comparable.
n = 20
grid, posterior = posterior_grid(h, t, n)
dx_grid = grid[1] - grid[0]
sf = dx_grid / dx_exact # Jacobian scale factor
plt.figure()
#plt.stem(grid, posterior, use_line_collection=True)
plt.bar(grid, posterior, width=1/n, alpha=0.2)
plt.plot(xs, post_exact*sf)
plt.title('grid approximation')
plt.yticks([])
plt.xlabel('θ');
pml.savefig('bb_grid.pdf')
# Laplace (quadratic) approximation: Gaussian centered at the MAP estimate
# with variance from the inverse Hessian at the mode.
with pm.Model() as normal_aproximation:
    theta = pm.Beta('theta', 1., 1.)
    y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
    mean_q = pm.find_MAP()
    std_q = ((1/pm.find_hessian(mean_q, vars=[theta]))**0.5)[0]
    mu = mean_q['theta']
print([mu, std_q])
plt.figure()
plt.plot(xs, stats.norm.pdf(xs, mu, std_q), '--', label='Laplace')
# Unnormalized comparison: exact Beta posterior density on the same grid.
post_exact = stats.beta.pdf(xs, h+1, t+1)
plt.plot(xs, post_exact, label='exact')
plt.title('Quadratic approximation')
plt.xlabel('θ', fontsize=14)
plt.yticks([])
plt.legend()
pml.savefig('bb_laplace.pdf');
# HMC: sample the posterior with PyMC3's NUTS sampler (2 chains).
with pm.Model() as hmc_model:
    theta = pm.Beta('theta', 1., 1.)
    y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
    trace = pm.sample(1000, random_seed=42, cores=1, chains=2)
thetas = trace['theta']
axes = az.plot_posterior(thetas, hdi_prob=0.95)
pml.savefig('bb_hmc.pdf');
az.plot_trace(trace)
pml.savefig('bb_hmc_trace.pdf', dpi=300)
# ADVI: mean-field variational approximation, then sample from the fitted
# approximate posterior.
with pm.Model() as mf_model:
    theta = pm.Beta('theta', 1., 1.)
    y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
    mean_field = pm.fit(method='advi')
    trace_mf = mean_field.sample(1000)
thetas = trace_mf['theta']
axes = az.plot_posterior(thetas, hdi_prob=0.95)
pml.savefig('bb_mf.pdf');
plt.show()
# track mean and std of the variational approximation during ADVI fitting,
# via callback functions evaluated at every iteration.
with pm.Model() as mf_model:
    theta = pm.Beta('theta', 1., 1.)
    y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
    advi = pm.ADVI()
    tracker = pm.callbacks.Tracker(
        mean=advi.approx.mean.eval, # callable that returns mean
        std=advi.approx.std.eval # callable that returns std
    )
    approx = advi.fit(callbacks=[tracker])
trace_approx = approx.sample(1000)
thetas = trace_approx['theta']
# Individual diagnostic figures: mean track, std track, ELBO, KDE.
plt.figure()
plt.plot(tracker['mean'])
plt.title('Mean')
pml.savefig('bb_mf_mean.pdf');
plt.figure()
plt.plot(tracker['std'])
plt.title('Std ')
pml.savefig('bb_mf_std.pdf');
plt.figure()
plt.plot(advi.hist)
plt.title('Negative ELBO');
pml.savefig('bb_mf_elbo.pdf');
plt.figure()
sns.kdeplot(thetas);
plt.title('KDE of posterior samples')
pml.savefig('bb_mf_kde.pdf');
# Same four diagnostics as a single 1x4 panel.
fig,axs = plt.subplots(1,4, figsize=(30,10))
mu_ax = axs[0]
std_ax = axs[1]
elbo_ax = axs[2]
kde_ax = axs[3]
mu_ax.plot(tracker['mean'])
mu_ax.set_title('Mean')
std_ax.plot(tracker['std'])
std_ax.set_title('Std ')
elbo_ax.plot(advi.hist)
elbo_ax.set_title('Negative ELBO');
kde_ax = sns.kdeplot(thetas);
kde_ax.set_title('KDE of posterior samples')
pml.savefig('bb_mf_panel.pdf');
# Alternative 2x2-style layout (mean, std, ELBO).
fig = plt.figure(figsize=(16, 9))
mu_ax = fig.add_subplot(221)
std_ax = fig.add_subplot(222)
hist_ax = fig.add_subplot(212)
mu_ax.plot(tracker['mean'])
mu_ax.set_title('Mean track')
std_ax.plot(tracker['std'])
std_ax.set_title('Std track')
hist_ax.plot(advi.hist)
hist_ax.set_title('Negative ELBO track');
pml.savefig('bb_mf_tracker.pdf');
# Fresh sample from the fitted approximation for the final posterior plot.
trace_approx = approx.sample(1000)
thetas = trace_approx['theta']
axes = az.plot_posterior(thetas, hdi_prob=0.95)
plt.show()
probml/pyprobml | internal/book2/handle_book1_notebooks.py | 1 | 1062 | from glob import glob
from itertools import count
import nbformat as nbf
# Map each book1 notebook filename to the chapter directory it lives in.
book1_nb = glob("notebooks/book1/*/*.ipynb")
book1_nb_to_chap = {path.split("/")[-1]: path.split("/")[-2] for path in book1_nb}

colab_base_url = "https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1"
prefix = "Source of this notebook is here:"
book2_nb = glob("notebooks/book2/*/*.ipynb")

counter = 0
for nb in book2_nb:
    name = nb.split("/")[-1]
    if name not in book1_nb_to_chap:
        continue
    # Replace the duplicate book2 notebook's contents with a single
    # markdown cell pointing at the canonical book1 copy on Colab.
    nb_content = nbf.read(nb, as_version=4)
    book1_chap = book1_nb_to_chap[name]
    new_cell = nbf.v4.new_markdown_cell(f"{prefix} {colab_base_url}/{book1_chap}/{name}")
    nb_content["cells"] = [new_cell]
    nbf.write(nb_content, nb)
    print(f"{nb} is redirected to {colab_base_url}/{book1_chap}/{name}")
    counter += 1
print("Done. {} notebooks are redirected.".format(counter))
| mit | fefae59d24c83a4b41b46dd2cb881e3d | 28.5 | 103 | 0.623352 | 2.96648 | false | false | false | false |
probml/pyprobml | deprecated/scripts/vanishing_gradients.py | 1 | 1063 | # Vanishing gradients for certain activation functions
# Based on
#https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
import superimport
import numpy as np
import matplotlib.pyplot as plt
import os
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)); elementwise on arrays."""
    z = np.exp(-x)
    return 1 / (1 + z)


def sigmoid_grad(x):
    """Derivative of the sigmoid: sigma(x) * (1 - sigma(x))."""
    s = sigmoid(x)
    return s * (1 - s)
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.maximum(x, 0)


def heaviside(x):
    """Step function: True/1 where x > 0, else False/0."""
    return np.greater(x, 0)


def relu_grad(x):
    """(Sub)derivative of relu: 1 for x > 0, 0 otherwise."""
    return heaviside(x)
# Plot each activation function and its derivative over [-10, 10] and
# save the figures; the flat derivative tails illustrate vanishing
# gradients for the sigmoid.
x = np.linspace(-10, 10, 100)
y = sigmoid(x);
plt.figure()
plt.plot(x, y)
plt.title('sigmoid function')
plt.savefig('../figures/sigmoid.pdf')
plt.show()
y = sigmoid_grad(x);
plt.figure()
plt.plot(x, y)
plt.title('derivative of sigmoid')
plt.ylim(0,1)
plt.savefig('../figures/sigmoid_deriv.pdf')
plt.show()
x = np.linspace(-10, 10, 100)
y = relu(x);
plt.figure()
plt.plot(x, y)
plt.title('relu function')
plt.savefig('../figures/relu.pdf')
plt.show()
y = relu_grad(x);
plt.figure()
plt.plot(x, y)
plt.title('derivative of relu')
plt.ylim(-0.1,1.1)
plt.savefig('../figures/relu_deriv.pdf')
plt.show()
probml/pyprobml | deprecated/scripts/agglomDemo.py | 1 | 1031 | # Agglomerative Clustering Demo
# Author: Animesh Gupta
import superimport
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
import matplotlib.pyplot as plt
import pyprobml_utils as pml
# Five 2-d points used both for the scatter plot and the dendrogram.
X = np.array([[1,2],
              [2.5,4.5],
              [2,2],
              [4,1.5],
              [4,2.5],])
labels = range(1, 6)
plt.figure(figsize=(10, 6))
plt.yticks(np.linspace(0,5,11))
plt.ylim(0,5)
plt.grid(color='gray', linestyle='dashed')
plt.scatter(X[:,0],X[:,1], label='True Position')
# Annotate each point with its 1-based index.
for label, x, y in zip(labels, X[:, 0], X[:, 1]):
    plt.annotate(
        label,
        xy=(x, y), xytext=(-3, 3),
        textcoords='offset points', ha='right', va='bottom', fontsize=25, color="red")
pml.savefig("agglom_demo_data.pdf", dpi=300)
# Single-linkage agglomerative clustering, visualized as a dendrogram.
linked = linkage(X, 'single')
labelList = range(1, 6)
plt.figure(figsize=(10, 7))
dendrogram(linked,
           orientation='top',
           labels=labelList,
           distance_sort='descending',
           show_leaf_counts=True)
pml.savefig("agglom_demo_dendrogram.pdf", dpi=300)
plt.show()
probml/pyprobml | deprecated/scripts/KLfwdReverseMixGauss.py | 1 | 1402 | # Visualize difference between KL(p,q) and KL(q,p) where p is a mix of two
# 2d Gaussians, and q is a single 2d Gaussian
# Author: animesh-007
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
# Mixture of two correlated 2-d Gaussians (the "true" distribution p)
# plus three single-Gaussian approximations q for the KL comparison.
mu = np.array([[-1,-1],[1,1]])
Sigma = np.zeros((2,2,2))
Sigma[:,:,0] = [[1/2,1/4],[1/4,1]]
Sigma[:,:,1] = [[1/2,-1/4],[-1/4,1]]
SigmaKL = np.array([[3,2],[2,3]])  # broad covariance for the forward-KL fit
x1 = np.arange(-4,4.1,0.1).T
x2 = x1
n1 = np.size(x1)
n2 = np.size(x2)
f1 = np.zeros((n1,n2))
f2 = np.zeros((n1,n2))
klf = np.zeros((n1,n2))
kll = np.zeros((n1,n2))
klr = np.zeros((n1,n2))
# Evaluate all densities row-by-row over the (x1, x2) grid.
for i in range(n1):
    x_tile = np.tile(x1[i],(n2,1))
    x_tile = x_tile.reshape(-1)
    x_final = np.array([x_tile,x2])
    x_final = x_final.T
    f1[i,:] = multivariate_normal.pdf(x_final,mu[0,:],Sigma[:,:,0])
    f2[i,:] = multivariate_normal.pdf(x_final,mu[1,:],Sigma[:,:,1])
    klf[i,:] = multivariate_normal.pdf(x_final,[0,0],SigmaKL)
    kll[i,:] = multivariate_normal.pdf(x_final,mu[0,:],Sigma[:,:,0]*0.6)
    klr[i,:] = multivariate_normal.pdf(x_final,mu[1,:],Sigma[:,:,1]*0.6)
f = f1 + f2  # unnormalized mixture density p
# One panel per approximation: mixture contours in blue, q in red.
plots = [klf, kll, klr]
fig, ax = plt.subplots(1,3,figsize=(8,8))
for axi, plot_ in zip(ax.flat,plots):
    axi.axis('off')
    axi.contour(x1, x2, f, colors='b', zorder=1)
    axi.contour(x1,x2,plot_, colors='r',zorder=10)
fig.savefig('../figures/klfwdzrevmixgauss.pdf', dpi=300)
plt.show()
| mit | 9d3d4bf5d65e658601e9fa4b30d3ec2b | 24.490909 | 74 | 0.611983 | 2.200942 | false | false | false | false |
probml/pyprobml | deprecated/scripts/splines_cherry_blossoms.py | 1 | 3601 | # splines in 1d
# We use the cherry blossom daa from sec 4.5 of "Statistical Rethinking"
# We use temperature as the target variable, to match a draft version of the book,
# https://github.com/Booleans/statistical-rethinking/blob/master/Statistical%20Rethinking%202nd%20Edition.pdf
# The published version uses day of year as target, which is less visually interesting.
# This an MLE version of the Bayesian numpyro code from
# https://fehiepsi.github.io/rethinking-numpyro/04-geocentric-models.html
import superimport
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import math
import os
import warnings
import pandas as pd
from scipy.interpolate import BSpline
from scipy import stats
from patsy import bs, dmatrix
import sklearn
from sklearn.linear_model import LinearRegression, Ridge
#https://stackoverflow.com/questions/61807542/generate-a-b-spline-basis-in-scipy-like-bs-in-r
def make_splines_scipy(x, num_knots, degree=3):
    """Build a B-spline design matrix for the 1-d data `x`.

    Interior knots sit at `num_knots` evenly spaced quantiles of x and the
    boundary knots are repeated three extra times (clamped ends). Evaluating
    a BSpline whose coefficient matrix is the identity yields one column per
    basis function. Per the scipy docs, ncoef = nknots - degree - 1; here
    nknots = num_knots + 6, so the result is (len(x), num_knots + 2) for
    the default cubic case.
    """
    interior = np.quantile(x, q=np.linspace(0, 1, num=num_knots))
    padded = np.pad(interior, (3, 3), mode="edge")
    basis = BSpline(padded, np.identity(num_knots + 2), k=degree)
    return basis(x)
def make_splines_patsy(x, num_knots, degree=3):
    """Build a B-spline design matrix for `x` using patsy's `bs`.

    Passing df=num_knots makes patsy place the knots at quantiles of x
    itself, so the explicit knot list previously computed here was dead
    code and has been removed. The equivalent explicit-knot form is
    bs(x, knots=np.quantile(x, np.linspace(0, 1, num_knots)), degree=degree).
    """
    B = bs(x, df=num_knots, degree=degree)  # knots at quantiles of x
    return B
def plot_basis(x, B, w=None):
    """Plot each (optionally weighted) column of the basis matrix B against
    x on a fresh axis; unweighted when w is None. Returns the axis."""
    num_basis = B.shape[1]
    if w is None:
        w = np.ones((num_basis))
    fig, ax = plt.subplots()
    ax.set_xlim(np.min(x), np.max(x))
    for weight, column in zip(w, B.T):
        ax.plot(x, weight * column, "k", alpha=0.5)
    return ax
def plot_basis_with_vertical_line(x, B, xstar):
    """Plot the basis functions, highlight the active (positive) basis
    values at x == xstar, and draw a vertical line there.

    `xstar` must be an exact member of `x`.
    """
    ax = plot_basis(x, B)
    row = np.where(x == xstar)[0][0]
    for value in B[row, :]:
        if value > 0:
            ax.scatter(xstar, value, s=40)
    ax.axvline(x=xstar)
    return ax
def plot_pred(mu, x, y):
    """Scatter the raw data (x, y) and overlay the prediction curve mu."""
    plt.figure()
    plt.scatter(x, y, alpha=0.5)
    plt.plot(x, mu, 'k-', linewidth=4)
def main():
    """Fit a ridge-regularized B-spline regression of temperature on year
    for the cherry-blossom data and save the basis / prediction figures."""
    url = 'https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/cherry_blossoms.csv'
    cherry_blossoms = pd.read_csv(url, sep=';')
    df = cherry_blossoms
    # FIX: `display` is an IPython/notebook builtin and raised a NameError
    # when this file was run as a plain script; print works in both settings.
    print(df.sample(n=5, random_state=1))
    print(df.describe())
    df2 = df[df.temp.notna()] # complete cases
    x = df2.year.values.astype(float)
    y = df2.temp.values.astype(float)
    xlabel = 'year'
    ylabel = 'temp'
    nknots = 15
    #B = make_splines_scipy(x, nknots)
    B = make_splines_patsy(x, nknots)
    print(B.shape)
    plot_basis_with_vertical_line(x, B, 1200)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_basis_vertical_MLE_{nknots}_{ylabel}.pdf', dpi=300)
    # Ridge regularization keeps the (near-collinear) basis weights stable.
    #reg = LinearRegression().fit(B, y)
    reg = Ridge().fit(B, y)
    w = reg.coef_
    a = reg.intercept_
    print(w)
    print(a)
    plot_basis(x, B, w)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_basis_weighted_MLE_{nknots}_{ylabel}.pdf', dpi=300)
    mu = a + B @ w  # fitted mean curve
    plot_pred(mu, x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    plt.savefig(f'../figures/splines_point_pred_MLE_{nknots}_{ylabel}.pdf', dpi=300)
main()
| mit | 73ae26155f53ab08dccecf3ef76d306e | 27.587302 | 109 | 0.655096 | 2.709556 | false | false | false | false |
probml/pyprobml | deprecated/scripts/iris_logreg_loss_surface.py | 1 | 2862 | # Plot 2d NLL loss surface for binary logistic regression with 1 feature
# Loosely based on
# https://peterroelants.github.io/posts/neural-network-implementation-part02/
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from mpl_toolkits.mplot3d import axes3d, Axes3D
np.random.seed(0)
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
iris = datasets.load_iris()
X = iris["data"][:, 3:] # petal width
# FIX: np.int was removed in NumPy >= 1.24; the builtin int is the
# documented replacement and behaves identically here.
y = (iris["target"] == 2).astype(int) # 1 if Iris-Virginica, else 0
# Unpenalized MLE fit with an explicit intercept.
log_reg = LogisticRegression(solver="lbfgs", fit_intercept=True, penalty='none')
log_reg.fit(X, y)
w_mle = log_reg.coef_[0][0] # 12.947270212450366
b_mle = log_reg.intercept_[0] # -21.125250539711022
ypred = log_reg.predict_proba(X)
# Add column of 1s to end of X to capture bias term
N = X.shape[0]
ones = np.ones((N,1))
X1 = np.hstack((X, ones))
# Refit without an explicit intercept; the appended ones column should
# recover the same weight and bias, verified by the asserts below.
log_reg1 = LogisticRegression(solver="lbfgs", fit_intercept=False, penalty='none')
log_reg1.fit(X1, y)
w_mle1 = log_reg1.coef_[0][0]
b_mle1 = log_reg1.coef_[0][1]
ypred1 = log_reg1.predict_proba(X1)
assert np.isclose(w_mle, w_mle1)
assert np.isclose(b_mle, b_mle1)
assert np.isclose(ypred[0], ypred1[0]).all()
# Define the logistic function
def logistic(z):
    """Elementwise logistic sigmoid 1 / (1 + e^{-z})."""
    denom = 1 + np.exp(-z)
    return 1. / denom
# Define the prediction function y = 1 / (1 + numpy.exp(-x*w))
def predict_prob(x, w):
    """Probability of class 1 for each row of x under the (row) weight
    matrix w, via the logistic of the linear activations."""
    activations = x.dot(w.T)
    return logistic(activations)
# Define the NLL loss function (y=probability, t=binary target)
def loss(y, t):
    """Mean negative log-likelihood (binary cross-entropy) of predicted
    probabilities y against binary targets t."""
    per_example = t * np.log(y) + (1 - t) * np.log(1 - y)
    return -np.mean(per_example)
# Sanity-check the hand-rolled prediction against sklearn's at the MLE.
params =np.asmatrix([[w_mle, b_mle]])
ypred2 = predict_prob(X1,params)
#assert np.isclose(ypred1[:,1], ypred2).all()
# We compute the loss on a grid of (w, b) values.
# We use for loops for simplicity.
ngrid = 50
sf = 0.5  # grid spans +/- 50% of each MLE coordinate
ws = np.linspace(-sf*w_mle, +sf*w_mle, ngrid)
bs = np.linspace(-sf*b_mle, +sf*b_mle, ngrid)
grid_w, grid_b = np.meshgrid(ws, bs)
loss_grid = np.zeros((ngrid, ngrid))
for i in range(ngrid):
    for j in range(ngrid):
        params = np.asmatrix([grid_w[i,j], grid_b[i,j]])
        p = predict_prob(X1, params)
        loss_grid[i,j] = loss(p, y)
# Plot the loss function surface: filled heatmap, contour lines, and a
# 3-d surface view of the same NLL grid.
plt.figure()
plt.contourf(grid_w, grid_b, loss_grid, 20)
cbar = plt.colorbar()
cbar.ax.set_ylabel('NLL', fontsize=12)
plt.xlabel('$w$', fontsize=12)
plt.ylabel('$b$', fontsize=12)
plt.title('Loss function surface')
pml.savefig('logregIrisLossHeatmap.pdf')
plt.show()
fig,ax = plt.subplots()
CS = plt.contour(grid_w, grid_b, loss_grid, cmap='jet')
#plt.plot(b_mle, w_mle, 'x') # Plot centered at MLE
pml.savefig('logregIrisLossContours.pdf')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(grid_w, grid_b, loss_grid)
pml.savefig('logregIrisLossSurf.pdf')
plt.show()
| mit | 962c76f121dd49e6565df7911a9df485 | 26.519231 | 83 | 0.680643 | 2.590045 | false | false | false | false |
probml/pyprobml | deprecated/scripts/ae_celeba_lightning.py | 1 | 6609 | # -*- coding: utf-8 -*-
"""
Author: Ang Ming Liang
Please run the following command before running the script
wget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py
or curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py
Then, make sure to get your kaggle.json from kaggle.com then run
mkdir /root/.kaggle
cp kaggle.json /root/.kaggle/kaggle.json
chmod 600 /root/.kaggle/kaggle.json
rm kaggle.json
to copy kaggle.json into a folder first
"""
import superimport
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from pytorch_lightning import LightningModule, Trainer
from data import CelebADataModule
IMAGE_SIZE = 64   # side length images are resized to
BATCH_SIZE = 256
CROP = 128        # center-crop size applied before resizing (0 disables)
DATA_PATH = "kaggle"
# Training/validation preprocessing pipeline: flip, optional crop,
# resize, convert to tensor.
trans = []
trans.append(transforms.RandomHorizontalFlip())
if CROP > 0:
    trans.append(transforms.CenterCrop(CROP))
trans.append(transforms.Resize(IMAGE_SIZE))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
class AE(LightningModule):
    """
    Convolutional autoencoder trained with plain MSE reconstruction loss.

    NOTE(review): despite the original "Standard VAE" description, this
    class is a deterministic autoencoder — `encode` returns only the mean,
    `fc_var` is never used, and `step` has no KL term. Several constructor
    arguments (enc_type, first_conv, maxpool1, enc_out_dim, kl_coeff) are
    stored or ignored without affecting the architecture.
    """
    def __init__(
        self,
        input_height: int,
        enc_type: str = 'resnet18',
        first_conv: bool = False,
        maxpool1: bool = False,
        hidden_dims = None,
        in_channels = 3,
        enc_out_dim: int = 512,
        kl_coeff: float = 0.1,
        latent_dim: int = 256,
        lr: float = 1e-4,
        **kwargs
    ):
        """
        Args:
            input_height: height of the images
            enc_type: option between resnet18 or resnet50 (unused here)
            first_conv: use standard kernel_size 7, stride 2 at start or
                replace it with kernel_size 3, stride 1 conv (unused here)
            maxpool1: use standard maxpool to reduce spatial dim of feat by a factor of 2 (unused here)
            hidden_dims: channel sizes of the conv stages (default [32..512])
            in_channels: number of input image channels
            enc_out_dim: set according to the out_channel count of
                encoder used (512 for resnet18, 2048 for resnet50)
            kl_coeff: coefficient for kl term of the loss (unused here)
            latent_dim: dim of latent space
            lr: learning rate for Adam
        """
        super(AE, self).__init__()
        self.save_hyperparameters()
        self.lr = lr
        self.kl_coeff = kl_coeff
        self.enc_out_dim = enc_out_dim
        self.latent_dim = latent_dim
        self.input_height = input_height
        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Build Encoder: each stage halves the spatial size (stride-2 conv).
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        # NOTE(review): the *4 assumes a 2x2 spatial map at the encoder
        # output, i.e. 64x64 inputs with the five stride-2 stages above.
        self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)  # unused by encode/step
        # Build Decoder: mirror of the encoder using transposed convs.
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        # Final upsample back to 3 channels with sigmoid output in [0, 1].
        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                      kernel_size= 3, padding= 1),
            nn.Sigmoid())
    @staticmethod
    def pretrained_weights_available():
        # NOTE(review): AE.pretrained_urls is never defined anywhere in
        # this file, so this raises AttributeError if called — confirm
        # whether a class-level dict was meant to be added.
        return list(AE.pretrained_urls.keys())
    def from_pretrained(self, checkpoint_name):
        # NOTE(review): depends on the missing AE.pretrained_urls as well.
        if checkpoint_name not in AE.pretrained_urls:
            raise KeyError(str(checkpoint_name) + ' not present in pretrained weights.')
        return self.load_from_checkpoint(AE.pretrained_urls[checkpoint_name], strict=False)
    def encode(self, x):
        """Map an image batch to its latent code (deterministic)."""
        x = self.encoder(x)
        x = torch.flatten(x, start_dim=1)
        mu = self.fc_mu(x)
        return mu
    def decode(self, z):
        """Map latent codes back to image space."""
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result
    def forward(self, x):
        """Full reconstruction: decode(encode(x))."""
        z = self.encode(x)
        return self.decode(z)
    def step(self, batch, batch_idx):
        """Shared train/val step: mean MSE reconstruction loss."""
        x, y = batch
        x_hat= self(x)
        loss = F.mse_loss(x_hat, x, reduction='mean')
        logs = {
            "loss": loss,
        }
        return loss, logs
    def training_step(self, batch, batch_idx):
        loss, logs = self.step(batch, batch_idx)
        self.log_dict({f"train_{k}": v for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss
    def validation_step(self, batch, batch_idx):
        loss, logs = self.step(batch, batch_idx)
        self.log_dict({f"val_{k}": v for k, v in logs.items()})
        return loss
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.lr)
if __name__ == "__main__":
    # Train the autoencoder on CelebA (downloaded via Kaggle credentials;
    # see the module docstring) and save the final weights.
    m = AE(input_height=IMAGE_SIZE)
    runner = Trainer(gpus = 2,gradient_clip_val=0.5,
                     max_epochs = 15)
    dm = CelebADataModule(data_dir=DATA_PATH,
                          target_type='attr',
                          train_transform=transform,
                          val_transform=transform,
                          download=True,
                          batch_size=BATCH_SIZE,
                          num_workers=3)
    runner.fit(m, datamodule=dm)
    torch.save(m.state_dict(), "ae-celeba-latent-dim-256.ckpt")
probml/pyprobml | deprecated/scripts/conditional_bernoulli_mix_lib.py | 1 | 12546 | # Necessary functions for demo and ClassConditionalBMM
# Author : Aleyna Kara(@karalleyna)
import jax.numpy as jnp
from jax import vmap, jit, value_and_grad
from jax.random import PRNGKey, split, permutation
from jax.lax import scan
from jax.scipy.special import expit, logit
from jax.experimental import optimizers
import distrax
from distrax._src.utils import jittable
from mixture_lib import MixtureSameFamily
import itertools
opt_init, opt_update, get_params = optimizers.adam(1e-1)
class ClassConditionalBMM(jittable.Jittable):
    """Class-conditional mixture of Bernoullis.

    Each class has its own Bernoulli mixture; the model can be trained either
    with EM (`fit_em`) or with stochastic gradient descent on the negative
    log likelihood (`fit_sgd`). Mixture parameters are stored as logits so
    they remain unconstrained during optimization.
    """

    def __init__(self, mixing_coeffs, probs, class_priors, n_char, threshold=1e-10):
        # mixing_coeffs, probs: per-class mixture weights / Bernoulli probs.
        # threshold: numerical floor used throughout to avoid log(0)/div-by-0.
        self.mixing_coeffs = mixing_coeffs
        self.probs = probs
        self.class_priors = class_priors
        self.model = (logit(mixing_coeffs), logit(probs))
        self.num_of_classes = n_char
        self.threshold = threshold
        self.log_threshold = jnp.log(threshold)

    @property
    def class_priors(self):
        return self._class_priors

    @property
    def model(self):
        return self._model

    @model.setter
    def model(self, value):
        # Rebuild the distrax mixture from raw logits (kept unconstrained).
        mixing_coeffs_logits, probs_logits = value
        self._model = MixtureSameFamily(mixture_distribution=distrax.Categorical(logits=mixing_coeffs_logits),
                                        components_distribution=distrax.Independent(
                                            distrax.Bernoulli(logits=probs_logits),
                                            reinterpreted_batch_ndims=1))

    @class_priors.setter
    def class_priors(self, value):
        self._class_priors = distrax.Categorical(probs=value)

    def _cluster(self, observations, targets):
        """Group observations by class into one regular array.

        Every class is truncated to the smallest class size so the result
        stacks into shape (num_of_classes, min_n_sample, obs_dim).
        """
        clusters = []
        min_n_sample = float('inf')
        for c in range(self.num_of_classes):
            obs_of_same_class = observations[jnp.nonzero(targets == c)]
            n_obs = obs_of_same_class.shape[0]
            min_n_sample = min(min_n_sample, n_obs)
            clusters.append(obs_of_same_class.reshape((n_obs, -1)))
        return jnp.vstack([obs_of_same_class[jnp.newaxis, 0:min_n_sample, :] for obs_of_same_class in clusters])

    @jit
    def loglikelihood(self, X, c):
        """Per-point, per-component log likelihood of X under class c's mixture.

        All log terms are clipped to [log_threshold, 0] for numerical safety.
        """
        mixing_loglikelihood = jnp.clip(self._model.mixture_distribution.logits[c], a_max=0, a_min=self.log_threshold)
        # Contribution of pixels that are on (x=1) ...
        bern_loglikelihood_1 = X @ jnp.clip(self._model.components_distribution.distribution.logits[c].T, a_max=0,
                                            a_min=self.log_threshold)
        # ... and of pixels that are off (x=0).
        bern_loglikelihood_0 = (1 - X) @ jnp.clip(
            jnp.log(1 - jnp.clip(self._model.components_distribution.distribution.probs[c],
                                 a_min=self.threshold, a_max=1.0)), a_min=self.log_threshold, a_max=0).T
        mix_loglikelihood = mixing_loglikelihood + bern_loglikelihood_1 + bern_loglikelihood_0
        return mix_loglikelihood

    @jit
    def logsumexp(self, matrix, keepdims=True):
        """Row-wise logsumexp with the inner sum floored at `self.threshold`."""
        M = jnp.max(matrix, axis=-1)
        M = M[:, None]
        bern_sum = jnp.sum(jnp.exp(matrix - M), axis=1, keepdims=keepdims)
        bern_sum = jnp.where(bern_sum < self.threshold, self.threshold, bern_sum)
        log_bern_sum = M + jnp.log(bern_sum)
        return log_bern_sum

    def _sample_minibatches(self, iterables, batch_size):
        """Yield (observations, targets) minibatches of size `batch_size`."""
        observations, targets = iterables
        N = len(observations)
        for idx in range(0, N, batch_size):
            yield observations[idx:min(idx + batch_size, N)], targets[idx:min(idx + batch_size, N)]

    @jit
    def expectation(self, X, c):
        """E-step: responsibilities (gamma) and the mean per-point log likelihood."""
        mix_loglikelihood = self.loglikelihood(X, c)
        mix_loglikelihood_sum = self.logsumexp(mix_loglikelihood)
        gamma = jnp.exp(mix_loglikelihood - mix_loglikelihood_sum)
        # Floor responsibilities so the M-step never divides by ~0.
        gamma = jnp.where(gamma > self.threshold, gamma, self.threshold)
        return gamma, jnp.mean(mix_loglikelihood_sum)

    @jit
    def maximization(self, X, gamma):
        """M-step: updated mixing coefficients and Bernoulli probabilities."""
        gamma_sum = jnp.sum(gamma, axis=0, keepdims=True)
        # Component means, clamped into (threshold, 1 - threshold).
        probs = gamma.T @ X / gamma_sum.T
        probs = jnp.where(probs < self.threshold, self.threshold, probs)
        probs = jnp.where(probs < 1, probs, 1 - self.threshold)
        # Mixing weights, clamped the same way.
        mixing_coeffs = gamma_sum / X.shape[0]
        mixing_coeffs = jnp.where(mixing_coeffs < self.threshold, self.threshold, mixing_coeffs)
        mixing_coeffs = jnp.where(mixing_coeffs < 1, mixing_coeffs, 1 - self.threshold).squeeze()
        return mixing_coeffs, probs

    def fit_em(self, observations, targets, num_of_iters=10):
        """Fit with EM; returns the per-iteration negative mean log likelihoods."""
        iterations = jnp.arange(num_of_iters)
        classes = jnp.arange(self.num_of_classes)
        X = self._cluster(observations, targets)

        def train_step(params, i):
            # NOTE(review): mutating self inside a scanned function relies on
            # the Jittable machinery — confirm this traces as intended.
            self.model = params
            # Expectation
            gamma, log_likelihood = vmap(self.expectation, in_axes=(0, 0))(X, classes)
            # Maximization
            mixing_coeffs, probs = vmap(self.maximization, in_axes=(0, 0))(X, gamma)
            return (logit(mixing_coeffs), logit(probs)), -jnp.mean(log_likelihood)

        initial_params = (logit(self.mixing_coeffs),
                          logit(self.probs))
        final_params, history = scan(train_step, initial_params, iterations)
        self.model = final_params
        return history

    @jit
    def loss_fn(self, params, batch):
        """Mean negative log likelihood of a (observations, targets) batch."""
        observations, targets = batch
        mixing_coeffs_logits, probs_logits = params
        self.model = (mixing_coeffs_logits, probs_logits)
        mix_loglikelihood = vmap(self.loglikelihood)(observations, targets)
        # NaNs (e.g. from extreme logits) are mapped to the log floor.
        mix_loglikelihood_sum = jnp.nan_to_num(self.logsumexp(mix_loglikelihood), nan=self.log_threshold)
        return -jnp.mean(mix_loglikelihood_sum)

    @jit
    def update(self, i, opt_state, batch):
        """One optimizer step on `batch`; returns (new opt_state, loss)."""
        params = get_params(opt_state)
        loss, grads = value_and_grad(self.loss_fn)(params, batch)
        return opt_update(i, grads, opt_state), loss

    def fit_sgd(self, observations, targets, batch_size, rng_key=None, optimizer=None, num_epochs=1):
        """Fit with minibatch SGD; returns mean loss per epoch.

        NOTE(review): rebinds the module-level optimizer triple via `global`,
        so concurrent fits with different optimizers would interfere.
        """
        global opt_init, opt_update, get_params
        if rng_key is None:
            rng_key = PRNGKey(0)
        if optimizer is not None:
            opt_init, opt_update, get_params = optimizer
        opt_state = opt_init((logit(self.mixing_coeffs), logit(self.probs)))
        itercount = itertools.count()
        # NOTE(review): batch count is derived from num_epochs, not from the
        # dataset size — verify this is the intended schedule.
        num_complete_batches, leftover = jnp.divmod(num_epochs, batch_size)
        num_batches = num_complete_batches + jnp.where(leftover == 0, 0, 1)

        def epoch_step(opt_state, key):
            # Reshuffle the data each epoch.
            perm = permutation(key, len(observations))
            _observatios, _targets = observations[perm], targets[perm]
            sample_generator = self._sample_minibatches((_observatios, _targets), batch_size)

            def train_step(opt_state, i):
                opt_state, loss = self.update(next(itercount), opt_state, next(sample_generator))
                return opt_state, loss

            opt_state, losses = scan(train_step, opt_state, jnp.arange(num_batches))
            return opt_state, losses.mean()

        epochs = split(rng_key, num_epochs)
        opt_state, history = scan(epoch_step, opt_state, epochs)
        params = get_params(opt_state)
        mixing_coeffs_logits, probs_logits = params
        self.model = (mixing_coeffs_logits, probs_logits)
        # Store the constrained (probability-space) parameters as well.
        self.mixing_coeffs = expit(mixing_coeffs_logits)
        self.probs = expit(probs_logits)
        return history

    def predict(self, X):
        """Return (predicted class per row, per-class log joint scores)."""
        N, _ = X.shape
        classes = jnp.arange(self.num_of_classes)

        def ll(cls):
            mix_loglikelihood = self.loglikelihood(X, cls)
            sum_mix_loglikelihood = self.logsumexp(mix_loglikelihood)
            # Bayes rule in log space: class prior + class-conditional likelihood.
            bayes = self.class_priors.logits[..., cls] + sum_mix_loglikelihood
            return bayes.flatten()

        ll_given_c = vmap(ll, out_axes=(1))(classes)
        predictions = jnp.argmax(ll_given_c, axis=-1)
        return predictions, ll_given_c
probml/pyprobml | deprecated/scripts/vae_conv_load_tf.py | 1 | 7281 | # Load pre-trained ConvVAE model (eg trained in colab)
# See https://github.com/probml/pyprobml/blob/master/notebooks/lvm/vae_mnist_2d_tf.ipynb for training script
import superimport
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import os
figdir = "../figures"
import tensorflow as tf
from tensorflow import keras
#import tensorflow_datasets as tfds
import pickle
# NOTE(review): hard-coded to one user's machine — parameterize before reuse.
folder = '/home/murphyk/Downloads'

# Disabled MNIST branch kept for reference; flip `if 0` to `if 1` to use it.
if 0:
    with open(os.path.join(folder, 'mnist_small.pkl'), 'rb') as f:
        Xsmall = pickle.load(f)
    X = Xsmall[0,:,:,0].numpy();
    plt.imshow(X)
    input_shape = [28, 28, 1] # MNIST
    num_colors = input_shape[2]

# CelebA sample batch (pickle of locally produced, trusted data).
with open(os.path.join(folder, 'celeba_small.pkl'), 'rb') as f:
    Xsmall = pickle.load(f)
X = Xsmall[0,:,:,:].numpy();
plt.imshow(X)
input_shape = [64, 64, 3]
num_colors = input_shape[2]
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, BatchNormalization, LeakyReLU, Dropout
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
def make_encoder(
        input_dim,
        z_dim,
        encoder_conv_filters,
        encoder_conv_kernel_size,
        encoder_conv_strides,
        use_batch_norm = False,
        use_dropout= False
        ):
    """Build the convolutional encoder of the VAE.

    Returns (encoder, shape_before_flattening), where the encoder maps an
    image to the (mu, log_var) pair of the approximate posterior, and
    shape_before_flattening is what the decoder needs to invert the flatten.
    """
    encoder_input = Input(shape=input_dim, name='encoder_input')
    x = encoder_input
    n_layers_encoder = len(encoder_conv_filters)
    for i in range(n_layers_encoder):
        conv_layer = Conv2D(
            filters = encoder_conv_filters[i]
            , kernel_size = encoder_conv_kernel_size[i]
            , strides = encoder_conv_strides[i]
            , padding = 'same'
            , name = 'encoder_conv_' + str(i)
            )
        x = conv_layer(x)
        if use_batch_norm:
            x = BatchNormalization()(x)
        x = LeakyReLU()(x)
        if use_dropout:
            x = Dropout(rate = 0.25)(x)
    # Spatial shape before flattening, needed by the matching decoder.
    shape_before_flattening = K.int_shape(x)[1:]
    x = Flatten()(x)
    mu = Dense(z_dim, name='mu')(x) # no activation
    log_var = Dense(z_dim, name='log_var')(x) # no activation
    encoder = Model(encoder_input, (mu, log_var))
    return encoder, shape_before_flattening
def make_decoder(
        shape_before_flattening,
        z_dim,
        decoder_conv_t_filters,
        decoder_conv_t_kernel_size,
        decoder_conv_t_strides,
        use_batch_norm = False,
        use_dropout= False
        ):
    """Build the transposed-convolution decoder of the VAE.

    Maps a latent vector of size z_dim back to an image; the final layer
    returns logits (no activation) — the caller applies sigmoid if needed.
    """
    decoder_input = Input(shape=(z_dim,), name='decoder_input')
    # Undo the encoder's Flatten: dense projection, then reshape.
    x = Dense(np.prod(shape_before_flattening))(decoder_input)
    x = Reshape(shape_before_flattening)(x)
    n_layers_decoder = len(decoder_conv_t_filters)
    for i in range(n_layers_decoder):
        conv_t_layer = Conv2DTranspose(
            filters = decoder_conv_t_filters[i]
            , kernel_size = decoder_conv_t_kernel_size[i]
            , strides = decoder_conv_t_strides[i]
            , padding = 'same'
            , name = 'decoder_conv_t_' + str(i)
            )
        x = conv_t_layer(x)
        if i < n_layers_decoder - 1:
            if use_batch_norm:
                x = BatchNormalization()(x)
            x = LeakyReLU()(x)
            if use_dropout:
                x = Dropout(rate = 0.25)(x)
        # No activation fn in final layer since returns logits
        #else:
        #    x = Activation('sigmoid')(x)
    decoder_output = x
    decoder = Model(decoder_input, decoder_output)
    return decoder
def log_normal_pdf(sample, mean, logvar, raxis=1):
    """Diagonal-Gaussian log-density of `sample`, summed over axis `raxis`."""
    log2pi = tf.math.log(2. * np.pi)
    squared_err = (sample - mean) ** 2. * tf.exp(-logvar)
    per_dim = -.5 * (squared_err + logvar + log2pi)
    return tf.reduce_sum(per_dim, axis=raxis)
def sample_gauss(mean, logvar):
    """Reparameterized draw: mean + exp(logvar / 2) * eps with eps ~ N(0, I)."""
    noise = tf.random.normal(shape=mean.shape)
    return mean + noise * tf.exp(logvar * .5)
class ConvVAE(tf.keras.Model):
    """Convolutional VAE with a Gaussian latent and Bernoulli or MSE likelihood.

    All constructor arguments are stored on the instance so the model can be
    re-instantiated from a pickled argument tuple (see `load_model`).
    """

    def __init__(self,
                 input_dim,
                 latent_dim,
                 encoder_conv_filters,
                 encoder_conv_kernel_size,
                 encoder_conv_strides,
                 decoder_conv_t_filters,
                 decoder_conv_t_kernel_size,
                 decoder_conv_t_strides,
                 use_batch_norm = False,
                 use_dropout= False,
                 recon_loss_scaling = 1,
                 kl_loss_scaling = 1,
                 use_mse_loss = False
                 ):
        super(ConvVAE, self).__init__()
        # Save all args so we can reconstruct this object later
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.encoder_conv_filters = encoder_conv_filters
        self.encoder_conv_kernel_size = encoder_conv_kernel_size
        self.encoder_conv_strides = encoder_conv_strides
        self.decoder_conv_t_filters = decoder_conv_t_filters
        self.decoder_conv_t_kernel_size = decoder_conv_t_kernel_size
        self.decoder_conv_t_strides = decoder_conv_t_strides
        self.use_batch_norm = use_batch_norm
        self.use_dropout = use_dropout
        self.recon_loss_scaling = recon_loss_scaling
        self.kl_loss_scaling = kl_loss_scaling
        self.use_mse_loss = use_mse_loss
        # Encoder (inference network) and decoder (generative network).
        self.inference_net, self.shape_before_flattening = make_encoder(
            input_dim,
            latent_dim,
            encoder_conv_filters,
            encoder_conv_kernel_size,
            encoder_conv_strides,
            use_batch_norm,
            use_dropout)
        self.generative_net = make_decoder(
            self.shape_before_flattening,
            latent_dim,
            decoder_conv_t_filters,
            decoder_conv_t_kernel_size,
            decoder_conv_t_strides,
            use_batch_norm,
            use_dropout)

    @tf.function
    def sample(self, nsamples=1):
        """Draw latent codes from the N(0, I) prior and decode them."""
        eps = tf.random.normal(shape=(nsamples, self.latent_dim))
        return self.decode(eps, apply_sigmoid=True)

    def encode_stochastic(self, x):
        """Sample z ~ q(z|x) using the reparameterization trick."""
        mean, logvar = self.inference_net(x)
        return sample_gauss(mean, logvar)

    def decode(self, z, apply_sigmoid=True):
        """Decode latent codes; returns probabilities or raw logits."""
        logits = self.generative_net(z)
        if apply_sigmoid:
            probs = tf.sigmoid(logits)
            return probs
        return logits

    @tf.function
    def compute_loss(self, x):
        """Single-sample Monte Carlo estimate of the negative ELBO for batch x."""
        mean, logvar = self.inference_net(x)
        z = sample_gauss(mean, logvar)
        if self.use_mse_loss:
            # Gaussian likelihood approximation via mean squared error.
            x_probs = self.decode(z, apply_sigmoid=True)
            mse = tf.reduce_mean( (x - x_probs) ** 2, axis=[1, 2, 3])
            logpx_z = -0.5*mse # log exp(-0.5 (x-mu)^2)
        else:
            x_logit = self.decode(z, apply_sigmoid=False)
            cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x) # -sum_{c=0}^1 p_c log q_c
            logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3]) # sum over H,W,C
        logpz = log_normal_pdf(z, 0., 0.) # prior: mean=0, logvar=0
        logqz_x = log_normal_pdf(z, mean, logvar)
        kl_loss = logpz - logqz_x # MC approximation
        return -tf.reduce_mean(self.recon_loss_scaling * logpx_z + self.kl_loss_scaling * kl_loss) # -ve ELBO

    @tf.function
    def compute_gradients(self, x):
        """Gradients of the negative ELBO with respect to all trainable variables."""
        with tf.GradientTape() as tape:
            loss = self.compute_loss(x)
        gradients = tape.gradient(loss, self.trainable_variables)
        return gradients
#############
def load_model(model_class, folder):
    """Rebuild a model from `folder`: unpickle the constructor args, then
    restore the saved weights.

    NOTE(review): pickle is only safe on trusted, locally produced files.
    """
    params_path = os.path.join(folder, 'params.pkl')
    with open(params_path, 'rb') as fh:
        ctor_args = pickle.load(fh)
    model = model_class(*ctor_args)
    weights_path = os.path.join(folder, 'weights.h5')
    model.load_weights(weights_path)
    return model
# Reload the pretrained VAE and sanity-check against the colab run.
model = load_model(ConvVAE, folder)

# Check results match colab
L = model.compute_loss(Xsmall)
print(L)
M, V = model.inference_net(Xsmall)
print(M)
| mit | d4ad4600564cdfea53554c3362ff1840 | 30.79476 | 151 | 0.627249 | 3.153313 | false | false | false | false |
probml/pyprobml | deprecated/scripts/vb_gauss_cholesky.py | 1 | 6064 | '''
It implements the full covariance FFVB method from 3.5.1 of https://arxiv.org/abs/2103.01327
For original Matlab code, please see Example4.zip in https://github.com/VBayesLab/Tutorial-on-VB.
Author : Aleyna Kara(@karalleyna)
'''
import jax
import jax.numpy as jnp
from jax import random, jit, grad, lax, ops
import optax
def init_grads(nfeatures):
    """Return zero-initialized gradient accumulators for the variational params.

    Parameters
    ----------
    nfeatures : pytree of int
        Per-leaf parameter dimension n.

    Returns
    -------
    (pytree, pytree)
        Zeros of shape (n,) for the mean gradient and (n*(n+1)//2,) for the
        packed lower-triangular Cholesky-factor gradient of each leaf.
    """
    # jax.tree_map was deprecated and later removed from JAX; use the
    # supported jax.tree_util.tree_map instead. (Also fixes the
    # "inital_grad_mean" typo in the original local name.)
    initial_grad_mean = jax.tree_util.tree_map(lambda n: jnp.zeros((n,)), nfeatures)
    initial_grad_lower = jax.tree_util.tree_map(
        lambda n: jnp.zeros(((n * (n + 1)) // 2,)), nfeatures)
    return initial_grad_mean, initial_grad_lower
def clip(X, threshold=10, norm=None):
    """Global-norm gradient clipping over a pytree.

    If the total norm (sum of per-leaf 2-norms, unless `norm` is supplied)
    exceeds `threshold`, every leaf is scaled by threshold / norm; otherwise
    X is returned unchanged.
    """
    if norm is None:
        # jax.tree_leaves / jax.tree_map were removed from recent JAX
        # releases; use the jax.tree_util equivalents.
        X_leaves = jax.tree_util.tree_leaves(X)
        norm = sum(jax.tree_util.tree_map(jnp.linalg.norm, X_leaves))

    def true_fun(x):
        return (threshold / norm) * x

    def false_fun(x):
        return x

    # lax.cond keeps the scale-or-not decision traceable under jit.
    X = jax.tree_util.tree_map(
        lambda x: jax.lax.cond(norm > threshold, true_fun, false_fun, x), X)
    return X
def vechinv(v, d):
    """Unpack a length-d(d+1)/2 vector into a (d, d) lower-triangular matrix.

    Inverse of the half-vectorization ("vech") operator; the upper triangle
    is left at zero.
    """
    X = jnp.zeros((d, d))
    # jax.ops.index_update was removed from JAX; .at[...].set(...) is the
    # supported functional-update replacement.
    X = X.at[jnp.tril_indices(d, k=0)].set(v.squeeze())
    return X
def make_vb_gauss_chol_fns(loglikelihood_fn, logprior_fn, nfeatures, num_samples):
    """Build the per-iteration `step` function for full-covariance FFVB.

    The returned `step(key, variational_params, grads, data)` accumulates
    Monte Carlo estimates of the lower-bound gradient over `num_samples`
    draws and returns (gradients, negative lower bound).

    NOTE(review): uses the deprecated jax.tree_map alias; recent JAX
    requires jax.tree_util.tree_map.
    """
    def logjoint(params, data):
        # Negative log joint: -loglik - logprior (minimized downstream).
        return -loglikelihood_fn(params, *data) - logprior_fn(params)

    take_grad = jit(grad(logjoint))

    def sample(key, variational_params):
        # Take a single sample from a Gaussian distribution.
        mean, std = variational_params
        epsilon = jax.tree_map(lambda x: random.normal(key, x.shape), mean)
        params = jax.tree_map(lambda mu, sigma, eps: mu + sigma @ eps,
                              mean, std, epsilon)
        return params, epsilon

    def estimate_lower_bound_grad(variational_params, grad_mu, grad_lower):
        # Average over the Monte Carlo samples and add the entropy term
        # (diagonal of the Cholesky factor) to the lower-triangular gradient.
        grad_mu = jax.tree_map(lambda x: x / num_samples, grad_mu)
        _, std = variational_params
        diagonal = jax.tree_map(lambda L: jnp.diag(jnp.diag(L)), std)
        grad_lower = jax.tree_map(lambda dL, D, n: dL / num_samples + D[jnp.tril_indices(n)],
                                  grad_lower, diagonal, nfeatures)
        return grad_mu, grad_lower

    def step(key, variational_params, grads, data):
        def update(grads_and_lb, key):
            # One reparameterized sample: accumulate gradient contributions.
            grad_mu, grad_lower, lower_bound = grads_and_lb
            params, epsilon = sample(key, variational_params)
            grad_logjoint = take_grad(params, data)
            grad_mu = jax.tree_map(lambda x, y: x + y.flatten(),
                                   grad_mu, grad_logjoint)
            tmp = jax.tree_map(jnp.outer, grad_logjoint, epsilon)
            grad_lower = jax.tree_map(lambda x, y: x + y[jnp.tril_indices(len(y))],
                                      grad_lower, tmp)
            lower_bound = lower_bound + logjoint(params, data)
            return (grad_mu, grad_lower, lower_bound), None

        keys = random.split(key, num_samples)
        lower_bound = 0
        (grad_mu, grad_lower, lower_bound), _ = lax.scan(update, (*grads, lower_bound), keys)
        grads = estimate_lower_bound_grad(variational_params, grad_mu, grad_lower)
        return grads, -lower_bound

    return step
def vb_gauss_chol(key, loglikelihood_fn, logprior_fn,
                  data, optimizer, mean,
                  lower_triangular=None, num_samples=20,
                  window_size=10, niters=500,
                  eps=0.1, smooth=True):
    """Full-covariance fixed-form variational Bayes with a Cholesky parameterization.

    Arguments:
        key : PRNG key driving all Monte Carlo sampling
        loglikelihood_fn, logprior_fn : model log density components
        data : tuple passed through to loglikelihood_fn
        optimizer : an optax optimizer
        mean : prior mean of the distribution family (pytree)
        lower_triangular : initial Cholesky factors; defaults to eps * I
        num_samples : number of Monte Carlo samples per iteration
        window_size, smooth : moving-average smoothing of the lower bounds
        niters : number of optimization iterations

    Returns:
        (best_params, lower_bounds) — the variational parameters at the
        iteration with the highest (smoothed) lower bound, and the lower
        bound trace.

    NOTE(review): relies on the deprecated jax.tree_map / jax.tree_leaves
    aliases; recent JAX requires jax.tree_util equivalents.
    """
    nfeatures = jax.tree_map(lambda x: x.shape[0], mean)
    if lower_triangular is None:
        # initializes the lower triangular matrices
        lower_triangular = jax.tree_map(lambda n: eps * jnp.eye(n), nfeatures)
    # Initialize parameters of the model + optimizer. The optimizer works on
    # the packed (vech) form of the Cholesky factor.
    variational_params = (mean, lower_triangular)
    params = (mean, jax.tree_map(lambda L, n: L[jnp.tril_indices(n)][..., None],
                                 lower_triangular, nfeatures))
    opt_state = optimizer.init(params)
    step_fn = make_vb_gauss_chol_fns(loglikelihood_fn, logprior_fn,
                                     nfeatures, num_samples)

    def iter_fn(all_params, key):
        variational_params, params, opt_state = all_params
        grads = init_grads(nfeatures)
        lower_bound = 0
        grads, lower_bound = step_fn(key, variational_params, grads, data)
        # Match the optimizer's packed column-vector layout before clipping.
        grads = jax.tree_map(lambda x: x[..., None] if len(x.shape) == 1 else x, grads)
        grads = clip(grads)
        updates, opt_state = optimizer.update(grads, opt_state)
        params = optax.apply_updates(params, updates)
        mean, std = params
        # Unpack the updated vech vector back into a lower-triangular matrix.
        variational_params = (mean, jax.tree_map(lambda s, d: vechinv(s, d), std, nfeatures))
        # Lower bound with the Gaussian entropy term log|LL^T| added back.
        cholesky = jax.tree_map(lambda L: jnp.log(jnp.linalg.det(L @ L.T)), variational_params[1])
        lb = jax.tree_map(lambda chol, n: lower_bound / num_samples + 1 / 2 * chol + n / 2,
                          cholesky, nfeatures)
        return (variational_params, params, opt_state), (variational_params, lb)

    keys = jax.random.split(key, niters)
    _, (variational_params, lower_bounds) = jax.lax.scan(iter_fn, (variational_params, params, opt_state), keys)
    lower_bounds = jax.tree_leaves(lower_bounds)[0]
    if smooth:
        # Simple moving average over a trailing window to de-noise the trace.
        def simple_moving_average(cur_sum, i):
            diff = (lower_bounds[i] - lower_bounds[i - window_size]) / window_size
            cur_sum += diff
            return cur_sum, cur_sum

        indices = jnp.arange(window_size, niters)
        cur_sum = jnp.sum(lower_bounds[:window_size]) / window_size
        _, lower_bounds = jax.lax.scan(simple_moving_average, cur_sum, indices)
        lower_bounds = jnp.append(jnp.array([cur_sum]), lower_bounds)
    # Index back into the unsmoothed iteration grid when smoothing shifted it.
    i = jnp.argmax(lower_bounds) + window_size - 1 if smooth else jnp.argmax(lower_bounds)
    best_params = jax.tree_map(lambda x: x[i], variational_params)
    return best_params, lower_bounds
| mit | c79106b2df7c120c1c39478df96fd470 | 36.664596 | 112 | 0.597955 | 3.367018 | false | false | false | false |
probml/pyprobml | deprecated/scripts/mixexpDemo.py | 1 | 3555 | import superimport
import pyprobml_utils as pml
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.special import logsumexp
from sklearn.linear_model import LinearRegression
from scipy.stats import multivariate_normal
# Synthetic "inverse problem" data: x is a noisy, non-monotone function of y,
# so predicting y from x is multimodal — a natural fit for a mixture of experts.
n = 200
np.random.seed(1)
y = np.random.rand(n, 1)
eta = np.random.randn(n,1)*0.05  # observation noise
x = y + 0.3*np.sin(2*3.1415*y) + eta
data = np.concatenate((x, y), axis=1)
K = 3  # number of experts
X = x.reshape(-1, 1)
y = y.reshape(-1, 1)
xtest = (x)
ytest = (y)
plt.figure()
plt.scatter(x, y, edgecolors='blue', color="none")
plt.title('Inverse problem')
plt.savefig('Inverse_problem')
plt.show()
def normalizelogspace(x):
    """Normalize each row of `x` in log space so exp(rows) sum to 1.

    Parameters
    ----------
    x : (N, K) array of log values.

    Returns
    -------
    y : (N, K) array with y = x - logsumexp(x, axis=1) per row.
    Lnew : (N, K) array repeating each row's log normalizer.
    """
    L = logsumexp(x, axis=1).reshape(-1, 1)
    # Generalized: repeat by the actual column count instead of the
    # hard-coded 3 of the original (which only worked for K=3).
    Lnew = np.repeat(L, x.shape[1], axis=1)
    y = x - Lnew
    return y, Lnew
def is_pos_def(x):
    """Return True when every eigenvalue of `x` is strictly positive."""
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues > 0)
K = 3 #nmix
D = np.size(X, axis=1)
N = np.size(X, axis=0)
norm = 50  # ad-hoc scaling applied to the per-expert log likelihoods
max_iter = 39
iteration = 0
r = np.zeros((N, K))  # responsibilities
while iteration < max_iter:
    #E-step :
    # NOTE(review): the expert parameters are re-initialized from fresh
    # random draws on EVERY iteration (seeded by the iteration counter), so
    # this is a sequence of random restarts rather than true EM — confirm
    # whether the initialization was meant to happen once before the loop.
    np.random.seed(iteration)
    Wy = 0.1*np.random.randn(D, K)
    bias = 0.3*np.random.randn(D, K)
    mixweights = np.random.rand(1, K)
    normmw = np.linalg.norm(mixweights)
    mixweights = mixweights/normmw
    sigma2 = 0.1*np.random.randn(1, K)
    q = np.log(mixweights)
    logprior = np.repeat(q, N, axis=0)
    loglik = np.zeros((N, K))
    for k in range(K):
        # Per-expert Gaussian log likelihood of y given the linear mean.
        vecM = X*Wy[:, k] + bias[:, k]
        # NOTE(review): reshape hard-codes the sample size n=200.
        vecM = vecM.reshape(200, )
        cov = sigma2[0, k]
        cov = np.abs(cov)
        vecX = y
        x = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
        x = x /norm
        loglik[:, k] = x
    logpost = loglik + logprior
    logpost, logZ = normalizelogspace(logpost)
    ll = np.sum(logZ)
    post = np.exp(logpost)
    #M-step:
    r = post
    mixweights = np.sum(r, axis=0)/N
    mixweights = mixweights.reshape(1, -1)
    for k in range(K):
        # Weighted least squares per expert, weighted by responsibilities.
        reg = LinearRegression()
        model = reg.fit(X, y, r[:, k])
        Wy[:, k] = model.coef_
        bias[:, k] = model.intercept_
        yhat_ = np.multiply(X, Wy[:, k]) + bias[:, k]
        sigma2[:, k] = np.sum(np.multiply(r[:, k], np.square(y-yhat_))) / sum(r[:, k])
    iteration = iteration + 1
# Mixture predictive moments: mean mu and variance v of the mixture-of-experts
# predictive distribution at every training input.
N = np.size(X, axis=0)
D = np.size(X, axis=1)
K = 3
weights = np.repeat(mixweights, N, axis=0)
muk = np.zeros((N, K))  # per-expert predictive means
vk = np.zeros((N, K))   # per-expert predictive variances
mu = np.zeros((N, ))    # mixture mean
v = np.zeros((N, 1))    # mixture variance (law of total variance)
b = 0.3*np.random.randn(D, K)  # NOTE(review): appears unused below
for k in range(K):
    w = X*Wy[:, k] + bias[:, k]
    w = w.reshape(-1, )
    muk[:, k] = w
    q = np.multiply(weights[:, k], muk[:, k])
    mu = mu + q
    vk[:, k] = sigma2[:, k]
    v = v + np.multiply(weights[:, k], (vk[:, k] + np.square(muk[:, k]))).reshape(-1, 1)
# E[y^2] - E[y]^2 completes the law of total variance.
v = v - np.square(mu).reshape(-1, 1)
# Plot each expert's linear prediction over the data.
plt.figure()
plt.scatter(xtest, y, edgecolors='blue', color="none")
plt.plot(xtest, muk[:, 0])
plt.plot(xtest, muk[:, 1])
plt.plot(xtest, muk[:, 2])
plt.title('Expert-predictions')
pml.save_fig('mixexp_expert_predictions.pdf')
plt.show()

# Plot the gating functions (responsibilities) as a function of y.
plt.figure()
for i in range(K):
    plt.scatter(y, post[:, i])
plt.title('Gating functions')
pml.save_fig('mixexp_gating_functions.pdf')
plt.show()
# MAP (most responsible expert) prediction per data point.
# Fixed: the original shadowed the builtin `map` and had a dead first
# assignment (`np.empty((K, 1))` was overwritten immediately).
mode_idx = np.argmax(post, axis=1).reshape(-1, 1)
yhat = np.empty((N, 1))
for i in range(N):
    # Prediction of the most responsible expert for point i (the "mode").
    yhat[i, 0] = muk[i, mode_idx[i, 0]]
plt.figure()
plt.scatter(xtest, yhat, marker=6, color='black')
plt.scatter(xtest, mu, marker='X', color='red')
plt.scatter(xtest, y, edgecolors='blue', color="none")
plt.title('prediction')
plt.legend(['mode', 'mean'])
pml.save_fig('mixexp_predictions.pdf')
plt.show()
probml/pyprobml | deprecated/scripts/prior_post_pred_binom_pymc3.py | 1 | 2070 | # prior and posterior predctiive for beta binomial
# fig 1.6 of 'Bayeysian Modeling and Computation'
import superimport
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy import stats
from scipy.stats import entropy
from scipy.optimize import minimize
import pyprobml_utils as pml
# Simulate 20 Bernoulli(0.7) observations and fit a Beta(1,1)-Bernoulli model.
np.random.seed(0)
Y = stats.bernoulli(0.7).rvs(20)
with pm.Model() as model:
    θ = pm.Beta("θ", 1, 1)
    y_obs = pm.Binomial("y_obs",n=1, p=θ, observed=Y)
    trace = pm.sample(1000, cores=1, chains=2, return_inferencedata=False)
idata = az.from_pymc3(trace)
# Prior and posterior predictive draws of the 20 binary outcomes.
pred_dists = (pm.sample_prior_predictive(1000, model)["y_obs"],
              pm.sample_posterior_predictive(idata, 1000, model)["y_obs"])
dist=pred_dists[0]
print(dist.shape)
num_success = dist.sum(1)
print(num_success.shape)
# Prior predictive distribution of the number of successes.
# NOTE(review): this first figure is never saved — a pml.savefig call for
# the prior predictive plot looks to be missing; confirm intent.
fig, ax = plt.subplots()
az.plot_dist(pred_dists[0].sum(1), hist_kwargs={"color":"0.5", "bins":range(0, 22)})
ax.set_title(f"Prior predictive distribution",fontweight='bold')
ax.set_xlim(-1, 21)
ax.set_ylim(0, 0.15)
ax.set_xlabel("number of success")
# Posterior predictive distribution of the number of successes.
fig, ax = plt.subplots()
az.plot_dist(pred_dists[1].sum(1), hist_kwargs={"color":"0.5", "bins":range(0, 22)})
ax.set_title(f"Posterior predictive distribution",fontweight='bold')
ax.set_xlim(-1, 21)
ax.set_ylim(0, 0.15)
ax.set_xlabel("number of success")
pml.savefig('Posterior_predictive_distribution.pdf')
# Prior density of θ (draws from the Beta(1,1) prior).
fig, ax = plt.subplots()
az.plot_dist(θ.distribution.random(size=1000), plot_kwargs={"color":"0.5"},
             fill_kwargs={'alpha':1})
ax.set_title("Prior distribution", fontweight='bold')
ax.set_xlim(0, 1)
ax.set_ylim(0, 4)
ax.tick_params(axis='both', pad=7)
ax.set_xlabel("θ")
pml.savefig('Prior_distribution.pdf')
# Posterior density of θ from the MCMC samples.
fig, ax = plt.subplots()
az.plot_dist(idata.posterior["θ"], plot_kwargs={"color":"0.5"},
             fill_kwargs={'alpha':1})
ax.set_title("Posterior distribution", fontweight='bold')
ax.set_xlim(0, 1)
ax.set_ylim(0, 4)
ax.tick_params(axis='both', pad=7)
ax.set_xlabel("θ")
pml.savefig('Posterior_distribution.pdf')
plt.show()
probml/pyprobml | deprecated/scripts/word_embedding_spacy.py | 1 | 3197 |
# Demo of word embeddigns using the Spacy library
# Based on https://spacy.io/usage/vectors-similarity
# and https://nlpforhackers.io/complete-guide-to-spacy/
# Follow installation instructions at https://spacy.io/usage/
# Then run the command below to get a word embedding model (medium sized)
# python -m spacy download en_core_web_md
import superimport
import spacy
import numpy as np
import pandas as pd
from scipy import spatial
# Medium English model with word vectors; pipeline components disabled since
# only tokenization and vectors are needed here.
nlp = spacy.load('en_core_web_md', disable=['tagger','parser','ner']) # Just tokenize
cosine_similarity = lambda x, y: 1 - spatial.distance.cosine(x, y)

# Pairwise similarity: verify spaCy's token.similarity equals cosine similarity
# of the raw vectors.
tokens = nlp(u'dog cat banana') # type spacy.tokens.doc.Doc
N = len(tokens)
S = np.empty((N,N))
S2 = np.empty((N,N))
for i in range(N):
    for j in range(N):
        t1 = tokens[i]
        t2 = tokens[j]
        S[i,j] = t1.similarity(t2)
        S2[i,j] = cosine_similarity(t1.vector, t2.vector)
assert np.isclose(S, S2).all()
df=pd.DataFrame(data=S,columns=tokens,index=tokens)
print(df)
"""
           dog       cat    banana
dog     1.000000  0.801686  0.243276
cat     0.801686  1.000000  0.281544
banana  0.243276  0.281544  1.000000
"""

# The vector embedding of a doc is the average of the vectors of each token
# Each token has a D=300 dimensional embedding
tokens = nlp(u"The cat sat on the mat.")
token_embeds = np.array([t.vector for t in tokens]) # N*D
assert np.isclose(tokens.vector, np.mean(token_embeds,axis=0)).all()

# we can use this for document retrieval
target = nlp("Cats are beautiful animals.")
doc1 = nlp("Dogs are awesome.")
doc2 = nlp("Some gorgeous creatures are felines.")
doc3 = nlp("Dolphins are swimming mammals.")
print(target.similarity(doc1)) # 0.8901765218466683
print(target.similarity(doc2)) # 0.9115828449161616
print(target.similarity(doc3)) # 0.7822956752876101

# Vector space arithmetic
man = nlp.vocab['man'].vector
woman = nlp.vocab['woman'].vector
queen = nlp.vocab['queen'].vector
king = nlp.vocab['king'].vector

# We now need to find the closest vector in the vocabulary to the result of "man" - "woman" + "queen"
maybe_king = man - woman + queen
computed_similarities = []

from time import time
time_start = time() # slow to search through 1,344,233 words
print("searching through {} words for nearest neighbors".format(len(nlp.vocab)))
# Brute-force nearest-neighbor scan over the entire vocabulary.
for word in nlp.vocab:
    if not word.has_vector:
        continue
    similarity = cosine_similarity(maybe_king, word.vector)
    computed_similarities.append((word, similarity))
print('time spent training {:0.3f}'.format(time() - time_start))

# Sort by similarity (descending) and show the top 10 matches.
computed_similarities = sorted(computed_similarities, key=lambda item: -item[1])
print([w[0].text for w in computed_similarities[:10]])
# ['Queen', 'QUEEN', 'queen', 'King', 'KING', 'king', 'KIng', 'KINGS', 'kings', 'Kings']
"""
X = [word.vector for word in nlp.vocab if word.has_vector]
print(np.shape(X)) # (684 755, 300)
N = 10000; D = 300;
X = np.random.rand(N,D)
# datascience book p89 - correct, but gets memory error if N is too large
time_start = time()
sqdst = np.sum((X[:,np.newaxis,:] - X[np.newaxis,:,:]) ** 2, axis=-1)
print(time() - time_start)
i=10;j=200;assert np.isclose(sqdst[i,j], np.sum((X[i,:]-X[j,:])**2))
"""
| mit | 14342106412076568d6b58b1f87e4980 | 29.740385 | 101 | 0.69096 | 2.836735 | false | false | false | false |
winpython/winpython | winpython/_vendor/qtpy/tests/test_patch_qheaderview.py | 3 | 3355 | import sys
import pytest
from qtpy import PYQT6, PYSIDE2, PYSIDE6, QT_VERSION
from qtpy.QtWidgets import QApplication
from qtpy.QtWidgets import QHeaderView
from qtpy.QtCore import Qt
from qtpy.QtCore import QAbstractListModel
def get_qapp(icon_path=None):
    """Return the running QApplication, creating a bare one if none exists.

    `icon_path` is accepted for API compatibility but currently unused.
    """
    existing = QApplication.instance()
    if existing is not None:
        return existing
    return QApplication([''])
@pytest.mark.skipif(
    QT_VERSION.startswith('5.15') or PYSIDE6 or PYQT6 or
    ((PYSIDE2) and sys.version_info.major == 3 and sys.version_info.minor >= 8
     and (sys.platform == 'darwin' or sys.platform.startswith('linux'))
     ),
    reason="Segfaults with Qt 5.15; and PySide2/Python 3.8+ on Mac and Linux")
def test_patched_qheaderview():
    """
    This will test whether QHeaderView has the new methods introduced in Qt5.
    It will then create an instance of QHeaderView and test that no exceptions
    are raised and that some basic behaviour works.
    """
    # The Qt5-style API must exist on the (possibly patched) class.
    assert QHeaderView.sectionsClickable is not None
    assert QHeaderView.sectionsMovable is not None
    assert QHeaderView.sectionResizeMode is not None
    assert QHeaderView.setSectionsClickable is not None
    assert QHeaderView.setSectionsMovable is not None
    assert QHeaderView.setSectionResizeMode is not None

    # setup a model and add it to a headerview
    qapp = get_qapp()
    headerview = QHeaderView(Qt.Horizontal)

    # Fails here on PySide 2 and Python 3.8 due a bug: https://bugreports.qt.io/browse/PYSIDE-1140
    class Model(QAbstractListModel):
        pass
    model = Model()
    headerview.setModel(model)
    assert headerview.count() == 1

    # test it: getters return the right types, and setter/getter round-trips work.
    assert isinstance(headerview.sectionsClickable(), bool)
    assert isinstance(headerview.sectionsMovable(), bool)
    if PYSIDE2:
        assert isinstance(headerview.sectionResizeMode(0),
                          QHeaderView.ResizeMode)
    else:
        assert isinstance(headerview.sectionResizeMode(0), int)

    headerview.setSectionsClickable(True)
    assert headerview.sectionsClickable() == True
    headerview.setSectionsClickable(False)
    assert headerview.sectionsClickable() == False

    headerview.setSectionsMovable(True)
    assert headerview.sectionsMovable() == True
    headerview.setSectionsMovable(False)
    assert headerview.sectionsMovable() == False

    # Global resize-mode round-trips for every ResizeMode value.
    headerview.setSectionResizeMode(QHeaderView.Interactive)
    assert headerview.sectionResizeMode(0) == QHeaderView.Interactive
    headerview.setSectionResizeMode(QHeaderView.Fixed)
    assert headerview.sectionResizeMode(0) == QHeaderView.Fixed
    headerview.setSectionResizeMode(QHeaderView.Stretch)
    assert headerview.sectionResizeMode(0) == QHeaderView.Stretch
    headerview.setSectionResizeMode(QHeaderView.ResizeToContents)
    assert headerview.sectionResizeMode(0) == QHeaderView.ResizeToContents

    # Per-section resize-mode round-trips.
    headerview.setSectionResizeMode(0, QHeaderView.Interactive)
    assert headerview.sectionResizeMode(0) == QHeaderView.Interactive
    headerview.setSectionResizeMode(0, QHeaderView.Fixed)
    assert headerview.sectionResizeMode(0) == QHeaderView.Fixed
    headerview.setSectionResizeMode(0, QHeaderView.Stretch)
    assert headerview.sectionResizeMode(0) == QHeaderView.Stretch
    headerview.setSectionResizeMode(0, QHeaderView.ResizeToContents)
    assert headerview.sectionResizeMode(0) == QHeaderView.ResizeToContents
| mit | f002435e49c53627f330a2cd994bbe21 | 38.470588 | 98 | 0.752012 | 3.821185 | false | true | false | false |
probml/pyprobml | deprecated/scripts/vae_conv_mnist_flax_main.py | 1 | 1247 | from absl import app
from absl import flags
from vae_conv_mnist_flax_lib import VAE_mnist
FLAGS = flags.FLAGS

flags.DEFINE_string(
    'figdir', default="mnist_results",
    # Bug fix: the original help text ("The dataset we are interested to train
    # out vae on") was copy-pasted from another flag and described a dataset.
    help=('Directory in which result figures are saved.')
)
flags.DEFINE_float(
    'learning_rate', default=1e-3,
    help=('The learning rate for the Adam optimizer.')
)
flags.DEFINE_float(
    'kl_coeff', default=1,
    help=('The kl coefficient for loss.')
)
flags.DEFINE_integer(
    'batch_size', default=256,
    help=('Batch size for training.')
)
flags.DEFINE_integer(
    'num_epochs', default=5,
    help=('Number of training epochs.')
)
flags.DEFINE_integer(
    'latents', default=7,
    help=('Number of latent variables.')
)
flags.DEFINE_integer(
    'train_set_size', default=50000,
    # Bug fix: help text previously duplicated the 'latents' description.
    help=('Number of examples in the training set.')
)
flags.DEFINE_integer(
    'test_set_size', default=10000,
    # Bug fix: help text previously duplicated the 'latents' description.
    help=('Number of examples in the test set.')
)
def main(argv):
    """Build a VAE_mnist trainer from the command-line flags and run it."""
    del argv  # unused

    trainer = VAE_mnist(
        FLAGS.figdir,
        FLAGS.train_set_size,
        FLAGS.test_set_size,
        FLAGS.num_epochs,
        FLAGS.batch_size,
        FLAGS.learning_rate,
        FLAGS.kl_coeff,
        FLAGS.latents)
    trainer.main()


if __name__ == '__main__':
    app.run(main)
| mit | 2ee527c7df7c6c221f13aeb67b3672da | 18.793651 | 62 | 0.638332 | 3.23057 | false | false | false | false |
probml/pyprobml | deprecated/scripts/linear_bandit.py | 1 | 4542 | import enum
import jax.numpy as jnp
from jax import lax
from jax import random
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class ExplorationPolicy(enum.Enum):
    """Exploration strategies supported by the linear bandit.

    ``enum.auto()`` assigns the same 1, 2, 3 values as the original explicit
    constants, so existing serialized values remain compatible.
    """

    linear_ucb_policy = enum.auto()
    linear_thompson_sampling_policy = enum.auto()
    linear_epsilon_greedy_policy = enum.auto()
class LinearBandit:
    """Bayesian linear contextual bandit.

    Each arm k has a linear reward model ``r = w_k^T context + noise`` with a
    conjugate Normal-Inverse-Gamma posterior over (w_k, noise variance).
    Actions are selected according to the configured ExplorationPolicy.
    """

    def __init__(self, num_features, num_arms, exploration_policy,
                 eta=6.0, lmbda=0.25, alpha=1.0, epsilon=0):
        """
        Args:
            num_features: dimensionality of the context vectors.
            num_arms: number of arms/actions.
            exploration_policy: a member of ExplorationPolicy.
            eta: prior Gamma shape and rate for the noise precision.
            lmbda: scales the prior weight covariance (Sigma0 = I / lmbda).
            alpha: exploration-bonus multiplier used by the UCB policy.
            epsilon: exploration probability for epsilon-greedy; either a
                float or a zero-argument callable (for schedules).
        """
        self.num_features = num_features
        self.num_arms = num_arms
        self.eta = eta
        self.lmbda = lmbda
        self._alpha = alpha
        self._epsilon = epsilon
        if exploration_policy == ExplorationPolicy.linear_ucb_policy:
            self._policy = self.ucb_policy
        elif exploration_policy == ExplorationPolicy.linear_thompson_sampling_policy:
            self._policy = self.thompson_sampling_policy
        elif exploration_policy == ExplorationPolicy.linear_epsilon_greedy_policy:
            self._policy = self.epsilon_greedy_policy
        else:
            # Bug fix: the original did `raise NotImplemented`, which raises a
            # TypeError because NotImplemented is a constant, not an exception.
            raise NotImplementedError(
                f"Unsupported exploration policy: {exploration_policy}")

    def _get_epsilon(self):
        """Return the current exploration probability (supports schedules)."""
        if callable(self._epsilon):
            return self._epsilon()
        else:
            return self._epsilon

    def init_bel(self, contexts, actions, rewards):
        """Initialize the per-arm belief, optionally replaying past data.

        Returns the belief tuple ``(mu, Sigma, a, b)`` with shapes
        (num_arms, num_features), (num_arms, num_features, num_features),
        (num_arms,), (num_arms,).
        """
        mu = jnp.zeros((self.num_arms, self.num_features))
        Sigma = jnp.eye(self.num_features) * \
            jnp.ones((self.num_arms, 1, 1)) / self.lmbda
        a = self.eta * jnp.ones((self.num_arms,))
        b = self.eta * jnp.ones((self.num_arms,))
        bel = (mu, Sigma, a, b)

        def update(bel, cur):  # sequential (one-step) replay of history
            context, action, reward = cur
            bel = self.update_bel(bel, context, action, reward)
            return bel, None

        if contexts and actions and rewards:
            assert len(contexts) == len(actions) == len(rewards)
            bel, _ = lax.scan(update, bel, (contexts, actions, rewards))
        return bel

    def update_bel(self, bel, context, action, reward):
        """Conjugate posterior update; only the chosen arm's belief changes."""
        mu, Sigma, a, b = bel
        mu_k, Sigma_k = mu[action], Sigma[action]
        Lambda_k = jnp.linalg.inv(Sigma_k)
        a_k, b_k = a[action], b[action]
        # weight params (precision-space rank-1 update)
        Lambda_update = jnp.outer(context, context) + Lambda_k
        Sigma_update = jnp.linalg.inv(Lambda_update)
        mu_update = Sigma_update @ (Lambda_k @ mu_k + context * reward)
        # noise params
        a_update = a_k + 1 / 2
        b_update = b_k + (reward ** 2 + mu_k.T @ Lambda_k @
                          mu_k - mu_update.T @ Lambda_update @ mu_update) / 2
        # Update only the chosen action at time t
        mu = mu.at[action].set(mu_update)
        Sigma = Sigma.at[action].set(Sigma_update)
        a = a.at[action].set(a_update)
        b = b.at[action].set(b_update)
        bel = (mu, Sigma, a, b)
        return bel

    def _sample_params(self, key, bel):
        """Draw one (noise variance, weight vector) sample per arm."""
        mu, Sigma, a, b = bel
        sigma_key, w_key = random.split(key, 2)
        sigma2_samp = tfd.InverseGamma(
            concentration=a, scale=b).sample(seed=sigma_key)
        covariance_matrix = sigma2_samp[:, None, None] * Sigma
        w = tfd.MultivariateNormalFullCovariance(
            loc=mu, covariance_matrix=covariance_matrix).sample(seed=w_key)
        return w

    def thompson_sampling_policy(self, key, bel, context):
        """Sample weights from the posterior and act greedily on the sample."""
        w = self._sample_params(key, bel)
        predicted_reward = jnp.einsum("m,km->k", context, w)
        action = predicted_reward.argmax()
        return action

    def ucb_policy(self, key, bel, context):
        """Upper-confidence-bound action selection."""
        mu, Sigma, a, b = bel
        # NOTE(review): elementwise Sigma * Sigma looks like it may be intended
        # as a noise-scaled covariance (e.g. (b / a) * Sigma) — confirm against
        # the derivation before changing.
        covariance_matrix = Sigma * Sigma
        predicted_reward = jnp.einsum("m,km->k", context, mu)
        predicted_variance = jnp.einsum(
            "n,knm,m->k", context, covariance_matrix, context)
        rewards_for_argmax = predicted_reward + \
            self._alpha * jnp.sqrt(predicted_variance)
        return rewards_for_argmax.argmax()

    def greedy_policy(self, key, bel, context):
        """Pick the arm with the highest posterior-mean predicted reward."""
        mu, Sigma, a, b = bel
        predicted_reward = jnp.einsum("m,km->k", context, mu)
        return predicted_reward.argmax()

    def epsilon_greedy_policy(self, key, bel, context):
        """With probability epsilon explore uniformly, otherwise act greedily."""
        rng = random.uniform(key)
        if rng < self._get_epsilon():
            # Bug fix: the original returned random.uniform(key, maxval=num_arms),
            # a float, which is not a valid integer arm index.
            return random.randint(key, (), 0, self.num_arms)
        else:
            return self.greedy_policy(key, bel, context)

    def choose_action(self, key, bel, context):
        """Select an action for the given context via the configured policy."""
        return self._policy(key, bel, context)
| mit | 3a766f8b9fbf19651f5aaf655a4d70f5 | 32.895522 | 85 | 0.594232 | 3.518203 | false | false | false | false |
probml/pyprobml | deprecated/scripts/unigauss_vb_demo.py | 1 | 6165 | # Variational Bayes (VB) for univariate gaussian
# based on: https://github.com/probml/pmtk3/blob/master/demos/unigaussVbDemo.m
# Author: Gerardo Durán-Martín (@gerdm)
import superimport
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
from jax import random
from jax.scipy import stats
from dataclasses import dataclass
from jax.scipy.special import gammaln
import pyprobml_utils as pml
@dataclass
class GaussGamma:
    """Parameter container for a Normal-Gamma distribution.

    Fields: ``mu`` (Gaussian location), ``beta`` (precision scaling of the
    Gaussian), ``a`` (Gamma shape), ``b`` (Gamma rate).
    """

    mu: float
    beta: float
    a: float
    b: float
def generate_data(key, N):
    """Draw N standard-normal samples and standardize to zero mean, unit std."""
    samples = random.normal(key, (N,))
    return (samples - samples.mean()) / samples.std()
def gaussian_gamma_pdf(mu, lmbda, params):
    """Density of a univariate Gaussian-Gamma distribution at (mu, lmbda).

    Parameters
    ----------
    mu: float
        Mean value at which to evaluate the density.
    lmbda: float
        Precision value at which to evaluate the density.
    params: GaussGamma
        Distribution parameters (mu, beta, a, b).
    """
    gauss_scale = 1 / jnp.sqrt(params.beta * lmbda)
    gauss_term = stats.norm.pdf(mu, loc=params.mu, scale=gauss_scale)
    gamma_term = stats.gamma.pdf(lmbda, params.a, scale=1 / params.b)
    return gauss_term * gamma_term
def vb_est_pdf(mu, lmbda, params):
    """Variational-Bayes factorized density q(mu) * q(lmbda) at (mu, lmbda).

    Parameters
    ----------
    mu: float
        Mean value at which to evaluate the density.
    lmbda: float
        Precision value at which to evaluate the density.
    params: GaussGamma
        Variational parameters (mu, beta, a, b).
    """
    gauss_term = stats.norm.pdf(mu, params.mu, 1 / jnp.sqrt(params.beta))
    gamma_term = stats.gamma.pdf(lmbda, params.a, scale=1 / params.b)
    return gauss_term * gamma_term
# vmapped functions for contour plots: the double vmap maps first over the mu
# axis, then over the lambda axis, so each call evaluates the density on a full
# (lambda, mu) grid in one shot.
gaussian_gamma_pdf_vmap = jax.vmap(gaussian_gamma_pdf, in_axes=(0, None, None))
gaussian_gamma_pdf_vmap = jax.vmap(gaussian_gamma_pdf_vmap, in_axes=(None, 0, None))
vb_est_pdf_vmap = jax.vmap(vb_est_pdf, in_axes=(0, None, None))
vb_est_pdf_vmap = jax.vmap(vb_est_pdf_vmap, in_axes=(None, 0, None))
def plot_gauss_exact_vb(ax, exact_params, vb_params, mu_min=-1, mu_max=1,
                        lambda_min=0, lambda_max=2, npoints=500, levels=5):
    """Overlay contours of the exact posterior (orange) and its VB estimate (blue)."""
    mu_grid = jnp.linspace(mu_min, mu_max, npoints)
    lambda_grid = jnp.linspace(lambda_min, lambda_max, npoints)
    exact_density = gaussian_gamma_pdf_vmap(mu_grid, lambda_grid, exact_params)
    vb_density = vb_est_pdf_vmap(mu_grid, lambda_grid, vb_params)
    cs_exact = ax.contour(mu_grid, lambda_grid, exact_density, colors="tab:orange", levels=levels)
    cs_vb = ax.contour(mu_grid, lambda_grid, vb_density, colors="tab:blue", levels=levels)
    ax.set_xlabel(r"$\mu$", fontsize=15)
    ax.set_ylabel(r"$\tau$", fontsize=15)
    # Label a single contour collection from each family so the legend shows
    # one entry per distribution.
    cs_exact.collections[0].set_label("exact")
    cs_vb.collections[0].set_label("VB")
    ax.legend()
    ax.axis("equal")
def vb_unigauss_learn(data, params_prior, params_init, eps=1e-6):
    """
    Variational Bayes (VB) procedure for estimating the parameters
    of a univariate gaussian distribution

    Parameters
    ----------
    data: jnp.ndarray
        Data to estimate the parameters of the distribution
    params_prior: GaussGamma
        Prior parameters of the distribution
    params_init: GaussGamma
        Initial parameters of the VB estimation
    eps: float
        Relative tolerance for the convergence of the VB procedure

    Returns
    -------
    * The VB-estimated parameters
    * History of the estimated parameters: one snapshot before each iteration,
      one after the q(mu) update, one after the q(tau) update, and a final one
      at convergence
    """
    # Bug fix: the original read `N` from a module-level global that is only
    # defined under `if __name__ == "__main__"`; derive it from the data so
    # the function is usable as a library routine.
    N = data.shape[0]
    lower_bound = -jnp.inf
    xbar = jnp.mean(data)

    mu0, beta0 = params_prior.mu, params_prior.beta
    a0, b0 = params_prior.a, params_prior.b

    muN, betaN = params_init.mu, params_init.beta
    aN, bN = params_init.a, params_init.b

    converge = False
    params_hist = []
    while not converge:
        est_params = GaussGamma(muN, betaN, aN, bN)
        params_hist.append(est_params)
        # update q(mu): Gaussian factor, using E[tau] from the current Gamma
        e_tau = aN / bN
        muN = (beta0 * mu0 + N * xbar) / (beta0 + N)
        betaN = (beta0 + N) * e_tau
        est_params = GaussGamma(muN, betaN, aN, bN)
        params_hist.append(est_params)
        # update q(tau): Gamma factor, using moments of mu under q(mu)
        # NOTE(review): uses xbar for E[mu] where muN might be expected; the
        # two coincide when beta0 == 0 (the demo's prior) — confirm otherwise.
        e_mu = xbar
        e_mu2 = 1 / betaN + muN ** 2
        aN = a0 + (N + 1) / 2
        bN = b0 + beta0 * (e_mu2 + mu0 ** 2 - 2 * mu0 * e_mu) / 2 + (data ** 2 + e_mu2 - 2 * e_mu * data).sum() / 2
        est_params = GaussGamma(muN, betaN, aN, bN)
        params_hist.append(est_params)

        # Surrogate convergence score (not the full ELBO): stop when its
        # relative change falls below eps.
        lower_bound_new = - jnp.log(betaN) - gammaln(aN) * jnp.log(bN)
        if abs(lower_bound_new / lower_bound - 1) < eps:
            converge = True
            est_params = GaussGamma(muN, betaN, aN, bN)
            params_hist.append(est_params)
        else:
            lower_bound = lower_bound_new

    return est_params, params_hist
if __name__ == "__main__":
    # Hide the top/right spines in every figure produced below.
    plt.rcParams["axes.spines.right"] = False
    plt.rcParams["axes.spines.top"] = False

    key = random.PRNGKey(3141)
    N = 10  # number of (standardized) observations
    data = generate_data(key, N)

    # prior parameters of the distribution (all zero -> uninformative prior)
    mu0, beta0, a0, b0 = jnp.zeros(4)
    params_prior = GaussGamma(mu0, beta0, a0, b0)

    # exact parameters of the posterior distribution
    # (according to the specified priors with values zero)
    x_bar = jnp.mean(data)
    mu_post = jnp.mean(data)
    beta_post = N
    a_post = N / 2
    b_post = jnp.sum((data - mu_post) ** 2) / 2
    posterior_params = GaussGamma(mu_post, beta_post, a_post, b_post)

    # variational-bayes (VB) estimation of the posterior parameters,
    # started from a deliberately poor initial guess
    params_vb_init = GaussGamma(mu=0.5, beta=5, a=2.5, b=1)
    _, params_hist = vb_unigauss_learn(data, params_prior, params_vb_init)

    # We take the indices of
    # 1. our initial estimate,
    # 2. after q_mu update in the first iteration
    # 3. after q_tau update in the second iteration
    # 4. after convergence
    params_ix = [0, 1, 2, -1]
    for i, ix_param in enumerate(params_ix):
        fig, ax = plt.subplots()
        vb_params = params_hist[ix_param]
        plot_gauss_exact_vb(ax, posterior_params, vb_params)
        pml.savefig(f"unigauss_vb_{i}.pdf")
    plt.show()
| mit | 8357758b435429260b5ef9b267fc0ca1 | 30.443878 | 115 | 0.631024 | 3.106351 | false | false | false | false |
probml/pyprobml | deprecated/scripts/mix_gauss_em_faithful.py | 1 | 2489 | #!pip install distrax
'''
Visualize fitting a mixture of Gaussians by em algorithm to the old faithful dataset
reproduce Bishop fig 9.8
Author: Gerardo Durán-Martín, Aleyna Kara(@karalleyna)
'''
import superimport
import numpy as np
import jax.numpy as jnp
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from mix_gauss_lib import GMM
from matplotlib.colors import ListedColormap
import requests
from io import BytesIO
# 64-bit precision is needed to attain the same results when scipy.stats.multivariate_normal is used.
from jax.config import config
config.update("jax_enable_x64", True)
def create_colormap():
    """Build a 256-level colormap linearly interpolating between two RGB colors."""
    n_levels = 256
    rgba = np.ones((n_levels, 4))  # alpha channel stays at 1
    rgba[:, 0] = np.linspace(31 / 256, 214 / 256, n_levels)
    rgba[:, 1] = np.linspace(119 / 256, 39 / 256, n_levels)
    rgba[:, 2] = np.linspace(180 / 256, 40 / 256, n_levels)
    return ListedColormap(rgba)
def main():
    """Fit a 2-component GMM to the Old Faithful data via EM and plot snapshots."""
    cmap = create_colormap()
    colors = ["tab:red", "tab:blue"]

    url = 'https://raw.githubusercontent.com/probml/probml-data/main/data/faithful.txt'
    raw = BytesIO(requests.get(url).content)
    observations = np.loadtxt(raw)
    # Standardize both columns to zero mean / unit variance.
    observations = (observations - observations.mean(axis=0)) / (observations.std(axis=0))

    # Symmetric starting point: equal weights, mirrored means, small spherical covariances.
    mixing_coeffs = jnp.array([0.5, 0.5])
    means = jnp.vstack([jnp.array([-1.5, 1.5]),
                        jnp.array([1.5, -1.5])])
    covariances = jnp.array([jnp.eye(2) * 0.1,
                             jnp.eye(2) * 0.1])

    gmm = GMM(mixing_coeffs, means, covariances)
    num_of_iters = 50
    history = gmm.fit_em(observations, num_of_iters=num_of_iters)
    ll_hist, mix_dist_probs_hist, comp_dist_loc_hist, comp_dist_cov_hist, responsibility_hist = history

    # Snapshot iterations shown in the 2x3 grid.
    hist_index = [0, 10, 25, 30, 35, 40]
    fig, axes = plt.subplots(2, 3)
    axes = axes.ravel()
    for iteration, axis in zip(hist_index, axes):
        means_i = comp_dist_loc_hist[iteration]
        covs_i = comp_dist_cov_hist[iteration]
        resp_i = responsibility_hist[iteration]
        if iteration == 0:
            # Before the first E step there are no responsibilities yet.
            resp_i = np.ones_like(resp_i)
        color_map = cmap if iteration > 0 else "Dark2"
        gmm.plot(observations, means_i, covs_i, resp_i, cmap=color_map, colors=colors, ax=axis)
        axis.set_title(f"Iteration {iteration}")

    plt.tight_layout()
    pml.savefig('gmm_faithful.pdf')
    plt.show()


if __name__ == "__main__":
    main()
| mit | 3cd0a34872bd77959915f92c8b0f16e0 | 28.963855 | 105 | 0.646562 | 3.120452 | false | false | false | false |
probml/pyprobml | deprecated/scripts/svm_regression_1d.py | 1 | 2154 |
# SVM for regression in 1d
# Code is based on
# https://github.com/ageron/handson-ml2/blob/master/05_support_vector_machines.ipynb
import superimport
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR
from sklearn.svm import SVR
def plot_svm_regression(svm_reg, X, y, axes):
    """Plot an SVR fit: prediction curve, epsilon tube, support vectors, data."""
    grid = np.linspace(axes[0], axes[1], 100).reshape(100, 1)
    predictions = svm_reg.predict(grid)
    plt.plot(grid, predictions, "k-", linewidth=2, label=r"$\hat{y}$")
    # Dashed lines mark the epsilon-insensitive tube around the prediction.
    plt.plot(grid, predictions + svm_reg.epsilon, "k--")
    plt.plot(grid, predictions - svm_reg.epsilon, "k--")
    # Highlight the support vectors behind the raw data points.
    plt.scatter(X[svm_reg.support_], y[svm_reg.support_], s=180, facecolors='#FFAAAA')
    plt.plot(X, y, "bo")
    plt.xlabel(r"$x_1$", fontsize=18)
    #plt.legend(loc="upper left", fontsize=18)
    plt.axis(axes)
# Synthetic 1-D regression data: cubic polynomial plus Gaussian noise.
np.random.seed(42)
m = 100  # number of training points
X = 2 * np.random.rand(m, 1) - 1  # inputs uniform in [-1, 1)
#y = (0.2 + 0.1 * X + 0.5 * X**2 + np.random.randn(m, 1)/10).ravel()
y = (0.2 + 0.1 * X + 0.5 * X**2 + 0.1 * X**3 + np.random.randn(m, 1)/10).ravel()

# For each epsilon, compare a lightly regularized (large C) fit with a
# heavily regularized (small C) one, side by side.
epsilons = [0.1, 0.05]
eps_names = ['0p1', '0p05']  # filename-safe spellings of the epsilon values
for i, eps in enumerate(epsilons):
    #svm_poly_reg1 = SVR(kernel="poly", degree=5, C=1e3, epsilon=eps, gamma="scale")
    #svm_poly_reg2 = SVR(kernel="poly", degree=5, C=1e-3, epsilon=eps, gamma="scale")
    svm_reg1 = SVR(kernel="rbf", gamma=1, C=100, epsilon=eps)  # weak regularization
    svm_reg2 = SVR(kernel="rbf", gamma=1, C=0.01, epsilon=eps)  # strong regularization
    svm_reg1.fit(X, y)
    svm_reg2.fit(X, y)
    fig, axes = plt.subplots(ncols=2, figsize=(9, 4), sharey=True)
    plt.sca(axes[0])
    plot_svm_regression(svm_reg1, X, y, [-1, 1, 0, 1])
    plt.title(r"$C={}, \epsilon = {}$".format(svm_reg1.C, svm_reg1.epsilon), fontsize=18)
    plt.ylabel(r"$y$", fontsize=18, rotation=0)
    plt.sca(axes[1])
    plot_svm_regression(svm_reg2, X, y, [-1, 1, 0, 1])
    plt.title(r"$C={}, \epsilon = {}$".format(svm_reg2.C, svm_reg2.epsilon), fontsize=18)
    fname = '../figures/svm_regression_e{}.pdf'.format(eps_names[i])
    plt.savefig(fname, dpi=300)
    plt.show()
| mit | a62f3161d86bb0043988f81fc84f2c05 | 33.190476 | 89 | 0.631383 | 2.525205 | false | false | false | false |
probml/pyprobml | deprecated/scripts/gibbs_gauss_demo.py | 1 | 1440 | # Illustration of gibbs sampling for 2-dim Gaussian
# Author: Gerardo Durán-Martín
# Translated from gibbsGaussDemo.m
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal, norm
import pyprobml_utils as pml
# Target: a strongly correlated 2-D Gaussian.
μ = np.zeros(2)
Σ = np.array([[1, 0.99], [0.99, 1]])
mvn = multivariate_normal(μ, Σ)

# Plot one contour of the multivariate normal
X = np.mgrid[-3:3:0.01, -3:3:0.01]
density = np.apply_along_axis(mvn.pdf, 0, X)

# Gibbs-Sampling path: axis-aligned moves alternating between the two
# coordinates (hand-picked points, scaled down by 3).
blue_seq = np.array([
    [-1/2,-1],
    [-1/2,0],
    [1,0],
    [1,1],
    [-1/2,1],
    [-1/2,1/2],
    [1.5,1/2],
    [1.5,1.5],
]) / 3

# Compute marginal parameters x0|x1 (conditional Gaussian formulas)
x0_range = np.arange(-2, 2, 0.01)
x0_obs = 0.7
Σ0_cond = Σ[0,0] - Σ[0, 1] * Σ[1, 0] / Σ[1, 1]
μ0_cond = μ[0] + Σ[0, 1] * (x0_obs - μ[1]) / Σ[1, 1]

plt.plot(*blue_seq.T)
plt.contour(*X, density, levels=[0.07], colors="tab:red")
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.scatter(0, 0, marker="x", c="tab:red", s=300)
# Annotate the long (L) and short (l) length scales of the correlated Gaussian.
plt.text(0, 1.7, "L", size=11)
plt.text(1.2, -2, "l", size=11)
plt.annotate("", xy=(-2.5, 1.5), xytext=(2.5, 1.5), arrowprops=dict(arrowstyle='<->'))
plt.annotate("", xy=(0.5, -2), xytext=(1, -2), arrowprops=dict(arrowstyle='<->'))
# Scaled down and shifted marginal gaussian
plt.plot(x0_range, norm(μ0_cond, np.sqrt(Σ0_cond)).pdf(x0_range)* 0.3 - 3, c="tab:green")
plt.tight_layout()
# NOTE(review): the output name looks copy-pasted from another demo
# (this is a Gibbs-sampling figure, not a GMM one) — confirm before renaming.
pml.savefig('gmm_singularity.pdf')
plt.show()
probml/pyprobml | deprecated/scripts/spectral_clustering_demo.py | 1 | 3806 | import superimport
import itertools
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eigh
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import rbf_kernel
import pyprobml_utils as pml
plt.style.use('classic')
def spectral_clustering_demo():
    """Compare k-means with spectral clustering on circle and spiral datasets."""
    np.random.seed(0)
    num_clusters = 2
    for data_type, data in (('circle', sample_circle(num_clusters)),
                            ('spiral', sample_spiral())):
        # Baseline: k-means directly on the raw coordinates.
        kmeans = KMeans(n_clusters=num_clusters, random_state=0)
        kmeans.fit(data)
        assignments = kmeans.predict(data)
        plot_data(data, assignments, 'k-means clustering', data_type)

        # Affinity matrix from an RBF kernel with bandwidth sigma.
        sigma = 0.1
        gamma = 1 / (2 * sigma ** 2)
        W = rbf_kernel(data, gamma=gamma)
        # Symmetric normalization D^{-1/2} W D^{-1/2} via row and column scaling.
        d = np.sum(W, 1, keepdims=True)
        sqrt_d = np.sqrt(d)
        normalized_W = (W / sqrt_d) / sqrt_d.T
        paranoid_assert(W, normalized_W, False)

        # We select the largest eigen values of normalized_W, rather
        # than the smallest eigenvalues of I - normalized_W. The two
        # problems are equivalent. The eigen values can be converted
        # between the two problems via `1 - eigen_values`. The eigen
        # vectors are the same between both problems.
        # NOTE(review): scipy.linalg.eigh's `eigvals` keyword is deprecated in
        # modern SciPy in favor of `subset_by_index` — confirm the pinned version.
        eigen_values, eigen_vectors = eigh(normalized_W,
                                           # Get only the top num_clusters eigenvalues
                                           eigvals=(data.shape[0] - num_clusters, data.shape[0]-1))
        # Row-normalize the spectral embedding, then cluster it with k-means.
        eigen_vectors = eigen_vectors / np.linalg.norm(eigen_vectors, axis=1, keepdims=True)
        kmeans.fit(eigen_vectors)
        assignments = kmeans.predict(eigen_vectors)
        plot_data(data, assignments, 'spectral clustering', data_type)
    plt.show()
def paranoid_assert(W, normalized_W, enable):
    """Sanity-check the graph Laplacian and the symmetric normalization of W.

    Args:
        W: (n, n) affinity matrix.
        normalized_W: (n, n) matrix expected to equal D^{-1/2} W D^{-1/2}.
        enable: when False, skip all checks (cheap no-op in production runs).
    """
    if not enable:
        return
    D = np.diag(np.sum(W, 1))
    L = D - W
    D_inv_sqrt = np.diag(1 / np.diag(np.sqrt(D)))
    np.testing.assert_almost_equal(np.sum(L, 1), 0, err_msg="Rows of Laplacian must sum to 0.")
    # Bug fix: the original compared against the *elementwise* product
    # D_inv_sqrt * W * D_inv_sqrt (which zeroes every off-diagonal entry) and
    # hid the mismatch behind atol=1. The normalization is the matrix product
    # D^{-1/2} @ W @ D^{-1/2}, checked here with a meaningful tolerance.
    np.testing.assert_allclose(normalized_W, D_inv_sqrt @ W @ D_inv_sqrt, rtol=0, atol=1e-8)
def sample_circle(num_clusters):
    """Sample noisy points on concentric circles, one ring per cluster.

    Ring k has radius k + 1 with Gaussian radial noise; 500 points per ring.
    Returns an array of shape (num_clusters * 500, 2).
    """
    points_per_cluster = 500
    bandwidth = 0.1

    rows = []
    for k in range(num_clusters):
        base_radius = k + 1
        for _ in range(points_per_cluster):
            # Same RNG call order as the original: angle first, then radius.
            angle = 2 * np.pi * np.random.uniform()
            radius = base_radius + np.random.randn() * bandwidth
            rows.append(pol2cart(angle, radius))
    return np.array(rows)
def pol2cart(theta, rho):
    """Convert polar coordinates (angle theta, radius rho) to Cartesian (x, y)."""
    return rho * np.cos(theta), rho * np.sin(theta)
def sample_spiral():
    """Sample two interleaved noisy spiral arms (hard-coded to 2 clusters).

    The second arm is the point-wise negation of the first.
    Returns an array of shape (1000, 2).
    """
    points_per_cluster = 500
    bandwidth = 0.1
    # Parameter t runs from 1/500 to 1 along the arm (float32, as before).
    t = np.arange(1, points_per_cluster + 1).astype(np.float32) / points_per_cluster
    arm = np.empty((points_per_cluster, 2))
    # Same RNG call order as the original: x-noise first, then y-noise.
    arm[:, 0] = (4 * t + 1) * np.cos(2 * np.pi * t) + np.random.randn(points_per_cluster) * bandwidth
    arm[:, 1] = (4 * t + 1) * np.sin(2 * np.pi * t) + np.random.randn(points_per_cluster) * bandwidth
    return np.vstack((arm, -arm))
def plot_data(data, assignments, title, data_type):
    """Scatter-plot both clusters (0 = red, 1 = blue) and save the figure."""
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    # Plot cluster 0 first, then cluster 1 — same draw order as before.
    for cluster, color in ((0, 'r'), (1, 'b')):
        mask = assignments == cluster
        axis.plot(data[mask, 0], data[mask, 1], 'o', color=color)
    axis.set_xlabel('x')
    axis.set_ylabel('y')
    axis.axis('square')
    axis.grid(True)
    axis.set_title(title)
    plt.tight_layout()
    pml.savefig(f"{data_type}_{title.replace(' ', '_')}.pdf")


if __name__ == '__main__':
    spectral_clustering_demo()
| mit | 405159cc06a82c4af6c376530008a56d | 35.247619 | 99 | 0.607725 | 3.275387 | false | false | false | false |
probml/pyprobml | deprecated/vae/models/info_vae.py | 1 | 4990 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
def compute_kernel(x1: torch.Tensor, x2: torch.Tensor, kernel_type: str = "rbf") -> torch.Tensor:
    """Evaluate an (N, N) pairwise kernel matrix between the rows of x1 and x2.

    Both inputs are expected to share the first dimension N; they are
    broadcast so that x1 varies along rows and x2 along columns before the
    chosen kernel is applied.
    """
    n = x1.size(0)
    d = x1.size(1)
    # Broadcast to every (row_i, row_j) pair.
    left = x1.unsqueeze(-2).expand(n, n, d)
    right = x2.unsqueeze(-3).expand(n, n, d)

    if kernel_type == "rbf":
        return compute_rbf(left, right)
    if kernel_type == "imq":
        return compute_inv_mult_quad(left, right)
    raise ValueError("Undefined kernel type.")
def compute_rbf(x1: torch.Tensor, x2: torch.Tensor, latent_var: float = 2.0, eps: float = 1e-7) -> torch.Tensor:
    """RBF (Gaussian) kernel between x1 and x2.

    The bandwidth is 2 * z_dim * latent_var, where z_dim is the size of the
    last dimension. `eps` is accepted for signature compatibility but unused.
    """
    bandwidth = 2.0 * x2.size(-1) * latent_var
    mean_sq_dist = (x1 - x2).pow(2).mean(-1)
    return torch.exp(-mean_sq_dist / bandwidth)
def compute_inv_mult_quad(
    x1: torch.Tensor, x2: torch.Tensor, latent_var: float = 2.0, eps: float = 1e-7
) -> torch.Tensor:
    """Inverse multiquadric kernel summed over off-diagonal pairs.

    k(a, b) = C / (C + ||a - b||^2) with C = (2 / z_dim) * latent_var; the
    diagonal (self-similarity) terms are subtracted from the total.
    """
    C = (2 / x2.size(-1)) * latent_var
    kernel = C / (eps + C + (x1 - x2).pow(2).sum(dim=-1))
    # Exclude the diagonal from the aggregate.
    return kernel.sum() - kernel.diag().sum()
def MMD(prior_z: torch.Tensor, z: torch.Tensor):
    """Biased maximum-mean-discrepancy estimate between prior and posterior samples."""
    k_prior = compute_kernel(prior_z, prior_z)
    k_post = compute_kernel(z, z)
    k_cross = compute_kernel(prior_z, z)
    return k_prior.mean() + k_post.mean() - 2 * k_cross.mean()
def kl_divergence(mean, logvar):
    """Mean per-element KL(q || N(0, I)) for a diagonal Gaussian posterior."""
    return -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())
def loss(config, x, x_hat, z, mu, logvar):
    """Info-VAE objective: reconstruction + weighted KL + weighted MMD terms.

    The weights come from config["alpha"] and config["beta"]; the MMD term is
    computed against fresh standard-normal samples matching z's shape.
    """
    reconstruction = F.mse_loss(x_hat, x, reduction="mean")
    kld = kl_divergence(mu, logvar)
    mmd = MMD(torch.randn_like(z), z)
    alpha, beta = config["alpha"], config["beta"]
    return reconstruction + (1 - alpha) * kld + (alpha + beta - 1) * mmd
class Encoder(nn.Module):
    """Convolutional encoder mapping an image to Gaussian posterior parameters.

    A stack of stride-2 Conv/BatchNorm/LeakyReLU blocks halves the spatial
    size at each stage; the flattened features feed two linear heads that
    produce the posterior mean and log-variance.
    """

    def __init__(self, in_channels: int = 3, hidden_dims: Optional[list] = None, latent_dim: int = 256):
        super(Encoder, self).__init__()
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        blocks = []
        channels = in_channels
        for width in hidden_dims:
            blocks.append(
                nn.Sequential(
                    nn.Conv2d(channels, out_channels=width, kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(width),
                    nn.LeakyReLU(),
                )
            )
            channels = width
        self.encoder = nn.Sequential(*blocks)
        # After five stride-2 convolutions a 64x64 input is 2x2, hence the *4.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

    def forward(self, x):
        """Return (mu, log_var) for a batch of images of shape (B, C, 64, 64)."""
        features = torch.flatten(self.encoder(x), start_dim=1)
        return self.fc_mu(features), self.fc_var(features)
class Decoder(nn.Module):
    """Convolutional decoder mapping a latent vector back to a 3-channel image.

    Mirrors Encoder: a linear projection to a 2x2 feature map followed by a
    stack of stride-2 transposed convolutions and a sigmoid output layer.
    """

    def __init__(self, hidden_dims: Optional[list] = None, latent_dim: int = 256):
        """
        Args:
            hidden_dims: channel widths in encoder (low-to-high) order;
                defaults to [32, 64, 128, 256, 512]. The list is copied, so
                the caller's argument is no longer mutated.
            latent_dim: dimensionality of the latent vector.
        """
        super(Decoder, self).__init__()
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        else:
            # Bug fix: the original called hidden_dims.reverse() on the
            # caller's list, reversing it in place as a side effect.
            hidden_dims = list(hidden_dims)
        hidden_dims.reverse()

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[0] * 4)

        modules = []
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(
                        hidden_dims[i], hidden_dims[i + 1], kernel_size=3, stride=2, padding=1, output_padding=1
                    ),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU(),
                )
            )
        self.decoder = nn.Sequential(*modules)
        # Final upsample back to full resolution plus an RGB projection in [0, 1].
        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1], hidden_dims[-1], kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, z):
        """Decode latent codes of shape (B, latent_dim) into images.

        NOTE: the reshape hard-codes a 512x2x2 feature map, so custom
        hidden_dims must still end (encoder order) with 512.
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result
| mit | 91290df8d97f3b4bba4c271036e9ee59 | 29.426829 | 119 | 0.559118 | 3.158228 | false | false | false | false |
probml/pyprobml | deprecated/vae/utils/interpolation.py | 1 | 3470 | import torch
import numpy as np
import pandas as pd
from einops import rearrange
from typing import Callable
from torchvision.utils import make_grid
def get_imgs_and_attr(batch):
imgs, attr = batch
df = pd.DataFrame(
attr.numpy(),
columns=[
"5_o_Clock_Shadow",
"Arched_Eyebrows",
"Attractive",
"Bags_Under_Eyes",
"Bald",
"Bangs",
"Big_Lips",
"Big_Nose",
"Black_Hair",
"Blond_Hair",
"Blurry",
"Brown_Hair",
"Bushy_Eyebrows",
"Chubby",
"Double_Chin",
"Eyeglasses",
"Goatee",
"Gray_Hair",
"Heavy_Makeup",
"High_Cheekbones",
"Male",
"Mouth_Slightly_Open",
"Mustache",
"Narrow_Eyes",
"No_Beard",
"Oval_Face",
"Pale_Skin",
"Pointy_Nose",
"Receding_Hairline",
"Rosy_Cheeks",
"Sideburns",
"Smiling",
"Straight_Hair",
"Wavy_Hair",
"Wearing_Earrings",
"Wearing_Hat",
"Wearing_Lipstick",
"Wearing_Necklace",
"Wearing_Necktie",
"Young",
],
)
return imgs, df
def vector_of_interest(vae, batch, feature_of_interest="Male"):
imgs, attr = get_imgs_and_attr(batch)
id = np.array(attr.index)
get_id_of_all_absent = id[attr[feature_of_interest] == 0]
get_id_of_all_present = id[attr[feature_of_interest] == 1]
present = imgs[get_id_of_all_present]
absent = imgs[get_id_of_all_absent]
z_present = vae.det_encode(present).mean(axis=0)
z_absent = vae.det_encode(absent).mean(axis=0)
label_vector = z_present - z_absent
return label_vector, present, absent
def get_interpolation(interpolation):
"""
interpolation: can accept either string or function
"""
if interpolation == "spherical":
return slerp
elif interpolation == "linear":
return lerp
elif callable(interpolation):
return interpolation
def lerp(val, low, high):
"""Linear interpolation"""
return low + (high - low) * val
def slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1."""
if val <= 0:
return low
elif val >= 1:
return high
elif torch.allclose(low, high):
return low
omega = torch.arccos(torch.dot(low / torch.norm(low), high / torch.norm(high)))
so = torch.sin(omega)
return torch.sin((1.0 - val) * omega) / so * low + torch.sin(val * omega) / so * high
def make_imrange(arr: list):
interpolation = torch.stack(arr)
imgs = rearrange(make_grid(interpolation, 11), "c h w -> h w c")
imgs = imgs.cpu().detach().numpy() if torch.cuda.is_available() else imgs.detach().numpy()
return imgs
def get_imrange(
G: Callable[[torch.tensor], torch.tensor],
start: torch.tensor,
end: torch.tensor,
nums: int = 8,
interpolation="spherical",
) -> torch.tensor:
"""
Decoder must produce a 3d vector to be appened togther to form a new grid
"""
val = 0
arr2 = []
inter = get_interpolation(interpolation)
for val in torch.linspace(0, 1, nums):
new_z = torch.unsqueeze(inter(val, start[0], end[0]), 0)
arr2.append(G(new_z)[0])
return make_imrange(arr2)
| mit | 430ce8107ab31284ad768be1c7609ec8 | 26.76 | 94 | 0.551873 | 3.333333 | false | false | false | false |
probml/pyprobml | deprecated/scripts/svi_gmm_tfp_original.py | 1 | 6229 | # SVI for a GMM
# https://github.com/brendanhasz/svi-gaussian-mixture-model/blob/master/BayesianGaussianMixtureModel.ipynb
# MIT License
#pip install tf-nightly
#pip install --upgrade tfp-nightly -q
# Imports
import superimport
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from time import time
# Plot settings
#%config InlineBackend.figure_format = 'svg'
# Random seed
np.random.seed(12345)
tf.random.set_seed(12345)
# Generate some data
N = 3000
X = np.random.randn(N, 2).astype('float32')
X[:1000, :] += [2, 0]
X[1000:2000, :] -= [2, 4]
X[2000:, :] += [-2, 4]
# Plot the data
plt.plot(X[:, 0], X[:, 1], '.')
plt.axis('equal')
plt.show()
# Make a TensorFlow Dataset from that data
batch_size = 500
dataset = tf.data.Dataset.from_tensor_slices(
(X)).shuffle(10000).batch(batch_size)
class GaussianMixtureModel(tf.keras.Model):
"""A Bayesian Gaussian mixture model.
Assumes Gaussians' variances in each dimension are independent.
Parameters
----------
Nc : int > 0
Number of mixture components.
Nd : int > 0
Number of dimensions.
"""
def __init__(self, Nc, Nd):
# Initialize
super(GaussianMixtureModel, self).__init__()
self.Nc = Nc
self.Nd = Nd
# Variational distribution variables for means
self.locs = tf.Variable(tf.random.normal((Nc, Nd)))
self.scales = tf.Variable(tf.pow(tf.random.gamma((Nc, Nd), 5, 5), -0.5))
# Variational distribution variables for standard deviations
self.alpha = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
self.beta = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
# Variational distribution variables for component weights
self.counts = tf.Variable(2*tf.ones((Nc,)))
# Prior distributions for the means
self.mu_prior = tfd.Normal(tf.zeros((Nc, Nd)), tf.ones((Nc, Nd)))
# Prior distributions for the standard deviations
self.sigma_prior = tfd.Gamma(5*tf.ones((Nc, Nd)), 5*tf.ones((Nc, Nd)))
# Prior distributions for the component weights
self.theta_prior = tfd.Dirichlet(2*tf.ones((Nc,)))
def call(self, x, sampling=True, independent=True):
"""Compute losses given a batch of data.
Parameters
----------
x : tf.Tensor
A batch of data
sampling : bool
Whether to sample from the variational posterior
distributions (if True, the default), or just use the
mean of the variational distributions (if False).
Returns
-------
log_likelihoods : tf.Tensor
Log likelihood for each sample
kl_sum : tf.Tensor
Sum of the KL divergences between the variational
distributions and their priors
"""
# The variational distributions
mu = tfd.Normal(self.locs, self.scales)
sigma = tfd.Gamma(self.alpha, self.beta)
theta = tfd.Dirichlet(self.counts)
# Sample from the variational distributions
if sampling:
Nb = x.shape[0] #number of samples in the batch
mu_sample = mu.sample(Nb)
sigma_sample = tf.pow(sigma.sample(Nb), -0.5)
theta_sample = theta.sample(Nb)
else:
mu_sample = tf.reshape(mu.mean(), (1, self.Nc, self.Nd))
sigma_sample = tf.pow(tf.reshape(sigma.mean(), (1, self.Nc, self.Nd)), -0.5)
theta_sample = tf.reshape(theta.mean(), (1, self.Nc))
# The mixture density
density = tfd.Mixture(
cat=tfd.Categorical(probs=theta_sample),
components=[
tfd.MultivariateNormalDiag(loc=mu_sample[:, i, :],
scale_diag=sigma_sample[:, i, :])
for i in range(self.Nc)])
# Compute the mean log likelihood
log_likelihoods = density.log_prob(x)
# Compute the KL divergence sum
mu_div = tf.reduce_sum(tfd.kl_divergence(mu, self.mu_prior))
sigma_div = tf.reduce_sum(tfd.kl_divergence(sigma, self.sigma_prior))
theta_div = tf.reduce_sum(tfd.kl_divergence(theta, self.theta_prior))
kl_sum = mu_div + sigma_div + theta_div
# Return both losses
return log_likelihoods, kl_sum
# A GMM with 3 components in 2 dimensions
model = GaussianMixtureModel(3, 2)
# Use the Adam optimizer
optimizer = tf.keras.optimizers.Adam(lr=1e-3)
@tf.function
def train_step(data):
with tf.GradientTape() as tape:
log_likelihoods, kl_sum = model(data)
elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods)
gradients = tape.gradient(elbo_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Fit the model
EPOCHS = 1000
time_start = time()
for epoch in range(EPOCHS):
for data in dataset:
train_step(data)
elapsed_time = (time() - time_start)
#print('method {}'.format(method))
print(elapsed_time)
# Compute log likelihood at each point on a grid
Np = 100 #number of grid points
Xp, Yp = np.meshgrid(np.linspace(-6, 6, Np), np.linspace(-6, 6, Np))
Pp = np.column_stack([Xp.flatten(), Yp.flatten()]).astype('float32')
Z, _ = model(Pp, sampling=False)
Z = np.reshape(Z, (Np, Np))
# Show the fit mixture density
plt.figure()
plt.imshow(np.exp(Z),
extent=(-6, 6, -6, 6),
origin='lower')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Likelihood')
model.locs
model.trainable_variables
# Sample from the std deviation variational posterior
stds = tf.pow(tfd.Gamma(model.alpha, model.beta).sample(10000), -0.5)
# Plot the samples
plt.figure()
sns.distplot(stds[:, 0, 0])
# Sample from the mean variational posterior
means = tfd.Normal(model.locs, model.scales).sample(10000)
# Plot the mean samples for a single
plt.figure()
sns.kdeplot(means[:, 0, 0].numpy(),
means[:, 0, 1].numpy(),
n_levels=10)
| mit | 11e7466934edfa208d991ed4db282811 | 29.99005 | 106 | 0.609568 | 3.462479 | false | false | false | false |
probml/pyprobml | deprecated/scripts/bayes_change_of_var.py | 1 | 1657 | # Based on https://github.com/probml/pmtk3/blob/master/demos/bayesChangeOfVar.m
# MC on change of variables and empirical distribution, highlighting that
# modes are not, in general, preserved.
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import os
from pyprobml_utils import save_fig
# Ensure stochastic reproducibility.
np.random.seed(42)
# Define a mapping from x-space to y-space.
def ginv(x):
"""transform func"""
return 1 / (1 + np.exp(5 - x))
# Define a probability density on x-space, and sample from it.
mu = 6
sigma = 1
n = 10 ** 6
x_samples = norm.rvs(size=n, loc=mu, scale=sigma)
# Calculate a histogram for the samples in x-space and a histogram
# for their transformations to y-space.
hist_x, bin_edges_x = np.histogram(x_samples, bins=50, density=True)
hist_y, bin_edges_y = np.histogram(ginv(x_samples), bins=50, density=True)
# Plot the histograms, the mapping function, and an indication of how
# the x-distribution's mean maps to y-space.
linewidth = 5
plt.bar(bin_edges_x[:-1], hist_x, color='red', align='edge', width=bin_edges_x[1] - bin_edges_x[0])
plt.barh(bin_edges_y[:-1], hist_y, color='green', align='edge', height=bin_edges_y[1] - bin_edges_y[0])
x_range = np.arange(0, 10, 0.01)
plt.plot(x_range, ginv(x_range), 'blue', linewidth=linewidth)
plt.vlines(mu, ymin=0, ymax=ginv(mu), color='yellow', linewidth=linewidth)
plt.hlines(ginv(mu), xmin=0, xmax=mu, color='yellow', linewidth=linewidth)
plt.text(9, 1/10, r'$p_X$');
plt.text(2/3, 2/10, r'$p_Y$');
plt.text(9, ginv(9) - 1/10, r'$g$');
## Save the figure.
save_fig('bayesChangeOfVar.pdf')
plt.show()
| mit | aac4d6463819a4086d1c236acd7377a9 | 33.520833 | 103 | 0.704888 | 2.752492 | false | false | false | false |
probml/pyprobml | deprecated/scripts/postDensityIntervals.py | 1 | 1771 |
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
def logdet(Sigma):
return np.log2(Sigma)
def gaussProb(X, mu, Sigma):
d = 1
X = X.reshape(X.shape[0], d)
X = X - np.transpose(mu)
logp = -0.5*np.sum(np.multiply((X/(Sigma)), X), 1)
logZ = (d/2)*np.log(2*np.pi) + 0.5*logdet(Sigma)
logp = logp - logZ
p = np.exp(logp)
return p
def f(x): return gaussProb(x, 0, 1) + gaussProb(x, 6, 1)
domain = np.arange(-4, 10.001, 0.001)
plt.plot(domain, f(domain), '-r', linewidth=3)
plt.fill_between(domain, f(domain), color='gray', alpha=0.2)
plt.fill_between(np.arange(-4, -1.999, 0.001),
f(np.arange(-4, -1.999, 0.001)), color='white')
plt.fill_between(np.arange(8, 10.001, 0.001), f(
np.arange(8, 10.001, 0.001)), color='white')
plt.annotate(r'$\alpha /2$', xytext=(-3.5, 0.11), xy=(-2.3, 0.015),
arrowprops=dict(facecolor='black'),
fontsize=14)
plt.annotate(r'$\alpha /2$', xytext=(9.5, 0.11), xy=(8.3, 0.015),
arrowprops=dict(facecolor='black'),
fontsize=14)
plt.ylim(0, 0.5)
pml.savefig('centralInterval.pdf')
plt.show()
plt.plot(domain, f(domain), '-r', linewidth=3)
plt.fill_between(domain, f(domain), color='gray', alpha=0.2)
plt.fill_between(np.arange(-4, -1.43992, 0.001),
f(np.arange(-4, -1.43992, 0.001)), color='white')
plt.fill_between(np.arange(7.37782, 10.001, 0.001), f(
np.arange(7.37782, 10.001, 0.001)), color='white')
plt.plot(domain, [0.15 for i in range(0, 14001)], 'b-')
plt.fill_between(np.arange(1.3544, 4.5837, 0.001), f(
np.arange(1.3544, 4.5837, 0.001)), color='white')
plt.yticks([0.15], ["pMIN"], fontsize=14)
plt.ylim(0, 0.5)
pml.savefig('HPD.pdf')
plt.show()
| mit | 823f1692db5df21fa8fb434e8a563761 | 31.2 | 67 | 0.605872 | 2.466574 | false | false | false | false |
probml/pyprobml | deprecated/scripts/subspace_sgd_mlp_mnist_demo.py | 1 | 3742 | # This demo replicates Figure 2 of the paper
# "Measuring the Intrinsic Dimension of Objetive Landscape"
# By Li et al. (https://arxiv.org/abs/1804.08838)
# We consider a 2-layer MLP with ReLU activations
# Code based on the following repos:
# * https://github.com/ganguli-lab/degrees-of-freedom
# * https://github.com/uber-research/intrinsic-dimension
# Author : Gerardo Durán-Martín (@gerdm), Aleyna Kara(@karalleyna), Kevin Murphy(@murphyk)
# import superimport
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import jax
import jax.numpy as jnp
from jax.random import PRNGKey, split, permutation, normal
from jax.experimental.stax import Dense, Relu, LogSoftmax
from jax.experimental import stax
import optax
from tensorflow.keras.datasets import mnist
#from subspace_opt_lib import make_potential, make_potential_subspace, optimize_loop
import subspace_opt_lib as sub
def load_mnist(key, n_train, n_test, shuffle=True):
(X, y), (X_test, y_test) = mnist.load_data()
n_train = n_train if n_train < len(y) else len(y)
n_test = n_test if n_test < len(y_test) else len(y)
train_key, test_key = split(key)
train_indices = jnp.arange(len(y))
perm = permutation(train_key, train_indices)[:n_train] if shuffle else train_indices[:n_train]
train_ds = {
"X": jnp.float32(X[perm].reshape(n_train, -1)) / 255.,
"y": jnp.array(y[perm])
}
test_indices = jnp.arange(len(y_test))
perm = permutation(test_key, test_indices)[:n_test] if shuffle else test_indices[:n_test]
test_ds = {
"X": jnp.float32(X_test[perm].reshape(n_test, -1)) / 255.,
"y": jnp.array(y_test[perm])
}
return train_ds, test_ds
key = PRNGKey(42)
data_key, key = split(key)
n_train, n_test = 5000, 1000
train_ds, test_ds = load_mnist(data_key, n_train, n_test)
n_features = train_ds["X"].shape[1]
n_classes = 10
init_random_params, predict = stax.serial(
Dense(n_features), Relu,
Dense(50), Relu,
Dense(n_classes), LogSoftmax)
init_key, key = split(key)
_, params_tree_init = init_random_params(init_key, input_shape=(-1, n_features))
# Do one step of SGD in full parameter space to get good initial value (“anchor”)
potential_key, key = split(key)
l2_regularizer, batch_size = 1., 512
objective = sub.make_potential(potential_key, predict, train_ds, batch_size, l2_regularizer)
losses = jnp.array([])
learning_rate = 1e-3
optimizer = optax.adam(learning_rate)
n_steps = 300
anchor_params_tree, loss, _ = sub.optimize_loop(objective, params_tree_init, optimizer, n_steps=n_steps, callback=None)
print(f"Loss : {loss[-1]}")
# Do subspace optimization starting from rnd location
subspace_dim = 100
subspace_key, key = split(key)
anchor_params_full, flat_to_pytree_fn = jax.flatten_util.ravel_pytree(anchor_params_tree)
full_dim = len(anchor_params_full)
projection_matrix = sub.generate_random_basis(key, subspace_dim, full_dim)
objective_subspace, subspace_to_pytree_fn = sub.make_potential_subspace(
subspace_key, anchor_params_tree, predict, train_ds, batch_size, l2_regularizer,
subspace_dim, projection_matrix=projection_matrix)
losses = jnp.array([])
params_subspace = normal(key, shape=(subspace_dim,))
params_subspace, loss, _ = sub.optimize_loop(objective_subspace, params_subspace, optimizer, n_steps)
print(f"Loss : {loss[-1]}")
losses = jnp.append(losses, loss)
# Do more subspace optimization continuing from before (warm-start)
params_subspace, loss, _ = sub.optimize_loop(objective_subspace, params_subspace, optimizer, n_steps)
print(f"Loss : {loss[-1]}")
losses = jnp.append(losses, loss)
# Plot loss curve
plt.plot(losses, linewidth=3)
plt.xlabel("Iteration")
pml.savefig("subspace_sgd_mlp_mnist_demo.png")
plt.show()
| mit | fe68c1e3ead85e2f155db56fc2439822 | 32.357143 | 119 | 0.711724 | 2.887172 | false | true | false | false |
probml/pyprobml | deprecated/scripts/spam_dtree_size.py | 1 | 5450 |
#Performance of tree ensembles. Based on the email spam example from chapter 10 of "Elements of statistical learning". Code is from Andrey Gaskov's site:
#https://github.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/blob/master/examples/Spam.ipynb
import superimport
from one_standard_error_rule_model import OneStandardErrorRuleModel
from sklearn import tree
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
from matplotlib import transforms, pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
# omit numpy warnings (don't do it in real work)
np.seterr(divide='ignore', invalid='ignore')
np.warnings.filterwarnings('ignore')
# %matplotlib inline
# define plots common properties and color constants
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['axes.linewidth'] = 0.5
ORANGE, BLUE, PURPLE = '#FF8C00', '#0000FF', '#A020F0'
GRAY1, GRAY4, GRAY7 = '#231F20', '#646369', '#929497'
# we will calculate train and test error rates for all models
def error_rate(y_true, y_pred):
return 1 - accuracy_score(y_true, y_pred)
"""Get data"""
df = pd.read_csv("https://github.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/blob/master/data/Spam.txt?raw=True")
df.head()
# PAGE 301. We coded spam as 1 and email as zero. A test set of size 1536 was
# randomly chosen, leaving 3065 observations in the training set.
target = 'spam'
columns = ['word_freq_make', 'word_freq_address', 'word_freq_all',
'word_freq_3d', 'word_freq_our', 'word_freq_over',
'word_freq_remove', 'word_freq_internet', 'word_freq_order',
'word_freq_mail', 'word_freq_receive', 'word_freq_will',
'word_freq_people', 'word_freq_report', 'word_freq_addresses',
'word_freq_free', 'word_freq_business', 'word_freq_email',
'word_freq_you', 'word_freq_credit', 'word_freq_your',
'word_freq_font', 'word_freq_000', 'word_freq_money',
'word_freq_hp', 'word_freq_hpl', 'word_freq_george',
'word_freq_650', 'word_freq_lab', 'word_freq_labs',
'word_freq_telnet', 'word_freq_857', 'word_freq_data',
'word_freq_415', 'word_freq_85', 'word_freq_technology',
'word_freq_1999', 'word_freq_parts', 'word_freq_pm',
'word_freq_direct', 'word_freq_cs', 'word_freq_meeting',
'word_freq_original', 'word_freq_project', 'word_freq_re',
'word_freq_edu', 'word_freq_table', 'word_freq_conference',
'char_freq_;', 'char_freq_(', 'char_freq_[', 'char_freq_!',
'char_freq_$', 'char_freq_#', 'capital_run_length_average',
'capital_run_length_longest', 'capital_run_length_total']
# let's give columns more compact names
features = ['make', 'address', 'all', '3d', 'our', 'over', 'remove',
'internet', 'order', 'mail', 'receive', 'will', 'people',
'report', 'addresses', 'free', 'business', 'email', 'you',
'credit', 'your', 'font', '000', 'money', 'hp', 'hpl',
'george', '650', 'lab', 'labs', 'telnet', '857', 'data',
'415', '85', 'technology', '1999', 'parts', 'pm', 'direct',
'cs', 'meeting', 'original', 'project', 're', 'edu', 'table',
'conference', 'ch_;', 'ch(', 'ch[', 'ch!', 'ch$', 'ch#',
'CAPAVE', 'CAPMAX', 'CAPTOT']
X, y = df[columns].values, df[target].values
# split by test column value
is_test = df.test.values
X_train, X_test = X[is_test == 0], X[is_test == 1]
y_train, y_test = y[is_test == 0], y[is_test == 1]
#max_leaf_nodes = [2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 18, 21, 26, 30, 33, 37, 42]
max_leaf_nodes = [int(x) for x in np.linspace(2,200,10)]
tree_based_clf = OneStandardErrorRuleModel(
tree.DecisionTreeClassifier(criterion='entropy', random_state=5),
'max_leaf_nodes', max_leaf_nodes,
is_regression=False, random_state=26,
).fit(X_train, y_train)
print(f'Selected max_leaf_nodes: {tree_based_clf.model_.max_leaf_nodes}')
print(f'Test error rate: {tree_based_clf.assess(X_test, y_test)[0]*100:.1f}%')
# calculate test error rate for each parameter value
test_error_rates = [
tree_based_clf.refit(X_train, y_train, i).assess(X_test, y_test)[0]
for i in range(len(max_leaf_nodes))]
# PAGE 313. Figure 9.4 shows the 10-fold cross-validation error rate as a
# function of the size of the pruned tree, along with ±2 standard
# errors of the mean, from the ten replications. The test error curve
# is shown in orange.
fig, ax = plt.subplots(figsize=(4.75, 3.15), dpi=150)
ax.plot(max_leaf_nodes, tree_based_clf.cv_mean_errors_, c=BLUE, linewidth=0.6)
ax.errorbar(max_leaf_nodes, tree_based_clf.cv_mean_errors_,
color=BLUE, linestyle='None', marker='o', elinewidth=0.2,
markersize=1.5, yerr=tree_based_clf.cv_mean_errors_std_,
ecolor=BLUE, capsize=2)
ax.axhline(y=tree_based_clf.cv_min_error_ + tree_based_clf.cv_min_error_std_,
c=GRAY1, linewidth=0.6, linestyle=':')
for e in ax.get_yticklabels() + ax.get_xticklabels():
e.set_fontsize(6)
ax.set_xlabel('Tree size', color=GRAY4, fontsize=7)
ax.set_ylabel('Misclassification Rate', color=GRAY4, fontsize=7)
ax.scatter(max_leaf_nodes, test_error_rates, color=ORANGE,
s=3, zorder=10)
ax.plot(max_leaf_nodes, test_error_rates, color=ORANGE,
linewidth=0.6)
_ = ax.set_ylim(-0.02, 0.47)
plt.tight_layout()
| mit | e578194ae1bf64aba2787f750ecd7d5f | 45.177966 | 153 | 0.643971 | 2.979224 | false | true | false | false |
probml/pyprobml | deprecated/scripts/activation_fun_plot.py | 1 | 2886 | # Plots various neural net activation functions.
import superimport
import numpy as np
import matplotlib.pyplot as plt
import os
import pyprobml_utils as pml
import sys
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def relu(z):
return np.maximum(0, z)
def heaviside(z):
return (z > 0)
def softplus(z):
return np.log(1+np.exp(z))
def lrelu(z, lam=0.1):
return np.maximum(lam*z, z)
def elu(z, alpha=1):
return np.where(z < 0, alpha * (np.exp(z) - 1), z)
def elu2(z, lam=0.5):
return np.maximum(0, z) + np.minimum(0, lam*(np.exp(z) - 1))
def swish(z):
return z * sigmoid(z)
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the SELU paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
return scale * elu(z, alpha)
z = np.linspace(-5, 5, 200)
print(z)
# dummy test
#sys.exit()
#plt.figure(figsize=(11,4))
plt.figure()
plt.plot(z, sigmoid(z), "b-", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "g--", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="lower right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
pml.savefig('activationFuns.pdf')
plt.show()
#plt.figure(figsize=(11,4))
plt.figure()
plt.plot(z, relu(z), "r-", linewidth=2, label="ReLU")
plt.plot(z, lrelu(z), "g--", linewidth=2, label="LReLU")
plt.plot(z, elu(z), "b-", linewidth=2, label="ELU")
plt.plot(z, selu(z), "k:", linewidth=2, label="SELU")
plt.plot(z, swish(z), "m-.", linewidth=2, label="swish")
plt.grid(True)
plt.legend(loc="upper left", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-2, 2, -1.2, 2])
pml.savefig('activationFuns2.pdf')
plt.show()
# From https://github.com/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb
plt.figure()
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, sigmoid(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
pml.savefig("sigmoid_saturation_plot.pdf")
plt.show()
| mit | 5977f999f981c2b226ea9d487bcc20e5 | 29.702128 | 206 | 0.638254 | 2.361702 | false | false | false | false |
amccaugh/phidl | phidl/device_layout.py | 1 | 107019 | # ==============================================================================
# Major TODO
# ==============================================================================
# Add D.add_gdsii_path() to allow creation of GDSII paths
# Add D.write_gds(max_points_per_polygon)
# Remove Device.add()
# Show labels in quickplot
# ==============================================================================
# Minor TODO
# ==============================================================================
# Add Group.get_polygons()
# Allow Boolean to use Groups
# Add pp.delay_sine(distance = 10, length = 20, num_periods = 2)
# Allow connect(overlap) to be a tuple (0, 0.7)
# Possibly replace gdspy bezier (font rendering) with
# https://stackoverflow.com/a/12644499
# ==============================================================================
# Documentation TODO
# ==============================================================================
# Tutorials
# - Using Aliases
# - "Using info / metadata" tutorial with .info explanation and tutorial of get_info
# - Advanced and Misc (simplify)
# Examples
# - An electrical device with contact pads
# - An optoelectronic device
# - Waveguide + LED
# - route_manhattan
# ==============================================================================
# Imports
# ==============================================================================
import hashlib
import numbers
import warnings
from copy import deepcopy as _deepcopy
import gdspy
# Remove this once gdspy fully deprecates current_library
import gdspy.library
import numpy as np
from numpy import cos, mod, pi, sin, sqrt
from numpy.linalg import norm
from phidl.constants import _CSS3_NAMES_TO_HEX
gdspy.library.use_current_library = False
__version__ = "1.6.2"
# ==============================================================================
# Useful transformation functions
# ==============================================================================
def _rotate_points(points, angle=45, center=(0, 0)):
"""Rotates points around a centerpoint defined by ``center``. ``points``
may be input as either single points [1,2] or array-like[N][2], and will
return in kind.
Parameters
----------
points : array-like[N][2]
Coordinates of the element to be rotated.
angle : int or float
Angle to rotate the points.
center : array-like[2]
Centerpoint of rotation.
Returns
-------
A new set of points that are rotated around ``center``.
"""
if angle == 0:
return points
angle = angle * pi / 180
ca = cos(angle)
sa = sin(angle)
sa = np.array((-sa, sa))
c0 = np.array(center)
if np.asarray(points).ndim == 2:
return (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
if np.asarray(points).ndim == 1:
return (points - c0) * ca + (points - c0)[::-1] * sa + c0
def _reflect_points(points, p1=(0, 0), p2=(1, 0)):
"""Reflects points across the line formed by p1 and p2. ``points`` may be
input as either single points [1,2] or array-like[N][2], and will return in kind.
Parameters
----------
points : array-like[N][2]
Coordinates of the element to be reflected.
p1 : array-like[2]
Coordinates of the start of the reflecting line.
p2 : array-like[2]
Coordinates of the end of the reflecting line.
Returns
-------
A new set of points that are reflected across ``p1`` and ``p2``.
"""
# From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line
points = np.array(points)
p1 = np.array(p1)
p2 = np.array(p2)
if np.asarray(points).ndim == 1:
return (
2 * (p1 + (p2 - p1) * np.dot((p2 - p1), (points - p1)) / norm(p2 - p1) ** 2)
- points
)
if np.asarray(points).ndim == 2:
return np.array(
[
2 * (p1 + (p2 - p1) * np.dot((p2 - p1), (p - p1)) / norm(p2 - p1) ** 2)
- p
for p in points
]
)
def _is_iterable(items):
"""Checks if the passed variable is iterable.
Parameters
----------
items : any
Item to check for iterability.
"""
return isinstance(items, (list, tuple, set, np.ndarray))
def _parse_coordinate(c):
    """Translates various inputs (lists, tuples, Ports) to an (x,y) coordinate.

    Parameters
    ----------
    c : array-like[N] or Port
        Input to translate into a coordinate.

    Returns
    -------
    c : array-like[2]
        Parsed coordinate.
    """
    # A Port contributes its midpoint; anything else must be a 2-element
    # coordinate already.
    if isinstance(c, Port):
        return c.midpoint
    if np.array(c).size == 2:
        return c
    raise ValueError(
        "[PHIDL] Could not parse coordinate, input should be array-like (e.g. [1.5,2.3] or a Port"
    )
def _parse_move(origin, destination, axis):
    """Translates various input coordinates to changes in position in the x-
    and y-directions.

    Parameters
    ----------
    origin : array-like[2] of int or float, Port, or key
        Origin point of the move.
    destination : array-like[2] of int or float, Port, key, or None
        Destination point of the move.
    axis : {'x', 'y'}
        Direction of move.

    Returns
    -------
    dx : int or float
        Change in position in the x-direction.
    dy : int or float
        Change in position in the y-direction.
    """
    # With only one coordinate supplied, interpret it as the destination of
    # a move starting from the global origin (0, 0)
    if destination is None:
        origin, destination = [0, 0], origin
    d = _parse_coordinate(destination)
    o = _parse_coordinate(origin)
    # Restricting to an axis keeps the other coordinate fixed at the origin's
    if axis == "x":
        d = (d[0], o[1])
    elif axis == "y":
        d = (o[0], d[1])
    dx, dy = np.array(d) - o
    return dx, dy
def _distribute(elements, direction="x", spacing=100, separation=True, edge=None):
"""Takes a list of elements and distributes them either equally along a
grid or with a fixed spacing between them.
Parameters
----------
elements : array-like of PHIDL objects
Elements to distribute.
direction : {'x', 'y'}
Direction of distribution; either a line in the x-direction or
y-direction.
spacing : int or float
Distance between elements.
separation : bool
If True, guarantees elements are speparated with a fixed spacing between; if False, elements are spaced evenly along a grid.
edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'}
Which edge to perform the distribution along (unused if
separation == True)
Returns
-------
elements : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group
Distributed elements.
"""
if len(elements) == 0:
return elements
if direction not in ({"x", "y"}):
raise ValueError(
"[PHIDL] distribute(): 'direction' argument must be either 'x' or'y'"
)
if (
(direction == "x")
and (edge not in ({"x", "xmin", "xmax"}))
and (not separation)
):
raise ValueError(
"[PHIDL] distribute(): When `separation` == False and direction == 'x',"
+ " the `edge` argument must be one of {'x', 'xmin', 'xmax'}"
)
if (
(direction == "y")
and (edge not in ({"y", "ymin", "ymax"}))
and (not separation)
):
raise ValueError(
"[PHIDL] distribute(): When `separation` == False and direction == 'y',"
+ " the `edge` argument must be one of {'y', 'ymin', 'ymax'}"
)
if direction == "y":
sizes = [e.ysize for e in elements]
if direction == "x":
sizes = [e.xsize for e in elements]
spacing = np.array([spacing] * len(elements))
if separation: # Then `edge` doesn't apply
if direction == "x":
edge = "xmin"
if direction == "y":
edge = "ymin"
else:
sizes = np.zeros(len(spacing))
# Calculate new positions and move each element
start = elements[0].__getattribute__(edge)
positions = np.cumsum(np.concatenate(([start], (spacing + sizes))))
for n, e in enumerate(elements):
e.__setattr__(edge, positions[n])
return elements
def _align(elements, alignment="ymax"):
    """Aligns lists of PHIDL elements

    Parameters
    ----------
    elements : array-like of PHIDL objects
        Elements to align.
    alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'}
        Which edge to align along (e.g. 'ymax' will move the elements such
        that all of their topmost points are aligned)

    Returns
    -------
    elements : array-like of PHIDL objects
        Aligned elements.
    """
    if len(elements) == 0:
        return elements
    if alignment not in ("x", "y", "xmin", "xmax", "ymin", "ymax"):
        raise ValueError(
            "[PHIDL] 'alignment' argument must be one of 'x','y','xmin', 'xmax', 'ymin','ymax'"
        )
    # The corresponding edge/center of the whole Group is the shared target
    target = Group(elements).__getattribute__(alignment)
    for element in elements:
        element.__setattr__(alignment, target)
    return elements
def _line_distances(points, start, end):
if np.all(start == end):
return np.linalg.norm(points - start, axis=1)
vec = end - start
cross = np.cross(vec, start - points)
return np.divide(abs(cross), np.linalg.norm(vec))
def _simplify(points, tolerance=0):
    """Ramer-Douglas-Peucker line simplification.  Takes an array of points
    of shape (N,2) and removes excess points, returning a polyline that
    deviates from the original by at most ``tolerance``."""
    # Adapted from https://github.com/fhirschmann/rdp/issues/7
    # originally written by Kirill Konevets https://github.com/kkonevets
    pts = np.asarray(points)
    first, last = pts[0], pts[-1]
    deviations = _line_distances(pts, first, last)
    split = np.argmax(deviations)
    if deviations[split] <= tolerance:
        # All intermediate points lie within tolerance of the chord
        return np.array([first, last])
    # Recurse on each side of the farthest point, dropping the duplicated
    # split point from the left-hand result before stacking
    left = _simplify(pts[: split + 1], tolerance)
    right = _simplify(pts[split:], tolerance)
    return np.vstack((left[:-1], right))
def reset():
    """Resets the built-in Layer dictionary (controls the coloring in
    quickplot() ), and sets the Device universal ID (uid) counter back to
    zero."""
    Device._next_uid = 0
    Layer.layer_dict = {}
class LayerSet:
    """A named collection of Layer objects, keyed by layer name."""

    def __init__(self):
        """Initialises an empty LayerSet."""
        self._layers = {}

    def add_layer(
        self,
        name="unnamed",
        gds_layer=0,
        gds_datatype=0,
        description=None,
        color=None,
        inverted=False,
        alpha=0.6,
        dither=None,
    ):
        """Adds a layer to an existing LayerSet object.

        Parameters
        ----------
        name : str
            Name of the Layer.
        gds_layer : int
            GDSII Layer number.
        gds_datatype : int
            GDSII datatype.
        description : str
            Layer description.
        color : str
            Hex code of color for the Layer.
        inverted : bool
            If true, inverts the Layer.
        alpha : int or float
            Alpha parameter (opacity) for the Layer, value must be between 0.0
            and 1.0.
        dither : str
            KLayout dither style (only used in phidl.utilities.write_lyp() )

        Raises
        ------
        ValueError
            If a layer with the same name already exists in this LayerSet.
        """
        # Validate the name *before* constructing the Layer: Layer.__init__
        # registers itself in Layer.layer_dict, so constructing first would
        # leave a stray registry entry when the add fails.
        if name in self._layers:
            raise ValueError(
                '[PHIDL] LayerSet: Tried to add layer named "%s"' % (name)
                + ", but a layer with that name already exists in this LayerSet"
            )
        self._layers[name] = Layer(
            gds_layer=gds_layer,
            gds_datatype=gds_datatype,
            name=name,
            description=description,
            inverted=inverted,
            color=color,
            alpha=alpha,
            dither=dither,
        )

    def __getitem__(self, val):
        """If you have a LayerSet `ls`, allows access to the layer names like
        ls['gold2'].

        Parameters
        ----------
        val : str
            Layer name to access within the LayerSet.

        Returns
        -------
        Layer
            Accessed Layer in the LayerSet.

        Raises
        ------
        ValueError
            If no layer with the given name exists.
        """
        try:
            return self._layers[val]
        except KeyError:
            # Narrowed from a bare `except Exception` so that unrelated
            # errors (e.g. unhashable keys) are not masked as "missing layer"
            raise ValueError(
                "[PHIDL] LayerSet: Tried to access layer "
                'named "%s"' % (val) + " which does not exist"
            ) from None

    def __repr__(self):
        """Prints the number of Layers in the LayerSet object."""
        return "LayerSet (%s layers total)" % (len(self._layers))
class Layer:
    """Layer object.

    Parameters
    ----------
    gds_layer : int or Layer
        GDSII Layer number (or an existing Layer to copy).
    gds_datatype : int
        GDSII datatype.
    name : str
        Name of the Layer.
    color : str
        Hex code of color for the Layer.
    alpha : int or float
        Alpha parameter (opacity) for the Layer.
    dither : str
        KLayout dither parameter (texture) for the Layer
        (only used in phidl.utilities.write_lyp)
    """

    # Registry of every Layer created, keyed by (gds_layer, gds_datatype)
    layer_dict = {}

    def __init__(
        self,
        gds_layer=0,
        gds_datatype=0,
        name="unnamed",
        description=None,
        inverted=False,
        color=None,
        alpha=0.6,
        dither=None,
    ):
        if isinstance(gds_layer, Layer):
            # Copy-constructor form: Layer(other_layer) duplicates all
            # attributes of the given Layer (except its color)
            other = gds_layer
            gds_datatype = other.gds_datatype
            name = other.name
            description = other.description
            alpha = other.alpha
            dither = other.dither
            inverted = other.inverted
            gds_layer = other.gds_layer

        self.gds_layer = gds_layer
        self.gds_datatype = gds_datatype
        self.name = name
        self.description = description
        self.inverted = inverted
        self.alpha = alpha
        self.dither = dither

        # Accept three color formats: a 0-1 RGB triplet, an HTML hex string,
        # or a CSS3 color name.  Any parse failure becomes a single
        # descriptive ValueError.
        try:
            if color is None:  # not specified
                self.color = None
            elif np.size(color) == 3:  # RGB triplet like (0.5, 0.5, 0.5)
                rgb = np.array(color)
                if np.any(rgb > 1) or np.any(rgb < 0):
                    raise ValueError
                rgb = np.array(np.round(rgb * 255), dtype=int)
                self.color = "#{:02x}{:02x}{:02x}".format(*rgb)
            elif color[0] == "#":  # hex string like #1d2e3f
                if len(color) != 7:
                    raise ValueError
                int(color[1:], 16)  # raises if not valid hexadecimal
                self.color = color
            else:  # CSS3 color name like 'gold'
                self.color = _CSS3_NAMES_TO_HEX[color.lower()]
        except Exception:
            raise ValueError(
                "[PHIDL] Layer() color must be specified as a "
                + "0-1 RGB triplet, (e.g. [0.5, 0.1, 0.9]), an HTML hex color string "
                + "(e.g. '#a31df4'), or a CSS3 color name (e.g. 'gold' or "
                + "see http://www.w3schools.com/colors/colors_names.asp )"
            )

        Layer.layer_dict[(gds_layer, gds_datatype)] = self

    def __repr__(self):
        """Prints a description of the Layer object, including the name, GDS
        layer, GDS datatype, description, and color of the Layer."""
        return (
            "Layer (name %s, GDS layer %s, GDS datatype %s, description %s, color %s)"
            % (
                self.name,
                self.gds_layer,
                self.gds_datatype,
                self.description,
                self.color,
            )
        )
def _parse_layer(layer):
    """Normalize *layer* into a (gds_layer, gds_datatype) tuple.

    Accepts a Layer object, a 2-element sequence like [layer, datatype],
    a 1-element sequence like [layer], a bare number (datatype 0), or
    None (treated as layer 0, datatype 0).

    Raises
    ------
    ValueError
        If *layer* matches none of the accepted forms.
    """
    if isinstance(layer, Layer):
        return (layer.gds_layer, layer.gds_datatype)
    shape = np.shape(layer)
    if shape == (2,):  # In form [3,0]
        return (layer[0], layer[1])
    if shape == (1,):  # In form [3]
        return (layer[0], 0)
    if layer is None:
        return (0, 0)
    if isinstance(layer, numbers.Number):
        return (layer, 0)
    raise ValueError(
        """[PHIDL] _parse_layer() was passed something
            that could not be interpreted as a layer: layer = %s"""
        % layer
    )
class _GeometryHelper:
    """This is a helper class. It can be added to any other class which has
    the functions move() and the property ``bbox`` (as in self.bbox). It uses
    that function+property to enable you to do things like check what the
    center of the bounding box is (self.center), and also to do things like
    move the bounding box such that its maximum x value is 5.2
    (self.xmax = 5.2).

    ``bbox`` is expected to be array-like in the form
    [[xmin, ymin], [xmax, ymax]] (see the xmin/xmax/ymin/ymax accessors).
    """

    @property
    def center(self):
        """Returns the center of the bounding box."""
        # Sum of the two corners, halved.
        return np.sum(self.bbox, 0) / 2

    @center.setter
    def center(self, destination):
        """Sets the center of the bounding box.

        Parameters
        ----------
        destination : array-like[2]
            Coordinates of the new bounding box center.
        """
        self.move(destination=destination, origin=self.center)

    @property
    def x(self):
        """Returns the x-coordinate of the center of the bounding box."""
        return np.sum(self.bbox, 0)[0] / 2

    @x.setter
    def x(self, destination):
        """Sets the x-coordinate of the center of the bounding box.

        Parameters
        ----------
        destination : int or float
            x-coordinate of the bbox center.
        """
        destination = (destination, self.center[1])
        self.move(destination=destination, origin=self.center, axis="x")

    @property
    def y(self):
        """Returns the y-coordinate of the center of the bounding box."""
        return np.sum(self.bbox, 0)[1] / 2

    @y.setter
    def y(self, destination):
        """Sets the y-coordinate of the center of the bounding box.

        Parameters
        ----------
        destination : int or float
            y-coordinate of the bbox center.
        """
        destination = (self.center[0], destination)
        self.move(destination=destination, origin=self.center, axis="y")

    @property
    def xmax(self):
        """Returns the maximum x-value of the bounding box."""
        return self.bbox[1][0]

    @xmax.setter
    def xmax(self, destination):
        """Sets the x-coordinate of the maximum edge of the bounding box.

        Parameters
        ----------
        destination : int or float
            x-coordinate of the maximum edge of the bbox.
        """
        # axis="x" keeps the y-coordinate of the object unchanged.
        self.move(destination=(destination, 0), origin=self.bbox[1], axis="x")

    @property
    def ymax(self):
        """Returns the maximum y-value of the bounding box."""
        return self.bbox[1][1]

    @ymax.setter
    def ymax(self, destination):
        """Sets the y-coordinate of the maximum edge of the bounding box.

        Parameters
        ----------
        destination : int or float
            y-coordinate of the maximum edge of the bbox.
        """
        self.move(destination=(0, destination), origin=self.bbox[1], axis="y")

    @property
    def xmin(self):
        """Returns the minimum x-value of the bounding box."""
        return self.bbox[0][0]

    @xmin.setter
    def xmin(self, destination):
        """Sets the x-coordinate of the minimum edge of the bounding box.

        Parameters
        ----------
        destination : int or float
            x-coordinate of the minimum edge of the bbox.
        """
        self.move(destination=(destination, 0), origin=self.bbox[0], axis="x")

    @property
    def ymin(self):
        """Returns the minimum y-value of the bounding box."""
        return self.bbox[0][1]

    @ymin.setter
    def ymin(self, destination):
        """Sets the y-coordinate of the minimum edge of the bounding box.

        Parameters
        ----------
        destination : int or float
            y-coordinate of the minimum edge of the bbox.
        """
        self.move(destination=(0, destination), origin=self.bbox[0], axis="y")

    @property
    def size(self):
        """Returns the (x, y) size of the bounding box."""
        bbox = self.bbox
        return bbox[1] - bbox[0]

    @property
    def xsize(self):
        """Returns the horizontal size of the bounding box."""
        bbox = self.bbox
        return bbox[1][0] - bbox[0][0]

    @property
    def ysize(self):
        """Returns the vertical size of the bounding box."""
        bbox = self.bbox
        return bbox[1][1] - bbox[0][1]

    def movex(self, origin=0, destination=None):
        """Moves an object by a specified x-distance.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, key, or None
            Destination point of the move.

        Returns
        -------
        self
        """
        # Single-argument form: movex(5) moves +5 in x from 0.
        if destination is None:
            destination = origin
            origin = 0
        self.move(origin=(origin, 0), destination=(destination, 0))
        return self

    def movey(self, origin=0, destination=None):
        """Moves an object by a specified y-distance.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.

        Returns
        -------
        self
        """
        # Single-argument form: movey(5) moves +5 in y from 0.
        if destination is None:
            destination = origin
            origin = 0
        self.move(origin=(0, origin), destination=(0, destination))
        return self

    def __add__(self, element):
        """Adds an element to a Group.

        Parameters
        ----------
        element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group
            Element to add.

        Returns
        -------
        Group
            A new Group containing both operands; an existing Group's
            elements are merged in rather than nested.
        """
        if isinstance(self, Group):
            G = Group()
            G.add(self.elements)
            G.add(element)
        else:
            G = Group([self, element])
        return G
class Port:
    """Port object that can be used to easily snap together other geometric objects

    Parameters
    ----------
    name : str
        Name of the Port object.
    midpoint : array-like[2] of int or float
        Midpoint of the Port location.
    width : int or float
        Width of the Port.
    orientation : int or float
        Orientation (rotation) of the Port, in degrees; stored normalized
        to [0, 360).
    parent :
        Object that owns this Port (assigned by Device.add_port).
    """

    # Class-wide counter used to give every Port a unique ``uid``.
    _next_uid = 0

    def __init__(self, name=None, midpoint=(0, 0), width=1, orientation=0, parent=None):
        self.name = name
        self.midpoint = np.array(midpoint, dtype="float64")
        self.width = width
        # Normalize the angle into [0, 360).
        self.orientation = mod(orientation, 360)
        self.parent = parent
        self.info = {}
        self.uid = Port._next_uid
        if self.width < 0:
            raise ValueError("[PHIDL] Port creation " "error: width must be >=0")
        Port._next_uid += 1

    def __repr__(self):
        """Prints a description of the Port object, including the name,
        midpoint, width, and orientation of the Port."""
        return "Port (name {}, midpoint {}, width {}, orientation {})".format(
            self.name,
            self.midpoint,
            self.width,
            self.orientation,
        )

    @property
    def endpoints(self):
        """Returns the two endpoints of the Port as [left_point, right_point]."""
        # Half-width offset perpendicular to the port direction
        # (i.e. along orientation - 90 degrees).
        dxdy = np.array(
            [
                self.width / 2 * cos((self.orientation - 90) * pi / 180),
                self.width / 2 * sin((self.orientation - 90) * pi / 180),
            ]
        )
        left_point = self.midpoint - dxdy
        right_point = self.midpoint + dxdy
        return np.array([left_point, right_point])

    @endpoints.setter
    def endpoints(self, points):
        """Sets the endpoints of a Port, deriving midpoint, width, and
        orientation from them.

        Parameters
        ----------
        points : array-like[2] of int or float
            Endpoints to assign to the Port.
        """
        p1, p2 = np.array(points[0]), np.array(points[1])
        self.midpoint = (p1 + p2) / 2
        dx, dy = p2 - p1
        # Inverse of the endpoints getter: recover the orientation from the
        # left-to-right endpoint vector.
        self.orientation = np.arctan2(dx, -dy) * 180 / pi
        self.width = sqrt(dx**2 + dy**2)

    @property
    def normal(self):
        """Returns a vector normal to the Port

        Returns
        -------
        array-like[2][2]
            [midpoint, midpoint + unit vector along the orientation].
        """
        dx = cos((self.orientation) * pi / 180)
        dy = sin((self.orientation) * pi / 180)
        return np.array([self.midpoint, self.midpoint + np.array([dx, dy])])

    @property
    def x(self):
        """Returns the x-coordinate of the Port midpoint."""
        return self.midpoint[0]

    @property
    def y(self):
        """Returns the y-coordinate of the Port midpoint."""
        return self.midpoint[1]

    @property
    def center(self):
        """Returns the midpoint of the Port."""
        return self.midpoint

    def _copy(self, new_uid=True):
        """Copies a Port.

        Parameters
        ----------
        new_uid : bool
            If False, the copy keeps the original Port's uid (and the
            class uid counter is rolled back).

        Returns
        -------
        Port
            Copied Port.

        Notes
        -----
        Use this function instead of copy() (which will not create a new numpy
        array for self.midpoint) or deepcopy() (which will also deepcopy the
        self.parent DeviceReference recursively, causing performance issues).
        """
        new_port = Port(
            name=self.name,
            midpoint=self.midpoint,
            width=self.width,
            orientation=self.orientation,
            parent=self.parent,
        )
        new_port.info = _deepcopy(self.info)
        if not new_uid:
            # Reuse the original uid and undo the counter bump done by
            # Port.__init__ above.
            new_port.uid = self.uid
            Port._next_uid -= 1
        return new_port

    def rotate(self, angle=45, center=None):
        """Rotates a Port around the specified center point,
        if no centerpoint specified will rotate around (0,0).

        Parameters
        ----------
        angle : int or float
            Angle to rotate the Port in degrees.
        center : array-like[2] or None
            Point to rotate around; defaults to the Port's own midpoint
            (which leaves the midpoint unchanged).
        """
        self.orientation = mod(self.orientation + angle, 360)
        if center is None:
            center = self.midpoint
        self.midpoint = _rotate_points(self.midpoint, angle=angle, center=center)
        return self
class Polygon(gdspy.Polygon, _GeometryHelper):
    """Polygonal geometric object.

    Parameters
    ----------
    points : array-like[N][2]
        Coordinates of the vertices of the Polygon.
    gds_layer : int
        GDSII layer of the Polygon.
    gds_datatype : int
        GDSII datatype of the Polygon.
    parent :
        Owning object (the Device this Polygon belongs to); its cached
        bounding box is invalidated whenever this Polygon changes.
    """

    def __init__(self, points, gds_layer, gds_datatype, parent):
        self.parent = parent
        super().__init__(points=points, layer=gds_layer, datatype=gds_datatype)

    @property
    def bbox(self):
        """Returns the bounding box of the Polygon."""
        return self.get_bounding_box()

    def rotate(self, angle=45, center=(0, 0)):
        """Rotates a Polygon by the specified angle.

        Parameters
        ----------
        angle : int or float
            Angle to rotate the Polygon in degrees.
        center : array-like[2] or None
            Point to rotate around.
        """
        # phidl's API uses degrees; gdspy's rotate() expects radians.
        super().rotate(angle=angle * pi / 180, center=center)
        if self.parent is not None:
            # Invalidate the parent's cached bounding box.
            self.parent._bb_valid = False
        return self

    def move(self, origin=(0, 0), destination=None, axis=None):
        """Moves elements of the Device from the origin point to the
        destination. Both origin and destination can be 1x2 array-like, Port,
        or a key corresponding to one of the Ports in this device.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.
        axis : {'x', 'y'}
            Direction of move.
        """
        dx, dy = _parse_move(origin, destination, axis)
        super().translate(dx, dy)
        if self.parent is not None:
            # Invalidate the parent's cached bounding box.
            self.parent._bb_valid = False
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)):
        """Mirrors a Polygon across the line formed between the two
        specified points. ``points`` may be input as either single points
        [1,2] or array-like[N][2], and will return in kind.

        Parameters
        ----------
        p1 : array-like[N][2]
            First point of the line.
        p2 : array-like[N][2]
            Second point of the line.
        """
        # A gdspy PolygonSet may hold several point arrays; reflect each.
        for n, points in enumerate(self.polygons):
            self.polygons[n] = _reflect_points(points, p1, p2)
        if self.parent is not None:
            self.parent._bb_valid = False
        return self

    def simplify(self, tolerance=1e-3):
        """Removes points from the polygon but does not change the polygon
        shape by more than `tolerance` from the original. Uses the
        Ramer-Douglas-Peucker algorithm.

        Parameters
        ----------
        tolerance : float
            Tolerance value for the simplification algorithm. All points that
            can be removed without changing the resulting polygon by more than
            the value listed here will be removed. Also known as `epsilon` here
            https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
        """
        for n, points in enumerate(self.polygons):
            self.polygons[n] = _simplify(points, tolerance=tolerance)
        if self.parent is not None:
            self.parent._bb_valid = False
        return self
def make_device(fun, config=None, **kwargs):
    """Build a Device by calling *fun* with the arguments collected from
    *config* and any extra keyword arguments.

    Parameters
    ----------
    fun : callable
        Device-making function to call.
    config : dict or None
        A dictionary containing arguments for the given function.

    Returns
    -------
    D : Device
        A Device constructed from the specified function.

    Raises
    ------
    TypeError
        If *config* is neither a dict nor None.
    ValueError
        If *fun* does not return a Device.
    """
    if config is None:
        params = {}
    elif isinstance(config, dict):
        params = dict(config)
    else:
        raise TypeError(
            """[PHIDL] When creating Device() from a function, the
        second argument should be a ``config`` argument which is a
        dictionary containing arguments for the function.
        e.g. make_device(ellipse, config = ellipse_args_dict) """
        )
    # Explicit kwargs override values from config.
    params.update(**kwargs)
    D = fun(**params)
    if not isinstance(D, Device):
        raise ValueError(
            """[PHIDL] Device() was passed a function, but that
        function does not produce a Device."""
        )
    return D
class Device(gdspy.Cell, _GeometryHelper):
"""The basic object that holds polygons, labels, and ports in PHIDL"""
_next_uid = 0
    def __init__(self, *args, **kwargs):
        """Create a new empty Device.

        The name may be given positionally (``Device('arc')``) or as a
        keyword (``Device(name='arc')``); otherwise the Device is named
        "Unnamed".  Passing a callable is rejected (the old
        ``Device(device_making_function)`` API was removed in favor of
        ``make_device()``).
        """
        if len(args) > 0:
            if callable(args[0]):
                raise ValueError(
                    "[PHIDL] You can no longer create geometry "
                    "by calling Device(device_making_function), please use "
                    "make_device(device_making_function) instead"
                )

        # Allow name to be set like Device('arc') or Device(name = 'arc')
        if "name" in kwargs:
            name = kwargs["name"]
        elif (len(args) == 1) and (len(kwargs) == 0):
            name = args[0]
        else:
            name = "Unnamed"

        # Make a new blank device
        self.ports = {}
        self.info = {}
        self.aliases = {}
        # self.a = self.aliases
        # self.p = self.ports
        # Unique id assigned from a class-wide counter.
        self.uid = Device._next_uid
        super().__init__(name=name)
        Device._next_uid += 1
    def __getitem__(self, key):
        """If you have a Device D, allows access to aliases you made like
        D['arc2'].

        Parameters
        ----------
        key : str
            Element name to access within the Device.

        Returns
        -------
        DeviceReference, Polygon, or CellArray
            The aliased element stored under ``key``.

        Raises
        ------
        ValueError
            If no alias with that name exists in this Device.
        """
        try:
            return self.aliases[key]
        except Exception:
            raise ValueError(
                '[PHIDL] Tried to access alias "%s" in Device '
                '"%s", which does not exist' % (key, self.name)
            )
def __repr__(self):
"""Prints a description of the Device, including the name, uid,
ports, aliases, polygons, and references.
"""
return (
'Device (name "%s" (uid %s), ports %s, aliases %s, %s '
"polygons, %s references)"
% (
self.name,
self.uid,
list(self.ports.keys()),
list(self.aliases.keys()),
len(self.polygons),
len(self.references),
)
)
def __str__(self):
"""Prints a description of the Device, including the name, uid,
ports, aliases, polygons, and references."""
return self.__repr__()
    def __lshift__(self, element):
        """Convenience operator equivalent to add_ref(): ``ref = D << E``
        is shorthand for ``ref = D.add_ref(E)``.

        Parameters
        ----------
        element : Device
            Element to reference.

        Returns
        -------
        DeviceReference
            The reference just added to this Device.
        """
        return self.add_ref(element)
    def __setitem__(self, key, element):
        """Allow adding polygons and cell references like D['arc3'] = pg.arc()

        Parameters
        ----------
        key : str
            Alias name.
        element : DeviceReference, Polygon, or CellArray
            Object that will be accessible by alias name.

        Raises
        ------
        ValueError
            If ``element`` is not an accepted type.
        """
        if isinstance(element, (DeviceReference, Polygon, CellArray)):
            self.aliases[key] = element
        else:
            # NOTE(review): the message only mentions DeviceReference, but
            # Polygon and CellArray are accepted too.
            raise ValueError(
                '[PHIDL] Tried to assign alias "%s" in '
                'Device "%s", but failed because the item was '
                "not a DeviceReference" % (key, self.name)
            )
    @property
    def layers(self):
        """Returns a set of the Layers in the Device."""
        return self.get_layers()

    # NOTE(review): dead code kept from an earlier refactor -- candidates
    # for deletion once confirmed unused.
    # @property
    # def references(self):
    #     return [e for e in self.elements if isinstance(e, DeviceReference)]

    # @property
    # def polygons(self):
    #     return [e for e in self.elements if isinstance(e, gdspy.PolygonSet)]
@property
def bbox(self):
"""Returns the bounding box of the Device."""
bbox = self.get_bounding_box()
if bbox is None:
bbox = ((0, 0), (0, 0))
return np.array(bbox)
    def add_ref(self, device, alias=None):
        """Takes a Device and adds it as a DeviceReference to the current
        Device.

        Parameters
        ----------
        device : Device or iterable of Device
            Device(s) to be added as DeviceReference(s).
        alias : str
            Alias of the Device.

        Returns
        -------
        d : DeviceReference or list of DeviceReference
            A DeviceReference that is added to the current Device; a list
            of references when an iterable was passed.
        """
        # Broadcast over iterables of Devices (note: `alias` is not applied
        # in this case).
        if _is_iterable(device):
            return [self.add_ref(E) for E in device]
        if not isinstance(device, Device):
            raise TypeError(
                """[PHIDL] add_ref() was passed something that
                was not a Device object. """
            )
        d = DeviceReference(device)  # Create a DeviceReference (CellReference)
        d.owner = self
        self.add(d)  # Add DeviceReference (CellReference) to Device (Cell)
        if alias is not None:
            self.aliases[alias] = d
        return d  # Return the DeviceReference (CellReference)
    def add_polygon(self, points, layer=np.nan):
        """Adds a Polygon to the Device.

        Parameters
        ----------
        points : array-like[N][2], list of such, or gdspy.PolygonSet
            Coordinates of the vertices of the Polygon (or existing
            polygon geometry to copy).
        layer : int, array-like[2], or set
            Specific layer(s) to put polygon geometry on.  ``np.nan`` (the
            default) means "unspecified": geometry copied from a PolygonSet
            keeps its own layers, otherwise layer 0 is used.  ``None``
            skips adding entirely.

        Returns
        -------
        Polygon, list of Polygon, or None
            The polygon(s) created and added to this Device (None when
            ``layer is None``).
        """
        # None is the explicit "do nothing" sentinel; np.nan means
        # "unspecified" and is resolved below.
        if layer is None:
            return None

        # Check if input a list of polygons by seeing if it's 3 levels deep
        try:
            points[0][0][0]  # Try to access first x point
            return [self.add_polygon(p, layer) for p in points]
        except Exception:
            pass  # Verified points is not a list of polygons, continue on

        if isinstance(points, gdspy.PolygonSet):
            # Copy geometry out of an existing polygon object, one new
            # Polygon per contained sub-polygon.
            if layer is np.nan:
                layers = zip(points.layers, points.datatypes)
            else:
                layers = [layer] * len(points.polygons)
            polygons = []
            for p, layer in zip(points.polygons, layers):
                new_polygon = self.add_polygon(p, layer)
                new_polygon.properties = points.properties
                polygons.append(new_polygon)
            return polygons

        if layer is np.nan:
            layer = 0

        # Check if layer is actually a list of Layer objects
        try:
            if isinstance(layer, LayerSet):
                return [self.add_polygon(points, l) for l in layer._layers.values()]
            elif isinstance(layer, set):
                return [self.add_polygon(points, l) for l in layer]
            elif all([isinstance(l, (Layer)) for l in layer]):
                return [self.add_polygon(points, l) for l in layer]
            elif len(layer) > 2:  # Someone wrote e.g. layer = [1,4,5]
                raise ValueError(
                    """ [PHIDL] If specifying multiple layers
                    you must use set notation, e.g. {1,5,8} """
                )
        # NOTE(review): this broad except also swallows the ValueError raised
        # just above; the caller instead sees _parse_layer()'s error below.
        except Exception:
            pass

        # If in the form [[1,3,5],[2,4,6]]
        if len(points[0]) > 2:
            # Convert to form [[1,2],[3,4],[5,6]]
            points = np.column_stack(points)

        gds_layer, gds_datatype = _parse_layer(layer)
        polygon = Polygon(
            points=points, gds_layer=gds_layer, gds_datatype=gds_datatype, parent=self
        )
        self.add(polygon)
        return polygon
    def add_array(self, device, columns=2, rows=2, spacing=(100, 100), alias=None):
        """Creates a CellArray reference to a Device.

        Parameters
        ----------
        device : Device
            The referenced Device.
        columns : int
            Number of columns in the array.
        rows : int
            Number of rows in the array.
        spacing : array-like[2] of int or float
            Distances between adjacent columns and adjacent rows.
        alias : str or None
            Alias of the referenced Device.

        Returns
        -------
        a : CellArray
            A CellArray containing references to the input Device.
        """
        if not isinstance(device, Device):
            raise TypeError(
                """[PHIDL] add_array() was passed something that
                was not a Device object. """
            )
        # Non-integer counts are rounded rather than rejected.
        a = CellArray(
            device=device,
            columns=int(round(columns)),
            rows=int(round(rows)),
            spacing=spacing,
        )
        a.owner = self
        self.add(a)  # Add DeviceReference (CellReference) to Device (Cell)
        if alias is not None:
            self.aliases[alias] = a
        return a  # Return the CellArray
    def add_port(self, name=None, midpoint=(0, 0), width=1, orientation=45, port=None):
        """Adds a Port to the Device.

        Parameters
        ----------
        name : str
            Name of the Port object.
        midpoint : array-like[2] of int or float
            Midpoint of the Port location.
        width : int or float
            Width of the Port.
        orientation : int or float
            Orientation (rotation) of the Port.
        port : Port or None
            A Port if the added Port is a copy of an existing Port.

        Returns
        -------
        p : Port
            The Port (or copy) that was added to the Device.

        Notes
        -----
        Can be called to copy an existing port like
        add_port(port = existing_port) or to create a new port
        add_port(myname, mymidpoint, mywidth, myorientation).
        Can also be called to copy an existing port with a new name like
        add_port(port = existing_port, name = new_name)
        """
        if port is not None:
            # Copy an existing Port (optionally renamed via `name` below).
            if not isinstance(port, Port):
                raise ValueError(
                    "[PHIDL] add_port() error: Argument `port` must be a Port for copying"
                )
            p = port._copy(new_uid=True)
            p.parent = self
        elif isinstance(name, Port):
            # Convenience form: add_port(some_port) also copies.
            p = name._copy(new_uid=True)
            p.parent = self
            name = p.name
        else:
            # Build a brand-new Port from the given geometry.
            p = Port(
                name=name,
                midpoint=midpoint,
                width=width,
                orientation=orientation,
                parent=self,
            )
        if name is not None:
            p.name = name
        # Port names must be unique within a Device.
        if p.name in self.ports:
            raise ValueError(
                '[DEVICE] add_port() error: Port name "%s" already exists in this Device (name "%s", uid %s)'
                % (p.name, self.name, self.uid)
            )
        self.ports[p.name] = p
        return p
    def add_label(
        self,
        text="hello",
        position=(0, 0),
        magnification=None,
        rotation=None,
        anchor="o",
        layer=255,
    ):
        """Adds a Label to the Device.

        Parameters
        ----------
        text : str
            Label text.
        position : array-like[2]
            x-, y-coordinates of the Label location.
        magnification : int, float, or None
            Magnification factor for the Label text.
        rotation : int, float, or None
            Angle rotation of the Label text.
        anchor : {'n', 'e', 's', 'w', 'o', 'ne', 'nw', ...}
            Position of the anchor relative to the text.
        layer : int, array-like[2], or set
            Specific layer(s) to put Label on.  ``None`` skips adding
            entirely.

        Returns
        -------
        l : Label or None
            The Label added to the Device (None when ``layer is None``).
        """
        if layer is None:
            return None
        # NOTE(review): triggers at exactly 1023 as well, although the
        # message says "exceeds" -- confirm the intended GDS limit.
        if len(text) >= 1023:
            warnings.warn(
                "[PHIDL] add_label(): Label text exceeds 1023 characters, "
                + "this may affect compatibility with some GDS readers"
            )
        gds_layer, gds_datatype = _parse_layer(layer)

        # Non-string text (e.g. numbers) is stringified automatically.
        if not isinstance(text, str):
            text = str(text)
        l = Label(
            text=text,
            position=position,
            anchor=anchor,
            magnification=magnification,
            rotation=rotation,
            layer=gds_layer,
            texttype=gds_datatype,
        )
        self.add(l)
        return l
    def write_gds(
        self,
        filename,
        unit=1e-6,
        precision=1e-9,
        auto_rename=True,
        max_cellname_length=28,
        cellname="toplevel",
    ):
        """Writes a Device to a GDS file.

        Parameters
        ----------
        filename : str or file
            The GDS file to write to.
        unit : int or float
            Unit size for the objects in the library (in `meters`).
        precision : float
            Precision for the dimensions of the objects in the library (in
            `meters`).
        auto_rename : bool
            If True, fixes any duplicate cell names.
        max_cellname_length : int or None
            If given, and if `auto_rename` is True, enforces a limit on the
            length of the fixed duplicate cellnames.
        cellname : str
            Name of the top-level cell in the saved GDS

        Returns
        -------
        str or file
            The filename written to (with ".gds" appended if it was a
            string lacking that suffix).
        """
        # If string, try to append ".gds" to the end, otherwise leave alone
        try:
            if filename[-4:] != ".gds":
                filename += ".gds"
        except Exception:
            pass
        referenced_cells = list(self.get_dependencies(recursive=True))
        all_cells = [self] + referenced_cells

        # Autofix names so there are no duplicates
        if auto_rename:
            # Rename in creation (uid) order so results are deterministic;
            # remember the original names so they can be restored afterward.
            all_cells_sorted = sorted(all_cells, key=lambda x: x.uid)
            all_cells_original_names = [c.name for c in all_cells_sorted]
            used_names = {cellname}
            n = 1
            for c in all_cells_sorted:
                if max_cellname_length is not None:
                    new_name = c.name[:max_cellname_length]
                else:
                    new_name = c.name
                temp_name = new_name
                # Append a numeric suffix until the name is unique.  The
                # counter is shared across cells (monotonically increasing),
                # not reset for each cell.
                while temp_name in used_names:
                    n += 1
                    temp_name = new_name + ("%0.3i" % n)
                new_name = temp_name
                used_names.add(new_name)
                c.name = new_name
            self.name = cellname
        # Write the gds
        lib = gdspy.GdsLibrary(unit=unit, precision=precision)
        lib.write_gds(filename, cells=all_cells)
        # Return cells to their original names if they were auto-renamed
        if auto_rename:
            for n, c in enumerate(all_cells_sorted):
                c.name = all_cells_original_names[n]
        return filename
def remap_layers(self, layermap={}, include_labels=True):
"""Moves all polygons in the Device from one layer to another
according to the layermap argument.
Parameters
----------
layermap : dict
Dictionary of values in format {layer_from : layer_to}
include_labels : bool
Selects whether to move Labels along with polygons
"""
layermap = {_parse_layer(k): _parse_layer(v) for k, v in layermap.items()}
all_D = list(self.get_dependencies(True))
all_D.append(self)
for D in all_D:
for p in D.polygons:
for n, layer in enumerate(p.layers):
original_layer = (p.layers[n], p.datatypes[n])
original_layer = _parse_layer(original_layer)
if original_layer in layermap.keys():
new_layer = layermap[original_layer]
p.layers[n] = new_layer[0]
p.datatypes[n] = new_layer[1]
if include_labels:
for l in D.labels:
original_layer = (l.layer, l.texttype)
original_layer = _parse_layer(original_layer)
if original_layer in layermap.keys():
new_layer = layermap[original_layer]
l.layer = new_layer[0]
l.texttype = new_layer[1]
return self
def remove_layers(self, layers=(), include_labels=True, invert_selection=False):
"""Removes layers from a Device.
Parameters
----------
layers : int, array-like[2], or set
Specific layer(s) to remove.
include_labels : bool
If True, keeps the labels corresponding to the input layers.
invert_selection : bool
If True, removes all layers except those specified.
"""
layers = [_parse_layer(l) for l in layers]
all_D = list(self.get_dependencies(True))
all_D.append(self)
for D in all_D:
for polygonset in D.polygons:
polygon_layers = zip(polygonset.layers, polygonset.datatypes)
polygons_to_keep = [(pl in layers) for pl in polygon_layers]
if not invert_selection:
polygons_to_keep = [(not p) for p in polygons_to_keep]
polygonset.polygons = [
p for p, keep in zip(polygonset.polygons, polygons_to_keep) if keep
]
polygonset.layers = [
p for p, keep in zip(polygonset.layers, polygons_to_keep) if keep
]
polygonset.datatypes = [
p for p, keep in zip(polygonset.datatypes, polygons_to_keep) if keep
]
paths = []
for path in D.paths:
for layer in zip(path.layers, path.datatypes):
if layer not in layers:
paths.append(path)
D.paths = paths
if include_labels:
new_labels = []
for l in D.labels:
original_layer = (l.layer, l.texttype)
original_layer = _parse_layer(original_layer)
if invert_selection:
keep_layer = original_layer in layers
else:
keep_layer = original_layer not in layers
if keep_layer:
new_labels.append(l)
D.labels = new_labels
return self
def distribute(
self, elements="all", direction="x", spacing=100, separation=True, edge="center"
):
"""Distributes the specified elements in the Device.
Parameters
----------
elements : array-like of PHIDL objects or 'all'
Elements to distribute.
direction : {'x', 'y'}
Direction of distribution; either a line in the x-direction or
y-direction.
spacing : int or float
Distance between elements.
separation : bool
If True, guarantees elements are speparated with a fixed spacing
between; if False, elements are spaced evenly along a grid.
edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'}
Which edge to perform the distribution along (unused if
separation == True)
"""
if elements == "all":
elements = self.polygons + self.references
_distribute(
elements=elements,
direction=direction,
spacing=spacing,
separation=separation,
edge=edge,
)
return self
def align(self, elements="all", alignment="ymax"):
"""Align elements in the Device
Parameters
----------
elements : array-like of PHIDL objects, or 'all'
Elements in the Device to align.
alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'}
Which edge to align along (e.g. 'ymax' will move the elements such
that all of their topmost points are aligned)
"""
if elements == "all":
elements = self.polygons + self.references
_align(elements, alignment=alignment)
return self
def flatten(self, single_layer=None):
"""Flattens the heirarchy of the Device such that there are no longer
any references to other Devices. All polygons and labels from
underlying references are copied and placed in the top-level Device.
If single_layer is specified, all polygons are moved to that layer.
Parameters
----------
single_layer : None, int, tuple of int, or set of int
If not None, all polygons are moved to the specified
"""
if single_layer is None:
super().flatten(
single_layer=None, single_datatype=None, single_texttype=None
)
else:
gds_layer, gds_datatype = _parse_layer(single_layer)
super().flatten(
single_layer=gds_layer,
single_datatype=gds_datatype,
single_texttype=gds_datatype,
)
temp_polygons = list(self.polygons)
self.references = []
self.polygons = []
[self.add_polygon(poly) for poly in temp_polygons]
return self
def absorb(self, reference):
"""Flattens and absorbs polygons from an underlying DeviceReference
into the Device, destroying the reference in the process but keeping
the polygon geometry.
Parameters
----------
reference : DeviceReference
DeviceReference to be absorbed into the Device.
"""
if reference not in self.references:
raise ValueError(
"""[PHIDL] Device.absorb() failed -
the reference it was asked to absorb does not
exist in this Device. """
)
ref_polygons = reference.get_polygons(by_spec=True)
for (layer, polys) in ref_polygons.items():
[self.add_polygon(points=p, layer=layer) for p in polys]
self.add(reference.parent.labels)
self.add(reference.parent.paths)
self.remove(reference)
return self
def get_ports(self, depth=None):
"""Returns copies of all the ports of the Device, rotated and
translated so that they're in their top-level position. The Ports
returned are copies of the originals, but each copy has the same
``uid`` as the original so that they can be traced back to the
original if needed.
Parameters
----------
depth : int or None
If not None, defines from how many reference levels to
retrieve Ports from.
Returns
-------
port_list : list of Port
List of all Ports in the Device.
"""
port_list = [p._copy(new_uid=False) for p in self.ports.values()]
if depth is None or depth > 0:
for r in self.references:
if depth is None:
new_depth = None
else:
new_depth = depth - 1
ref_ports = r.parent.get_ports(depth=new_depth)
# Transform ports that came from a reference
ref_ports_transformed = []
for rp in ref_ports:
new_port = rp._copy(new_uid=False)
new_midpoint, new_orientation = r._transform_port(
rp.midpoint,
rp.orientation,
r.origin,
r.rotation,
r.x_reflection,
)
new_port.midpoint = new_midpoint
new_port.new_orientation = new_orientation
ref_ports_transformed.append(new_port)
port_list += ref_ports_transformed
return port_list
def get_info(self):
"""Gathers the .info dictionaries from every sub-Device and returns
them in a list.
Parameters
----------
depth : int or None
If not None, defines from how many reference levels to
retrieve Ports from.
Returns
-------
list of dictionaries
List of the ".info" property dictionaries from all sub-Devices
"""
D_list = self.get_dependencies(recursive=True)
info_list = []
for D in D_list:
info_list.append(D.info.copy())
return info_list
    def remove(self, items):
        """Removes items from a Device, which can include Ports, PolygonSets,
        CellReferences, and Labels.

        Parameters
        ----------
        items : array-like[N] or single item
            Items to be removed from the Device.

        Returns
        -------
        Device
            self (for chaining).
        """
        # Accept a single item as well as an iterable of items.
        if not _is_iterable(items):
            items = [items]
        for item in items:
            if isinstance(item, Port):
                try:
                    # Drop the Port by value (not by name).
                    self.ports = {k: v for k, v in self.ports.items() if v != item}
                except Exception:
                    raise ValueError(
                        """[PHIDL] Device.remove() cannot find the
                        Port
                        it was asked to remove in the Device:
                        "%s"."""
                        % (item)
                    )
            else:
                try:
                    # list.remove() raises ValueError when the item is absent.
                    if isinstance(item, gdspy.PolygonSet):
                        self.polygons.remove(item)
                    if isinstance(item, gdspy.CellReference):
                        self.references.remove(item)
                    if isinstance(item, gdspy.Label):
                        self.labels.remove(item)
                    # Also drop any alias that pointed at the removed item.
                    self.aliases = {k: v for k, v in self.aliases.items() if v != item}
                except Exception:
                    raise ValueError(
                        """[PHIDL] Device.remove() cannot find the
                        item
                        it was asked to remove in the Device:
                        "%s"."""
                        % (item)
                    )

        # Geometry changed: invalidate the cached bounding box.
        self._bb_valid = False
        return self
def rotate(self, angle=45, center=(0, 0)):
"""Rotates all Polygons in the Device around the specified
center point.
Parameters
----------
angle : int or float
Angle to rotate the Device in degrees.
center : array-like[2] or None
Midpoint of the Device.
"""
if angle == 0:
return self
for e in self.polygons:
e.rotate(angle=angle, center=center)
for e in self.references:
e.rotate(angle, center)
for e in self.labels:
e.rotate(angle, center)
for p in self.ports.values():
p.midpoint = _rotate_points(p.midpoint, angle, center)
p.orientation = mod(p.orientation + angle, 360)
self._bb_valid = False
return self
def move(self, origin=(0, 0), destination=None, axis=None):
"""Moves elements of the Device from the origin point to the
destination. Both origin and destination can be 1x2 array-like, Port,
or a key corresponding to one of the Ports in this Device.
Parameters
----------
origin : array-like[2], Port, or key
Origin point of the move.
destination : array-like[2], Port, or key
Destination point of the move.
axis : {'x', 'y'}
Direction of the move.
"""
dx, dy = _parse_move(origin, destination, axis)
# Move geometries
for e in self.polygons:
e.translate(dx, dy)
for e in self.references:
e.move((dx, dy))
for e in self.labels:
e.move((dx, dy))
for p in self.ports.values():
p.midpoint = np.array(p.midpoint) + np.array((dx, dy))
self._bb_valid = False
return self
def mirror(self, p1=(0, 1), p2=(0, 0)):
"""Mirrors a Device across the line formed between the two
specified points. ``points`` may be input as either single points
[1,2] or array-like[N][2], and will return in kind.
Parameters
----------
p1 : array-like[N][2]
First point of the line.
p2 : array-like[N][2]
Second point of the line.
"""
for e in self.polygons + self.references + self.labels:
e.mirror(p1, p2)
for p in self.ports.values():
p.midpoint = _reflect_points(p.midpoint, p1, p2)
phi = np.arctan2(p2[1] - p1[1], p2[0] - p1[0]) * 180 / pi
p.orientation = 2 * phi - p.orientation
self._bb_valid = False
return self
    def hash_geometry(self, precision=1e-4):
        """Computes an SHA1 hash of the geometry in the Device. For each layer,
        each polygon is individually hashed and then the polygon hashes are
        sorted, to ensure the hash stays constant regardless of the ordering
        of the polygons. Similarly, the layers are sorted by (layer, datatype).

        Parameters
        ----------
        precision : float
            Rounding precision for the objects in the Device. For instance,
            a precision of 1e-2 will round a point at (0.124, 1.748) to
            (0.12, 1.75).

        Returns
        -------
        str
            Hash result in the form of an SHA1 hex digest string.

        Notes
        -----
        Algorithm:

        .. code-block:: python

            hash(
                hash(First layer information: [layer1, datatype1]),
                hash(Polygon 1 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ),
                hash(Polygon 2 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ),
                hash(Polygon 3 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ),
                hash(Second layer information: [layer2, datatype2]),
                hash(Polygon 1 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ),
                hash(Polygon 2 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3)] ),
            )
        """
        polygons_by_spec = self.get_polygons(by_spec=True)
        layers = np.array(list(polygons_by_spec.keys()))
        # Sort the (layer, datatype) specs so the hash is independent of
        # dict iteration order.
        sorted_layers = layers[np.lexsort((layers[:, 0], layers[:, 1]))]
        # A random offset which fixes common rounding errors intrinsic
        # to floating point math. Example: with a precision of 0.1, the
        # floating points 7.049999 and 7.050001 round to different values
        # (7.0 and 7.1), but offset values (7.220485 and 7.220487) don't
        magic_offset = 0.17048614
        final_hash = hashlib.sha1()
        for layer in sorted_layers:
            layer_hash = hashlib.sha1(layer.astype(np.int64)).digest()
            polygons = polygons_by_spec[tuple(layer)]
            # Quantize points to `precision` so numerically-identical
            # geometry always produces identical bytes to hash.
            polygons = [
                np.ascontiguousarray((p / precision) + magic_offset, dtype=np.int64)
                for p in polygons
            ]
            # Sort the per-polygon digests so polygon ordering within a
            # layer does not affect the result.
            polygon_hashes = np.sort([hashlib.sha1(p).digest() for p in polygons])
            final_hash.update(layer_hash)
            for ph in polygon_hashes:
                final_hash.update(ph)
        return final_hash.hexdigest()
class DeviceReference(gdspy.CellReference, _GeometryHelper):
    """Simple reference to an existing Device.

    Parameters
    ----------
    device : Device
        The referenced Device.
    origin : array-like[2] of int or float
        Position where the Device is inserted.
    rotation : int or float
        Angle of rotation of the reference (in `degrees`)
    magnification : int or float
        Magnification factor for the reference.
    x_reflection : bool
        If True, the reference is reflected parallel to the x-direction before
        being rotated.
    """

    def __init__(
        self, device, origin=(0, 0), rotation=0, magnification=None, x_reflection=False
    ):
        super().__init__(
            ref_cell=device,
            origin=origin,
            rotation=rotation,
            magnification=magnification,
            x_reflection=x_reflection,
            ignore_missing=False,
        )
        # Set by the owning Device when the reference is added to it; used
        # to invalidate the owner's cached bounding box on transforms.
        self.owner = None
        # The ports of a DeviceReference have their own unique id (uid),
        # since two DeviceReferences of the same parent Device can be
        # in different locations and thus do not represent the same port
        self._local_ports = {
            name: port._copy(new_uid=True) for name, port in device.ports.items()
        }

    @property
    def parent(self):
        # Alias for the referenced cell (the parent Device).
        return self.ref_cell

    @parent.setter
    def parent(self, value):
        self.ref_cell = value

    def __repr__(self):
        """Prints a description of the DeviceReference, including parent
        Device, ports, origin, rotation, and x_reflection.
        """
        return (
            'DeviceReference (parent Device "%s", ports %s, origin %s, rotation %s, x_reflection %s)'
            % (
                self.parent.name,
                list(self.ports.keys()),
                self.origin,
                self.rotation,
                self.x_reflection,
            )
        )

    def __str__(self):
        """Prints a description of the DeviceReference, including parent
        Device, ports, origin, rotation, and x_reflection.
        """
        return self.__repr__()

    def __getitem__(self, val):
        """This allows you to access an alias from the reference's parent and
        receive a copy of the reference which is correctly rotated and
        translated.

        Parameters
        ----------
        val : str
            Alias from the reference's parent to be accessed.

        Returns
        -------
        new_reference : DeviceReference
            DeviceReference for the copied parent reference.
        """
        try:
            alias_device = self.parent[val]
        except KeyError:
            raise ValueError(
                '[PHIDL] Tried to access alias "%s" from parent '
                'Device "%s", which does not exist' % (val, self.parent.name)
            )
        # Start from the aliased reference's own transform, then compose
        # this reference's transform on top in GDS order:
        # reflect, then rotate, then translate.
        new_reference = DeviceReference(
            alias_device.parent,
            origin=alias_device.origin,
            rotation=alias_device.rotation,
            magnification=alias_device.magnification,
            x_reflection=alias_device.x_reflection,
        )
        if self.x_reflection:
            new_reference.mirror((1, 0))
        if self.rotation is not None:
            new_reference.rotate(self.rotation)
        if self.origin is not None:
            new_reference.move(self.origin)
        return new_reference

    @property
    def ports(self):
        """This property allows you to access myref.ports, and receive a copy
        of the ports dict which is correctly rotated and translated."""
        for name, port in self.parent.ports.items():
            port = self.parent.ports[name]  # NOTE(review): redundant -- `port` is already bound by items()
            new_midpoint, new_orientation = self._transform_port(
                port.midpoint,
                port.orientation,
                self.origin,
                self.rotation,
                self.x_reflection,
            )
            # Lazily create a local copy (with its own uid) for ports added
            # to the parent after this reference was constructed.
            if name not in self._local_ports:
                self._local_ports[name] = port._copy(new_uid=True)
            self._local_ports[name].midpoint = new_midpoint
            self._local_ports[name].orientation = mod(new_orientation, 360)
            self._local_ports[name].parent = self
        # Remove any ports that no longer exist in the reference's parent
        parent_names = self.parent.ports.keys()
        local_names = list(self._local_ports.keys())
        for name in local_names:
            if name not in parent_names:
                self._local_ports.pop(name)
        return self._local_ports

    @property
    def info(self):
        """Returns information about the properties of the reference's
        parent.
        """
        return self.parent.info

    @property
    def bbox(self):
        """Returns the bounding box of the DeviceReference."""
        bbox = self.get_bounding_box()
        if bbox is None:
            # Empty geometry: report a degenerate box at the origin.
            bbox = ((0, 0), (0, 0))
        return np.array(bbox)

    def _transform_port(
        self, point, orientation, origin=(0, 0), rotation=None, x_reflection=False
    ):
        """Applies various transformations to a Port.

        Parameters
        ----------
        point : array-like[N][2]
            Coordinates of the Port.
        orientation : int, float, or None
            Orientation of the Port
        origin : array-like[2] or None
            If given, shifts the transformed points to the specified origin.
        rotation : int, float, or None
            Angle of rotation to apply
        x_reflection : bool
            If True, reflects the Port across the x-axis before applying
            rotation.

        Returns
        -------
        new_point : array-like[N][2]
            Coordinates of the transformed Port.
        new_orientation : int, float, or None
            Transformed orientation, normalized to [0, 360).
        """
        # Apply GDS-type transformations to a port (x_ref)
        new_point = np.array(point)
        new_orientation = orientation
        # GDS transform order: reflect, then rotate, then translate.
        if x_reflection:
            new_point[1] = -new_point[1]
            new_orientation = -orientation
        if rotation is not None:
            new_point = _rotate_points(new_point, angle=rotation, center=[0, 0])
            new_orientation += rotation
        if origin is not None:
            new_point = new_point + np.array(origin)
        new_orientation = mod(new_orientation, 360)
        return new_point, new_orientation

    def move(self, origin=(0, 0), destination=None, axis=None):
        """Moves the DeviceReference from the origin point to the
        destination. Both origin and destination can be 1x2 array-like,
        Port, or a key corresponding to one of the Ports in this
        DeviceReference.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.
        axis : {'x', 'y'}
            Direction of move.
        """
        dx, dy = _parse_move(origin, destination, axis)
        self.origin = np.array(self.origin) + np.array((dx, dy))
        # Invalidate the owning Device's cached bounding box.
        if self.owner is not None:
            self.owner._bb_valid = False
        return self

    def rotate(self, angle=45, center=(0, 0)):
        """Rotates all Polygons in the DeviceReference around the specified
        centerpoint.

        Parameters
        ----------
        angle : int or float
            Angle to rotate the DeviceReference in degrees.
        center : array-like[2] or None
            Midpoint of the DeviceReference.
        """
        if angle == 0:
            return self
        if isinstance(center, Port):
            center = center.midpoint
        self.rotation += angle
        self.origin = _rotate_points(self.origin, angle, center)
        if self.owner is not None:
            self.owner._bb_valid = False
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)):
        """Mirrors a DeviceReference across the line formed between the two
        specified points. ``points`` may be input as either single points
        [1,2] or array-like[N][2], and will return in kind.

        Parameters
        ----------
        p1 : array-like[N][2]
            First point of the line.
        p2 : array-like[N][2]
            Second point of the line.
        """
        if isinstance(p1, Port):
            p1 = p1.midpoint
        if isinstance(p2, Port):
            p2 = p2.midpoint
        p1 = np.array(p1)
        p2 = np.array(p2)
        # The reflection is expressed in the reference's transform
        # parameters (origin / rotation / x_reflection) rather than on the
        # geometry itself; the steps below conjugate an x-axis reflection
        # by the translation+rotation that maps the mirror line onto the
        # x-axis. Statement order is significant.
        # Translate so reflection axis passes through origin
        self.origin = self.origin - p1
        # Rotate so reflection axis aligns with x-axis
        angle = np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / pi
        self.origin = _rotate_points(self.origin, angle=-angle, center=[0, 0])
        self.rotation -= angle
        # Reflect across x-axis
        self.x_reflection = not self.x_reflection
        self.origin[1] = -self.origin[1]
        self.rotation = -self.rotation
        # Un-rotate and un-translate
        self.origin = _rotate_points(self.origin, angle=angle, center=[0, 0])
        self.rotation += angle
        self.origin = self.origin + p1
        if self.owner is not None:
            self.owner._bb_valid = False
        return self

    def connect(self, port, destination, overlap=0):
        """Moves and rotates this object such that the Port specified by
        `port` is connected (aligned and adjacent) with the Port specified by
        `destination`.

        Parameters
        ----------
        port : str or Port
            The local port (or its name) to align.
        destination : Port
            The port to connect to.
        overlap : int or float
            Distance to slide this reference along the destination port's
            orientation after connecting, so the ports overlap.
        """
        # ``port`` can either be a string with the name or an actual Port
        if port in self.ports:  # Then ``port`` is a key for the ports dict
            p = self.ports[port]
        elif isinstance(port, Port):
            p = port
        else:
            raise ValueError(
                "[PHIDL] connect() did not receive a Port or valid port name"
                + " - received (%s), ports available are (%s)"
                % (port, tuple(self.ports.keys()))
            )
        # Rotate so the two ports face each other (180 degrees apart),
        # pivoting about the local port's midpoint.
        self.rotate(
            angle=180 + destination.orientation - p.orientation, center=p.midpoint
        )
        # Bring the port midpoints together.
        self.move(origin=p, destination=destination)
        # Optionally slide along the destination orientation by `overlap`.
        self.move(
            -overlap
            * np.array(
                [
                    cos(destination.orientation * pi / 180),
                    sin(destination.orientation * pi / 180),
                ]
            )
        )
        return self
class CellArray(gdspy.CellArray, _GeometryHelper):
    """Multiple references to an existing cell in an array format.

    Parameters
    ----------
    device : Device
        The referenced Device.
    columns : int
        Number of columns in the array.
    rows : int
        Number of rows in the array.
    spacing : array-like[2] of int or float
        Distances between adjacent columns and adjacent rows.
    origin : array-like[2] of int or float
        Position where the cell is inserted.
    rotation : int or float
        Angle of rotation of the reference (in `degrees`).
    magnification : int or float
        Magnification factor for the reference.
    x_reflection : bool
        If True, the reference is reflected parallel to the x direction
        before being rotated.
    """

    def __init__(
        self,
        device,
        columns,
        rows,
        spacing,
        origin=(0, 0),
        rotation=0,
        magnification=None,
        x_reflection=False,
    ):
        super().__init__(
            columns=columns,
            rows=rows,
            spacing=spacing,
            ref_cell=device,
            origin=origin,
            rotation=rotation,
            magnification=magnification,
            x_reflection=x_reflection,
            ignore_missing=False,
        )
        self.parent = device
        # Set by the owning Device when the array is added to it; used to
        # invalidate the owner's cached bounding box on transforms.
        self.owner = None

    @property
    def bbox(self):
        """Returns the bounding box of the CellArray."""
        bbox = self.get_bounding_box()
        if bbox is None:
            # Empty geometry: report a degenerate box at the origin.
            bbox = ((0, 0), (0, 0))
        return np.array(bbox)

    def move(self, origin=(0, 0), destination=None, axis=None):
        """Moves the CellArray from the origin point to the destination. Both
        origin and destination can be 1x2 array-like, Port, or a key
        corresponding to one of the Ports in this CellArray.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.
        axis : {'x', 'y'}
            Direction of the move.
        """
        dx, dy = _parse_move(origin, destination, axis)
        self.origin = np.array(self.origin) + np.array((dx, dy))
        if self.owner is not None:
            self.owner._bb_valid = False
        return self

    def rotate(self, angle=45, center=(0, 0)):
        """Rotates all elements in the CellArray around the specified
        centerpoint.

        Parameters
        ----------
        angle : int or float
            Angle to rotate the CellArray in degrees.
        center : array-like[2], Port, or None
            Midpoint of the CellArray.
        """
        if angle == 0:
            return self
        if isinstance(center, Port):
            center = center.midpoint
        self.rotation += angle
        self.origin = _rotate_points(self.origin, angle, center)
        if self.owner is not None:
            self.owner._bb_valid = False
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)):
        """Mirrors a CellArray across the line formed between the two
        specified points.

        Parameters
        ----------
        p1 : array-like[N][2]
            First point of the line.
        p2 : array-like[N][2]
            Second point of the line.
        """
        if isinstance(p1, Port):
            p1 = p1.midpoint
        if isinstance(p2, Port):
            p2 = p2.midpoint
        p1 = np.array(p1)
        p2 = np.array(p2)
        # The reflection is expressed in the array's transform parameters
        # by conjugating an x-axis reflection with the translation+rotation
        # that maps the mirror line onto the x-axis. Order is significant.
        # Translate so reflection axis passes through origin
        self.origin = self.origin - p1
        # Rotate so reflection axis aligns with x-axis
        angle = np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / pi
        self.origin = _rotate_points(self.origin, angle=-angle, center=[0, 0])
        self.rotation -= angle
        # Reflect across x-axis
        self.x_reflection = not self.x_reflection
        self.origin[1] = -self.origin[1]
        self.rotation = -self.rotation
        # Un-rotate and un-translate
        self.origin = _rotate_points(self.origin, angle=angle, center=[0, 0])
        self.rotation += angle
        self.origin = self.origin + p1
        if self.owner is not None:
            self.owner._bb_valid = False
        return self
class Label(gdspy.Label, _GeometryHelper):
    """Display-only text annotation. A Label marks or documents a location
    in the layout without adding any polygon geometry; it exists purely for
    display and labeling purposes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Store the anchor position as a mutable float array so the
        # transforms below can operate on it numerically.
        self.position = np.array(self.position, dtype="float64")

    @property
    def bbox(self):
        """Returns the (degenerate, zero-area) bounding box of the Label,
        located at its anchor position."""
        x, y = self.position[0], self.position[1]
        return np.array([[x, y], [x, y]])

    def rotate(self, angle=45, center=(0, 0)):
        """Rotates the Label's anchor position around the given centerpoint.

        Parameters
        ----------
        angle : int or float
            Angle to rotate the Label in degrees.
        center : array-like[2] or None
            Midpoint of the Label.
        """
        self.position = _rotate_points(self.position, angle=angle, center=center)
        return self

    def move(self, origin=(0, 0), destination=None, axis=None):
        """Translates the Label by the vector from `origin` to `destination`.
        Both may be 1x2 array-like, a Port, or a port-name key.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.
        axis : {'x', 'y'}
            Direction of the move.
        """
        dx, dy = _parse_move(origin, destination, axis)
        self.position += np.asarray((dx, dy))
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)):
        """Reflects the Label's anchor position across the line through
        the two specified points.

        Parameters
        ----------
        p1 : array-like[N][2]
            First point of the line.
        p2 : array-like[N][2]
            Second point of the line.
        """
        self.position = _reflect_points(self.position, p1, p2)
        return self
class Group(_GeometryHelper):
    """Groups objects together so they can be manipulated as though
    they were a single object (move/rotate/mirror)."""

    def __init__(self, *args):
        # Ordered, duplicate-free list of grouped elements.
        self.elements = []
        self.add(args)

    def __repr__(self):
        """Prints the number of elements in the Group."""
        return "Group (%s elements total)" % (len(self.elements))

    def __len__(self):
        """Returns the number of elements in the Group."""
        return len(self.elements)

    def __iadd__(self, element):
        """Adds an element to the Group (``group += element``).

        Parameters
        ----------
        element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group
            Element to be added.
        """
        return self.add(element)

    @property
    def bbox(self):
        """Returns the bounding box of the Group (the union of the
        bounding boxes of all its elements)."""
        if len(self.elements) == 0:
            raise ValueError("[PHIDL] Group is empty, no bbox is available")
        # One row per element: (xmin, ymin, xmax, ymax)
        bboxes = np.empty([len(self.elements), 4])
        for n, e in enumerate(self.elements):
            bboxes[n] = e.bbox.flatten()
        bbox = (
            (bboxes[:, 0].min(), bboxes[:, 1].min()),
            (bboxes[:, 2].max(), bboxes[:, 3].max()),
        )
        return np.array(bbox)

    def add(self, element):
        """Adds an element to the Group.

        Parameters
        ----------
        element : Device, DeviceReference, Port, Polygon, CellArray, Label, or Group
            Element to add.  Iterables are flattened recursively; None is
            ignored.
        """
        if _is_iterable(element):
            # Recursively add the contents of iterables (lists/tuples/etc.).
            # (Was a list comprehension used only for its side effects.)
            for e in element:
                self.add(e)
        elif element is None:
            return self
        elif isinstance(element, PHIDL_ELEMENTS):
            self.elements.append(element)
        else:
            raise ValueError(
                "[PHIDL] add() Could not add element to Group, the only "
                "allowed element types are "
                "(Device, DeviceReference, Polygon, CellArray, Label, Group)"
            )
        # Remove non-unique entries, preserving first-seen order.
        seen = set()
        unique_elements = []
        for e in self.elements:
            if e not in seen:
                seen.add(e)
                unique_elements.append(e)
        self.elements = unique_elements
        return self

    def rotate(self, angle=45, center=(0, 0)):
        """Rotates all elements in a Group around the specified centerpoint.

        Parameters
        ----------
        angle : int or float
            Angle to rotate the Group in degrees.
        center : array-like[2] or None
            Midpoint of the Group.
        """
        for e in self.elements:
            e.rotate(angle=angle, center=center)
        return self

    def move(self, origin=(0, 0), destination=None, axis=None):
        """Moves the Group from the origin point to the destination. Both
        origin and destination can be 1x2 array-like, Port, or a key
        corresponding to one of the Ports in this Group.

        Parameters
        ----------
        origin : array-like[2], Port, or key
            Origin point of the move.
        destination : array-like[2], Port, or key
            Destination point of the move.
        axis : {'x', 'y'}
            Direction of the move.
        """
        for e in self.elements:
            e.move(origin=origin, destination=destination, axis=axis)
        return self

    def mirror(self, p1=(0, 1), p2=(0, 0)):
        """Mirrors a Group across the line formed between the two
        specified points. ``points`` may be input as either single points
        [1,2] or array-like[N][2], and will return in kind.

        Parameters
        ----------
        p1 : array-like[N][2]
            First point of the line.
        p2 : array-like[N][2]
            Second point of the line.
        """
        for e in self.elements:
            e.mirror(p1=p1, p2=p2)
        return self

    def distribute(self, direction="x", spacing=100, separation=True, edge="center"):
        """Distributes the elements in the Group.

        Parameters
        ----------
        direction : {'x', 'y'}
            Direction of distribution; either a line in the x-direction or
            y-direction.
        spacing : int or float
            Distance between elements.
        separation : bool
            If True, guarantees elements are separated with a fixed spacing
            between; if False, elements are spaced evenly along a grid.
        edge : {'x', 'xmin', 'xmax', 'y', 'ymin', 'ymax'}
            Which edge to perform the distribution along (unused if
            separation == True)
        """
        _distribute(
            elements=self.elements,
            direction=direction,
            spacing=spacing,
            separation=separation,
            edge=edge,
        )
        return self

    def align(self, alignment="ymax"):
        """Aligns the elements in the Group.

        Parameters
        ----------
        alignment : {'x', 'y', 'xmin', 'xmax', 'ymin', 'ymax'}
            Which edge to align along (e.g. 'ymax' will move the elements
            such that all of their topmost points are aligned)
        """
        _align(elements=self.elements, alignment=alignment)
        return self
def _linear_transition(y1, y2):
dx = y2 - y1
return lambda t: y1 + t * dx
PHIDL_ELEMENTS = (Device, DeviceReference, Polygon, CellArray, Label, Group)
class Path(_GeometryHelper):
"""The Path object for making smooth Paths. To be used in combination
with a CrossSection to create a Device.
Parameters
----------
path : array-like[N][2], Path, or list of Paths
Points or Paths to append() initially
"""
def __init__(self, path=None):
self.points = np.array([[0, 0]], dtype=np.float64)
self.start_angle = 0
self.end_angle = 0
self.info = {}
if path is not None:
# If array[N][2]
if (
(np.asarray(path, dtype=object).ndim == 2)
and np.issubdtype(np.array(path).dtype, np.number)
and (np.shape(path)[1] == 2)
):
self.points = np.array(path, dtype=np.float64)
nx1, ny1 = self.points[1] - self.points[0]
self.start_angle = np.arctan2(ny1, nx1) / np.pi * 180
nx2, ny2 = self.points[-1] - self.points[-2]
self.end_angle = np.arctan2(ny2, nx2) / np.pi * 180
elif isinstance(path, Path):
self.points = np.array(path.points, dtype=np.float64)
self.start_angle = path.start_angle
self.end_angle = path.end_angle
self.info = {}
elif np.asarray(path, dtype=object).size > 1:
self.append(path)
else:
raise ValueError(
'[PHIDL] Path() the "path" argument must be either '
+ "blank, a Path object, an array-like[N][2] list of points, or a list of these"
)
def __len__(self):
return len(self.points)
@property
def bbox(self):
"""Returns the bounding box of the Path."""
bbox = [
(np.min(self.points[:, 0]), np.min(self.points[:, 1])),
(np.max(self.points[:, 0]), np.max(self.points[:, 1])),
]
return np.array(bbox)
def append(self, path):
"""Attaches the input path to the end of this object. The input path
will be automatically rotated and translated such that it continues
smoothly from the previous segment.
Parameters
----------
path : Path, array-like[N][2], or list of Paths
The input path that will be appended
"""
# If appending another Path, load relevant variables
if isinstance(path, Path):
start_angle = path.start_angle
end_angle = path.end_angle
points = path.points
# If array[N][2]
elif (
(np.asarray(path, dtype=object).ndim == 2)
and np.issubdtype(np.array(path).dtype, np.number)
and (np.shape(path)[1] == 2)
):
points = np.asfarray(path)
nx1, ny1 = points[1] - points[0]
start_angle = np.arctan2(ny1, nx1) / np.pi * 180
nx2, ny2 = points[-1] - points[-2]
end_angle = np.arctan2(ny2, nx2) / np.pi * 180
# If list of Paths or arrays
elif isinstance(path, (list, tuple)):
for p in path:
self.append(p)
return self
else:
raise ValueError(
'[PHIDL] Path.append() the "path" argument must be either '
+ "a Path object, an array-like[N][2] list of points, or a list of these"
)
# Connect beginning of new points with old points
points = _rotate_points(points, angle=self.end_angle - start_angle)
points += self.points[-1, :] - points[0, :]
# Update end angle
self.end_angle = mod(end_angle + self.end_angle - start_angle, 360)
# Concatenate old points + new points
self.points = np.vstack([self.points, points[1:]])
return self
def extrude(self, width, layer=np.nan, simplify=None):
"""Combines the 1D Path with a 1D cross-section to form 2D polygons.
Parameters
----------
width : int, float, array-like[2], or CrossSection
If set to a single number (e.g. `width=1.7`): makes a constant-width extrusion
If set to a 2-element array (e.g. `width=[1.8,2.5]`): makes an extrusion
whose width varies linearly from width[0] to width[1]
If set to a CrossSection: uses the CrossSection parameters for extrusion
layer : int, tuple of int, or set of int
The layer to put the extruded polygons on. `layer=0` is used by default.
simplify : float
Tolerance value for the simplification algorithm. All points that
can be removed without changing the resulting polygon by more than
the value listed here will be removed. Also known as `epsilon` here
https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Returns
-------
Device
A Device with polygons added that correspond to the extrusion of the
Path
"""
if isinstance(width, CrossSection) and (layer is not np.nan):
raise ValueError(
"""[PHIDL] extrude(): when using a CrossSection as the
`width` argument cannot also define the `layer` argument"""
)
if not isinstance(width, CrossSection) and (layer is np.nan):
layer = 0
if isinstance(width, CrossSection):
X = width
elif np.size(width) == 1:
X = CrossSection()
X.add(width=width, layer=layer)
elif np.size(width) == 2:
X = CrossSection()
X.add(width=_linear_transition(width[0], width[1]), layer=layer)
else:
raise ValueError(
"""[PHIDL] extrude(): width argument must be one of
int, float, array-like[2], or CrossSection"""
)
D = Device("extrude")
for section in X.sections:
width = section["width"]
offset = section["offset"]
layer = section["layer"]
ports = section["ports"]
if callable(offset):
P_offset = self.copy()
P_offset.offset(offset)
points = P_offset.points
start_angle = P_offset.start_angle
end_angle = P_offset.end_angle
offset = 0
else:
points = self.points
start_angle = self.start_angle
end_angle = self.end_angle
if callable(width):
# Compute lengths
dx = np.diff(self.points[:, 0])
dy = np.diff(self.points[:, 1])
lengths = np.cumsum(np.sqrt((dx) ** 2 + (dy) ** 2))
lengths = np.concatenate([[0], lengths])
width = width(lengths / lengths[-1])
else:
pass
points1 = self._centerpoint_offset_curve(
points,
offset_distance=offset + width / 2,
start_angle=start_angle,
end_angle=end_angle,
)
points2 = self._centerpoint_offset_curve(
points,
offset_distance=offset - width / 2,
start_angle=start_angle,
end_angle=end_angle,
)
# Simplify lines using the Ramer–Douglas–Peucker algorithm
if isinstance(simplify, bool):
raise ValueError(
"[PHIDL] the simplify argument must be a number (e.g. 1e-3) or None"
)
if simplify is not None:
points1 = _simplify(points1, tolerance=simplify)
points2 = _simplify(points2, tolerance=simplify)
# Join points together
points = np.concatenate([points1, points2[::-1, :]])
# Combine the offset-lines into a polygon and union if join_after == True
# if join_after == True: # Use clipper to perform a union operation
# points = np.array(clipper.offset([points], 0, 'miter', 2, int(1/simplify), 0)[0])
D.add_polygon(points, layer=layer)
# Add ports if they were specified
if ports[0] is not None:
new_port = D.add_port(name=ports[0])
new_port.endpoints = (points1[0], points2[0])
if ports[1] is not None:
new_port = D.add_port(name=ports[1])
new_port.endpoints = (points2[-1], points1[-1])
return D
def offset(self, offset=0):
"""Offsets the Path so that it follows the Path centerline plus
an offset. The offset can either be a fixed value, or a function
of the form my_offset(t) where t goes from 0->1
Parameters
----------
offset : int or float, callable
Magnitude of the offset
"""
if offset == 0:
points = self.points
start_angle = self.start_angle
end_angle = self.end_angle
elif callable(offset):
# Compute lengths
dx = np.diff(self.points[:, 0])
dy = np.diff(self.points[:, 1])
lengths = np.cumsum(np.sqrt((dx) ** 2 + (dy) ** 2))
lengths = np.concatenate([[0], lengths])
# Create list of offset points and perform offset
points = self._centerpoint_offset_curve(
self.points,
offset_distance=offset(lengths / lengths[-1]),
start_angle=self.start_angle,
end_angle=self.end_angle,
)
# Numerically compute start and end angles
tol = 1e-6
ds = tol / lengths[-1]
ny1 = offset(ds) - offset(0)
start_angle = np.arctan2(-ny1, tol) / np.pi * 180 + self.start_angle
start_angle = np.round(start_angle, decimals=6)
ny2 = offset(1) - offset(1 - ds)
end_angle = np.arctan2(-ny2, tol) / np.pi * 180 + self.end_angle
end_angle = np.round(end_angle, decimals=6)
else: # Offset is just a number
points = self._centerpoint_offset_curve(
self.points,
offset_distance=offset,
start_angle=self.start_angle,
end_angle=self.end_angle,
)
start_angle = self.start_angle
end_angle = self.end_angle
self.points = points
self.start_angle = start_angle
self.end_angle = end_angle
return self
def copy(self):
"""Creates a copy of the Path.
Returns
-------
Path
A copy of the Path
"""
P = Path()
P.info = self.info.copy()
P.points = np.array(self.points)
P.start_angle = self.start_angle
P.end_angle = self.end_angle
return P
def move(self, origin=(0, 0), destination=None, axis=None):
"""Moves the Path from the origin point to the
destination. Both origin and destination can be 1x2 array-like
or a Port.
Parameters
----------
origin : array-like[2], Port
Origin point of the move.
destination : array-like[2], Port
Destination point of the move.
axis : {'x', 'y'}
Direction of move.
"""
dx, dy = _parse_move(origin, destination, axis)
self.points += np.array([dx, dy])
return self
def rotate(self, angle=45, center=(0, 0)):
"""Rotates all Polygons in the Device around the specified
center point. If no center point specified will rotate around (0,0).
Parameters
----------
angle : int or float
Angle to rotate the Device in degrees.
center : array-like[2] or None
Midpoint of the Device.
"""
if angle == 0:
return self
self.points = _rotate_points(self.points, angle, center)
if self.start_angle is not None:
self.start_angle = mod(self.start_angle + angle, 360)
if self.end_angle is not None:
self.end_angle = mod(self.end_angle + angle, 360)
return self
def mirror(self, p1=(0, 1), p2=(0, 0)):
"""Mirrors the Path across the line formed between the two
specified points. ``points`` may be input as either single points
[1,2] or array-like[N][2], and will return in kind.
Parameters
----------
p1 : array-like[N][2]
First point of the line.
p2 : array-like[N][2]
Second point of the line.
"""
self.points = _reflect_points(self.points, p1, p2)
angle = np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / pi
if self.start_angle is not None:
self.start_angle = mod(2 * angle - self.start_angle, 360)
if self.end_angle is not None:
self.end_angle = mod(2 * angle - self.end_angle, 360)
return self
def _centerpoint_offset_curve(
self, points, offset_distance, start_angle, end_angle
):
"""Creates a offset curve (but does not account for cusps etc)
by computing the centerpoint offset of the supplied x and y points"""
new_points = np.array(points, dtype=np.float64)
dx = np.diff(points[:, 0])
dy = np.diff(points[:, 1])
theta = np.arctan2(dy, dx)
theta = np.concatenate([theta[0:1], theta, theta[-1:]])
theta_mid = (np.pi + theta[1:] + theta[:-1]) / 2 # Mean angle between segments
dtheta_int = np.pi + theta[:-1] - theta[1:] # Internal angle between segments
offset_distance = offset_distance / np.sin(dtheta_int / 2)
new_points[:, 0] -= offset_distance * np.cos(theta_mid)
new_points[:, 1] -= offset_distance * np.sin(theta_mid)
if start_angle is not None:
new_points[0, :] = points[0, :] + (
np.sin(start_angle * np.pi / 180) * offset_distance[0],
-np.cos(start_angle * np.pi / 180) * offset_distance[0],
)
if end_angle is not None:
new_points[-1, :] = points[-1, :] + (
np.sin(end_angle * np.pi / 180) * offset_distance[-1],
-np.cos(end_angle * np.pi / 180) * offset_distance[-1],
)
return new_points
def _parametric_offset_curve(self, points, offset_distance, start_angle, end_angle):
"""Creates a parametric offset (does not account for cusps etc)
by using gradient of the supplied x and y points"""
x = points[:, 0]
y = points[:, 1]
dxdt = np.gradient(x)
dydt = np.gradient(y)
if start_angle is not None:
dxdt[0] = np.cos(start_angle * np.pi / 180)
dydt[0] = np.sin(start_angle * np.pi / 180)
if end_angle is not None:
dxdt[-1] = np.cos(end_angle * np.pi / 180)
dydt[-1] = np.sin(end_angle * np.pi / 180)
x_offset = x + offset_distance * dydt / np.sqrt(dxdt**2 + dydt**2)
y_offset = y - offset_distance * dxdt / np.sqrt(dydt**2 + dxdt**2)
return np.array([x_offset, y_offset]).T
def length(self):
"""Computes the cumulative length (arc length) of the path.
Returns
-------
float
The length of the Path
"""
x = self.points[:, 0]
y = self.points[:, 1]
dx = np.diff(x)
dy = np.diff(y)
return np.sum(np.sqrt((dx) ** 2 + (dy) ** 2))
def curvature(self):
"""Calculates the curvature of the Path. Note this curvature is
numerically computed so areas where the curvature jumps instantaneously
(such as between an arc and a straight segment) will be slightly
interpolated, and sudden changes in point density along the curve can
cause discontinuities.
Returns
-------
s : array-like[N]
The arc-length of the Path
K : array-like[N]
The curvature of the Path
"""
x = self.points[:, 0]
y = self.points[:, 1]
dx = np.diff(x)
dy = np.diff(y)
ds = np.sqrt((dx) ** 2 + (dy) ** 2)
s = np.cumsum(ds)
theta = np.arctan2(dy, dx)
# Fix discontinuities arising from np.arctan2
dtheta = np.diff(theta)
dtheta[np.where(dtheta > np.pi)] += -2 * np.pi
dtheta[np.where(dtheta < -np.pi)] += 2 * np.pi
theta = np.concatenate([[0], np.cumsum(dtheta)]) + theta[0]
K = np.gradient(theta, s, edge_order=2)
return s, K
def hash_geometry(self, precision=1e-4):
    """Computes an SHA1 hash of the points in the Path and the start_angle
    and end_angle

    Parameters
    ----------
    precision : float
        Rounding precision for the objects in the Device. For instance,
        a precision of 1e-2 will round a point at (0.124, 1.748) to (0.12, 1.75)

    Returns
    -------
    str
        Hash result in the form of an SHA1 hex digest string

    Notes
    -----
    NOTE(review): the algorithm sketch below describes a layer/polygon hash,
    but this method only hashes the path points plus the two end angles --
    the sketch looks copied from a polygon-based implementation.

    Algorithm:
    .. code-block:: python
        hash(
            hash(First layer information: [layer1, datatype1]),
            hash(Polygon 1 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ),
            hash(Polygon 2 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ),
            hash(Polygon 3 on layer 1 points: [(x1,y1),(x2,y2),(x3,y3)] ),
            hash(Second layer information: [layer2, datatype2]),
            hash(Polygon 1 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] ),
            hash(Polygon 2 on layer 2 points: [(x1,y1),(x2,y2),(x3,y3)] ),
        )
    """
    # A random offset which fixes common rounding errors intrinsic
    # to floating point math. Example: with a precision of 0.1, the
    # floating points 7.049999 and 7.050001 round to different values
    # (7.0 and 7.1), but offset values (7.220485 and 7.220487) don't
    magic_offset = 0.17048614

    final_hash = hashlib.sha1()
    # quantize points to `precision` and feed the raw int64 buffer to SHA1
    p = np.ascontiguousarray(
        (self.points / precision) + magic_offset, dtype=np.int64
    )
    final_hash.update(p)
    # include the end-facing angles so identical point sets with different
    # terminations hash differently
    p = np.ascontiguousarray((self.start_angle, self.end_angle), dtype=np.float64)
    final_hash.update(p)
    return final_hash.hexdigest()
class CrossSection:
    """The CrossSection object for extruding a Path. To be used in
    combination with a Path to create a Device.

    A CrossSection is built up incrementally with add(); the constructor
    takes no arguments.
    """

    def __init__(self):
        self.sections = []   # list of dicts, one per cross-sectional element
        self.ports = set()   # every (non-None) port name claimed so far
        self.aliases = {}    # name -> element dict, for X["name"] access
        self.info = {}       # free-form metadata

    def add(self, width=1, offset=0, layer=0, ports=(None, None), name=None):
        """Adds a cross-sectional element to the CrossSection. If ports are
        specified, when creating a Device with the extrude() command there
        will be Ports at the ends.

        Parameters
        ----------
        width : float
            Width of the segment
        offset : float
            Offset of the segment (positive values = right hand side)
        layer : int, tuple of int, or set of int
            The polygon layer to put the segment on
        ports : array-like[2] of str, int, or None
            If not None, specifies the names for the ports at the ends of the
            cross-sectional element
        name : str, int, or None
            Name of the cross-sectional element for later access

        Returns
        -------
        CrossSection
            This CrossSection (to allow chained calls)

        Raises
        ------
        ValueError
            On non-positive numeric widths, a malformed `ports` pair, or a
            port/name that is already registered.
        """
        # non-numeric widths (e.g. callables) deliberately skip this check
        if isinstance(width, (float, int)) and (width <= 0):
            raise ValueError("[PHIDL] CrossSection.add(): widths must be >0")
        if len(ports) != 2:
            raise ValueError("[PHIDL] CrossSection.add(): must receive 2 port names")
        for p in ports:
            if p in self.ports:
                raise ValueError(
                    '[PHIDL] CrossSection.add(): a port named "%s" already '
                    "exists in this CrossSection, please rename port" % p
                )
        if name in self.aliases:
            raise ValueError(
                '[PHIDL] CrossSection.add(): an element named "%s" already '
                "exists in this CrossSection, please change the name" % name
            )

        new_segment = dict(
            width=width,
            offset=offset,
            layer=layer,
            ports=ports,
        )

        if name is not None:
            self.aliases[name] = new_segment
        self.sections.append(new_segment)
        # register the element's port names so later add() calls detect clashes
        for p in ports:
            if p is not None:
                self.ports.add(p)

        return self

    def extrude(self, path, simplify=None):
        """Combines the 1D CrossSection with a 1D Path to form 2D polygons.

        Parameters
        ----------
        path : Path
            The Path for the CrossSection to follow
        simplify : float
            Tolerance value for the simplification algorithm. All points that
            can be removed without changing the resulting polygon by more than
            the value listed here will be removed. Also known as `epsilon` here
            https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm

        Returns
        -------
        Device
            A Device with polygons added that correspond to the extrusion of the
            Path with the CrossSection
        """
        # delegate to Path.extrude, passing this CrossSection as the width spec
        D = path.extrude(width=self, simplify=simplify)
        return D

    def copy(self):
        """Creates a copy of the CrossSection.

        Returns
        -------
        CrossSection
            A copy of the CrossSection (containers are shallow-copied, so the
            section dicts themselves are shared with the original)
        """
        X = CrossSection()
        X.info = self.info.copy()
        X.sections = list(self.sections)
        X.ports = set(self.ports)
        X.aliases = dict(self.aliases)
        return X

    def __getitem__(self, key):
        """Allows access to cross-sectional elements by name like X['etch2'].

        Parameters
        ----------
        key : str
            Element name to access within the CrossSection.

        Raises
        ------
        ValueError
            If no element was added with that name.
        """
        try:
            return self.aliases[key]
        except KeyError:
            raise ValueError(
                '[PHIDL] Tried to access name "%s" in CrossSection '
                "which does not exist" % (key)
            )
| mit | 95a46df0a228d3b651d77213cb78f655 | 32.767435 | 132 | 0.538871 | 4.178081 | false | false | false | false |
cmu-delphi/delphi-epidata | src/acquisition/wiki/wiki_extract.py | 2 | 3945 | """
===============
=== Purpose ===
===============
Extracts and stores article access counts
See also: wiki.py
=================
=== Changelog ===
=================
2017-02-23
* secrets and minor cleanup
2016-08-14
* use pageviews instead of pagecounts-raw
* default job limit from 1000 to 100
2015-08-11
+ Store total and other metadata in `wiki_meta`
2015-05-21
* Original version
"""
# standard library
from datetime import datetime, timedelta
import json
# third party
import mysql.connector
# first party
import delphi.operations.secrets as secrets
def floor_timestamp(timestamp):
    """Return *timestamp* truncated down to the start of its hour."""
    y, m, d, h = timestamp.year, timestamp.month, timestamp.day, timestamp.hour
    return datetime(y, m, d, h)
def ceil_timestamp(timestamp):
    # round up to the start of the next hour; a timestamp already on an
    # exact hour still advances by one full hour (floor(ts) + 1h)
    return floor_timestamp(timestamp) + timedelta(hours=1)
def round_timestamp(timestamp):
    """Round *timestamp* to the nearest hour boundary; an exact half-hour
    rounds up."""
    lower = floor_timestamp(timestamp)
    upper = ceil_timestamp(timestamp)
    closer_to_lower = (timestamp - lower) < (upper - timestamp)
    return lower if closer_to_lower else upper
def get_timestamp(name):
    """Parse the timestamp embedded in a pageviews dump filename.

    Expected layout: 'pageviews-YYYYMMDD-HHMMSS...'.  (The retired
    pagecounts files used a one-character-longer prefix, hence the old
    shifted slice offsets.)
    """
    slices = ((10, 14), (14, 16), (16, 18), (19, 21), (21, 23), (23, 25))
    year, month, day, hour, minute, second = (int(name[a:b]) for a, b in slices)
    return datetime(year, month, day, hour, minute, second)
def run(job_limit=100):
    """Extract per-article page-hit counts from queued `wiki_raw` jobs.

    Reads up to *job_limit* jobs whose status is 2 (queued for extraction),
    accumulates per-article counts into `wiki` and the per-language 'total'
    entries into `wiki_meta`, then advances each processed job to status 3.
    """
    # connect to the database
    u, p = secrets.db.epi
    cnx = mysql.connector.connect(user=u, password=p, database='epidata')
    cur = cnx.cursor()

    # # Some preparation for utf-8, and it is a temporary trick solution. The real solution should change those char set and collation encoding to utf8 permanently
    # cur.execute("SET NAMES utf8;")
    # cur.execute("SET CHARACTER SET utf8;")
    # # I print SHOW SESSION VARIABLES LIKE 'character\_set\_%'; and SHOW SESSION VARIABLES LIKE 'collation\_%'; on my local computer
    # cur.execute("SET character_set_client=utf8mb4;")
    # cur.execute("SET character_set_connection=utf8mb4;")
    # cur.execute("SET character_set_database=utf8;")
    # cur.execute("SET character_set_results=utf8mb4;")
    # cur.execute("SET character_set_server=utf8;")
    # cur.execute("SET collation_connection=utf8mb4_general_ci;")
    # cur.execute("SET collation_database=utf8_general_ci;")
    # cur.execute("SET collation_server=utf8_general_ci;")

    # find jobs that are queued for extraction
    cur.execute('SELECT `id`, `name`, `data` FROM `wiki_raw` WHERE `status` = 2 ORDER BY `name` ASC LIMIT %s', (job_limit,))
    jobs = []
    for (id, name, data_str) in cur:
        jobs.append((id, name, json.loads(data_str)))
    print('Processing data from %d jobs'%(len(jobs)))

    # get the counts from the json object and insert into (or update) the database
    # Notice that data_collect contains data with different languages
    for (id, name, data_collect) in jobs:
        print('processing job [%d|%s]...'%(id, name))
        # the dump filename encodes the capture time; snap it to the hour
        timestamp = round_timestamp(get_timestamp(name))
        for language in data_collect.keys():
            data = data_collect[language]
            for article in sorted(data.keys()):
                count = data[article]
                # NOTE(review): the utf-8 encode / latin-1 decode round-trip
                # looks like a workaround for the connection charset (see the
                # commented-out SET NAMES block above) -- confirm before changing
                cur.execute('INSERT INTO `wiki` (`datetime`, `article`, `count`, `language`) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE `count` = `count` + %s', (str(timestamp), article.encode('utf-8').decode('latin-1'), count, language, count))
                if article == 'total':
                    # the synthetic 'total' article also feeds the metadata table
                    cur.execute('INSERT INTO `wiki_meta` (`datetime`, `date`, `epiweek`, `total`, `language`) VALUES (%s, date(%s), yearweek(%s, 6), %s, %s) ON DUPLICATE KEY UPDATE `total` = `total` + %s', (str(timestamp), str(timestamp), str(timestamp), count, language, count))
        # update the job
        cur.execute('UPDATE `wiki_raw` SET `status` = 3 WHERE `id` = %s', (id,))

    # cleanup
    cur.close()
    cnx.commit()
    cnx.close()
# allow running the extraction step directly as a script
if __name__ == '__main__':
    run()
| mit | aa43f0fd88141927e7639ee546ca5abc | 34.527778 | 269 | 0.651965 | 3.427454 | false | false | false | false |
cmu-delphi/delphi-epidata | src/server/_query.py | 1 | 15964 | from datetime import date, datetime
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
Mapping,
)
from sqlalchemy import text
from sqlalchemy.engine import Row
from ._common import db, app
from ._db import metadata
from ._printer import create_printer, APrinter
from ._exceptions import DatabaseErrorException
from ._validate import extract_strings
from ._params import GeoPair, SourceSignalPair, TimePair
from .utils import time_values_to_ranges, TimeValues
def date_string(value: int) -> str:
    """Convert a date integer (YYYYMMDD) into a date string (YYYY-MM-DD).

    :param value: the date as an 8-digit integer

    Uses integer (floor) division instead of the previous float division,
    so values beyond float precision (2**53) can no longer be misparsed.
    """
    year = (value // 10000) % 10000
    month = (value // 100) % 100
    day = value % 100
    return "{0:04d}-{1:02d}-{2:02d}".format(year, month, day)
def to_condition(
    field: str,
    value: Union[str, Tuple[int, int], int],
    param_key: str,
    params: Dict[str, Any],
    formatter=lambda x: x,
) -> str:
    """Render one SQL comparison for *field*, binding values into *params*.

    A list/tuple value becomes a BETWEEN clause with two bound parameters
    (":param_key" and ":param_key_2"); anything else becomes an equality
    with a single bound parameter.  *formatter* is applied to each value
    before binding.
    """
    if not isinstance(value, (list, tuple)):
        params[param_key] = formatter(value)
        return f"{field} = :{param_key}"
    upper_key = f"{param_key}_2"
    params[param_key] = formatter(value[0])
    params[upper_key] = formatter(value[1])
    return f"{field} BETWEEN :{param_key} AND :{upper_key}"
def filter_values(
    field: str,
    values: Optional[Sequence[Union[str, Tuple[int, int], int]]],
    param_key: str,
    params: Dict[str, Any],
    formatter=lambda x: x,
):
    """Build an OR-joined SQL filter over *values* for *field*.

    Each value gets its own uniquely-keyed bound parameter via
    to_condition().  An empty/None *values* yields the literal "FALSE" so
    the query matches nothing instead of everything.
    """
    if not values:
        return "FALSE"
    joined = " OR ".join(
        to_condition(field, value, f"{param_key}_{i}", params, formatter)
        for i, value in enumerate(values)
    )
    return f"({joined})"
def filter_strings(
    field: str,
    values: Optional[Sequence[str]],
    param_key: str,
    params: Dict[str, Any],
):
    # thin wrapper: string values need no formatting before being bound
    return filter_values(field, values, param_key, params)
def filter_integers(
    field: str,
    values: Optional[Sequence[Union[Tuple[int, int], int]]],
    param_key: str,
    params: Dict[str, Any],
):
    # thin wrapper: integers (or (low, high) ranges) are bound as-is
    return filter_values(field, values, param_key, params)
def filter_dates(
    field: str,
    values: Optional[TimeValues],
    param_key: str,
    params: Dict[str, Any],
):
    # collapse individual days into contiguous ranges first (fewer SQL
    # conditions), then bind each value as a YYYY-MM-DD string
    ranges = time_values_to_ranges(values)
    return filter_values(field, ranges, param_key, params, date_string)
def filter_fields(generator: Iterable[Dict[str, Any]]):
    """Yield rows from *generator*, restricted to the fields requested via
    the "fields" request parameter.

    Fields prefixed with "-" are excluded; unprefixed fields form an
    allow-list.  If both kinds are present, only the allow-list applies.
    Without a "fields" parameter, rows pass through untouched.
    """
    fields = extract_strings("fields")
    if not fields:
        yield from generator
    else:
        exclude_fields = {f[1:] for f in fields if f.startswith("-")}
        include_fields = [f for f in fields if not f.startswith("-") and f not in exclude_fields]

        for row in generator:
            filtered = dict()
            if include_fields:
                # positive list
                for field in include_fields:
                    if field in row:
                        filtered[field] = row[field]
            elif exclude_fields:
                # negative list
                for k, v in row.items():
                    if k not in exclude_fields:
                        filtered[k] = v
            yield filtered
def filter_geo_pairs(
    type_field: str,
    value_field: str,
    values: Sequence[GeoPair],
    param_key: str,
    params: Dict[str, Any],
) -> str:
    """
    returns the SQL sub query to filter by the given geo pairs
    """

    def filter_pair(pair: GeoPair, i) -> str:
        # one disjunct per pair: match the geo type, and -- unless the pair
        # wildcards all values with geo_values=True -- the listed geo values
        type_param = f"{param_key}_{i}t"
        params[type_param] = pair.geo_type
        if isinstance(pair.geo_values, bool) and pair.geo_values:
            return f"{type_field} = :{type_param}"
        return f"({type_field} = :{type_param} AND {filter_strings(value_field, cast(Sequence[str], pair.geo_values), type_param, params)})"

    parts = [filter_pair(p, i) for i, p in enumerate(values)]

    if not parts:
        # something has to be selected
        return "FALSE"

    return f"({' OR '.join(parts)})"
def filter_source_signal_pairs(
    source_field: str,
    signal_field: str,
    values: Sequence[SourceSignalPair],
    param_key: str,
    params: Dict[str, Any],
) -> str:
    """
    returns the SQL sub query to filter by the given source signal pairs
    """

    def filter_pair(pair: SourceSignalPair, i) -> str:
        # one disjunct per pair: match the source, and -- unless the pair
        # wildcards all signals with signal=True -- the listed signal names
        source_param = f"{param_key}_{i}t"
        params[source_param] = pair.source
        if isinstance(pair.signal, bool) and pair.signal:
            return f"{source_field} = :{source_param}"
        return f"({source_field} = :{source_param} AND {filter_strings(signal_field, cast(Sequence[str], pair.signal), source_param, params)})"

    parts = [filter_pair(p, i) for i, p in enumerate(values)]

    if not parts:
        # something has to be selected
        return "FALSE"

    return f"({' OR '.join(parts)})"
def filter_time_pair(
    type_field: str,
    time_field: str,
    pair: Optional[TimePair],
    param_key: str,
    params: Dict[str, Any],
) -> str:
    """
    returns the SQL sub query to filter by the given time pair
    """
    # safety path; should normally not be reached as time pairs are enforced by the API
    if not pair:
        return "FALSE"

    type_param = f"{param_key}_0t"
    params[type_param] = pair.time_type
    if isinstance(pair.time_values, bool) and pair.time_values:
        # time_values=True wildcards all values: match on the time type only
        parts = f"{type_field} = :{type_param}"
    else:
        # collapse the requested values into contiguous ranges so fewer SQL
        # terms (and bound parameters) are generated
        ranges = pair.to_ranges().time_values
        parts = f"({type_field} = :{type_param} AND {filter_integers(time_field, ranges, type_param, params)})"
    return f"({parts})"
def parse_row(
    row: "Row",
    fields_string: Optional[Sequence[str]] = None,
    fields_int: Optional[Sequence[str]] = None,
    fields_float: Optional[Sequence[str]] = None,
):
    """Convert a database row into a plain dict, coercing each listed field
    to its declared type.

    Fields named in a list but absent from the row come back as None, as do
    NULL numeric values; date/datetime values in string fields are rendered
    as ISO 'YYYY-MM-DD' strings.
    """
    available = set(row.keys())
    parsed = dict()
    for f in fields_string or []:
        value = row[f] if f in available else None
        if isinstance(value, (date, datetime)):
            value = value.strftime("%Y-%m-%d")  # format to iso date
        parsed[f] = value
    for f in fields_int or []:
        parsed[f] = int(row[f]) if f in available and row[f] is not None else None
    for f in fields_float or []:
        parsed[f] = float(row[f]) if f in available and row[f] is not None else None
    return parsed
def parse_result(
    query: str,
    params: Dict[str, Any],
    fields_string: Optional[Sequence[str]] = None,
    fields_int: Optional[Sequence[str]] = None,
    fields_float: Optional[Sequence[str]] = None,
) -> List[Dict[str, Any]]:
    """
    execute the given query and return the result as a list of dictionaries
    """
    # eager variant: materializes every row at once (contrast run_query,
    # which streams with a row limit)
    return [parse_row(row, fields_string, fields_int, fields_float) for row in db.execute(text(query), **params)]
def limit_query(query: str, limit: int) -> str:
    """Return *query* with a LIMIT clause for *limit* rows appended."""
    return f"{query} LIMIT {limit}"
def run_query(p: APrinter, query_tuple: Tuple[str, Dict[str, Any]]):
    """Execute *query_tuple* against the database with streaming results,
    capped at one row more than the printer still accepts (the extra row
    lets callers detect that results were truncated)."""
    query, params = query_tuple
    # limit rows + 1 for detecting whether we would have more
    full_query = text(limit_query(query, p.remaining_rows + 1))
    app.logger.info("full_query: %s, params: %s", full_query, params)
    return db.execution_options(stream_results=True).execute(full_query, **params)
def _identity_transform(row: Dict[str, Any], _: Row) -> Dict[str, Any]:
    """
    identity transform
    """
    # default transform for execute_queries: return the parsed row
    # unchanged and ignore the raw database row
    return row
def execute_queries(
    queries: Sequence[Tuple[str, Dict[str, Any]]],
    fields_string: Sequence[str],
    fields_int: Sequence[str],
    fields_float: Sequence[str],
    transform: Callable[[Dict[str, Any], Row], Dict[str, Any]] = _identity_transform,
):
    """
    execute the given queries and return the response to send them

    Rows from all queries are streamed through the printer until its row
    budget is exhausted; each row is parsed into typed fields and passed
    through *transform* before being emitted.
    """
    p = create_printer()

    # honor the "fields" request parameter by trimming the typed field lists
    # ("-name" excludes a field; bare names form an allow-list)
    fields_to_send = set(extract_strings("fields") or [])
    if fields_to_send:
        exclude_fields = {f[1:] for f in fields_to_send if f.startswith("-")}
        include_fields = {f for f in fields_to_send if not f.startswith("-") and f not in exclude_fields}
        if include_fields:
            fields_string = [v for v in fields_string if v in include_fields]
            fields_int = [v for v in fields_int if v in include_fields]
            fields_float = [v for v in fields_float if v in include_fields]
        if exclude_fields:
            fields_string = [v for v in fields_string if v not in exclude_fields]
            fields_int = [v for v in fields_int if v not in exclude_fields]
            fields_float = [v for v in fields_float if v not in exclude_fields]

    query_list = list(queries)

    def dummy_gen():
        # the unreachable `yield` makes this a generator function that
        # produces no rows; used when there is nothing to send
        # NOTE(review): dummy_gen is passed to `p` uncalled below, while
        # gen(r) is passed called -- the printer apparently accepts both;
        # confirm before unifying
        if 3 > 4:
            yield {}
        pass

    if not query_list or p.remaining_rows <= 0:
        return p(dummy_gen)

    def gen(first_rows):
        # stream the already-executed first query ...
        for row in first_rows:
            yield transform(parse_row(row, fields_string, fields_int, fields_float), row)
        # ... then lazily execute and stream the remaining queries, stopping
        # as soon as the printer's row budget is used up
        for query_params in query_list:
            if p.remaining_rows <= 0:
                # no more rows
                break
            r = run_query(p, query_params)
            for row in r:
                yield transform(parse_row(row, fields_string, fields_int, fields_float), row)

    # execute first query
    try:
        r = run_query(p, query_list.pop(0))
    except Exception as e:
        raise DatabaseErrorException(str(e))

    # now use a generator for sending the rows and execute all the other queries
    return p(gen(r))
def execute_query(
    query: str,
    params: Dict[str, Any],
    fields_string: Sequence[str],
    fields_int: Sequence[str],
    fields_float: Sequence[str],
    transform: Callable[[Dict[str, Any], Row], Dict[str, Any]] = _identity_transform,
):
    """
    execute the given query and return the response to send it
    """
    # single-query convenience wrapper around execute_queries
    return execute_queries([(query, params)], fields_string, fields_int, fields_float, transform)
def _join_l(value: Union[str, List[str]]):
return ", ".join(value) if isinstance(value, (list, tuple)) else value
class QueryBuilder:
    """
    query builder helper class for simplified conditions

    Accumulates fields, WHERE conditions (with bound parameters), grouping
    and ordering, and renders the final SELECT via str(self) / .query.
    All where* methods return self so calls can be chained.
    """

    def __init__(self, table: str, alias: str):
        # FROM-clause target rendered as "table alias"; all generated
        # column references are qualified with the alias
        self.table: str = f"{table} {alias}"
        self.alias: str = alias
        self.group_by: Union[str, List[str]] = ""
        self.order: Union[str, List[str]] = ""
        self.fields: Union[str, List[str]] = "*"
        self.conditions: List[str] = []
        # bound parameter values, keyed by the placeholder names used in
        # the generated conditions
        self.params: Dict[str, Any] = {}
        self.subquery: str = ""
        # optional index hint, rendered as USE INDEX (...)
        self.index: Optional[str] = None

    def retable(self, new_table: str):
        """
        updates this QueryBuilder to point to another table.
        useful for switching to a different view of the data...
        """
        # WARNING: if we ever switch to re-using QueryBuilder, we should change this to return a copy.
        self.table: str = f"{new_table} {self.alias}"
        return self

    @property
    def conditions_clause(self) -> str:
        # AND-joined WHERE body (without the WHERE keyword)
        return " AND ".join(self.conditions)

    @property
    def fields_clause(self) -> str:
        return _join_l(self.fields) if self.fields else "*"

    @property
    def order_clause(self) -> str:
        return _join_l(self.order)

    def __str__(self):
        # assemble the final SELECT; unset pieces collapse to empty strings
        where = f"WHERE {self.conditions_clause}" if self.conditions else ""
        order = f"ORDER BY {_join_l(self.order)}" if self.order else ""
        group_by = f"GROUP BY {_join_l(self.group_by)}" if self.group_by else ""
        index = f"USE INDEX ({self.index})" if self.index else ""

        return f"SELECT {self.fields_clause} FROM {self.table} {index} {self.subquery} {where} {group_by} {order}"

    @property
    def query(self) -> str:
        """
        returns the full query
        """
        return str(self)

    def where(self, **kvargs: Any) -> "QueryBuilder":
        # one aliased equality condition (and bound parameter) per keyword
        for k, v in kvargs.items():
            self.conditions.append(f"{self.alias}.{k} = :{k}")
            self.params[k] = v
        return self

    def where_strings(
        self,
        field: str,
        values: Optional[Sequence[str]],
        param_key: Optional[str] = None,
    ) -> "QueryBuilder":
        fq_field = f"{self.alias}.{field}" if "." not in field else field
        self.conditions.append(filter_strings(fq_field, values, param_key or field, self.params))
        return self

    def _fq_field(self, field: str) -> str:
        # fully qualify a bare column name with this builder's alias;
        # names that already contain "." are assumed qualified
        return f"{self.alias}.{field}" if "." not in field else field

    def where_integers(
        self,
        field: str,
        values: Optional[Sequence[Union[Tuple[int, int], int]]],
        param_key: Optional[str] = None,
    ) -> "QueryBuilder":
        fq_field = self._fq_field(field)
        self.conditions.append(filter_integers(fq_field, values, param_key or field, self.params))
        return self

    def where_geo_pairs(
        self,
        type_field: str,
        value_field: str,
        values: Sequence[GeoPair],
        param_key: Optional[str] = None,
    ) -> "QueryBuilder":
        fq_type_field = self._fq_field(type_field)
        fq_value_field = self._fq_field(value_field)
        self.conditions.append(
            filter_geo_pairs(
                fq_type_field,
                fq_value_field,
                values,
                param_key or type_field,
                self.params,
            )
        )
        return self

    def where_source_signal_pairs(
        self,
        type_field: str,
        value_field: str,
        values: Sequence[SourceSignalPair],
        param_key: Optional[str] = None,
    ) -> "QueryBuilder":
        fq_type_field = self._fq_field(type_field)
        fq_value_field = self._fq_field(value_field)
        self.conditions.append(
            filter_source_signal_pairs(
                fq_type_field,
                fq_value_field,
                values,
                param_key or type_field,
                self.params,
            )
        )
        return self

    def where_time_pair(
        self,
        type_field: str,
        value_field: str,
        values: Optional[TimePair],
        param_key: Optional[str] = None,
    ) -> "QueryBuilder":
        fq_type_field = self._fq_field(type_field)
        fq_value_field = self._fq_field(value_field)
        self.conditions.append(
            filter_time_pair(
                fq_type_field,
                fq_value_field,
                values,
                param_key or type_field,
                self.params,
            )
        )
        return self

    def set_fields(self, *fields: Iterable[str]) -> "QueryBuilder":
        # flatten the given field lists, qualifying every name with the alias
        self.fields = [f"{self.alias}.{field}" for field_list in fields for field in field_list]
        return self

    def set_order(self, *args: str, **kwargs: Union[str, bool]) -> "QueryBuilder":
        """
        sets the order for the given fields (as key word arguments), True = ASC, False = DESC
        """

        def to_asc(v: Union[str, bool]) -> str:
            # equality (not identity) comparison: explicit direction strings
            # like "ASC"/"DESC" fall through to the final return unchanged
            if v == True:
                return "ASC"
            elif v == False:
                return "DESC"
            return cast(str, v)

        args_order = [f"{self.alias}.{k} ASC" for k in args]
        kw_order = [f"{self.alias}.{k} {to_asc(v)}" for k, v in kwargs.items()]
        self.order = args_order + kw_order
        return self

    def with_max_issue(self, *args: str) -> "QueryBuilder":
        # self-join against a per-group max(issue) subquery so only the most
        # recently issued row of each (*args) group survives; the current
        # conditions move into the subquery and are then cleared
        fields: List[str] = [f for f in args]
        subfields = f"max(issue) max_issue, {','.join(fields)}"
        group_by = ",".join(fields)
        field_conditions = " AND ".join(f"x.{field} = {self.alias}.{field}" for field in fields)
        condition = f"x.max_issue = {self.alias}.issue AND {field_conditions}"
        self.subquery = f"JOIN (SELECT {subfields} FROM {self.table} WHERE {self.conditions_clause} GROUP BY {group_by}) x ON {condition}"
        # reset conditions since for join
        self.conditions = []
        return self
| mit | ee55865c056f4293596a5409a3251702 | 30.674603 | 143 | 0.588011 | 3.64808 | false | false | false | false |
cmu-delphi/delphi-epidata | src/acquisition/afhsb/afhsb_csv.py | 1 | 10748 | '''
afhsb_csv.py creates CSV files filled_00to13.csv, filled_13to17.csv and simple_DMISID_FY2018.csv
which will be later used to create MYSQL data tables.
Several intermediate files will be created, including:
00to13.pickle 13to17.pickle 00to13.csv 13to17.csv
Required source files:
ili_1_2000_5_2013_new.sas7bdat and ili_1_2013_11_2017_new.sas7bdat under SOURCE_DIR
country_codes.csv and DMISID_FY2018.csv under TARGET_DIR
All intermediate files and final csv files will be stored in TARGET_DIR
'''
import csv
import os
import sas7bdat
import pickle
import epiweeks as epi
# All paths are rooted here: raw SAS inputs are read from SOURCE_DIR and
# every intermediate/final pickle+csv is written to TARGET_DIR.
DATAPATH = '/home/automation/afhsb_data'
SOURCE_DIR = DATAPATH
TARGET_DIR = DATAPATH
# NOTE(review): populated nowhere in this file -- apparently unused
INVALID_DMISIDS = set()
def get_flu_cat(dx):
    """Classify a diagnosis code string into a flu category.

    Returns 1 (flu1 / influenza), 2, or 3 for progressively broader
    flu-like categories, or None when the code is empty, malformed, or
    matches no known prefix.  Accepts both purely numeric ICD-9-style
    codes and letter-prefixed ICD-10-style codes (case-insensitive).
    """
    if not dx:
        return None
    dx = dx.capitalize()
    if dx.isnumeric():
        # ICD-9-style numeric codes
        if dx.startswith(("487", "488")):
            return 1
        if dx.startswith(tuple(str(code) for code in range(480, 487))):
            return 2
        if dx.startswith(tuple(str(code) for code in range(460, 467))):
            return 3
        if dx.startswith(("07999", "3829", "7806", "7862")):
            return 3
    elif dx[0].isalpha() and dx[1:].isnumeric():
        # ICD-10-style codes: one letter followed by digits
        if dx.startswith(("J09", "J10", "J11")):
            return 1
        if dx.startswith(tuple("J%d" % i for i in range(12, 19))):
            return 2
        if dx.startswith(tuple("J0%d" % i for i in range(0, 7))):
            return 3
        if dx.startswith(tuple("J%d" % i for i in range(20, 23))):
            return 3
        if dx.startswith(("J40", "R05", "H669", "R509", "B9789")):
            return 3
    else:
        return None
def aggregate_data(sourcefile, targetfile):
    """Stream an AFHSB SAS file and pickle nested outpatient visit counts.

    Builds results_dict[year][week_num][dmisid][flu_cat] = visit count over
    all "Outpt" (outpatient) rows of SOURCE_DIR/sourcefile, then pickles the
    nested dict to TARGET_DIR/targetfile.
    """
    reader = sas7bdat.SAS7BDAT(os.path.join(SOURCE_DIR, sourcefile), skip_header=True)
    # map column names to column indices
    COL2IDX = {column.name.decode('utf-8'): column.col_id for column in reader.columns}

    def get_field(row, column):
        # fetch a named column from a positional sas7bdat row
        return row[COL2IDX[column]]

    def row2flu(row):
        # first flu category matched among dx1..dx8 wins; 0 means no flu dx
        for i in range(1, 9):
            dx = get_field(row, "dx{}".format(i))
            flu_cat = get_flu_cat(dx)
            if (flu_cat != None): return flu_cat
        return 0

    def row2epiweek(row):
        # convert the event date into an (epi year, epi week number) pair
        date = get_field(row, 'd_event')
        year, month, day = date.year, date.month, date.day
        week_tuple = epi.Week.fromdate(year, month, day).weektuple()
        year, week_num = week_tuple[0], week_tuple[1]
        return year, week_num

    results_dict = dict()
    for r, row in enumerate(reader):
        # if (r >= 1000000): break
        if (get_field(row, 'type') != "Outpt"): continue
        year, week_num = row2epiweek(row)
        dmisid = get_field(row, 'DMISID')
        flu_cat = row2flu(row)

        # walk (and lazily create) the nested dict path, incrementing the
        # leaf counter at the final key
        key_list = [year, week_num, dmisid, flu_cat]
        curr_dict = results_dict
        for i, key in enumerate(key_list):
            if (i == len(key_list) - 1):
                if (not key in curr_dict): curr_dict[key] = 0
                curr_dict[key] += 1
            else:
                if (not key in curr_dict): curr_dict[key] = dict()
                curr_dict = curr_dict[key]

    results_path = os.path.join(TARGET_DIR, targetfile)
    with open(results_path, 'wb') as f:
        pickle.dump(results_dict, f, pickle.HIGHEST_PROTOCOL)
    return
################# Functions for geographical information ####################
def get_country_mapping():
    """Load the ISO alpha-2 -> alpha-3 country-code mapping.

    Reads TARGET_DIR/country_codes.csv (columns 'alpha-2' and 'alpha-3')
    and returns a dict keyed by the two-letter code.

    The per-row debug print of the csv header keys has been removed.
    """
    filename = "country_codes.csv"
    mapping = dict()
    with open(os.path.join(TARGET_DIR, filename), "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            mapping[row['alpha-2']] = row['alpha-3']
    return mapping
def format_dmisid_csv(filename, target_name):
    """Reduce the raw DMISID spreadsheet to (dmisid, country, state, zip5).

    Reads TARGET_DIR/<filename>.csv, translates each facility's ISO alpha-2
    country code to alpha-3 (empty codes stay empty), and writes
    TARGET_DIR/<target_name>.  Rows whose country code has no alpha-3
    mapping are dumped to stdout for manual follow-up and skipped.

    Both file handles are now managed with `with` so they are flushed and
    closed even when an error occurs (previously neither was closed).
    """
    src_path = os.path.join(TARGET_DIR, "{}.csv".format(filename))
    dst_path = os.path.join(TARGET_DIR, target_name)

    fieldnames = ['dmisid', 'country', 'state', 'zip5']
    with open(src_path, "r", encoding='utf-8-sig') as src_csv, \
         open(dst_path, "w") as dst_csv:
        reader = csv.DictReader(src_csv)
        writer = csv.DictWriter(dst_csv, fieldnames=fieldnames)
        writer.writeheader()

        country_mapping = get_country_mapping()

        for row in reader:
            country2 = row['Facility ISO Country Code']
            if country2 == "":
                country3 = ""
            elif country2 not in country_mapping:
                # unknown country code: dump the row and skip it
                for key in row.keys():
                    print(key, row[key])
                continue
            else:
                country3 = country_mapping[country2]
            new_row = {'dmisid': row['DMIS ID'],
                       'country': country3,
                       'state': row['Facility State Code'],
                       'zip5': row['Facility 5-Digit ZIP Code']}
            writer.writerow(new_row)
def dmisid():
    # produce simple_DMISID_FY2018.csv from the raw FY2018 DMISID export
    filename = 'DMISID_FY2018'
    target_name = "simple_DMISID_FY2018.csv"
    format_dmisid_csv(filename, target_name)
# Census divisions (cen1-cen9) and HHS regions (hhs1-hhs10) as sets of
# two-letter state/district codes; inverted by state2region() below to
# build per-state lookup tables.
cen2states = {'cen1': {'CT', 'ME', 'MA', 'NH', 'RI', 'VT'},
              'cen2': {'NJ', 'NY', 'PA'},
              'cen3': {'IL', 'IN', 'MI', 'OH', 'WI'},
              'cen4': {'IA', 'KS', 'MN', 'MO', 'NE', 'ND', 'SD'},
              'cen5': {'DE', 'DC', 'FL', 'GA', 'MD', 'NC', 'SC', 'VA', 'WV'},
              'cen6': {'AL', 'KY', 'MS', 'TN'},
              'cen7': {'AR', 'LA', 'OK', 'TX'},
              'cen8': {'AZ', 'CO', 'ID', 'MT', 'NV', 'NM', 'UT', 'WY'},
              'cen9': {'AK', 'CA', 'HI', 'OR', 'WA'}}
hhs2states = {'hhs1': {'VT', 'CT', 'ME', 'MA', 'NH', 'RI'},
              'hhs2': {'NJ', 'NY'},
              'hhs3': {'DE', 'DC', 'MD', 'PA', 'VA', 'WV'},
              'hhs4': {'AL', 'FL', 'GA', 'KY', 'MS', 'NC', 'TN', 'SC'},
              'hhs5': {'IL', 'IN', 'MI', 'MN', 'OH', 'WI'},
              'hhs6': {'AR', 'LA', 'NM', 'OK', 'TX'},
              'hhs7': {'IA', 'KS', 'MO', 'NE'},
              'hhs8': {'CO', 'MT', 'ND', 'SD', 'UT', 'WY'},
              'hhs9': {'AZ', 'CA', 'HI', 'NV'},
              'hhs10': {'AK', 'ID', 'OR', 'WA'}}
def state2region(D):
    """Invert a region -> set-of-states mapping into state -> region.

    Asserts that no state appears in more than one region.
    """
    results = dict()
    for region, states in D.items():
        for state in states:
            assert state not in results
            results[state] = region
    return results
def state2region_csv():
    """Write per-state HHS and census region lookups to state2region.csv.

    NOTE(review): unlike every other output in this module, this writes to
    the current working directory rather than TARGET_DIR -- confirm that
    is intentional.
    """
    to_hhs = state2region(hhs2states)
    to_cen = state2region(cen2states)
    states = to_hhs.keys()
    target_name = "state2region.csv"
    fieldnames = ['state', 'hhs', 'cen']
    with open(target_name, "w") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for state in states:
            content = {"state": state, "hhs": to_hhs[state], "cen": to_cen[state]}
            writer.writerow(content)
################# Functions for geographical information ####################
######################### Functions for AFHSB data ##########################
def write_afhsb_csv(period):
    """Flatten TARGET_DIR/<period>.pickle into TARGET_DIR/<period>.csv.

    Emits one row per (epiweek, dmisid, flu_type) with a sequential id and
    the visit count; non-numeric dmisid strings are written as an empty
    field (None).
    """
    # numeric flu categories from get_flu_cat -> signal names
    flu_mapping = {0: "ili-flu3", 1: "flu1", 2:"flu2-flu1", 3: "flu3-flu2"}
    results_dict = pickle.load(open(os.path.join(TARGET_DIR, "{}.pickle".format(period)), 'rb'))

    fieldnames = ["id", "epiweek", "dmisid", "flu_type", "visit_sum"]
    with open(os.path.join(TARGET_DIR, "{}.csv".format(period)), 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        i = 0
        for year in sorted(results_dict.keys()):
            year_dict = results_dict[year]
            for week in sorted(year_dict.keys()):
                week_dict = year_dict[week]
                for dmisid in sorted(week_dict.keys()):
                    dmisid_dict = week_dict[dmisid]
                    for flu in sorted(dmisid_dict.keys()):
                        visit_sum = dmisid_dict[flu]
                        i += 1
                        # epiweek is YYYYWW as a single integer
                        epiweek = int("{}{:02d}".format(year, week))
                        flu_type = flu_mapping[flu]
                        row = {"epiweek": epiweek, "dmisid": None if (not dmisid.isnumeric()) else dmisid,
                               "flu_type": flu_type, "visit_sum": visit_sum, "id": i}
                        writer.writerow(row)
                        # progress logging every 100k rows
                        if (i % 100000 == 0): print(row)
def dmisid_start_time_from_file(filename):
    """Scan an aggregated csv and return {dmisid: earliest epiweek seen}."""
    earliest = dict()
    with open(filename, 'r') as csvfile:
        for row in csv.DictReader(csvfile):
            dmisid = row['dmisid']
            epiweek = int(row['epiweek'])
            previous = earliest.get(dmisid)
            earliest[dmisid] = epiweek if previous is None else min(previous, epiweek)
    return earliest
def dmisid_start_time():
    """Merge both periods' per-dmisid first-seen epiweeks, keeping the
    earliest week for any dmisid appearing in both files."""
    record1 = dmisid_start_time_from_file(os.path.join(TARGET_DIR, "00to13.csv"))
    record2 = dmisid_start_time_from_file(os.path.join(TARGET_DIR, "13to17.csv"))
    # merge in place: `record` aliases (and mutates) record1
    record = record1
    for dmisid, epiweek in record2.items():
        if (dmisid in record):
            record[dmisid] = min(record[dmisid], epiweek)
        else:
            record[dmisid] = epiweek
    return record
def fillin_zero_to_csv(period, dmisid_start_record):
    """Rewrite TARGET_DIR/<period>.csv as TARGET_DIR/filled_<period>.csv,
    adding explicit zero-count rows.

    A zero row is added for every (epiweek, dmisid, flu_type) combination
    where the dmisid was already reporting (its first-seen epiweek, from
    *dmisid_start_record*, is not after the given epiweek) but no visits
    were recorded.

    Fixes over the previous version: both file handles are managed with
    `with` (the output was never closed/flushed before), and the final
    row-count report no longer over-counts by one.
    """
    src_path = os.path.join(TARGET_DIR, "{}.csv".format(period))
    dst_path = os.path.join(TARGET_DIR, "filled_{}.csv".format(period))

    # Load data into a nested dict: epiweek -> dmisid -> flu_type -> visit_sum
    results_dict = dict()
    with open(src_path, "r") as src_csv:
        for row in csv.DictReader(src_csv):
            epiweek = int(row['epiweek'])
            week_dict = results_dict.setdefault(epiweek, dict())
            dmisid_dict = week_dict.setdefault(row['dmisid'], dict())
            dmisid_dict[row['flu_type']] = row['visit_sum']

    # Fill in zero count records
    flutype_group = ["ili-flu3", "flu1", "flu2-flu1", "flu3-flu2"]
    for epiweek, week_dict in results_dict.items():
        for dmisid, start_week in dmisid_start_record.items():
            if start_week > epiweek:
                continue  # facility not reporting yet this week
            dmisid_dict = week_dict.setdefault(dmisid, dict())
            for flutype in flutype_group:
                if flutype not in dmisid_dict:
                    dmisid_dict[flutype] = 0

    # Write the filled records with sequential ids
    fieldnames = ["id", "epiweek", "dmisid", "flu_type", "visit_sum"]
    i = 0
    with open(dst_path, "w") as dst_csv:
        writer = csv.DictWriter(dst_csv, fieldnames=fieldnames)
        writer.writeheader()
        for epiweek in results_dict:
            for dmisid in results_dict[epiweek]:
                for flutype, visit_sum in results_dict[epiweek][dmisid].items():
                    i += 1
                    row = {"id": i, "epiweek": epiweek, "dmisid": dmisid,
                           "flu_type": flutype, "visit_sum": visit_sum}
                    writer.writerow(row)
                    # progress logging every 100k rows
                    if i % 100000 == 0:
                        print(row)
    print("Wrote {} rows".format(i))
######################### Functions for AFHSB data ##########################
def main():
  """Run the full AFHSB ingestion pipeline end-to-end.

  Steps: build geographic lookup tables, aggregate the raw SAS exports into
  pickle files, dump the pickles to CSV, then fill in explicit zero-count
  rows. All outputs land in the module-level TARGET_DIR.
  """
  # Build tables containing geographical information
  state2region_csv()
  dmisid()
  # Aggregate raw data into pickle files
  aggregate_data("ili_1_2000_5_2013_new.sas7bdat", "00to13.pickle")
  aggregate_data("ili_1_2013_11_2017_new.sas7bdat", "13to17.pickle")
  # write pickle content to csv files
  write_afhsb_csv("00to13")
  write_afhsb_csv("13to17")
  # Fill in zero count records
  dmisid_start_record = dmisid_start_time()
  fillin_zero_to_csv("00to13", dmisid_start_record)
  fillin_zero_to_csv("13to17", dmisid_start_record)


if __name__ == '__main__':
  main()
cmu-delphi/delphi-epidata | src/acquisition/cdcp/cdc_upload.py | 1 | 7456 | """
===============
=== Purpose ===
===============
Reads zip/csv files from CDC and stores page hit counts in the database.
Files can be uploaded at:
https://delphi.cmu.edu/~automation/public/cdc_upload/
When someone uploads a new file, two things happen:
1. the uploaded file is moved to /common/cdc_stage
2. this program is queued to run
This program:
1. extracts csv file(s)
2. parses counts from csv file(s)
3. stores counts in the database (see below)
4. deletes csv file(s)
5. moves zip file(s) from staging directory to /home/automation/cdc_page_stats
=======================
=== Data Dictionary ===
=======================
`cdc` is the table where individual page counts are stored.
+-------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| date | date | NO | MUL | NULL | |
| page | varchar(128) | NO | MUL | NULL | |
| state | char(2) | NO | MUL | NULL | |
| num | int(11) | NO | | NULL | |
+-------+--------------+------+-----+---------+----------------+
id: unique identifier for each record
date: the date when the hits were recorded (maybe by eastern time?)
page: the full page title
state: the state where the page was accessed from (maybe by IP address?)
num: the number of hits for this particular page
`cdc_meta` is the table where total counts are stored.
+---------+---------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+---------+---------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| date | date | NO | MUL | NULL | |
| epiweek | int(11) | NO | MUL | NULL | |
| state | char(2) | NO | MUL | NULL | |
| total | int(11) | NO | | NULL | |
+---------+---------+------+-----+---------+----------------+
id: unique identifier for each record
date: the date when the hits were recorded (maybe by eastern time?)
epiweek: the epiweek corresponding to the date
state: the state where the pages were accessed from (maybe by IP address?)
total: total number of hits for all CDC pages
=================
=== Changelog ===
=================
2017-02-23
* secrets and minor cleanup
2016-06-11
* work in automation dir
2016-04-18
+ initial version
"""
# standard library
import argparse
import csv
from datetime import datetime
import glob
import io
import os
import shutil
import sys
from zipfile import ZipFile
# third party
import mysql.connector
# first party
import delphi.operations.secrets as secrets
import delphi.utils.epiweek as flu
STATES = {
'Alabama': 'AL',
'Alaska': 'AK',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY',
#'Puerto Rico': 'PR',
#'Virgin Islands': 'VI',
#'Guam': 'GU',
}
sql_cdc = '''
INSERT INTO
`cdc` (`date`, `page`, `state`, `num`)
VALUES
(%s, %s, %s, %s)
ON DUPLICATE KEY UPDATE
`num` = %s
'''
sql_cdc_meta = '''
INSERT INTO
`cdc_meta` (`date`, `epiweek`, `state`, `total`)
VALUES
(%s, yearweek(%s, 6), %s, %s)
ON DUPLICATE KEY UPDATE
`total` = %s
'''
def upload(test_mode):
  """Parse staged CDC zip files and store page-hit counts in the database.

  Walks /common/cdc_stage/*.zip (recursing into nested zips), parses the
  contained CSV files into the `cdc` and `cdc_meta` tables, then moves each
  zip to /home/automation/cdc_page_stats. In `test_mode` nothing is moved
  and the transaction is not committed.
  """
  # connect
  u, p = secrets.db.epi
  cnx = mysql.connector.connect(user=u, password=p, database='epidata')
  cur = cnx.cursor()

  # insert (or update) table `cdc`
  def insert_cdc(date, page, state, num):
    # `num` passed twice: once for VALUES, once for ON DUPLICATE KEY UPDATE
    cur.execute(sql_cdc, (date, page, state, num, num))

  # insert (or update) table `cdc_meta`
  def insert_cdc_meta(date, state, total):
    # `date` passed twice: second copy feeds yearweek() for the epiweek column
    cur.execute(sql_cdc_meta, (date, date, state, total, total))

  # loop over rows until the header row is found
  def find_header(reader):
    for row in reader:
      if len(row) > 0 and row[0] == 'Date':
        return True
    return False

  # parse csv files for `cdc` and `cdc_meta`
  # `meta=True` handles the 3-column statewide-total files; `meta=False`
  # handles the 4-column per-page files.
  def parse_csv(meta):
    def handler(reader):
      if not find_header(reader):
        raise Exception('header not found')
      count = 0
      cols = 3 if meta else 4
      for row in reader:
        if len(row) != cols:
          continue
        if meta:
          (a, c, d) = row
        else:
          (a, b, c, d) = row
          # NOTE(review): strips a fixed 16-character suffix from the state
          # column of per-page rows only -- presumably a constant label suffix
          # in the export; confirm against a sample file.
          c = c[:-16]
        if c not in STATES:
          continue
        a = datetime.strptime(a, '%b %d, %Y').strftime('%Y-%m-%d')
        c = STATES[c]
        d = int(d)
        if meta:
          insert_cdc_meta(a, c, d)
        else:
          insert_cdc(a, b, c, d)
        count += 1
      return count
    return handler

  # recursively open zip files
  def parse_zip(zf, level=1):
    for name in zf.namelist():
      prefix = ' ' * level
      print(prefix, name)
      if name[-4:] == '.zip':
        # nested zip: read it into memory and recurse
        with zf.open(name) as temp:
          with ZipFile(io.BytesIO(temp.read())) as zf2:
            parse_zip(zf2, level + 1)
      elif name[-4:] == '.csv':
        handler = None
        if 'Flu Pages by Region' in name:
          handler = parse_csv(False)
        elif 'Regions for all CDC' in name:
          handler = parse_csv(True)
        else:
          print(prefix, ' (skipped)')
        if handler is not None:
          with zf.open(name) as temp:
            count = handler(csv.reader(io.StringIO(str(temp.read(), 'utf-8'))))
            print(prefix, ' %d rows' % count)
      else:
        print(prefix, ' (ignored)')

  # find, parse, and move zip files
  zip_files = glob.glob('/common/cdc_stage/*.zip')
  print('searching...')
  for f in zip_files:
    print(' ', f)
  print('parsing...')
  for f in zip_files:
    with ZipFile(f) as zf:
      parse_zip(zf)
  print('moving...')
  for f in zip_files:
    src = f
    dst = os.path.join('/home/automation/cdc_page_stats/', os.path.basename(src))
    print(' ', src, '->', dst)
    if test_mode:
      print(' (test mode enabled - not moved)')
    else:
      shutil.move(src, dst)
      if not os.path.isfile(dst):
        raise Exception('unable to move file')

  # disconnect
  cur.close()
  if not test_mode:
    # commit only on a real run; test mode leaves the database untouched
    cnx.commit()
  cnx.close()
def main():
  """Parse command-line flags and run the CDC upload."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--test', '-t', default=False, action='store_true', help='dry run only')
  options = arg_parser.parse_args()
  upload(options.test)


if __name__ == '__main__':
  main()
| mit | 039136f46bd04ef44c0c48c9736dabc1 | 25.724014 | 94 | 0.530311 | 3.197256 | false | false | false | false |
cmu-delphi/delphi-epidata | src/acquisition/covid_hosp/common/database.py | 1 | 7963 | """Common database code used by multiple `covid_hosp` scrapers."""
# standard library
from collections import namedtuple
from contextlib import contextmanager
import math
# third party
import mysql.connector
import pandas as pd
# first party
import delphi.operations.secrets as secrets
Columndef = namedtuple("Columndef", "csv_name sql_name dtype")
class Database:
  """Wrapper around one `covid_hosp` dataset table and its shared metadata table."""

  def __init__(self,
               connection,
               table_name=None,
               columns_and_types=None,
               key_columns=None,
               additional_fields=None):
    """Create a new Database object.

    Parameters
    ----------
    connection
      An open connection to a database.
    table_name : str
      The name of the table which holds the dataset.
    columns_and_types : tuple[str, str, Callable]
      List of 3-tuples of (CSV header name, SQL column name, data type) for
      all the columns in the CSV file.
    key_columns : tuple[str]
      CSV names of the columns forming the dataset's natural key.
    additional_fields : tuple[str]
      List of 2-tuples of (value, SQL column name) for additional fields to
      include at the end of the row which are not present in the CSV data.
    """

    self.connection = connection
    self.table_name = table_name
    # `covid_hosp_state_timeseries` historically names its publication column
    # `issue`; every other dataset uses `publication_date`.
    self.publication_col_name = "issue" if table_name == 'covid_hosp_state_timeseries' else \
      'publication_date'
    self.columns_and_types = {
      c.csv_name: c
      for c in (columns_and_types if columns_and_types is not None else [])
    }
    self.key_columns = key_columns if key_columns is not None else []
    self.additional_fields = additional_fields if additional_fields is not None else []

  @classmethod
  @contextmanager
  def connect(database_class, mysql_connector_impl=mysql.connector):
    """Connect to a database and provide the connection as a context manager.

    As long as the context manager exits normally, the connection's transaction
    will be committed. Otherwise, if the context is exited by an Exception, the
    transaction will be rolled back.

    In any case, the connection will be gracefully closed upon exiting the
    context manager.
    """

    # connect to the database
    user, password = secrets.db.epi
    connection = mysql_connector_impl.connect(
        host=secrets.db.host,
        user=user,
        password=password,
        database='epidata')

    try:
      # provide the connection to the context manager
      yield database_class(connection)

      # rollback by default; the following commit will only take place if no
      # exception was raised in calling code
      connection.commit()
    finally:
      # close the connection in any case
      connection.close()

  @contextmanager
  def new_cursor(self):
    """Create and provide a database cursor as a context manager.

    The cursor will be gracefully closed upon exiting the context manager.
    """

    cursor = self.connection.cursor()
    try:
      yield cursor
    finally:
      cursor.close()

  def contains_revision(self, revision):
    """Return whether the given revision already exists in the database.

    Parameters
    ----------
    revision : str
      Unique revision string.

    Returns
    -------
    bool
      True iff the revision already exists.
    """

    with self.new_cursor() as cursor:
      cursor.execute('''
        SELECT
          count(1) > 0
        FROM
          `covid_hosp_meta`
        WHERE
          `dataset_name` = %s AND `revision_timestamp` = %s
      ''', (self.table_name, revision))
      for (result,) in cursor:
        return bool(result)

  def insert_metadata(self, publication_date, revision, meta_json):
    """Add revision metadata to the database.

    Parameters
    ----------
    publication_date : int
      Date when the dataset was published in YYYYMMDD format.
    revision : str
      Unique revision string.
    meta_json : str
      Metadata serialized as a JSON string.
    """

    with self.new_cursor() as cursor:
      cursor.execute('''
        INSERT INTO
          `covid_hosp_meta` (
            `dataset_name`,
            `publication_date`,
            `revision_timestamp`,
            `metadata_json`,
            `acquisition_datetime`
          )
        VALUES
          (%s, %s, %s, %s, NOW())
      ''', (self.table_name, publication_date, revision, meta_json))

  def insert_dataset(self, publication_date, dataframe):
    """Add a dataset to the database.

    Parameters
    ----------
    publication_date : int
      Date when the dataset was published in YYYYMMDD format.
    dataframe : pandas.DataFrame
      The dataset.
    """
    # only columns that actually appear in this CSV revision are inserted
    dataframe_columns_and_types = [
      x for x in self.columns_and_types.values() if x.csv_name in dataframe.columns
    ]

    def nan_safe_dtype(dtype, value):
      # NaN is the CSV's "missing" marker; store SQL NULL instead
      if isinstance(value, float) and math.isnan(value):
        return None
      return dtype(value)

    # first convert keys and save the results; we'll need them later
    for csv_name in self.key_columns:
      dataframe.loc[:, csv_name] = dataframe[csv_name].map(self.columns_and_types[csv_name].dtype)

    # 2 extra placeholders: the `id` column (always 0) and the publication date
    num_columns = 2 + len(dataframe_columns_and_types) + len(self.additional_fields)
    value_placeholders = ', '.join(['%s'] * num_columns)
    columns = ', '.join(f'`{i.sql_name}`' for i in dataframe_columns_and_types + self.additional_fields)
    sql = f'INSERT INTO `{self.table_name}` (`id`, `{self.publication_col_name}`, {columns}) ' \
      f'VALUES ({value_placeholders})'
    id_and_publication_date = (0, publication_date)
    with self.new_cursor() as cursor:
      for _, row in dataframe.iterrows():
        values = []
        for c in dataframe_columns_and_types:
          values.append(nan_safe_dtype(c.dtype, row[c.csv_name]))
        # NOTE(review): this binds each additional field's `csv_name` (a string
        # attribute), not a caller-supplied value, even though the constructor
        # docstring describes (value, column) pairs -- confirm this is intended.
        cursor.execute(sql,
                       id_and_publication_date +
                       tuple(values) +
                       tuple(i.csv_name for i in self.additional_fields))

    # deal with non/seldomly updated columns used like a fk table (if this database needs it)
    if hasattr(self, 'AGGREGATE_KEY_COLS'):
      ak_cols = self.AGGREGATE_KEY_COLS

      # restrict data to just the key columns and remove duplicate rows
      # sort by key columns to ensure that the last ON DUPLICATE KEY overwrite
      # uses the most-recent aggregate key information
      # NOTE(review): indexing a DataFrame with a `set` is rejected by newer
      # pandas versions (a list is required) -- confirm the pinned pandas.
      ak_data = (dataframe[set(ak_cols + self.key_columns)]
                 .sort_values(self.key_columns)[ak_cols]
                 .drop_duplicates())
      # cast types
      for col in ak_cols:
        ak_data[col] = ak_data[col].map(
          lambda value: nan_safe_dtype(self.columns_and_types[col].dtype, value)
        )
      # fix NULLs
      ak_data = ak_data.to_numpy(na_value=None).tolist()

      # create string of tick-quoted and comma-seperated column list
      ak_cols_str = ','.join(f'`{col}`' for col in ak_cols)
      # ...and ticked and comma-sep'd "column=column" list for ON UPDATE (to keep only the most recent values for each pk)
      ak_updates_str = ','.join(f'`{col}`=v.{col}' for col in ak_cols)
      # ...and string of VALUES placeholders
      values_str = ','.join( ['%s'] * len(ak_cols) )
      # use aggregate key table alias
      ak_table = self.table_name + '_key'
      # assemble full SQL statement
      ak_insert_sql = f'INSERT INTO `{ak_table}` ({ak_cols_str}) VALUES ({values_str}) AS v ON DUPLICATE KEY UPDATE {ak_updates_str}'
      # commit the data
      with self.new_cursor() as cur:
        cur.executemany(ak_insert_sql, ak_data)

  def get_max_issue(self):
    """Fetch the most recent issue.

    This is used to bookend what updates we pull in from the HHS metadata.
    """
    with self.new_cursor() as cursor:
      cursor.execute(f'''
        SELECT
          max(publication_date)
        from
          `covid_hosp_meta`
        WHERE
          dataset_name = "{self.table_name}"
      ''')
      for (result,) in cursor:
        if result is not None:
          return pd.Timestamp(str(result))
      # sentinel "earliest possible" issue when no metadata rows exist yet
      return pd.Timestamp("1900/1/1")
| mit | b72b76d5e4f3afc7bc7f58f67b9fa1c1 | 32.179167 | 133 | 0.626648 | 3.985485 | false | false | false | false |
djangonauts/django-hstore | django_hstore/utils.py | 3 | 2608 | from __future__ import unicode_literals, absolute_import
from decimal import Decimal
from datetime import date, time, datetime
from django.core.exceptions import ObjectDoesNotExist
from django.utils import six
def acquire_reference(reference):
    """Resolve a serialized model reference back into a model instance.

    The expected format is ``"package.module.ModelClass:pk"`` (as produced by
    ``identify_instance``). Returns ``None`` when the referenced row no longer
    exists, and raises ``ValueError`` for malformed reference strings.
    """
    try:
        model, identifier = reference.split(':')
        module, _, attr = model.rpartition('.')
        model = getattr(__import__(module, fromlist=(attr,)), attr)
        return model.objects.get(pk=identifier)
    except ObjectDoesNotExist:
        # row was deleted since the reference was stored
        return None
    except Exception:
        # BUG FIX: the original raised a bare ValueError with no message,
        # discarding all context about which reference failed to parse.
        raise ValueError('invalid reference string: %r' % (reference,))
def identify_instance(instance):
    """Serialize a model instance as the string ``"module.Class:pk"``."""
    cls = type(instance)
    return '{0}.{1}:{2}'.format(cls.__module__, cls.__name__, instance.pk)
def serialize_references(references):
    """Convert a dict of model instances into a dict of reference strings.

    ``None`` and plain strings serialize to an empty dict; values that are
    already strings are kept as-is; any non-dict input is returned untouched.
    """
    # None or a raw string carries no references to serialize
    if references is None or isinstance(references, six.string_types):
        return {}
    # anything that is not a dict is passed through unchanged so other
    # operations are not interfered with
    if not isinstance(references, dict):
        return references
    serialized = {}
    for key, instance in references.items():
        if isinstance(instance, six.string_types):
            serialized[key] = instance
        else:
            serialized[key] = identify_instance(instance)
    return serialized
def unserialize_references(references):
    """Convert a dict of reference strings back into model instances.

    String values are resolved via ``acquire_reference``; non-string values
    are assumed to already be instances and are kept as-is.
    """
    if references is None:
        return {}
    unserialized = {}
    for key, reference in references.items():
        if isinstance(reference, six.string_types):
            unserialized[key] = acquire_reference(reference)
        else:
            unserialized[key] = reference
    return unserialized
def get_cast_for_param(value_annot, key):
    """Return the PostgreSQL cast suffix matching an annotated query value."""
    if not isinstance(value_annot, dict):
        return ''
    annotation = value_annot[key]
    # booleans are stored as their literal value rather than their type
    # (see get_value_annotations), so compare by membership first
    if annotation in (True, False):
        return '::boolean'
    # datetime must be checked before date because datetime subclasses date
    if issubclass(annotation, datetime):
        return '::timestamp'
    if issubclass(annotation, date):
        return '::date'
    if issubclass(annotation, time):
        return '::time'
    if issubclass(annotation, six.integer_types):
        return '::bigint'
    if issubclass(annotation, float):
        return '::float8'
    if issubclass(annotation, Decimal):
        return '::numeric'
    return ''
def get_value_annotations(param):
    """Annotate each value of ``param`` with its type for later SQL casting.

    Booleans keep their actual value (not just ``bool``) because ``isnull``
    and boolean casts need the literal True/False; every other value is
    replaced by its type.
    """
    def annotate(value):
        # keep the actual value for booleans, not just the type, for isnull
        return value if isinstance(value, bool) else type(value)

    # `.items()` works identically on python 2 and 3 here, so the original
    # six.iteritems indirection (and the PEP 8 E731 lambda) is unnecessary
    return dict((key, annotate(value)) for key, value in param.items())
| mit | 1c4e8a6a2484fb60b16ad0ed271d18f9 | 30.421687 | 101 | 0.641488 | 4.29654 | false | false | false | false |
djangonauts/django-hstore | django_hstore/dict.py | 1 | 5377 | import json
from decimal import Decimal
from django.utils import six
from django.utils.encoding import force_text, force_str
from .compat import UnicodeMixin
from . import utils, exceptions
__all__ = [
'HStoreDict',
'HStoreReferenceDict',
]
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``Decimal`` values as floats."""

    def default(self, obj):
        # coerce Decimal to float; defer everything else to the base encoder
        if isinstance(obj, Decimal):
            return float(obj)
        return super(DecimalEncoder, self).default(obj)
class HStoreDict(UnicodeMixin, dict):
    """
    A dictionary subclass which implements hstore support.
    """
    # class-level default so the attribute exists before __init__ runs
    schema_mode = False  # python2.6 compatibility

    def __init__(self, value=None, field=None, instance=None, schema_mode=False, **kwargs):
        self.schema_mode = schema_mode
        # if passed value is string
        # ensure is json formatted
        if isinstance(value, six.string_types):
            try:
                value = json.loads(value)
            except ValueError as e:
                raise exceptions.HStoreDictException(
                    'HStoreDict accepts only valid json formatted strings.',
                    json_error_message=force_text(e)
                )
        elif value is None:
            value = {}
        # allow dictionaries only
        if not isinstance(value, dict):
            raise exceptions.HStoreDictException(
                'HStoreDict accepts only dictionary objects, None and json formatted string representations of json objects'
            )
        if not self.schema_mode:
            # ensure values are acceptable
            for key, val in value.items():
                value[key] = self.ensure_acceptable_value(val)
        super(HStoreDict, self).__init__(value, **kwargs)
        self.field = field
        self.instance = instance

    def __setitem__(self, *args, **kwargs):
        """
        perform checks before setting the value of a key
        """
        # ensure values are acceptable
        value = self.ensure_acceptable_value(args[1])
        # prepare *args
        args = (args[0], value)
        super(HStoreDict, self).__setitem__(*args, **kwargs)

    def __getitem__(self, *args, **kwargs):
        """
        retrieve value preserving type if in schema mode, string only otherwise
        """
        value = super(HStoreDict, self).__getitem__(*args, **kwargs)
        if self.schema_mode:
            try:
                # delegate type coercion to the matching hstore virtual field
                return self.instance._hstore_virtual_fields[args[0]].to_python(value)
            except KeyError:
                # no virtual field for this key: fall through to the raw value
                pass
        return value

    def get(self, *args):
        # dict.get replacement that routes through our typed __getitem__
        key = args[0]
        try:
            return self.__getitem__(key)
        except KeyError:
            if len(args) > 1:
                return args[1]  # return default value
            else:
                return None

    # This method is used both for python3 and python2
    # thanks to UnicodeMixin
    def __unicode__(self):
        return force_text(json.dumps(self))

    def __getstate__(self):
        return self.__dict__

    def __copy__(self):
        return self.__class__(self, self.field)

    def update(self, *args, **kwargs):
        # route every assignment through __setitem__ so value checks apply
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    def ensure_acceptable_value(self, value):
        """
        if schema_mode disabled (default behaviour):
            - ensure booleans, integers, floats, Decimals, lists and dicts are
              converted to string
            - convert True and False objects to "true" and "false" so they can be
              decoded back with the json library if needed
            - convert lists and dictionaries to json formatted strings
            - leave alone all other objects because they might be representation of django models
        else:
            - encode utf8 strings in python2
            - convert to string
        """
        if not self.schema_mode:
            if isinstance(value, bool):
                return force_text(value).lower()
            elif isinstance(value, six.integer_types + (float, Decimal)):
                return force_text(value)
            elif isinstance(value, (list, dict)):
                return force_text(json.dumps(value, cls=DecimalEncoder))
            else:
                return value
        else:
            # perform string conversion unless is None
            if value is not None:
                value = force_str(value)
            return value

    def remove(self, keys):
        """
        Removes the specified keys from this dictionary.
        """
        # NOTE(review): `get_query_set` is the pre-Django-1.6 manager API
        # (renamed to `get_queryset` later) -- confirm the supported version.
        queryset = self.instance._base_manager.get_query_set()
        queryset.filter(pk=self.instance.pk).hremove(self.field.name, keys)
class HStoreReferenceDict(HStoreDict):
    """
    A dictionary which adds support to storing references to models
    """

    def __getitem__(self, *args, **kwargs):
        # BUG FIX: the original used `super(self.__class__, self)`, which
        # recurses infinitely if this class is ever subclassed; always name
        # the class explicitly in super() calls.
        value = super(HStoreReferenceDict, self).__getitem__(*args, **kwargs)
        # if value is a string it needs to be converted to a model instance
        if isinstance(value, six.string_types):
            reference = utils.acquire_reference(value)
            # cache the resolved instance so the lookup happens only once
            self.__setitem__(args[0], reference)
            return reference
        # otherwise just return the relation
        return value

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default
| mit | 53a968aa57a5c9d2124549bc5e27892f | 31.587879 | 124 | 0.581179 | 4.587884 | false | false | false | false |
deepchem/deepchem | deepchem/hyper/base_classes.py | 1 | 5216 | import logging
from typing import Any, Callable, Dict, List, Optional, Tuple
from deepchem.data import Dataset
from deepchem.trans import Transformer
from deepchem.models import Model
from deepchem.metrics import Metric
logger = logging.getLogger(__name__)
def _convert_hyperparam_dict_to_filename(hyper_params: Dict[str, Any]) -> str:
"""Helper function that converts a dictionary of hyperparameters to a string that can be a filename.
Parameters
----------
hyper_params: Dict
Maps string of hyperparameter name to int/float/string/list etc.
Returns
-------
filename: str
A filename of form "_key1_value1_value2_..._key2..."
"""
filename = ""
keys = sorted(hyper_params.keys())
for key in keys:
filename += "_%s" % str(key)
value = hyper_params[key]
if isinstance(value, int):
filename += "_%s" % str(value)
elif isinstance(value, float):
filename += "_%f" % value
else:
filename += "%s" % str(value)
return filename
class HyperparamOpt(object):
  """Abstract superclass for hyperparameter search classes.

  A hyperparameter optimizer wraps a `dc.models.Model` constructor and, when
  `hyperparam_search` is invoked, builds many concrete models, trains each on
  a training set, and scores it on a validation set. Subclasses differ only
  in the strategy used to explore the hyperparameter space.

  This class is abstract and must never be instantiated directly.
  """

  def __init__(self, model_builder: Callable[..., Model]):
    """Initialize Hyperparameter Optimizer.

    This abstract constructor should only be invoked by subclasses.

    Parameters
    ----------
    model_builder: constructor function.
      A callable returning an instance of `dc.models.Model`. It must accept
      two arguments: `model_params` (a dict) and `model_dir` (a string path
      to a model directory).
    """
    # refuse direct instantiation of the abstract base class
    if self.__class__.__name__ == "HyperparamOpt":
      raise ValueError(
          "HyperparamOpt is an abstract superclass and cannot be directly instantiated. \
          You probably want to instantiate a concrete subclass instead.")
    self.model_builder = model_builder

  def hyperparam_search(
      self,
      params_dict: Dict,
      train_dataset: Dataset,
      valid_dataset: Dataset,
      metric: Metric,
      output_transformers: List[Transformer] = [],
      nb_epoch: int = 10,
      use_max: bool = True,
      logfile: str = 'results.txt',
      logdir: Optional[str] = None,
      **kwargs) -> Tuple[Model, Dict[str, Any], Dict[str, Any]]:
    """Conduct Hyperparameter search.

    Common API shared by all hyperparameter optimization subclasses; each
    subclass implements its own search strategy behind this signature.

    Parameters
    ----------
    params_dict: Dict
      Maps hyperparameter names to candidate values. The precise semantics
      (ints/floats/strings/lists/...) depend on the concrete subclass; see
      its documentation.
    train_dataset: Dataset
      dataset used for training
    valid_dataset: Dataset
      dataset used for validation(optimization on valid scores)
    metric: Metric
      metric used for evaluation
    output_transformers: list[Transformer]
      Transformers applied before evaluation, since `train_dataset` and
      `valid_dataset` may have been transformed for learning and need the
      transform inverted before scoring.
    nb_epoch: int, (default 10)
      Number of training epochs during each optimization iteration.
    use_max: bool, optional
      If True, return the model with the highest score; otherwise return
      the model with the minimum score.
    logdir: str, optional
      Directory in which to store created models. Defaults to a temporary
      directory when unset.
    logfile: str, optional (default `results.txt`)
      Name of the file results are written to; defaults to
      `logdir/results.txt` when not specified.

    Returns
    -------
    Tuple[`best_model`, `best_hyperparams`, `all_scores`]
      `best_model` is a `dc.models.Model` instance, `best_hyperparams` is a
      dict of parameters, and `all_scores` maps string representations of
      hyperparameter sets to validation scores.
    """
    raise NotImplementedError
| mit | b9a8164e6ff7b26a6abc248df003d0e0 | 36.797101 | 102 | 0.700537 | 4.504318 | false | false | false | false |
deepchem/deepchem | contrib/DeepMHC/deepmhc.py | 5 | 3829 | """DeepMHC model, found in https://www.biorxiv.org/content/early/2017/12/24/239236"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Vignesh Ram Somnath"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.data import NumpyDataset
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Conv1D, MaxPool1D, Dense, Dropout
from deepchem.models.tensorgraph.layers import Flatten
from deepchem.models.tensorgraph.layers import Feature, Weights, Label
from deepchem.models.tensorgraph.layers import L2Loss, WeightedError
class DeepMHC(TensorGraph):
  """DeepMHC peptide-MHC binding affinity model.

  1D-CNN over one-hot encoded peptide sequences, trained to predict
  -log10(IC50), following https://www.biorxiv.org/content/early/2017/12/24/239236.
  Only regression mode is currently implemented.
  """

  name = ['DeepMHC']

  def __init__(self,
               batch_size=64,
               pad_length=13,
               dropout_p=0.5,
               num_amino_acids=20,
               mode="regression",
               **kwargs):
    """Build a DeepMHC model.

    Parameters
    ----------
    batch_size: int
      number of samples per batch
    pad_length: int
      padded peptide length (sequence dimension of the one-hot input)
    dropout_p: float
      dropout probability applied after the dense layer
    num_amino_acids: int
      size of the one-hot amino-acid alphabet
    mode: str
      "regression" or "classification"; only regression is implemented
    """
    assert mode in ["regression", "classification"]
    self.mode = mode
    self.batch_size = batch_size
    self.dropout_p = dropout_p
    self.pad_length = pad_length
    self.num_amino_acids = num_amino_acids
    super(DeepMHC, self).__init__(**kwargs)
    self._build_graph()

  def _build_graph(self):
    """Assemble the conv/pool/dense graph and attach the weighted L2 loss."""
    self.one_hot_seq = Feature(
        shape=(None, self.pad_length, self.num_amino_acids), dtype=tf.float32)

    conv1 = Conv1D(kernel_size=2, filters=512, in_layers=[self.one_hot_seq])
    maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
    conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
    flattened = Flatten(in_layers=[conv2])
    dense1 = Dense(
        out_channels=400, in_layers=[flattened], activation_fn=tf.nn.tanh)
    dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
    output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
    self.add_output(output)

    if self.mode == "regression":
      label = Label(shape=(None, 1))
      loss = L2Loss(in_layers=[label, output])
    else:
      raise NotImplementedError(
          "Classification support not added yet. Missing details in paper.")
    weights = Weights(shape=(None,))
    weighted_loss = WeightedError(in_layers=[loss, weights])
    self.set_loss(weighted_loss)

  def default_generator(self,
                        dataset,
                        epochs=1,
                        predict=False,
                        deterministic=True,
                        pad_batches=True):
    """Yield feed dicts; labels are fed as -log10(IC50)."""
    for epoch in range(epochs):
      for (X_b, y_b, w_b,
           ids_b) in dataset.iterbatches(batch_size=self.batch_size):
        feed_dict = {}
        feed_dict[self.one_hot_seq] = X_b
        if y_b is not None:
          # the network is trained on the negative log of the raw affinity
          feed_dict[self.labels[0]] = -np.log10(y_b)
        if w_b is not None and not predict:
          feed_dict[self.task_weights[0]] = w_b
        yield feed_dict

  def predict_on_batch(self, X, transformers=[], outputs=None):
    """Predict IC50 values for a batch of one-hot encoded sequences."""
    dataset = NumpyDataset(X, y=None)
    generator = self.default_generator(dataset, predict=True, pad_batches=False)
    preds = self.predict_on_generator(generator, transformers, outputs)
    preds = 10**-preds  # Since we train on -log10(IC50)
    return preds

  def create_estimator_inputs(self, feature_columns, weight_column, features,
                              labels, mode):
    """Map tf.estimator feature columns onto this graph's input layers."""
    tensors = dict()
    for layer, column in zip(self.features, feature_columns):
      feature_column = tf.feature_column.input_layer(features, [column])
      if feature_column.dtype != column.dtype:
        feature_column = tf.cast(feature_column, column.dtype)
      tensors[layer] = feature_column
    if weight_column is not None:
      tensors[self.task_weights[0]] = tf.feature_column.input_layer(
          features, [weight_column])
    if labels is not None:
      # BUG FIX: the original wrote `self.labels[[0]]`, which indexes a
      # Python list with a list and raises TypeError at runtime.
      tensors[self.labels[0]] = labels
    return tensors
| mit | 57b36d7c5f0137d7c6ad6efaa7f093c7 | 34.785047 | 85 | 0.641421 | 3.542091 | false | false | false | false |
deepchem/deepchem | deepchem/utils/pdbqt_utils.py | 3 | 10945 | """Utilities for handling PDBQT files."""
from typing import Dict, List, Optional, Set, Tuple
from deepchem.utils.typing import RDKitMol
def pdbqt_to_pdb(filename: Optional[str] = None,
                 pdbqt_data: Optional[List[str]] = None) -> str:
  """Extracts the PDB part of a pdbqt file as a string.

  Either `filename` or `pdbqt_data` must be provided. This function
  strips PDBQT charge information from the provided input.

  Parameters
  ----------
  filename: str, optional (default None)
    Filename of PDBQT file
  pdbqt_data: List[str], optional (default None)
    Raw list of lines containing data from PDBQT file.

  Returns
  -------
  pdb_block: str
    String containing the PDB portion of pdbqt file.
  """
  if filename is not None and pdbqt_data is not None:
    raise ValueError("Only one of filename or pdbqt_data can be provided")
  elif filename is None and pdbqt_data is None:
    raise ValueError("Either filename or pdbqt_data must be provided")
  elif filename is not None:
    # BUG FIX: use a context manager so the file handle is closed promptly
    # (the original `open(filename).readlines()` leaked the handle)
    with open(filename) as f:
      pdbqt_data = f.readlines()
  pdb_block = ""
  # columns beyond 66 hold PDBQT-specific charge/type data; keep the PDB part
  for line in pdbqt_data:
    pdb_block += "%s\n" % line[:66]
  return pdb_block
def convert_protein_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
  """Convert a protein PDB file into a pdbqt file.

  Reads `outfile` (which must already contain a valid PDB representation of
  `mol`), appends a zero charge and the element symbol to each ATOM record,
  and rewrites the file in place. ROOT/ENDROOT/TORSDOF lines are preserved.

  Parameters
  ----------
  mol: rdkit.Chem.rdchem.Mol
    Protein molecule
  outfile: str
    filename which already has a valid pdb representation of mol
  """
  # BUG FIX: use a context manager so the read handle is closed promptly
  # (the original `open(outfile).readlines()` leaked the handle)
  with open(outfile) as fin:
    lines = [x.strip() for x in fin.readlines()]
  out_lines = []
  for line in lines:
    if "ROOT" in line or "ENDROOT" in line or "TORSDOF" in line:
      out_lines.append("%s\n" % line)
      continue
    if not line.startswith("ATOM"):
      continue
    line = line[:66]
    # PDB atom serial numbers are 1-based; RDKit atom indices are 0-based
    atom_index = int(line[6:11])
    atom = mol.GetAtoms()[atom_index - 1]
    line = "%s +0.000 %s\n" % (line, atom.GetSymbol().ljust(2))
    out_lines.append(line)
  with open(outfile, 'w') as fout:
    fout.writelines(out_lines)
def _mol_to_graph(mol: RDKitMol):
  """Convert an RDKit Mol to a NetworkX graph.

  Atom indices become nodes and bonds become edges.

  Parameters
  ----------
  mol: rdkit.Chem.rdchem.Mol
    The molecule to convert into a graph.

  Returns
  -------
  graph: networkx.Graph
    Contains atom indices as nodes, bonds as edges.

  Notes
  -----
  This function requires NetworkX to be installed.
  """
  try:
    import networkx as nx
  except ModuleNotFoundError:
    raise ImportError("This function requires NetworkX to be installed.")

  graph = nx.Graph()
  graph.add_nodes_from(range(mol.GetNumAtoms()))
  for bond in mol.GetBonds():
    graph.add_edge(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())
  return graph
def _get_rotatable_bonds(mol: RDKitMol) -> List[Tuple[int, int]]:
  """Find the rotatable bonds in a ligand molecule.

  https://github.com/rdkit/rdkit/blob/f4529c910e546af590c56eba01f96e9015c269a6/Code/GraphMol/Descriptors/Lipinski.cpp#L107
  Taken from rdkit source to find which bonds are rotatable store
  rotatable bonds in (from_atom, to_atom)

  Parameters
  ----------
  mol: rdkit.Chem.rdchem.Mol
    Ligand molecule

  Returns
  -------
  rotatable_bonds: List[Tuple[int, int]]
    List of rotatable bonds in molecule, each as (from_atom, to_atom)
    atom-index pairs.

  Notes
  -----
  This function requires RDKit to be installed.
  """
  try:
    from rdkit import Chem
    from rdkit.Chem import rdmolops
  except ModuleNotFoundError:
    raise ImportError("This function requires RDKit to be installed.")
  # SMARTS pattern for strict rotatable bonds, transcribed from RDKit's
  # Lipinski descriptor source (see URL in the docstring).
  pattern = Chem.MolFromSmarts(
      "[!$(*#*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])("
      "[CH3])[CH3])&!$([CD3](=[N,O,S])-!@[#7,O,S!D1])&!$([#7,O,S!D1]-!@[CD3]="
      "[N,O,S])&!$([CD3](=[N+])-!@[#7!D1])&!$([#7!D1]-!@[CD3]=[N+])]-!@[!$(*#"
      "*)&!D1&!$(C(F)(F)F)&!$(C(Cl)(Cl)Cl)&!$(C(Br)(Br)Br)&!$(C([CH3])([CH3])"
      "[CH3])]")
  # Ring perception must run before substructure matching on a raw mol.
  rdmolops.FastFindRings(mol)
  rotatable_bonds = mol.GetSubstructMatches(pattern)
  return rotatable_bonds
def convert_mol_to_pdbqt(mol: RDKitMol, outfile: str) -> None:
  """Writes the provided ligand molecule to specified file in pdbqt format.

  Creates a torsion tree and write to pdbqt file. The torsion tree
  represents rotatable bonds in the molecule: removing those bonds
  splits the molecule into rigid fragments, which become the ROOT and
  BRANCH sections of the PDBQT output.

  Parameters
  ----------
  mol: rdkit.Chem.rdchem.Mol
    The molecule whose value is stored in pdb format in outfile
  outfile: str
    Filename for a valid pdb file with the extention .pdbqt

  Notes
  -----
  This function requires NetworkX to be installed.
  """
  try:
    import networkx as nx
  except ModuleNotFoundError:
    raise ImportError("This function requires NetworkX to be installed.")
  # Walk through the original file and extract ATOM/HETATM lines and
  # add PDBQT charge annotations.
  pdb_map = _create_pdb_map(outfile)
  graph = _mol_to_graph(mol)
  rotatable_bonds = _get_rotatable_bonds(mol)
  # Remove rotatable bonds from this molecule; each remaining connected
  # component is a rigid fragment of the torsion tree.
  for bond in rotatable_bonds:
    graph.remove_edge(bond[0], bond[1])
  # list(...) rather than a pass-through comprehension.
  components = list(nx.connected_components(graph))
  comp_map = _create_component_map(mol, components)
  used_partitions = set()
  lines = []
  # The root is the largest connected component.
  root = max(enumerate(components), key=lambda x: len(x[1]))[0]
  # Write the root component
  lines.append("ROOT\n")
  for atom in components[root]:
    lines.append(pdb_map[atom])
  lines.append("ENDROOT\n")
  # We've looked at the root, so take note of that
  used_partitions.add(root)
  # Depth-first walk of the remaining fragments, nesting a
  # BRANCH/ENDBRANCH section for each rotatable bond crossed.
  for bond in rotatable_bonds:
    valid, next_partition = _valid_bond(used_partitions, bond, root, comp_map)
    if not valid:
      continue
    _dfs(used_partitions, next_partition, bond, components, rotatable_bonds,
         lines, pdb_map, comp_map)
  lines.append("TORSDOF %s" % len(rotatable_bonds))
  with open(outfile, 'w') as fout:
    for line in lines:
      fout.write(line)
def _create_pdb_map(outfile: str) -> Dict[int, str]:
"""Create a mapping from atom numbers to lines to write to pdbqt
This is a map from rdkit atom number to its line in the pdb
file. We also add the two additional columns required for
pdbqt (charge, symbol).
Note rdkit atoms are 0 indexed and pdb files are 1 indexed
Parameters
----------
outfile: str
filename which already has a valid pdb representation of mol
Returns
-------
pdb_map: Dict[int, str]
Maps rdkit atom numbers to lines to be written to PDBQT file.
"""
lines = [x.strip() for x in open(outfile).readlines()]
lines = list(
filter(lambda x: x.startswith("HETATM") or x.startswith("ATOM"), lines))
lines = [x[:66] for x in lines]
pdb_map = {}
for line in lines:
my_values = line.split()
atom_number = int(my_values[1])
atom_symbol = my_values[2]
atom_symbol = ''.join([i for i in atom_symbol if not i.isdigit()])
line = line.replace("HETATM", "ATOM ")
line = "%s +0.000 %s\n" % (line, atom_symbol.ljust(2))
pdb_map[atom_number - 1] = line
return pdb_map
def _create_component_map(mol: RDKitMol,
components: List[List[int]]) -> Dict[int, int]:
"""Creates a map from atom ids to disconnected component id
For each atom in `mol`, maps it to the id of the component in the
molecule. The intent is that this is used on a molecule whose
rotatable bonds have been removed. `components` is a list of the
connected components after this surgery.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
The molecule to find disconnected components in
components: List[List[int]]
List of connected components
Returns
-------
comp_map: Dict[int, int]
Maps atom ids to component ides
"""
comp_map = {}
for i in range(mol.GetNumAtoms()):
for j in range(len(components)):
if i in components[j]:
comp_map[i] = j
break
return comp_map
def _dfs(used_partitions: Set[int], current_partition: int,
         bond: Tuple[int, int], components: List[List[int]],
         rotatable_bonds: List[Tuple[int, int]], lines: List[str],
         pdb_map: Dict[int, str], comp_map: Dict[int, int]) -> List[str]:
  """
  This function does a depth first search through the torsion tree

  Emits a nested BRANCH/ENDBRANCH section for `current_partition` and
  recurses into every unexplored partition reachable by a rotatable bond.

  Parameters
  ----------
  used_partitions: Set[int]
    Partitions which have already been used
  current_partition: int
    The current partition to expand
  bond: Tuple[int, int]
    the bond which goes from the previous partition into this partition
  components: List[List[int]]
    List of connected components
  rotatable_bonds: List[Tuple[int, int]]
    List of rotatable bonds. This tuple is (from_atom, to_atom).
  lines: List[str]
    List of lines to write
  pdb_map: Dict[int, str]
    Maps atom numbers to PDBQT lines to write
  comp_map: Dict[int, int]
    Maps atom numbers to component numbers

  Returns
  -------
  lines: List[str]
    List of lines to write. This has more appended lines.
  """
  # Orient the bond so that bond[1] lies inside the partition we expand.
  if comp_map[bond[1]] != current_partition:
    bond = (bond[1], bond[0])
  used_partitions.add(comp_map[bond[0]])
  used_partitions.add(comp_map[bond[1]])
  # PDBQT atom serials are 1-indexed, hence the +1 on both endpoints.
  lines.append("BRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
  for atom in components[current_partition]:
    lines.append(pdb_map[atom])
  # Recurse into any not-yet-visited partition reachable from here.
  for b in rotatable_bonds:
    valid, next_partition = \
        _valid_bond(used_partitions, b, current_partition, comp_map)
    if not valid:
      continue
    lines = _dfs(used_partitions, next_partition, b, components,
                 rotatable_bonds, lines, pdb_map, comp_map)
  lines.append("ENDBRANCH %4s %4s\n" % (bond[0] + 1, bond[1] + 1))
  return lines
def _valid_bond(used_partitions: Set[int], bond: Tuple[int, int],
current_partition: int,
comp_map: Dict[int, int]) -> Tuple[bool, int]:
"""Helper method to find next partition to explore.
Used to check if a bond goes from the current partition into a
partition that is not yet explored
Parameters
----------
used_partions: Set[int]
Partitions which have already been used
bond: Tuple[int, int]
The bond to check if it goes to an unexplored partition.
This tuple is (from_atom, to_atom).
current_partition: int
The current partition of the DFS
comp_map: Dict[int, int]
Maps atom ids to component ids
Returns
-------
is_valid: bool
Whether to exist the next partition or not
next_partition: int
The next partition to explore
"""
part1 = comp_map[bond[0]]
part2 = comp_map[bond[1]]
if part1 != current_partition and part2 != current_partition:
return False, 0
if part1 == current_partition:
next_partition = part2
else:
next_partition = part1
return next_partition not in used_partitions, next_partition
| mit | 4f8fc5cff69df4b05093626fb699247f | 30.182336 | 122 | 0.66222 | 3.382262 | false | false | false | false |
deepchem/deepchem | deepchem/rl/envs/test_tictactoe.py | 7 | 1641 | from unittest import TestCase
import numpy as np
import deepchem.rl.envs.tictactoe
class TestTicTacToeEnvironment(TestCase):
  """Unit tests for the TicTacToe reinforcement-learning environment."""

  def test_constructor(self):
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    assert len(board.state) == 1
    assert board.state[0].shape == (3, 3, 2)
    # Sum is 1 when the environment opens with a random O move, 0 when
    # X is to move first.
    assert np.sum(board.state[0]) == 1 or np.sum(board.state[0]) == 0

  def test_step(self):
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
    # Start from an empty board so the result of action 0 is determined.
    board._state = [np.zeros(shape=(3, 3, 2), dtype=np.float32)]
    board.step(0)
    assert np.all(board.state[0][0][0] == X)

  def test_winner(self):
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
    board.state[0][0][0] = X
    board.state[0][0][1] = X
    # Two in a row is not yet a win.
    assert not board.check_winner(X)
    board.state[0][0][2] = X
    assert board.check_winner(X)

  def test_game_over(self):
    # NOTE(review): this is a byte-for-byte duplicate of test_winner and
    # never calls a game-over predicate -- presumably it should exercise
    # board.game_over(); confirm against the environment's API.
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    X = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.X
    board.state[0][0][0] = X
    board.state[0][0][1] = X
    assert not board.check_winner(X)
    board.state[0][0][2] = X
    assert board.check_winner(X)

  def test_display(self):
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    s = board.display()
    # A fresh board may contain an O opening move but never an X.
    assert s.find("X") == -1

  def test_get_O_move(self):
    board = deepchem.rl.envs.tictactoe.TicTacToeEnvironment()
    empty = deepchem.rl.envs.tictactoe.TicTacToeEnvironment.EMPTY
    move = board.get_O_move()
    # The proposed move must target a currently-empty square.
    assert np.all(board.state[0][move[0]][move[1]] == empty)
| mit | 823056b8e3c47cd9a845d6f1b2d7ea98 | 32.489796 | 69 | 0.680073 | 2.699013 | false | true | false | false |
gae-init/gae-init-upload | main/model/config_auth.py | 12 | 5107 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
class ConfigAuth(object):
  """Mixin holding OAuth credentials for every supported sign-in provider.

  Each provider contributes a pair of ndb string properties and a
  ``has_<provider>`` helper reporting whether both credentials are set.
  """
  azure_ad_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  azure_ad_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  bitbucket_key = ndb.StringProperty(default='', verbose_name='Key')
  bitbucket_secret = ndb.StringProperty(default='', verbose_name='Secret')
  dropbox_app_key = ndb.StringProperty(default='', verbose_name='App Key')
  dropbox_app_secret = ndb.StringProperty(default='', verbose_name='App Secret')
  facebook_app_id = ndb.StringProperty(default='', verbose_name='App ID')
  facebook_app_secret = ndb.StringProperty(default='', verbose_name='App Secret')
  github_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  github_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  google_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  google_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  instagram_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  instagram_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  linkedin_api_key = ndb.StringProperty(default='', verbose_name='API Key')
  linkedin_secret_key = ndb.StringProperty(default='', verbose_name='Secret Key')
  mailru_app_id = ndb.StringProperty(default='', verbose_name='App ID')
  mailru_app_secret = ndb.StringProperty(default='', verbose_name='App Secret')
  microsoft_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  microsoft_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  reddit_client_id = ndb.StringProperty(default='', verbose_name='Client ID')
  reddit_client_secret = ndb.StringProperty(default='', verbose_name='Client Secret')
  twitter_consumer_key = ndb.StringProperty(default='', verbose_name='Consumer Key')
  twitter_consumer_secret = ndb.StringProperty(default='', verbose_name='Consumer Secret')
  vk_app_id = ndb.StringProperty(default='', verbose_name='App ID')
  vk_app_secret = ndb.StringProperty(default='', verbose_name='App Secret')
  yahoo_consumer_key = ndb.StringProperty(default='', verbose_name='Consumer Key')
  yahoo_consumer_secret = ndb.StringProperty(default='', verbose_name='Consumer Secret')

  @property
  def has_azure_ad(self):
    return bool(self.azure_ad_client_id and self.azure_ad_client_secret)

  @property
  def has_bitbucket(self):
    return bool(self.bitbucket_key and self.bitbucket_secret)

  @property
  def has_dropbox(self):
    return bool(self.dropbox_app_key and self.dropbox_app_secret)

  @property
  def has_facebook(self):
    return bool(self.facebook_app_id and self.facebook_app_secret)

  @property
  def has_google(self):
    return bool(self.google_client_id and self.google_client_secret)

  @property
  def has_github(self):
    return bool(self.github_client_id and self.github_client_secret)

  @property
  def has_instagram(self):
    return bool(self.instagram_client_id and self.instagram_client_secret)

  @property
  def has_linkedin(self):
    return bool(self.linkedin_api_key and self.linkedin_secret_key)

  @property
  def has_mailru(self):
    return bool(self.mailru_app_id and self.mailru_app_secret)

  @property
  def has_microsoft(self):
    return bool(self.microsoft_client_id and self.microsoft_client_secret)

  @property
  def has_reddit(self):
    return bool(self.reddit_client_id and self.reddit_client_secret)

  @property
  def has_twitter(self):
    return bool(self.twitter_consumer_key and self.twitter_consumer_secret)

  @property
  def has_vk(self):
    return bool(self.vk_app_id and self.vk_app_secret)

  @property
  def has_yahoo(self):
    return bool(self.yahoo_consumer_key and self.yahoo_consumer_secret)

  # API serialization schema: one entry per ndb property above.
  FIELDS = {
      'azure_ad_client_id': fields.String,
      'azure_ad_client_secret': fields.String,
      'bitbucket_key': fields.String,
      'bitbucket_secret': fields.String,
      'dropbox_app_key': fields.String,
      'dropbox_app_secret': fields.String,
      'facebook_app_id': fields.String,
      'facebook_app_secret': fields.String,
      'github_client_id': fields.String,
      'github_client_secret': fields.String,
      'google_client_id': fields.String,
      'google_client_secret': fields.String,
      'instagram_client_id': fields.String,
      'instagram_client_secret': fields.String,
      'linkedin_api_key': fields.String,
      'linkedin_secret_key': fields.String,
      # Fixed: these keys previously read 'mailru_client_id' /
      # 'mailru_client_secret', which do not match the model properties
      # (mailru_app_id / mailru_app_secret) declared above.
      'mailru_app_id': fields.String,
      'mailru_app_secret': fields.String,
      'microsoft_client_id': fields.String,
      'microsoft_client_secret': fields.String,
      'reddit_client_id': fields.String,
      'reddit_client_secret': fields.String,
      'twitter_consumer_key': fields.String,
      'twitter_consumer_secret': fields.String,
      'vk_app_id': fields.String,
      'vk_app_secret': fields.String,
      'yahoo_consumer_key': fields.String,
      'yahoo_consumer_secret': fields.String,
  }

  FIELDS.update(model.Base.FIELDS)
| mit | 3a17ecbc7fe9f0eea6a2d2425612f57f | 38.898438 | 90 | 0.722929 | 3.566341 | false | false | false | false |
gae-init/gae-init-upload | main/auth/gae.py | 18 | 1164 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.api import users
import flask
import auth
import model
import util
from main import app
@app.route('/signin/gae/')
def signin_gae():
  """Redirect the visitor to App Engine's built-in login page."""
  # Persist query parameters (e.g. the next URL) across the redirect.
  auth.save_request_params()
  gae_url = users.create_login_url(flask.url_for('gae_authorized'))
  return flask.redirect(gae_url)
@app.route('/api/auth/callback/gae/')
def gae_authorized():
  """Callback hit after the App Engine login flow completes."""
  gae_user = users.get_current_user()
  if gae_user is None:
    # Login was cancelled or produced no session.
    flask.flash('You denied the request to sign in.')
    return flask.redirect(util.get_next_url())
  user_db = retrieve_user_from_gae(gae_user)
  return auth.signin_user_db(user_db)
def retrieve_user_from_gae(gae_user):
  """Find or create the datastore user record for a GAE account."""
  auth_id = 'federated_%s' % gae_user.user_id()
  user_db = model.User.get_by('auth_ids', auth_id)
  if user_db:
    # Promote an existing user to admin if GAE now reports them as one.
    if not user_db.admin and users.is_current_user_admin():
      user_db.admin = True
      user_db.put()
    return user_db
  # First sign-in with this account: create a verified user record.
  return auth.create_user_db(
      auth_id=auth_id,
      name=util.create_name_from_email(gae_user.email()),
      username=gae_user.email(),
      email=gae_user.email(),
      verified=True,
      admin=users.is_current_user_admin(),
  )
| mit | a13e6439171e408ec2d69d473cc12ae9 | 22.755102 | 67 | 0.683849 | 2.888337 | false | false | false | false |
gamechanger/dusty | tests/unit/commands/validate_test.py | 1 | 4225 | from schemer import ValidationException
from ...testcases import DustyTestCase
from ..utils import apply_required_keys
from dusty.commands.validate import (_validate_app_references, _validate_cycle_free,
_check_name_overlap)
from dusty import constants
class ValidatorTest(DustyTestCase):
    """Tests for spec validation: dangling references, dependency cycles,
    and name collisions between apps, libs, and services."""

    def test_validate_app_with_bad_service(self):
        # app1 depends on service2, which is never defined.
        specs = {'apps': {
                    'app1': {
                        'depends': {
                            'services': [
                                'service1',
                                'service2'
                            ]
                        }
                    }
                },
                'services': {
                    'service1': {}
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _validate_app_references(specs['apps']['app1'], specs)

    def test_validate_app_with_bad_app(self):
        # app1 depends on app3, which is never defined.
        specs = {'apps': {
                    'app1': {
                        'depends': {
                            'apps': [
                                'app3',
                            ]
                        }
                    },
                    'app2': {}
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _validate_app_references(specs['apps']['app1'], specs)

    def test_validate_app_with_bad_lib(self):
        # app1 depends on lib2, which is never defined.
        specs = {'apps': {
                    'app1': {
                        'depends': {
                            'libs': [
                                'lib2',
                            ]
                        }
                    },
                },
                'libs': {
                    'lib1': {}
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _validate_app_references(specs['apps']['app1'], specs)

    def test_app_cycle_detection(self):
        # An app depending on itself is the smallest possible cycle.
        specs = {'apps': {
                    'app1': {
                        'depends': {
                            'apps': [
                                'app1',
                            ]
                        }
                    }
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _validate_cycle_free(specs)

    def test_lib_cycle_detection(self):
        # lib1 -> lib2 -> lib3 -> lib1 forms a three-node cycle.
        specs = {
            'apps': {},
            'libs': {
                'lib1': {
                    'depends': {
                        'libs': [
                            'lib2',
                        ]
                    }
                },
                'lib2': {
                    'depends': {
                        'libs': [
                            'lib3',
                        ]
                    }
                },
                'lib3': {
                    'depends': {
                        'libs': [
                            'lib1',
                        ]
                    }
                }
            }
        }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _validate_cycle_free(specs)

    def test_app_lib_unique_detection(self):
        # The same name may not be used by both an app and a lib.
        specs = {'apps': {
                    'overlap': {
                    },
                },
                'libs': {
                    'overlap': {
                    },
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _check_name_overlap(specs)

    def test_app_service_unique_detection(self):
        # The same name may not be used by both an app and a service.
        specs = {'apps': {
                    'overlap': {
                    },
                },
                'services': {
                    'overlap': {
                    },
                }
            }
        apply_required_keys(specs)
        specs = self.make_test_specs(specs)
        with self.assertRaises(ValidationException):
            _check_name_overlap(specs)
| mit | eb92c750ca7f33927a7e8685cbbb5b47 | 28.545455 | 84 | 0.370888 | 5.029762 | false | true | false | false |
gamechanger/dusty | dusty/systems/docker/__init__.py | 1 | 3898 | import os
import docker
import logging
from ... import constants
from ...log import log_to_client
from ...memoize import memoized
from ...subprocess import check_output_demoted
from ...compiler.spec_assembler import get_specs
def exec_in_container(container, command, *args):
    """Run `command` (with optional `args`) inside a running container.

    Returns the output produced by the exec'd process.
    """
    client = get_docker_client()
    # Docker's exec API takes a single command string, not an argv list.
    exec_instance = client.exec_create(container['Id'],
                                       ' '.join([command] + list(args)))
    return client.exec_start(exec_instance['Id'])
def get_dusty_images():
    """Collect every image named by the Dusty specs (apps and services).

    Returned as a set of "repository:tag" strings; a ":latest" tag is
    appended to any image that does not specify one.
    """
    specs = get_specs()
    tagged = set()
    for spec in specs['apps'].values() + specs['services'].values():
        if 'image' not in spec:
            continue
        name = spec['image']
        tagged.add(name if ':' in name else "{}:latest".format(name))
    return tagged
def get_dusty_container_name(service_name):
    """Return the canonical Docker container name for a Dusty service."""
    return '_'.join(['dusty', service_name, '1'])
@memoized
def get_docker_env():
    """Parse `docker-machine env` output into an environment dict.

    Only `export KEY=VALUE` lines are considered; double quotes are
    stripped from values. Memoized, so the shell-out happens once.
    """
    env = {}
    output = check_output_demoted(['docker-machine', 'env', constants.VM_MACHINE_NAME, '--shell', 'bash'], redirect_stderr=True)
    for line in output.splitlines():
        if not line.strip().startswith('export'):
            continue
        k, v = line.strip().split()[1].split('=')
        v = v.replace('"', '')
        env[k] = v
    return env
@memoized
def get_docker_client():
    """Ripped off and slightly modified based on docker-py's
    kwargs_from_env utility function."""
    env = get_docker_env()
    host, cert_path, tls_verify = env['DOCKER_HOST'], env['DOCKER_CERT_PATH'], env['DOCKER_TLS_VERIFY']
    params = {'base_url': host.replace('tcp://', 'https://'),
              'timeout': None,
              'version': 'auto'}
    # NOTE(review): tls_verify is the raw string from `docker-machine env`,
    # so any non-empty value (including "0") is truthy here -- confirm
    # that is the intended behavior.
    if tls_verify and cert_path:
        params['tls'] = docker.tls.TLSConfig(
            client_cert=(os.path.join(cert_path, 'cert.pem'),
                         os.path.join(cert_path, 'key.pem')),
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            verify=True,
            ssl_version=None,
            assert_hostname=False)
    return docker.Client(**params)
def get_dusty_containers(services, include_exited=False):
    """Get a list of containers associated with the list
    of services. If no services are provided, attempts to
    return all containers associated with Dusty."""
    client = get_docker_client()
    if services:
        # One lookup per requested service; drop services with no container.
        containers = [get_container_for_app_or_service(service, include_exited=include_exited) for service in services]
        return [container for container in containers if container]
    else:
        # No filter: anything whose name starts with /dusty is ours.
        return [container
                for container in client.containers(all=include_exited)
                if any(name.startswith('/dusty') for name in container.get('Names', []))]
def get_container_for_app_or_service(app_or_service_name, raise_if_not_found=False, include_exited=False):
    """Return the container dict for a Dusty app or service, or None.

    Matches on the canonical dusty_<name>_1 container name. Raises
    RuntimeError instead of returning None when `raise_if_not_found`.
    """
    client = get_docker_client()
    for container in client.containers(all=include_exited):
        if '/{}'.format(get_dusty_container_name(app_or_service_name)) in container['Names']:
            return container
    if raise_if_not_found:
        raise RuntimeError('No running container found for {}'.format(app_or_service_name))
    return None
def get_canonical_container_name(container):
    """Return the canonical container name, which should be
    of the form dusty_<service_name>_1.

    The Docker API reports one name per link pointing at the container;
    the canonical name is always the shortest of them. The leading '/'
    is stripped from the result."""
    shortest = min(container['Names'], key=len)
    return shortest[1:]
def get_app_or_service_name_from_container(container):
    """Extract the app/service name from a container's canonical name."""
    canonical = get_canonical_container_name(container)
    return canonical.split('_')[1]
| mit | 0f4654ae97a6f546ce82a3046cefcaab | 41.369565 | 128 | 0.654182 | 3.777132 | false | false | false | false |
gamechanger/dusty | tests/unit/commands/test_test.py | 1 | 11902 | from mock import patch, call, Mock
from ...testcases import DustyTestCase
from ..utils import get_app_dusty_schema, get_lib_dusty_schema
from dusty.commands import test
from dusty.schemas.base_schema_class import DustySpecs
from dusty.source import Repo
@patch('dusty.commands.test.initialize_docker_vm')
@patch('dusty.commands.test.get_docker_client')
@patch('dusty.commands.test.get_expanded_libs_specs')
@patch('dusty.commands.test.ensure_current_image')
@patch('dusty.systems.nfs.update_nfs_with_repos')
@patch('dusty.compiler.compose.get_app_volume_mounts')
@patch('dusty.compiler.compose.get_lib_volume_mounts')
class TestTestsCommands(DustyTestCase):
    """Tests for `dusty test`: suite lookup, test command construction,
    and repo syncing during test setup.

    The class-level patches are injected into every test method as
    positional mocks, in reverse declaration order (innermost first).
    """

    def setUp(self):
        super(TestTestsCommands, self).setUp()
        # Fixture: one app and one lib with a single `nose` suite each,
        # plus a lib with two suites for the run-all path.
        self.specs = self.make_test_specs({'apps': {
                                               'app-a': get_app_dusty_schema({'test': {'suites': [{'name': 'nose', 'command': ['nosetests app-a']}]},
                                                                              'mount': '/app-a'},
                                                                             name='app-a')},
                                           'libs': {
                                               'lib-a': get_lib_dusty_schema({'test': {'suites': [{'name': 'nose', 'command': ['nosetests lib-a']}]},
                                                                              'mount': '/lib-a'},
                                                                             name='lib-a'),
                                               'multi-suite-lib': get_lib_dusty_schema({'test': {'suites': [{'name': 'nose1', 'command': ['nosetests lib-a']},
                                                                                                            {'name': 'nose2', 'command': ['nosetests lib-a']}]},
                                                                                        'mount': '/lib-a'},
                                                                                       name='lib-a')}})

    def test_run_one_suite_lib_not_found(self, fake_lib_get_volumes, fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image, fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(KeyError):
            test.run_one_suite('lib-c', '', [])

    def test_run_one_suite_app_not_found(self, fake_lib_get_volumes, fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image, fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(KeyError):
            test.run_one_suite('app-c', '', [])

    def test_run_all_suites_lib_not_found(self, fake_lib_get_volumes, fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image, fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(KeyError):
            test.run_all_suites('lib-c')

    def test_run_all_suites_app_not_found(self, fake_lib_get_volumes, fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image, fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(KeyError):
            test.run_all_suites('app-c')

    def test_run_one_suite_suite_not_found(self, fake_lib_get_volumes, fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image, fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        # 'nosetests' is not a declared suite name ('nose' is).
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(RuntimeError):
            test.run_one_suite('app-a', 'nosetests', [])

    @patch('dusty.commands.test._run_tests_with_image')
    @patch('dusty.command_file._write_commands_to_file')
    @patch('dusty.commands.test.sys.exit')
    def test_run_one_suite_lib_found(self, fake_exit, fake_write_commands, fake_run_tests, fake_lib_get_volumes,
                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        fake_lib_get_volumes.return_value = ['/host/route:/container/route']
        fake_app_get_volumes.return_value = []
        fake_get_docker_client.return_value = 'docker-client'
        fake_run_tests.return_value = 0
        test.run_one_suite('lib-a', 'nose', [])
        fake_update_nfs.assert_has_calls([])
        # The suite's exit code is propagated through sys.exit.
        fake_exit.assert_has_calls([call(0)])

    @patch('dusty.commands.test._run_tests_with_image')
    @patch('dusty.command_file._write_commands_to_file')
    @patch('dusty.commands.test.sys.exit')
    def test_run_one_suite_app_found(self, fake_exit, fake_write_commands, fake_run_tests, fake_lib_get_volumes,
                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        fake_lib_get_volumes.return_value = ['/host/route:/container/route']
        fake_app_get_volumes.return_value = []
        fake_get_docker_client.return_value = 'docker-client'
        fake_run_tests.return_value = 1
        test.run_one_suite('app-a','nose', [])
        fake_exit.assert_has_calls([call(1)])

    @patch('dusty.commands.test._run_tests_with_image')
    @patch('dusty.commands.test.sys.exit')
    def test_run_all_suites_lib_found(self, fake_exit, fake_run_tests, fake_lib_get_volumes,
                                      fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                      fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        # Both suites run even though the first succeeds; the worst exit
        # code (1) wins.
        fake_run_tests.side_effect = [0, 1]
        test.run_all_suites('multi-suite-lib')
        fake_run_tests.assert_has_calls([call('multi-suite-lib', 'nose1', None),
                                         call('multi-suite-lib', 'nose2', None)])
        fake_exit.assert_has_calls([call(1)])

    @patch('dusty.commands.test._run_tests_with_image')
    @patch('dusty.commands.test.sys.exit')
    def test_run_all_suites_app_found(self, fake_exit, fake_run_tests, fake_lib_get_volumes,
                                      fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                      fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        fake_run_tests.return_value = 0
        test.run_all_suites('app-a')
        fake_run_tests.assert_has_calls([call('app-a', 'nose', None)])
        fake_exit.assert_has_calls([call(0)])

    def test_construct_test_command_invalid_name_app(self,fake_lib_get_volumes,
                                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(RuntimeError):
            test._construct_test_command('app-a', 'run_tests', [])

    def test_construct_test_command_invalid_name_lib(self, fake_lib_get_volumes,
                                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        with self.assertRaises(RuntimeError):
            test._construct_test_command('lib-a', 'run_tests', [])

    def test_construct_test_command_app_no_arguments(self, fake_lib_get_volumes,
                                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        return_command = test._construct_test_command('app-a', 'nose', [])
        self.assertEquals('sh /command_files/dusty_command_file_app-a_test_nose.sh', return_command.strip())

    def test_construct_test_command_app_arguments(self, fake_lib_get_volumes,
                                                  fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                  fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        # Extra CLI arguments are appended verbatim to the command file call.
        fake_expanded_libs.return_value = self.specs
        return_command = test._construct_test_command('app-a', 'nose', ['1', '2', '3'])
        self.assertEquals('sh /command_files/dusty_command_file_app-a_test_nose.sh 1 2 3', return_command.strip())

    def test_construct_test_command_lib_no_arguments(self, fake_lib_get_volumes,
                                                     fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                     fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        return_command = test._construct_test_command('lib-a', 'nose', [])
        self.assertEquals('sh /command_files/dusty_command_file_lib-a_test_nose.sh', return_command.strip())

    def test_construct_test_command_lib_arguments(self, fake_lib_get_volumes,
                                                  fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                                  fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        fake_expanded_libs.return_value = self.specs
        return_command = test._construct_test_command('lib-a', 'nose', ['1', '2', '3'])
        self.assertEquals('sh /command_files/dusty_command_file_lib-a_test_nose.sh 1 2 3', return_command.strip())

    @patch('dusty.commands.test._update_test_repos')
    @patch('dusty.commands.test.make_test_command_files')
    @patch('dusty.commands.test.get_same_container_repos_from_spec')
    def test_pull_repos_and_sync_1(self, fake_same_container_repos, fake_make, fake_update_test, fake_lib_get_volumes,
                                   fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                   fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        # With pull_repos=True the test repos are updated before syncing.
        fake_app_spec = Mock()
        fake_specs = Mock()
        fake_repo_spec = Mock()
        fake_specs.get_app_or_lib.return_value = fake_app_spec
        fake_expanded_libs.return_value = fake_specs
        fake_same_container_repos.return_value = [fake_repo_spec]
        test.setup_for_test('app1', pull_repos=True)
        fake_update_test.assert_has_calls([call('app1')])
        fake_make.assert_has_calls([call('app1', fake_specs)])
        fake_update_nfs.assert_has_calls([call([fake_repo_spec])])

    @patch('dusty.commands.test._update_test_repos')
    @patch('dusty.commands.test.make_test_command_files')
    @patch('dusty.commands.test.get_same_container_repos_from_spec')
    def test_pull_repos_and_sync_2(self, fake_same_container_repos, fake_make, fake_update_test, fake_lib_get_volumes,
                                   fake_app_get_volumes, fake_update_nfs, fake_ensure_current_image,
                                   fake_expanded_libs, fake_get_docker_client, fake_initialize_vm):
        # Without pull_repos the repo update step is skipped.
        fake_app_spec = Mock()
        fake_specs = Mock()
        fake_repo_spec = Mock()
        fake_specs.get_app_or_lib.return_value = fake_app_spec
        fake_expanded_libs.return_value = fake_specs
        fake_same_container_repos.return_value = [fake_repo_spec]
        test.setup_for_test('app1')
        fake_update_test.assert_has_calls([])
        fake_make.assert_has_calls([call('app1', fake_specs)])
        fake_update_nfs.assert_has_calls([call([fake_repo_spec])])
| mit | 0d08a9dddc0716b1e5e020505deeccaf | 60.350515 | 201 | 0.593934 | 3.605574 | false | true | false | false |
skyfielders/python-skyfield | skyfield/planetarylib.py | 1 | 9586 | # -*- coding: utf-8 -*-
"""Open a BPC file, read its angles, and produce rotation matrices."""
from numpy import array, cos, nan, sin
from jplephem.pck import DAF, PCK
from .constants import ASEC2RAD, AU_KM, DAY_S, tau
from .data import text_pck
from .functions import _T, mxv, mxm, mxmxm, rot_x, rot_y, rot_z
from .units import Angle, Distance
from .vectorlib import VectorFunction
_TEXT_MAGIC_NUMBERS = b'KPL/FK', b'KPL/PCK'
_NAN3 = array((nan, nan, nan))
_halftau = tau / 2.0
_quartertau = tau / 4.0
class PlanetaryConstants(object):
"""Planetary constants manager.
You can use this class to build working models of Solar System
bodies by loading text planetary constant files and binary
orientation kernels. For a full description of how to use this, see
:doc:`planetary`.
"""
    def __init__(self):
        # Name -> value assignments parsed from text kernel files.
        self.variables = {}
        # Binary PCK files loaded so far; kept so their segments stay usable.
        self._binary_files = []
        # Maps a body's integer code to its binary orientation segment.
        self._segment_map = {}
    @property
    def assignments(self):  # deprecated original name for the variables dict
        # Kept for backward compatibility; prefer ``.variables``.
        return self.variables
def read_text(self, file):
"""Read frame variables from a KPL/FK file.
Appropriate files will typically have the extension ``.tf`` or
``.tpc`` and will define a series of names and values that will
be loaded into this object's ``.variables`` dictionary.
>>> from skyfield.api import load
>>> pc = PlanetaryConstants()
>>> pc.read_text(load('moon_080317.tf'))
>>> pc.variables['FRAME_31006_NAME']
'MOON_PA_DE421'
"""
file.seek(0)
try:
if not file.read(7).startswith(_TEXT_MAGIC_NUMBERS):
raise ValueError('file must start with one of the patterns:'
' {0}'.format(_TEXT_MAGIC_NUMBERS))
text_pck.load(file, self.variables)
finally:
file.close()
def read_binary(self, file):
"""Read binary segments descriptions from a DAF/PCK file.
Binary segments live in ``.bpc`` files and predict how a body
like a planet or moon will be oriented on a given date.
"""
file.seek(0)
if file.read(7) != b'DAF/PCK':
raise ValueError('file must start with the bytes "DAF/PCK"')
pck = PCK(DAF(file))
self._binary_files.append(pck)
for segment in pck.segments:
self._segment_map[segment.body] = segment
def _get_assignment(self, key):
"""Do .variables[key] but with a pretty exception on failure."""
try:
return self.variables[key]
except KeyError:
e = ValueError(_missing_name_message.format(key))
e.__cause__ = None
raise e
def build_frame_named(self, name):
"""Given a frame name, return a :class:`Frame` object."""
integer = self._get_assignment('FRAME_{0}'.format(name))
return self.build_frame(integer)
def build_frame(self, integer, _segment=None):
"""Given a frame integer code, return a :class:`Frame` object."""
center = self._get_assignment('FRAME_{0}_CENTER'.format(integer))
spec = self.variables.get('TKFRAME_{0}_SPEC'.format(integer))
if spec is None:
matrix = None
else:
if spec == 'ANGLES':
angles = self.variables['TKFRAME_{0}_ANGLES'.format(integer)]
axes = self.variables['TKFRAME_{0}_AXES'.format(integer)]
units = self.variables['TKFRAME_{0}_UNITS'.format(integer)]
scale = _unit_scales[units]
matrix = 1,0,0, 0,1,0, 0,0,1
matrix = array(matrix)
matrix.shape = 3, 3
for angle, axis in list(zip(angles, axes)):
rot = _rotations[axis]
matrix = mxm(rot(angle * scale), matrix)
elif spec == 'MATRIX':
matrix = self.variables['TKFRAME_{0}_MATRIX'.format(integer)]
matrix = array(matrix)
matrix.shape = 3, 3
else:
raise NotImplementedError('spec %r not yet implemented' % spec)
relative = self.variables['TKFRAME_{0}_RELATIVE'.format(integer)]
integer = self.variables['FRAME_{0}'.format(relative)]
if _segment is None:
segment = self._segment_map.get(integer)
else:
segment = _segment
if segment is None:
raise LookupError('you have not yet loaded a binary PCK file that'
' has a segment for frame {0}'.format(integer))
assert segment.frame == 1 # base frame should be ITRF/J2000
return Frame(center, segment, matrix)
def build_latlon_degrees(self, frame, latitude_degrees, longitude_degrees,
elevation_m=0.0):
"""Build an object representing a location on a body's surface."""
lat = Angle.from_degrees(latitude_degrees)
lon = Angle.from_degrees(longitude_degrees)
radii = self._get_assignment('BODY{0}_RADII'.format(frame.center))
if not radii[0] == radii[1] == radii[2]:
raise ValueError('only spherical bodies are supported,'
' but the radii of this body are: %s' % radii)
au = (radii[0] + elevation_m * 1e-3) / AU_KM
distance = Distance(au)
return PlanetTopos.from_latlon_distance(frame, lat, lon, distance)
# Rotation builders indexed by SPICE axis number 1..3; slot 0 is unused.
_rotations = None, rot_x, rot_y, rot_z
# Conversion factors for the TKFRAME_*_UNITS values we support.
_unit_scales = {'ARCSECONDS': ASEC2RAD}
_missing_name_message = """unknown planetary constant {0!r}
You should either use this object's `.read_text()` method to load an
additional "*.tf" PCK text file that defines the missing name, or
manually provide a value by adding the name and value to the this
object's `.variables` dictionary."""
class Frame(object):
    """Planetary constants frame, for building rotation matrices."""

    def __init__(self, center, segment, matrix):
        # Integer code of the body at the frame's center.
        self.center = center
        # Binary PCK segment that predicts the frame's orientation angles.
        self._segment = segment
        # Optional fixed rotation applied on top of the segment's rotation
        # (from a TKFRAME definition); may be None.
        self._matrix = matrix

    def rotation_at(self, t):
        """Return the rotation matrix for this frame at time ``t``."""
        ra, dec, w = self._segment.compute(t.tdb, 0.0, False)
        # z-x-z Euler sequence: rotate by RA, then declination, then the
        # prime-meridian angle W.
        R = mxm(rot_z(-w), mxm(rot_x(-dec), rot_z(-ra)))
        if self._matrix is not None:
            R = mxm(self._matrix, R)
        return R

    def rotation_and_rate_at(self, t):
        """Return rotation and rate matrices for this frame at time ``t``.

        The rate matrix returned is in units of angular motion per day.
        """
        components, rates = self._segment.compute(t.whole, t.tdb_fraction, True)
        ra, dec, w = components
        radot, decdot, wdot = rates
        R = mxm(rot_z(-w), mxm(rot_x(-dec), rot_z(-ra)))
        # Build d(R)/dt from the angle rates: drdtrt is the angular-velocity
        # cross-product matrix expressed in the rotated frame.
        zero = w * 0.0
        ca = cos(w)
        sa = sin(w)
        u = cos(dec)
        v = -sin(dec)
        domega0 = wdot + u * radot
        domega1 = ca * decdot - sa * v * radot
        domega2 = sa * decdot + ca * v * radot
        drdtrt = array((
            (zero, domega0, domega2),
            (-domega0, zero, domega1),
            (-domega2, -domega1, zero),
        ))
        dRdt = mxm(drdtrt, R)
        if self._matrix is not None:
            R = mxm(self._matrix, R)
            dRdt = mxm(self._matrix, dRdt)
        # NOTE(review): the * DAY_S scale assumes segment rates are per
        # second, converting them to per day — confirm against jplephem.
        return R, dRdt * DAY_S
class PlanetTopos(VectorFunction):
    """Location that rotates with the surface of another Solar System body.

    The location can either be on the surface of the body, or in some
    other fixed position that rotates with the body's surface.
    """
    def __init__(self, frame, position_au):
        # Vector origin: the body at the center of the rotating frame.
        self.center = frame.center
        self._frame = frame
        # Fixed body-frame position vector of this location, in au.
        self._position_au = position_au

    @classmethod
    def from_latlon_distance(cls, frame, latitude, longitude, distance):
        # Start from a point on the +x axis at the given distance, then
        # tilt it up to the latitude and swing it around to the longitude.
        r = array((distance.au, 0.0, 0.0))
        r = mxv(rot_z(longitude.radians), mxv(rot_y(-latitude.radians), r))
        self = cls(frame, r)
        self.latitude = latitude
        self.longitude = longitude
        return self

    @property
    def target(self):
        # When used as a vector function, this planetary geographic
        # location computes positions from the planet's center to
        # itself.  (This is a property, rather than an attribute, to
        # avoid a circular reference that delays garbage collection.)
        return self

    def _at(self, t):
        # Since `_position_au` has zero velocity in this reference
        # frame, velocity includes a `dRdt` term but not an `R` term.
        R, dRdt = self._frame.rotation_and_rate_at(t)
        r = mxv(_T(R), self._position_au)
        v = mxv(_T(dRdt), self._position_au)
        return r, v, None, None

    def rotation_at(self, t):
        """Compute the altazimuth rotation matrix for this location’s sky."""
        R = mxmxm(
            # TODO: Figure out how to produce this rotation directly
            # from _position_au, to support situations where we were not
            # given a latitude and longitude. If that is not feasible,
            # then at least cache the product of these first two matrices.
            rot_y(_quartertau - self.latitude.radians),
            rot_z(_halftau - self.longitude.radians),
            self._frame.rotation_at(t),
        )
        # TODO:
        # Can clockwise be turned into counterclockwise through any
        # possible rotation? For now, flip the sign of y so that
        # azimuth reads north-east rather than the other direction.
        R[1] *= -1
        return R
| mit | 78618ec24e7d2055d30742922e0ffe32 | 36.881423 | 80 | 0.587855 | 3.792639 | false | false | false | false |
ktbyers/netmiko | tests/test_netmiko_config.py | 1 | 10070 | #!/usr/bin/env python
import re
import pytest
from netmiko import ConfigInvalidException
from netmiko import ReadTimeout
def test_ssh_connect(net_connect, commands, expected_responses):
    """Verify the connection was established successfully."""
    output = net_connect.send_command(commands["version"])
    assert expected_responses["version_banner"] in output
def test_enable_mode(net_connect, commands, expected_responses):
    """Test entering enable mode.

    Catch the exception on devices that don't support enable.
    """
    try:
        net_connect.enable()
        assert net_connect.find_prompt() == expected_responses["enable_prompt"]
    except AttributeError:
        # Platform has no enable mode; nothing to verify.
        pass
def test_config_mode(net_connect, commands, expected_responses):
    """Test entering config mode."""
    # Devices with no config mode return a null string from config_mode().
    cfg_cmd = commands.get("config_mode_command")
    if cfg_cmd is not None:
        entered = net_connect.config_mode(config_command=cfg_cmd)
        if entered != "":
            assert net_connect.check_config_mode() is True
    else:
        entered = net_connect.config_mode()
        if entered == "":
            pytest.skip("Platform doesn't support config mode.")
        assert net_connect.check_config_mode() is True
def test_exit_config_mode(net_connect, commands, expected_responses):
    """Test exiting config mode."""
    if not net_connect._config_mode:
        pytest.skip("Platform doesn't support config mode.")
    net_connect.exit_config_mode()
    assert net_connect.check_config_mode() is False
def test_config_set(net_connect, commands, expected_responses):
    """Test sending configuration commands."""
    config_commands = commands["config"]
    config_mode_command = commands.get("config_mode_command")
    support_commit = commands.get("support_commit")
    config_verify = commands["config_verification"]

    # Set to an initial value, sending the first command as a plain string.
    net_connect.send_config_set(
        config_mode_command=config_mode_command,
        config_commands=config_commands[0],
    )
    if support_commit:
        net_connect.commit()
    expected = expected_responses.get("cmd_response_init") or config_commands[0]
    assert expected in net_connect.send_command(config_verify)

    # Now send the whole command list and confirm something changed.
    net_connect.send_config_set(
        config_commands=config_commands,
        config_mode_command=config_mode_command,
    )
    if support_commit:
        net_connect.commit()
    expected = expected_responses.get("cmd_response_final") or config_commands[-1]
    assert expected in net_connect.send_command_expect(config_verify)
def test_config_set_generator(net_connect, commands, expected_responses):
    """Test sending configuration commands as a generator."""
    config_commands = commands["config"]
    support_commit = commands.get("support_commit")
    config_verify = commands["config_verification"]

    # Set to an initial value, sending the first command as a plain string.
    net_connect.send_config_set(config_commands[0])
    if support_commit:
        net_connect.commit()
    expected = expected_responses.get("cmd_response_init") or config_commands[0]
    assert expected in net_connect.send_command(config_verify)

    # Re-send the commands wrapped in a generator to verify that
    # send_config_set has no issues with generator input.
    net_connect.send_config_set(cmd for cmd in config_commands)
    if support_commit:
        net_connect.commit()
    expected = expected_responses.get("cmd_response_final") or config_commands[-1]
    assert expected in net_connect.send_command_expect(config_verify)
def test_config_set_longcommand(net_connect, commands, expected_responses):
    """Test sending a configuration command longer than the terminal width.

    Success is simply send_config_set completing without raising.
    """
    config_commands = commands.get("config_long_command")
    if not config_commands:
        # Consistent with the other tests: report "skipped", not "passed",
        # when the platform defines no long command.
        pytest.skip("No long command defined for this platform.")
    net_connect.send_config_set(config_commands)
def test_config_hostname(net_connect, commands, expected_responses):
    """Verify the hostname can be changed with send_config_set (Arista only)."""
    hostname = "test-netmiko1"
    command = f"hostname {hostname}"
    if "arista" in net_connect.device_type:
        # Remember the current hostname: the prompt minus its trailing
        # mode character ('>' or '#').
        current_hostname = net_connect.find_prompt()[:-1]
        net_connect.send_config_set(command)
        new_hostname = net_connect.find_prompt()
        assert hostname in new_hostname
        # Reset prompt back to original value
        net_connect.set_base_prompt()
        net_connect.send_config_set(f"hostname {current_hostname}")
        net_connect.set_base_prompt()
def test_config_from_file(net_connect, commands, expected_responses):
    """Test sending configuration commands from a file."""
    config_file = commands.get("config_file")
    config_verify = commands["config_verification"]
    if config_file is None:
        # Original code used `assert pytest.skip()`: pytest.skip() raises,
        # so the `assert` was dead code.  Just skip.
        pytest.skip("No config file defined for this platform.")
    net_connect.send_config_from_file(config_file)
    config_commands_output = net_connect.send_command_expect(config_verify)
    assert expected_responses["file_check_cmd"] in config_commands_output
    if "nokia_sros" in net_connect.device_type:
        # Nokia SR OS needs an explicit save for the change to persist.
        net_connect.save_config()
def test_config_error_pattern(net_connect, commands, expected_responses):
    """
    Raise an exception when the configured error_pattern is present in output.
    """
    error_pattern = commands.get("error_pattern")
    if error_pattern is None:
        pytest.skip("No error_pattern defined.")
    # Build a config list that ends with a deliberately invalid command.
    config_base = commands.get("config")
    config_err = commands.get("invalid_config")
    config_list = config_base + [config_err]
    # Should not raise an exception since error_pattern not specified
    net_connect.send_config_set(config_commands=config_list)
    if config_list and error_pattern:
        # With error_pattern supplied the invalid command must raise.
        with pytest.raises(ConfigInvalidException):
            net_connect.send_config_set(
                config_commands=config_list, error_pattern=error_pattern
            )
        # Try it with cmd_verify=True also
        with pytest.raises(ConfigInvalidException):
            net_connect.send_config_set(
                config_commands=config_list,
                error_pattern=error_pattern,
                cmd_verify=True,
            )
    else:
        print("Skipping test: no error_pattern supplied.")
def test_banner(net_connect, commands, expected_responses):
    """
    Banner configuration has a special exclusion where cmd_verify is
    dynamically disabled, so make sure it works.
    """
    # Skip platforms that define no banner.
    banner = commands.get("banner")
    if banner is None:
        pytest.skip("No banner defined.")
    # Make sure banner comes in as separate lines
    banner = banner.splitlines()
    config_base = commands.get("config")
    config_list = config_base + banner
    # Remove any existing banner
    net_connect.send_config_set("no banner login")
    # bypass_commands="" should fail as cmd_verify will be True
    with pytest.raises(ReadTimeout) as e:  # noqa
        net_connect.send_config_set(config_commands=config_list, bypass_commands="")
    # Recover from send_config_set failure. The "%" is to finish the failed banner.
    net_connect.write_channel("%\n")
    net_connect.exit_config_mode()
    # Default behavior (banner bypassed from cmd_verify) should succeed.
    net_connect.send_config_set(config_commands=config_list)
    show_run = net_connect.send_command("show run | inc banner log")
    assert "banner login" in show_run
    net_connect.send_config_set("no banner login")
def test_global_cmd_verify(net_connect, commands, expected_responses):
    """
    Setting global_cmd_verify=False on the connection should disable
    command verification everywhere, so a banner sent with
    bypass_commands="" succeeds where it would otherwise time out.
    """
    # Skip platforms that define no banner.
    banner = commands.get("banner")
    if banner is None:
        pytest.skip("No banner defined.")
    # Make sure banner comes in as separate lines
    banner = banner.splitlines()
    config_base = commands.get("config")
    config_list = config_base + banner
    # Remove any existing banner
    net_connect.send_config_set("no banner login")
    # bypass_commands="" should fail as cmd_verify will be True
    with pytest.raises(ReadTimeout) as e:  # noqa
        net_connect.send_config_set(config_commands=config_list, bypass_commands="")
    # Recover from send_config_set failure. The "%" is to finish the failed banner.
    net_connect.write_channel("%\n")
    net_connect.exit_config_mode()
    net_connect.global_cmd_verify = False
    # Should work now as global_cmd_verify is False
    net_connect.send_config_set(config_commands=config_list, bypass_commands="")
    show_run = net_connect.send_command("show run | inc banner log")
    assert "banner login" in show_run
    net_connect.send_config_set("no banner login")
def test_disconnect(net_connect, commands, expected_responses):
    """
    Terminate the SSH session.
    """
    # Earlier tests may have left a mangled hostname on these lab devices;
    # restore the canonical hostname before disconnecting.
    if net_connect.host in ["cisco3.lasthop.io", "iosxr3.lasthop.io"]:
        hostname = net_connect.send_command("show run | inc hostname")
        if re.search("cisco3.*long", hostname):
            net_connect.send_config_set("hostname cisco3")
        elif re.search("iosxr3.*long", hostname):
            net_connect.send_config_set("hostname iosxr3")
            # IOS-XR requires an explicit commit for config changes.
            net_connect.commit()
            net_connect.exit_config_mode()
    net_connect.disconnect()
| mit | c00db6ca551af8cf3c470bad6cb56767 | 34.964286 | 87 | 0.683416 | 3.997618 | false | true | false | false |
christiansandberg/canopen | examples/simple_ds402_node.py | 1 | 4328 | import canopen
import sys
import os
import traceback
import time
# Hardware-dependent example: requires a Kvaser CAN interface and a live
# DS402 drive at node 35.  The try/except/finally keeps the bus usable
# even when the demo aborts.
try:
    # Start with creating a network representing one CAN bus
    network = canopen.Network()
    # Connect to the CAN bus
    network.connect(bustype='kvaser', channel=0, bitrate=1000000)
    network.check()
    # Add some nodes with corresponding Object Dictionaries
    node = canopen.BaseNode402(35, 'eds/e35.eds')
    network.add_node(node)
    # network.add_node(34, 'eds/example34.eds')
    # node = network[34]
    # Reset network
    node.nmt.state = 'RESET COMMUNICATION'
    #node.nmt.state = 'RESET'
    node.nmt.wait_for_bootup(15)
    print('node state 1) = {0}'.format(node.nmt.state))
    # Iterate over arrays or records
    error_log = node.sdo[0x1003]
    for error in error_log.values():
        print("Error {0} was found in the log".format(error.raw))
    for node_id in network:
        print(network[node_id])
    print('node state 2) = {0}'.format(node.nmt.state))
    # Read a variable using SDO
    # (communication parameters: sync period, guard time, life-time factor,
    # EMCY COB-ID, and clear the pre-defined error field)
    node.sdo[0x1006].raw = 1
    node.sdo[0x100c].raw = 100
    node.sdo[0x100d].raw = 3
    node.sdo[0x1014].raw = 163
    node.sdo[0x1003][0].raw = 0
    # Transmit SYNC every 100 ms
    network.sync.start(0.1)
    node.load_configuration()
    print('node state 3) = {0}'.format(node.nmt.state))
    node.setup_402_state_machine()
    device_name = node.sdo[0x1008].raw
    vendor_id = node.sdo[0x1018][1].raw
    print(device_name)
    print(vendor_id)
    node.state = 'SWITCH ON DISABLED'
    print('node state 4) = {0}'.format(node.nmt.state))
    # Read PDO configuration from node
    node.tpdo.read()
    # Re-map TxPDO1
    node.tpdo[1].clear()
    node.tpdo[1].add_variable('Statusword')
    node.tpdo[1].add_variable('Velocity actual value')
    node.tpdo[1].trans_type = 1
    node.tpdo[1].event_timer = 0
    node.tpdo[1].enabled = True
    # Save new PDO configuration to node
    node.tpdo.save()
    # publish the a value to the control word (in this case reset the fault at the motors)
    node.rpdo.read()
    node.rpdo[1]['Controlword'].raw = 0x80
    node.rpdo[1].transmit()
    node.rpdo[1]['Controlword'].raw = 0x81
    node.rpdo[1].transmit()
    node.state = 'READY TO SWITCH ON'
    node.state = 'SWITCHED ON'
    node.rpdo.export('database.dbc')
    # -----------------------------------------------------------------------------------------
    # Walk the DS402 power state machine up to OPERATION ENABLED, waiting
    # up to 15 s per transition.
    print('Node booted up')
    timeout = time.time() + 15
    node.state = 'READY TO SWITCH ON'
    while node.state != 'READY TO SWITCH ON':
        if time.time() > timeout:
            raise Exception('Timeout when trying to change state')
        time.sleep(0.001)
    timeout = time.time() + 15
    node.state = 'SWITCHED ON'
    while node.state != 'SWITCHED ON':
        if time.time() > timeout:
            raise Exception('Timeout when trying to change state')
        time.sleep(0.001)
    timeout = time.time() + 15
    node.state = 'OPERATION ENABLED'
    while node.state != 'OPERATION ENABLED':
        if time.time() > timeout:
            raise Exception('Timeout when trying to change state')
        time.sleep(0.001)
    print('Node Status {0}'.format(node.powerstate_402.state))
    # -----------------------------------------------------------------------------------------
    node.nmt.start_node_guarding(0.01)
    # Poll the drive until the bus reports a problem or the user interrupts.
    while True:
        try:
            network.check()
        except Exception:
            break
        # Read a value from TxPDO1
        node.tpdo[1].wait_for_reception()
        speed = node.tpdo[1]['Velocity actual value'].phys
        # Read the state of the Statusword
        statusword = node.sdo[0x6041].raw
        print('statusword: {0}'.format(statusword))
        print('VEL: {0}'.format(speed))
        time.sleep(0.01)
except KeyboardInterrupt:
    pass
except Exception as e:
    exc_type, exc_obj, exc_tb = sys.exc_info()
    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    print(exc_type, fname, exc_tb.tb_lineno)
    traceback.print_exc()
finally:
    # Disconnect from CAN bus
    print('going to exit... stopping...')
    # NOTE(review): `if network:` relies on Network's truthiness; if
    # Network defines __len__, an empty network is falsy and cleanup is
    # skipped — confirm this is intended.
    if network:
        for node_id in network:
            node = network[node_id]
            node.nmt.state = 'PRE-OPERATIONAL'
            node.nmt.stop_node_guarding()
        network.sync.stop()
        network.disconnect()
christiansandberg/canopen | canopen/objectdictionary/__init__.py | 1 | 17024 | """
Object Dictionary module
"""
import struct
from typing import Dict, Iterable, List, Optional, TextIO, Union
try:
from collections.abc import MutableMapping, Mapping
except ImportError:
from collections import MutableMapping, Mapping
import logging
from .datatypes import *
logger = logging.getLogger(__name__)
def export_od(od, dest: Union[str, TextIO, None] = None, doc_type: Optional[str] = None):
    """ Export :class: ObjectDictionary to a file.

    :param od:
        :class: ObjectDictionary object to be exported
    :param dest:
        export destination. filename, or file-like object or None.
        if None, the document is returned as string
    :param doc_type: type of document to export.
        If a filename is given for dest, this default to the file extension.
        Otherwise, this defaults to "eds"
    :rtype: str or None
    """
    doctypes = {"eds", "dcf"}
    opened_here = False
    if isinstance(dest, str):
        # Infer the document type from the filename extension.
        if doc_type is None:
            for t in doctypes:
                if dest.endswith(f".{t}"):
                    doc_type = t
                    break
        dest = open(dest, "w")
        opened_here = True
    if doc_type is None:
        # Default to EDS, as documented.  (Previously a file-like or None
        # dest with no doc_type hit the assert below with doc_type=None.)
        doc_type = "eds"
    assert doc_type in doctypes
    try:
        from . import eds
        if doc_type == "eds":
            return eds.export_eds(od, dest)
        return eds.export_dcf(od, dest)
    finally:
        # Close the handle only if we opened it; a caller-supplied file
        # object stays under the caller's control.  (The original leaked
        # the file it opened for a string dest.)
        if opened_here:
            dest.close()
def import_od(
    source: Union[str, TextIO, None],
    node_id: Optional[int] = None,
) -> "ObjectDictionary":
    """Parse an EDS, DCF, or EPF file.

    :param source:
        Path to object dictionary file or a file like object or an EPF XML tree.

    :return:
        An Object Dictionary instance.
    """
    if source is None:
        return ObjectDictionary()
    # Work out a filename whose extension selects the parser.
    if hasattr(source, "read"):
        filename = source.name          # file-like object
    elif hasattr(source, "tag"):
        filename = "od.epf"             # XML tree, probably from an EPF file
    else:
        filename = source               # path to a file
    suffix = filename[filename.rfind("."):].lower()
    if suffix in (".eds", ".dcf"):
        from . import eds
        return eds.import_eds(source, node_id)
    if suffix == ".epf":
        from . import epf
        return epf.import_epf(source)
    raise NotImplementedError("No support for this format")
class ObjectDictionary(MutableMapping):
    """Representation of the object dictionary as a Python dictionary."""

    def __init__(self):
        # Objects keyed by 16-bit index.
        self.indices = {}
        # The same objects keyed by name.
        self.names = {}
        self.comments = ""
        #: Default bitrate if specified by file
        self.bitrate: Optional[int] = None
        #: Node ID if specified by file
        self.node_id: Optional[int] = None
        #: Some information about the device
        self.device_information = DeviceInformation()

    def __getitem__(
        self, index: Union[int, str]
    ) -> Union["Array", "Record", "Variable"]:
        """Get object from object dictionary by name or index."""
        # Use explicit `is None` checks rather than `a or b`: an empty
        # Record/Array has len() == 0 and is falsy, so `or` would wrongly
        # skip a present-but-empty member and raise KeyError.
        item = self.names.get(index)
        if item is None:
            item = self.indices.get(index)
        if item is None:
            name = "0x%X" % index if isinstance(index, int) else index
            raise KeyError("%s was not found in Object Dictionary" % name)
        return item

    def __setitem__(
        self, index: Union[int, str], obj: Union["Array", "Record", "Variable"]
    ):
        assert index == obj.index or index == obj.name
        self.add_object(obj)

    def __delitem__(self, index: Union[int, str]):
        obj = self[index]
        del self.indices[obj.index]
        del self.names[obj.name]

    def __iter__(self) -> Iterable[int]:
        return iter(sorted(self.indices))

    def __len__(self) -> int:
        return len(self.indices)

    def __contains__(self, index: Union[int, str]):
        return index in self.names or index in self.indices

    def add_object(self, obj: Union["Array", "Record", "Variable"]) -> None:
        """Add object to the object dictionary.

        :param obj:
            Should be either one of
            :class:`~canopen.objectdictionary.Variable`,
            :class:`~canopen.objectdictionary.Record`, or
            :class:`~canopen.objectdictionary.Array`.
        """
        obj.parent = self
        self.indices[obj.index] = obj
        self.names[obj.name] = obj

    def get_variable(
        self, index: Union[int, str], subindex: int = 0
    ) -> Optional["Variable"]:
        """Get the variable object at specified index (and subindex if applicable).

        :return: Variable if found, else `None`
        """
        obj = self.get(index)
        if isinstance(obj, Variable):
            return obj
        elif isinstance(obj, (Record, Array)):
            return obj.get(subindex)
class Record(MutableMapping):
    """Groups multiple :class:`~canopen.objectdictionary.Variable` objects using
    subindices.
    """

    #: Description for the whole record
    description = ""

    def __init__(self, name: str, index: int):
        #: The :class:`~canopen.ObjectDictionary` owning the record.
        self.parent: Optional[ObjectDictionary] = None
        #: 16-bit address of the record
        self.index = index
        #: Name of record
        self.name = name
        #: Storage location of index
        self.storage_location = None
        # Members keyed by 8-bit subindex and by name respectively.
        self.subindices = {}
        self.names = {}

    def __getitem__(self, subindex: Union[int, str]) -> "Variable":
        # Explicit `is None` checks: `a or b` would mis-handle a falsy
        # member (consistent with ObjectDictionary.__getitem__).
        item = self.names.get(subindex)
        if item is None:
            item = self.subindices.get(subindex)
        if item is None:
            raise KeyError("Subindex %s was not found" % subindex)
        return item

    def __setitem__(self, subindex: Union[int, str], var: "Variable"):
        assert subindex == var.subindex
        self.add_member(var)

    def __delitem__(self, subindex: Union[int, str]):
        var = self[subindex]
        del self.subindices[var.subindex]
        del self.names[var.name]

    def __len__(self) -> int:
        return len(self.subindices)

    def __iter__(self) -> Iterable[int]:
        return iter(sorted(self.subindices))

    def __contains__(self, subindex: Union[int, str]) -> bool:
        return subindex in self.names or subindex in self.subindices

    def __eq__(self, other: "Record") -> bool:
        return self.index == other.index

    def add_member(self, variable: "Variable") -> None:
        """Adds a :class:`~canopen.objectdictionary.Variable` to the record."""
        variable.parent = self
        self.subindices[variable.subindex] = variable
        self.names[variable.name] = variable
class Array(Mapping):
    """An array of :class:`~canopen.objectdictionary.Variable` objects using
    subindices.

    Actual length of array must be read from the node using SDO.
    """

    #: Description for the whole array
    description = ""

    def __init__(self, name: str, index: int):
        #: The :class:`~canopen.ObjectDictionary` owning the record.
        self.parent = None
        #: 16-bit address of the array
        self.index = index
        #: Name of array
        self.name = name
        #: Storage location of index
        self.storage_location = None
        # Members keyed by 8-bit subindex and by name respectively.
        self.subindices = {}
        self.names = {}

    def __getitem__(self, subindex: Union[int, str]) -> "Variable":
        # Explicit `is None` checks: `a or b` would mis-handle a falsy
        # member (consistent with ObjectDictionary.__getitem__).
        var = self.names.get(subindex)
        if var is None:
            var = self.subindices.get(subindex)
        if var is None:
            if isinstance(subindex, int) and 0 < subindex < 256:
                # Create a new variable on the fly, based on the first
                # array item as a template.
                template = self.subindices[1]
                name = "%s_%x" % (template.name, subindex)
                var = Variable(name, self.index, subindex)
                var.parent = self
                for attr in ("data_type", "unit", "factor", "min", "max", "default",
                             "access_type", "description", "value_descriptions",
                             "bit_definitions", "storage_location"):
                    if attr in template.__dict__:
                        var.__dict__[attr] = template.__dict__[attr]
            else:
                raise KeyError("Could not find subindex %r" % subindex)
        return var

    def __len__(self) -> int:
        return len(self.subindices)

    def __iter__(self) -> Iterable[int]:
        return iter(sorted(self.subindices))

    def __eq__(self, other: "Array") -> bool:
        return self.index == other.index

    def add_member(self, variable: "Variable") -> None:
        """Adds a :class:`~canopen.objectdictionary.Variable` to the record."""
        variable.parent = self
        self.subindices[variable.subindex] = variable
        self.names[variable.name] = variable
class Variable(object):
    """Simple variable."""

    # Little-endian struct packers for each fixed-size CANopen data type.
    STRUCT_TYPES = {
        BOOLEAN: struct.Struct("?"),
        INTEGER8: struct.Struct("b"),
        INTEGER16: struct.Struct("<h"),
        INTEGER32: struct.Struct("<l"),
        INTEGER64: struct.Struct("<q"),
        UNSIGNED8: struct.Struct("B"),
        UNSIGNED16: struct.Struct("<H"),
        UNSIGNED32: struct.Struct("<L"),
        UNSIGNED64: struct.Struct("<Q"),
        REAL32: struct.Struct("<f"),
        REAL64: struct.Struct("<d")
    }

    def __init__(self, name: str, index: int, subindex: int = 0):
        #: The :class:`~canopen.ObjectDictionary`,
        #: :class:`~canopen.objectdictionary.Record` or
        #: :class:`~canopen.objectdictionary.Array` owning the variable
        self.parent = None
        #: 16-bit address of the object in the dictionary
        self.index = index
        #: 8-bit sub-index of the object in the dictionary
        self.subindex = subindex
        #: String representation of the variable
        self.name = name
        #: Physical unit
        self.unit: str = ""
        #: Factor between physical unit and integer value
        self.factor: float = 1
        #: Minimum allowed value
        self.min: Optional[int] = None
        #: Maximum allowed value
        self.max: Optional[int] = None
        #: Default value at start-up
        self.default: Optional[int] = None
        #: Is the default value relative to the node-ID (only applies to COB-IDs)
        self.relative = False
        #: The value of this variable stored in the object dictionary
        self.value: Optional[int] = None
        #: Data type according to the standard as an :class:`int`
        self.data_type: Optional[int] = None
        #: Access type, should be "rw", "ro", "wo", or "const"
        self.access_type: str = "rw"
        #: Description of variable
        self.description: str = ""
        #: Dictionary of value descriptions
        self.value_descriptions: Dict[int, str] = {}
        #: Dictionary of bitfield definitions
        self.bit_definitions: Dict[str, List[int]] = {}
        #: Storage location of index
        self.storage_location = None
        #: Can this variable be mapped to a PDO
        self.pdo_mappable = False

    def __eq__(self, other: "Variable") -> bool:
        # Two variables are equal when they live at the same (index, subindex).
        return (self.index == other.index and
                self.subindex == other.subindex)

    def __len__(self) -> int:
        # Bit length of the variable; 8 bits when the size is unknown.
        if self.data_type in self.STRUCT_TYPES:
            return self.STRUCT_TYPES[self.data_type].size * 8
        else:
            return 8

    @property
    def writable(self) -> bool:
        return "w" in self.access_type

    @property
    def readable(self) -> bool:
        return "r" in self.access_type or self.access_type == "const"

    def add_value_description(self, value: int, descr: str) -> None:
        """Associate a value with a string description.

        :param value: Value to describe
        :param desc: Description of value
        """
        self.value_descriptions[value] = descr

    def add_bit_definition(self, name: str, bits: List[int]) -> None:
        """Associate bit(s) with a string description.

        :param name: Name of bit(s)
        :param bits: List of bits as integers
        """
        self.bit_definitions[name] = bits

    def decode_raw(self, data: bytes) -> Union[int, float, str, bytes, bytearray]:
        # Decode raw bytes into a Python value according to data_type.
        if self.data_type == VISIBLE_STRING:
            return data.rstrip(b"\x00").decode("ascii", errors="ignore")
        elif self.data_type == UNICODE_STRING:
            # Is this correct?
            return data.rstrip(b"\x00").decode("utf_16_le", errors="ignore")
        elif self.data_type in self.STRUCT_TYPES:
            try:
                value, = self.STRUCT_TYPES[self.data_type].unpack(data)
                return value
            except struct.error:
                raise ObjectDictionaryError(
                    "Mismatch between expected and actual data size")
        else:
            # Just return the data as is
            return data

    def encode_raw(self, value: Union[int, float, str, bytes, bytearray]) -> bytes:
        # Encode a Python value into raw bytes according to data_type.
        if isinstance(value, (bytes, bytearray)):
            return value
        elif self.data_type == VISIBLE_STRING:
            return value.encode("ascii")
        elif self.data_type == UNICODE_STRING:
            # Is this correct?
            return value.encode("utf_16_le")
        elif self.data_type in self.STRUCT_TYPES:
            if self.data_type in INTEGER_TYPES:
                value = int(value)
            if self.data_type in NUMBER_TYPES:
                # Out-of-range values are only warned about, not rejected.
                if self.min is not None and value < self.min:
                    logger.warning(
                        "Value %d is less than min value %d", value, self.min)
                if self.max is not None and value > self.max:
                    logger.warning(
                        "Value %d is greater than max value %d",
                        value,
                        self.max)
            try:
                return self.STRUCT_TYPES[self.data_type].pack(value)
            except struct.error:
                raise ValueError("Value does not fit in specified type")
        elif self.data_type is None:
            raise ObjectDictionaryError("Data type has not been specified")
        else:
            raise TypeError(
                "Do not know how to encode %r to data type %Xh" % (
                    value, self.data_type))

    def decode_phys(self, value: int) -> Union[int, bool, float, str, bytes]:
        # Convert a raw integer into its physical value using `factor`.
        if self.data_type in INTEGER_TYPES:
            value *= self.factor
        return value

    def encode_phys(self, value: Union[int, bool, float, str, bytes]) -> int:
        # Convert a physical value back into a raw (rounded) integer.
        if self.data_type in INTEGER_TYPES:
            value /= self.factor
            value = int(round(value))
        return value

    def decode_desc(self, value: int) -> str:
        # Look up the string description registered for `value`.
        if not self.value_descriptions:
            raise ObjectDictionaryError("No value descriptions exist")
        elif value not in self.value_descriptions:
            raise ObjectDictionaryError(
                "No value description exists for %d" % value)
        else:
            return self.value_descriptions[value]

    def encode_desc(self, desc: str) -> int:
        # Reverse lookup: find the value whose description matches `desc`.
        if not self.value_descriptions:
            raise ObjectDictionaryError("No value descriptions exist")
        else:
            for value, description in self.value_descriptions.items():
                if description == desc:
                    return value
        valid_values = ", ".join(self.value_descriptions.values())
        error_text = "No value corresponds to '%s'. Valid values are: %s"
        raise ValueError(error_text % (desc, valid_values))

    def decode_bits(self, value: int, bits: List[int]) -> int:
        # `bits` may also be a name registered via add_bit_definition;
        # the try/except resolves it to the underlying bit list.
        try:
            bits = self.bit_definitions[bits]
        except (TypeError, KeyError):
            pass
        mask = 0
        for bit in bits:
            mask |= 1 << bit
        # Shift the extracted field down so bit 0 of the result is the
        # lowest bit of the definition.
        return (value & mask) >> min(bits)

    def encode_bits(self, original_value: int, bits: List[int], bit_value: int):
        # As in decode_bits, `bits` may be a named bit definition.
        try:
            bits = self.bit_definitions[bits]
        except (TypeError, KeyError):
            pass
        temp = original_value
        mask = 0
        for bit in bits:
            mask |= 1 << bit
        # Clear the field, then insert the new bit_value at its position.
        temp &= ~mask
        temp |= bit_value << min(bits)
        return temp
class DeviceInformation:
def __init__(self):
self.allowed_baudrates = set()
self.vendor_name:Optional[str] = None
self.vendor_number:Optional[int] = None
self.product_name:Optional[str] = None
self.product_number:Optional[int] = None
self.revision_number:Optional[int] = None
self.order_code:Optional[str] = None
self.simple_boot_up_master:Optional[bool] = None
self.simple_boot_up_slave:Optional[bool] = None
self.granularity:Optional[int] = None
self.dynamic_channels_supported:Optional[bool] = None
self.group_messaging:Optional[bool] = None
self.nr_of_RXPDO:Optional[bool] = None
self.nr_of_TXPDO:Optional[bool] = None
self.LSS_supported:Optional[bool] = None
class ObjectDictionaryError(Exception):
"""Unsupported operation with the current Object Dictionary."""
| mit | 98c4a5a60562ce75c56aa4265d92d84f | 34.173554 | 83 | 0.583353 | 4.099205 | false | false | false | false |
ktbyers/netmiko | netmiko/dell/dell_powerconnect.py | 1 | 4030 | """Dell PowerConnect Driver."""
from typing import Optional
from paramiko import SSHClient
import time
from os import path
from netmiko.ssh_auth import SSHClient_noauth
from netmiko.cisco_base_connection import CiscoBaseConnection
class DellPowerConnectBase(CiscoBaseConnection):
"""Dell PowerConnect Driver."""
def session_preparation(self) -> None:
"""Prepare the session after the connection has been established."""
self.ansi_escape_codes = True
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
self.enable()
self.disable_paging(command="terminal datadump")
def set_base_prompt(
self,
pri_prompt_terminator: str = ">",
alt_prompt_terminator: str = "#",
delay_factor: float = 1.0,
pattern: Optional[str] = None,
) -> str:
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
prompt = super().set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
pattern=pattern,
)
prompt = prompt.strip()
self.base_prompt = prompt
return self.base_prompt
def check_config_mode(
self,
check_string: str = "(config)#",
pattern: str = "",
force_regex: bool = False,
) -> bool:
"""Checks if the device is in configuration mode"""
return super().check_config_mode(check_string=check_string, pattern=pattern)
def config_mode(
self, config_command: str = "config", pattern: str = "", re_flags: int = 0
) -> str:
return super().config_mode(
config_command=config_command, pattern=pattern, re_flags=re_flags
)
class DellPowerConnectSSH(DellPowerConnectBase):
"""Dell PowerConnect Driver.
To make it work, we have to override the SSHClient _auth method.
If we use login/password, the ssh server use the (none) auth mechanism.
"""
def _build_ssh_client(self) -> SSHClient:
"""Prepare for Paramiko SSH connection.
See base_connection.py file for any updates.
"""
# Create instance of SSHClient object
# If user does not provide SSH key, we use noauth
remote_conn_pre: SSHClient
if not self.use_keys:
remote_conn_pre = SSHClient_noauth()
else:
remote_conn_pre = SSHClient()
# Load host_keys for better SSH security
if self.system_host_keys:
remote_conn_pre.load_system_host_keys()
if self.alt_host_keys and path.isfile(self.alt_key_file):
remote_conn_pre.load_host_keys(self.alt_key_file)
# Default is to automatically add untrusted hosts (make sure appropriate for your env)
remote_conn_pre.set_missing_host_key_policy(self.key_policy)
return remote_conn_pre
def special_login_handler(self, delay_factor: float = 1.0) -> None:
"""
Powerconnect presents with the following on login
User Name:
Password: ****
"""
delay_factor = self.select_delay_factor(delay_factor)
i = 0
time.sleep(delay_factor * 0.5)
output = ""
while i <= 12:
output = self.read_channel()
if output:
if "User Name:" in output:
assert isinstance(self.username, str)
self.write_channel(self.username + self.RETURN)
elif "Password:" in output:
assert isinstance(self.password, str)
self.write_channel(self.password + self.RETURN)
break
time.sleep(delay_factor * 1)
else:
self.write_channel(self.RETURN)
time.sleep(delay_factor * 1.5)
i += 1
class DellPowerConnectTelnet(DellPowerConnectBase):
"""Dell PowerConnect Telnet Driver."""
pass
| mit | 0d0ab85d81795e158c8a8c46ad81faf2 | 33.152542 | 98 | 0.601489 | 4.08308 | false | true | false | false |
the-blue-alliance/the-blue-alliance | src/backend/api/api_trusted_parsers/json_alliance_selections_parser.py | 1 | 1458 | from typing import AnyStr, List
from pyre_extensions import safe_json
from backend.common.datafeed_parsers.exceptions import ParserInputException
from backend.common.models.alliance import EventAlliance
from backend.common.models.keys import TeamKey
from backend.common.models.team import Team
class JSONAllianceSelectionsParser:
@staticmethod
def parse(alliances_json: AnyStr) -> List[EventAlliance]:
"""
Parse JSON that contains team_keys
Format is as follows:
[[captain1, pick1-1, pick1-2(, ...)],
['frc254', 'frc971', 'frc604'],
...
[captain8, pick8-1, pick8-2(, ...)]]
"""
alliances = safe_json.loads(alliances_json, List[List[TeamKey]])
alliance_selections: List[EventAlliance] = []
for alliance in alliances:
is_empty = True
selection: EventAlliance = {"picks": [], "declines": []}
for team_key in alliance:
if not Team.validate_key_name(team_key):
raise ParserInputException(
"Bad team_key: '{}'. Must follow format: 'frcXXX'".format(
team_key
)
)
else:
selection["picks"].append(team_key)
is_empty = False
if not is_empty:
alliance_selections.append(selection)
return alliance_selections
| mit | c0fd3c8435ac70d975caed77289743a2 | 35.45 | 82 | 0.570645 | 4.130312 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/tasks_io/datafeeds/parsers/fms_api/fms_api_event_list_parser.py | 1 | 10289 | import datetime
import json
import logging
from typing import Any, Dict, List, Optional, Tuple
from google.appengine.ext import ndb
from backend.common.consts.event_type import EventType
from backend.common.consts.playoff_type import PlayoffType
from backend.common.helpers.event_short_name_helper import EventShortNameHelper
from backend.common.helpers.webcast_helper import WebcastParser
from backend.common.models.district import District
from backend.common.models.event import Event
from backend.common.models.keys import DistrictKey, Year
from backend.common.sitevars.cmp_registration_hacks import ChampsRegistrationHacks
from backend.tasks_io.datafeeds.parsers.json.parser_json import ParserJSON
class FMSAPIEventListParser(ParserJSON[Tuple[List[Event], List[District]]]):
DATE_FORMAT_STR = "%Y-%m-%dT%H:%M:%S"
EVENT_TYPES = {
"regional": EventType.REGIONAL,
"districtevent": EventType.DISTRICT,
"districtchampionshipdivision": EventType.DISTRICT_CMP_DIVISION,
"districtchampionship": EventType.DISTRICT_CMP,
"districtchampionshipwithlevels": EventType.DISTRICT_CMP,
"championshipdivision": EventType.CMP_DIVISION,
"championshipsubdivision": EventType.CMP_DIVISION,
"championship": EventType.CMP_FINALS,
"offseason": EventType.OFFSEASON,
"offseasonwithazuresync": EventType.OFFSEASON,
"remote": EventType.REMOTE,
}
PLAYOFF_TYPES = {
# Bracket Types
"TwoAlliance": PlayoffType.BRACKET_2_TEAM,
"FourAlliance": PlayoffType.BRACKET_4_TEAM,
"EightAlliance": PlayoffType.BRACKET_8_TEAM,
"SixteenAlliance": PlayoffType.BRACKET_16_TEAM,
# Round Robin Types
"SixAlliance": PlayoffType.ROUND_ROBIN_6_TEAM,
}
NON_OFFICIAL_EVENT_TYPES = ["offseason"]
EVENT_CODE_EXCEPTIONS = {
"archimedes": ("arc", "Archimedes"), # (code, short_name)
"carson": ("cars", "Carson"),
"carver": ("carv", "Carver"),
"curie": ("cur", "Curie"),
"daly": ("dal", "Daly"),
"darwin": ("dar", "Darwin"),
"galileo": ("gal", "Galileo"),
"hopper": ("hop", "Hopper"),
"newton": ("new", "Newton"),
"roebling": ("roe", "Roebling"),
"tesla": ("tes", "Tesla"),
"turing": ("tur", "Turing"),
# For Einstein, format with the name "Einstein" or "FIRST Championship" or whatever
"cmp": ("cmp", "{}"),
"cmpmi": ("cmpmi", "{} (Detroit)"),
"cmpmo": ("cmpmo", "{} (St. Louis)"),
"cmptx": ("cmptx", "{} (Houston)"),
}
EINSTEIN_SHORT_NAME_DEFAULT = "Einstein"
EINSTEIN_NAME_DEFAULT = "Einstein Field"
EINSTEIN_CODES = {"cmp", "cmpmi", "cmpmo", "cmptx"}
def __init__(self, season: Year, short: Optional[str] = None) -> None:
self.season = season
self.event_short = short
def get_code_and_short_name(self, season, code):
# Even though 2022 Einstein is listed as "cmptx", we don't want it to say "(Houston)".
if season == 2022 and code == "cmptx":
return (code, "{}")
return self.EVENT_CODE_EXCEPTIONS[code]
def parse(self, response: Dict[str, Any]) -> Tuple[List[Event], List[District]]:
events: List[Event] = []
districts: Dict[DistrictKey, District] = {}
cmp_hack_sitevar = ChampsRegistrationHacks.get()
divisions_to_skip = cmp_hack_sitevar["divisions_to_skip"]
event_name_override = cmp_hack_sitevar["event_name_override"]
events_to_change_dates = cmp_hack_sitevar["set_start_to_last_day"]
for event in response["Events"]:
code = event["code"].lower()
api_event_type = event["type"].lower()
event_type = (
EventType.PRESEASON
if code == "week0"
else self.EVENT_TYPES.get(api_event_type, None)
)
if api_event_type == "championshipdivision" and self.season != 2022:
# 2022 only has one championship and the API uses ChampionshipSubdivision
# for some reason. This didn't come up before because pre-2champs divisions
# also reproted as ChampionshipSubDivision. Weird.
logging.warning(
f"Skipping event {code} with type {api_event_type} as not a real division"
)
continue
if event_type is None and not self.event_short:
logging.warning(
"Event type '{}' not recognized!".format(api_event_type)
)
continue
# Some event types should be marked as unofficial, so sync is disabled
official = True
if api_event_type in self.NON_OFFICIAL_EVENT_TYPES:
official = False
name = event["name"]
short_name = EventShortNameHelper.get_short_name(
name, district_code=event["districtCode"]
)
district_key = (
District.render_key_name(self.season, event["districtCode"].lower())
if event["districtCode"]
else None
)
address = event.get("address")
venue = event["venue"]
city = event["city"]
state_prov = event["stateprov"]
country = event["country"]
start = datetime.datetime.strptime(event["dateStart"], self.DATE_FORMAT_STR)
end = datetime.datetime.strptime(event["dateEnd"], self.DATE_FORMAT_STR)
website = event.get("website")
webcasts = [
WebcastParser.webcast_dict_from_url(url)
for url in event.get("webcasts", [])
]
# TODO read timezone from API
# Special cases for district championship divisions
if event_type == EventType.DISTRICT_CMP_DIVISION:
split_name = name.split("-")
short_name = "{} - {}".format(
"".join(item[0].upper() for item in split_name[0].split()),
split_name[-1].replace("Division", "").strip(),
)
# Special cases for champs
if code in self.EVENT_CODE_EXCEPTIONS:
code, short_name = self.get_code_and_short_name(self.season, code)
# FIRST indicates CMP registration before divisions are assigned by adding all teams
# to Einstein. We will hack around that by not storing divisions and renaming
# Einstein to simply "Championship" when certain sitevar flags are set
if code in self.EINSTEIN_CODES:
override = [
item
for item in event_name_override
if item["event"] == "{}{}".format(self.season, code)
]
if override:
name = short_name.format(override[0]["name"])
short_name = short_name.format(override[0]["short_name"])
else: # Divisions
name = "{} Division".format(short_name)
elif self.event_short:
code = self.event_short
event_key = "{}{}".format(self.season, code)
if event_key in divisions_to_skip:
continue
# Allow an overriding the start date to be the beginning of the last day
if event_key in events_to_change_dates:
start = end.replace(hour=0, minute=0, second=0, microsecond=0)
playoff_type = self.PLAYOFF_TYPES.get(event.get("allianceCount"))
events.append(
Event(
id=event_key,
name=name,
short_name=short_name,
event_short=code,
event_type_enum=event_type,
official=official,
playoff_type=playoff_type,
start_date=start,
end_date=end,
venue=venue,
city=city,
state_prov=state_prov,
country=country,
venue_address=address,
year=self.season,
district_key=ndb.Key(District, district_key)
if district_key
else None,
website=website,
webcast_json=json.dumps(webcasts) if webcasts else None,
)
)
# Build District Model
if district_key and district_key not in districts:
districts[district_key] = District(
id=district_key,
year=self.season,
abbreviation=event["districtCode"].lower(),
)
# Prep for division <-> parent associations
district_champs_by_district = {}
champ_events = []
for event in events:
if event.event_type_enum == EventType.DISTRICT_CMP:
district_champs_by_district[event.district_key] = event
elif event.event_type_enum == EventType.CMP_FINALS:
champ_events.append(event)
# Build district cmp division <-> parent associations based on district
# Build cmp division <-> parent associations based on date
for event in events:
parent_event = None
if event.event_type_enum == EventType.DISTRICT_CMP_DIVISION:
parent_event = district_champs_by_district.get(event.district_key)
elif event.event_type_enum == EventType.CMP_DIVISION:
for parent_event in champ_events:
if abs(parent_event.end_date - event.end_date) < datetime.timedelta(
days=1
):
break
else:
parent_event = None
else:
continue
if parent_event is None:
continue
parent_event.divisions = sorted(parent_event.divisions + [event.key])
event.parent_event = parent_event.key
return events, list(districts.values())
| mit | fc6560ac10d277c34ca2b87cf07eaaff | 40.321285 | 100 | 0.555253 | 4.019141 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/web/handlers/tests/team_history_test.py | 1 | 3660 | from freezegun.api import freeze_time
from werkzeug.test import Client
from backend.web.handlers.tests import helpers
def test_get_bad_team_num(web_client: Client) -> None:
resp = web_client.get("/team/0/history")
assert resp.status_code == 404
def test_team_not_found(web_client: Client, ndb_stub) -> None:
resp = web_client.get("/team/254/history")
assert resp.status_code == 404
def test_page_title(web_client: Client, ndb_stub) -> None:
helpers.preseed_team(254)
helpers.preseed_event_for_team(254, "2020test")
resp = web_client.get("/team/254/history")
assert resp.status_code == 200
assert "max-age=86400" in resp.headers["Cache-Control"]
assert (
helpers.get_page_title(resp.data)
== "The 254 Team - Team 254 (History) - The Blue Alliance"
)
@freeze_time("2020-03-01")
def test_short_cache_live_event(web_client: Client, ndb_stub) -> None:
helpers.preseed_team(254)
helpers.preseed_event_for_team(254, "2020test")
resp = web_client.get("/team/254/history")
assert resp.status_code == 200
assert "max-age=300" in resp.headers["Cache-Control"]
def test_team_info(web_client: Client, setup_full_team) -> None:
resp = web_client.get("/team/148/history")
assert resp.status_code == 200
team_info = helpers.get_team_info(resp.data)
assert team_info.header == "Team 148 - Robowranglers"
assert team_info.location == "Greenville, Texas, USA"
assert (
team_info.full_name
== "Innovation First International/L3 Harris&Greenville High School"
)
assert team_info.rookie_year == "Rookie Year: 1992"
assert team_info.website == "http://www.robowranglers148.com/"
assert team_info.district is None
assert team_info.district_link is None
assert team_info.social_media == [
("facebook-profile", "robotics-team-148-robowranglers-144761815581405"),
("youtube-channel", "robowranglers"),
("twitter-profile", "robowranglers"),
("github-profile", "team148"),
]
assert team_info.preferred_medias is None
assert team_info.current_event is None
def test_team_history_table(web_client: Client, setup_full_team) -> None:
resp = web_client.get("/team/148/history")
assert resp.status_code == 200
team_history = helpers.get_team_history(resp.data)
assert team_history == [
helpers.TeamEventHistory(year=2019, event="The Remix", awards=[]),
helpers.TeamEventHistory(
year=2019, event="Texas Robotics Invitational", awards=[]
),
helpers.TeamEventHistory(
year=2019, event="Einstein Field (Houston)", awards=[]
),
helpers.TeamEventHistory(
year=2019,
event="Roebling Division",
awards=[
"Championship Subdivision Winner",
"Quality Award sponsored by Motorola Solutions Foundation",
],
),
helpers.TeamEventHistory(
year=2019,
event="FIRST In Texas District Championship",
awards=["District Championship Winner"],
),
helpers.TeamEventHistory(
year=2019,
event="FIT District Dallas Event",
awards=[
"District Event Winner",
"Industrial Design Award sponsored by General Motors",
],
),
helpers.TeamEventHistory(
year=2019,
event="FIT District Amarillo Event",
awards=[
"District Event Winner",
"Quality Award sponsored by Motorola Solutions Foundation",
],
),
]
| mit | e6d5edcda7019106229e7cd495a90e3f | 33.857143 | 80 | 0.623224 | 3.536232 | false | true | false | false |
the-blue-alliance/the-blue-alliance | src/backend/conftest.py | 1 | 3060 | from typing import Generator
import pytest
from freezegun import api as freezegun_api
from google.appengine.api import datastore_types
from google.appengine.api.apiproxy_rpc import _THREAD_POOL
from google.appengine.ext import ndb, testbed
from backend.common.context_cache import context_cache
from backend.common.models.cached_query_result import CachedQueryResult
from backend.tests.json_data_importer import JsonDataImporter
@pytest.fixture(autouse=True, scope="session")
def drain_gae_rpc_thread_pool() -> Generator:
yield
# This thread pool can leave work dangling after the test session
# is done, which can cause pytest to hang.
# So we add this fixture to manually shut it down
_THREAD_POOL.shutdown()
@pytest.fixture(autouse=True)
def init_test_marker_env(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("TBA_UNIT_TEST", "true")
@pytest.fixture(autouse=True)
def clear_context_cache(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(context_cache, "CACHE_DATA", {})
@pytest.fixture()
def gae_testbed() -> Generator[testbed.Testbed, None, None]:
tb = testbed.Testbed()
tb.activate()
yield tb
tb.deactivate()
@pytest.fixture()
def ndb_stub(
gae_testbed: testbed.Testbed,
memcache_stub,
monkeypatch: pytest.MonkeyPatch,
) -> testbed.datastore_file_stub.DatastoreFileStub:
gae_testbed.init_datastore_v3_stub()
# monkeypatch the ndb library to work with freezegun
fake_datetime = getattr(freezegun_api, "FakeDatetime") # pyre-ignore[16]
v = getattr(datastore_types, "_VALIDATE_PROPERTY_VALUES", {}) # pyre-ignore[16]
v[fake_datetime] = datastore_types.ValidatePropertyNothing
monkeypatch.setattr(datastore_types, "_VALIDATE_PROPERTY_VALUES", v)
p = getattr(datastore_types, "_PACK_PROPERTY_VALUES", {}) # pyre-ignore[16]
p[fake_datetime] = datastore_types.PackDatetime
monkeypatch.setattr(datastore_types, "_PACK_PROPERTY_VALUES", p)
stub = gae_testbed.get_stub(testbed.DATASTORE_SERVICE_NAME)
return stub
@pytest.fixture()
def taskqueue_stub(
gae_testbed: testbed.Testbed,
) -> testbed.taskqueue_stub.TaskQueueServiceStub:
gae_testbed.init_taskqueue_stub(root_path="src/")
return gae_testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
@pytest.fixture()
def urlfetch_stub(
gae_testbed: testbed.Testbed,
) -> testbed.urlfetch_stub.URLFetchServiceStub:
gae_testbed.init_urlfetch_stub()
return gae_testbed.get_stub(testbed.URLFETCH_SERVICE_NAME)
@pytest.fixture()
def memcache_stub(
gae_testbed: testbed.Testbed,
monkeypatch: pytest.MonkeyPatch,
) -> testbed.memcache_stub.MemcacheServiceStub:
gae_testbed.init_memcache_stub()
stub = gae_testbed.get_stub(testbed.MEMCACHE_SERVICE_NAME)
return stub
@pytest.fixture()
def ndb_context(ndb_stub):
pass
@pytest.fixture()
def test_data_importer(ndb_stub) -> JsonDataImporter:
return JsonDataImporter()
def clear_cached_queries() -> None:
ndb.delete_multi(CachedQueryResult.query().fetch(keys_only=True))
| mit | 59490c623f262f776a986c8649ef6ab2 | 29.29703 | 84 | 0.738562 | 3.308108 | false | true | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/helpers/tests/award_helper_test.py | 1 | 3904 | from typing import Optional
import pytest
from backend.common.consts.award_type import AwardType
from backend.common.helpers.award_helper import AwardHelper
from backend.common.models.award import Award
def test_organize_awards() -> None:
a1 = Award(
award_type_enum=AwardType.SAFETY,
name_str="Safety",
)
a2 = Award(
award_type_enum=AwardType.CHAIRMANS,
name_str="Chairmans",
)
a3 = Award(
award_type_enum=AwardType.WINNER,
name_str="Winner",
)
assert AwardHelper.organize_awards([a1, a2, a3]) == [a2, a3, a1]
@pytest.mark.parametrize(
"name,award_type",
[
("Chairman's", AwardType.CHAIRMANS),
("Chairman", AwardType.CHAIRMANS),
("Chairman's Award Finalist", AwardType.CHAIRMANS_FINALIST),
("Winner #1", AwardType.WINNER),
("Division Winner #2", AwardType.WINNER),
("Newton - Division Champion #3", AwardType.WINNER),
("Championship Division Winner", AwardType.WINNER),
("Championship Winner #3", AwardType.WINNER),
("Championship Champion #4", AwardType.WINNER),
("Championship Champion", AwardType.WINNER),
("Championship Winner", AwardType.WINNER),
("Winner", AwardType.WINNER),
("Finalist #1", AwardType.FINALIST),
("Division Finalist #2", AwardType.FINALIST),
("Championship Finalist #3", AwardType.FINALIST),
("Championship Finalist #4", AwardType.FINALIST),
("Championship Finalist", AwardType.FINALIST),
("Finalist", AwardType.FINALIST),
("Dean's List Finalist #1", AwardType.DEANS_LIST),
("Dean's List Finalist", AwardType.DEANS_LIST),
("Dean's List Winner #9", AwardType.DEANS_LIST),
("Dean's List Winner", AwardType.DEANS_LIST),
("Dean's List", AwardType.DEANS_LIST),
(
"Excellence in Design Award sponsored by Autodesk (3D CAD)",
AwardType.EXCELLENCE_IN_DESIGN_CAD,
),
(
"Excellence in Design Award sponsored by Autodesk (Animation)",
AwardType.EXCELLENCE_IN_DESIGN_ANIMATION,
),
("Excellence in Design Award", AwardType.EXCELLENCE_IN_DESIGN),
("Dr. Bart Kamen Memorial Scholarship #1", AwardType.BART_KAMEN_MEMORIAL),
(
"Media and Technology Award sponsored by Comcast",
AwardType.MEDIA_AND_TECHNOLOGY,
),
("Make It Loud Award", AwardType.MAKE_IT_LOUD),
("Founder's Award", AwardType.FOUNDERS),
("Championship - Web Site Award", AwardType.WEBSITE),
(
"Recognition of Extraordinary Service",
AwardType.RECOGNITION_OF_EXTRAORDINARY_SERVICE,
),
("Outstanding Cart Award", AwardType.OUTSTANDING_CART),
("Wayne State University Aim Higher Award", AwardType.WSU_AIM_HIGHER),
(
'Delphi "Driving Tommorow\'s Technology" Award',
AwardType.DRIVING_TOMORROWS_TECHNOLOGY,
),
("Delphi Drive Tommorows Technology", AwardType.DRIVING_TOMORROWS_TECHNOLOGY),
("Kleiner, Perkins, Caufield and Byers", AwardType.ENTREPRENEURSHIP),
("Leadership in Control Award", AwardType.LEADERSHIP_IN_CONTROL),
("#1 Seed", AwardType.NUM_1_SEED),
("Incredible Play Award", AwardType.INCREDIBLE_PLAY),
("People's Choice Animation Award", AwardType.PEOPLES_CHOICE_ANIMATION),
("Autodesk Award for Visualization - Grand Prize", AwardType.VISUALIZATION),
(
"Autodesk Award for Visualization - Rising Star",
AwardType.VISUALIZATION_RISING_STAR,
),
("Some Random Award Winner", None),
("Random Champion", None),
("An Award", None),
],
)
def test_parse_award_type(name: str, award_type: Optional[AwardType]) -> None:
assert AwardHelper.parse_award_type(name) == award_type
| mit | ac04ba355218694644a557bfc0ce1900 | 39.666667 | 86 | 0.627818 | 3.120703 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/api/handlers/tests/update_event_matches_test.py | 1 | 15731 | import datetime
import json
from typing import Any, Dict, List, Optional
import pytest
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from werkzeug.test import Client
from backend.api.trusted_api_auth_helper import TrustedApiAuthHelper
from backend.common.consts.alliance_color import AllianceColor
from backend.common.consts.auth_type import AuthType
from backend.common.consts.event_type import EventType
from backend.common.models.api_auth_access import ApiAuthAccess
from backend.common.models.event import Event
from backend.common.models.match import Match
# Credentials for the trusted-API key created by setup_auth(); the values are
# arbitrary but must match between setup_auth() and get_auth_headers().
AUTH_ID = "tEsT_id_0"
AUTH_SECRET = "321tEsTsEcReT"
# Endpoint under test: trusted match-update for the seeded 2014casj event.
REQUEST_PATH = "/api/trusted/v1/event/2014casj/matches/update"
def setup_event(
    remap_teams: Optional[Dict[str, str]] = None,
    timezone_id: Optional[str] = "America/Los_Angeles",
) -> None:
    """Seed the 2014casj offseason event that the trusted-API tests target.

    Args:
        remap_teams: Optional team-key remapping stored on the event.
        timezone_id: Event timezone; pass None to simulate an event with no
            timezone configured.
    """
    event = Event(
        id="2014casj",
        year=2014,
        event_short="casj",
        timezone_id=timezone_id,
        start_date=datetime.datetime(2014, 4, 1),
        end_date=datetime.datetime(2014, 4, 3),
        event_type_enum=EventType.OFFSEASON,
        remap_teams=remap_teams,
    )
    event.put()
def setup_auth(access_types: List[AuthType]) -> None:
    """Create a trusted-API key for 2014casj granting the given auth types."""
    auth = ApiAuthAccess(
        id=AUTH_ID,
        secret=AUTH_SECRET,
        event_list=[ndb.Key(Event, "2014casj")],
        auth_types_enum=access_types,
    )
    auth.put()
def get_auth_headers(request_path: str, request_body) -> Dict[str, str]:
    """Build the trusted-API auth headers for a request.

    The signature header is computed by TrustedApiAuthHelper over the shared
    secret, the request path, and the raw request body.

    Args:
        request_path: The path the request will be POSTed to.
        request_body: The raw (already-serialized) request body.

    Returns:
        A dict with the X-TBA-Auth-Id and X-TBA-Auth-Sig headers.
    """
    return {
        "X-TBA-Auth-Id": AUTH_ID,
        # Fixed typo'd capitalization ("X-TBA-AUth-Sig"); HTTP field names are
        # case-insensitive, so behavior is unchanged, but this matches the
        # canonical header name used elsewhere.
        "X-TBA-Auth-Sig": TrustedApiAuthHelper.compute_auth_signature(
            AUTH_SECRET, request_path, request_body
        ),
    }
def test_bad_event_key(api_client: Client) -> None:
    """A malformed event key in the URL yields a 404."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    response = api_client.post(
        "/api/trusted/v1/event/asdf/matches/update",
        data=json.dumps([]),
    )
    assert response.status_code == 404
def test_bad_event(api_client: Client) -> None:
    """A well-formed key for an event that does not exist yields a 404."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    response = api_client.post(
        "/api/trusted/v1/event/2015casj/matches/update",
        data=json.dumps([]),
    )
    assert response.status_code == 404
def test_bad_auth_type(api_client: Client) -> None:
    """A key without EVENT_MATCHES permission is rejected with 401."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_INFO])

    response = api_client.post(
        "/api/trusted/v1/event/2014casj/matches/update",
        data=json.dumps([]),
    )
    assert response.status_code == 401
def test_no_auth(api_client: Client) -> None:
    """Correctly-signed headers are rejected when no ApiAuthAccess exists."""
    setup_event()

    body = json.dumps([])
    resp = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, body),
        data=body,
    )
    assert resp.status_code == 401
@pytest.mark.parametrize(
    "request_data",
    [
        # Non-JSON / wrong top-level shape
        "",
        "not_json",
        ["is_not_dict"],
        [{}],
        # Invalid comp_level / set_number / match_number
        [{"comp_level": "meow"}],
        [{"comp_level": "qf", "set_number": "abc"}],
        [{"comp_level": "qf", "set_number": 1, "match_number": "abc"}],
        # Malformed alliances dicts
        [{"comp_level": "qf", "set_number": 1, "match_number": 1, "alliances": "abc"}],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"green": {}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"teams": []}},
            }
        ],
        # Bad team keys / scores within an alliance
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"score": 0, "teams": ["bad_team"]}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"teams": [], "score": "abc"}},
            }
        ],
        # Surrogates / DQs that are malformed or not on the alliance
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {
                    "red": {"teams": [], "score": 0, "surrogates": ["bad_team"]}
                },
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"teams": [], "score": 0, "surrogates": ["frc1"]}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"teams": [], "score": 0, "dqs": ["bad_team"]}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {"red": {"teams": [], "score": 0, "dqs": ["frc1"]}},
            }
        ],
        # Malformed score_breakdown
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {},
                "score_breakdown": "blah",
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {},
                "score_breakdown": {"green": {}},
            }
        ],
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {},
                "score_breakdown": {"red": {"bad_key": 0}},
            }
        ],
        # Unparseable time_utc
        [
            {
                "comp_level": "qf",
                "set_number": 1,
                "match_number": 1,
                "alliances": {},
                "time_utc": "foo",
            }
        ],
    ],
)
def test_bad_json(api_client: Client, request_data: Any) -> None:
    """Every malformed payload variant is rejected with a 400."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    request_body = json.dumps(request_data)
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, request_body),
        data=request_body,
    )
    assert response.status_code == 400
def test_matches_update(api_client: Client) -> None:
    """Posting valid match JSON creates/updates Match entities for the event."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    # add one match
    matches = [
        {
            "comp_level": "qm",
            "set_number": 1,
            "match_number": 1,
            "alliances": {
                "red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
                "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
            },
            "time_string": "9:00 AM",
            "time_utc": "2014-08-31T16:00:00",
        }
    ]
    request_body = json.dumps(matches)
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, request_body),
        data=request_body,
    )
    assert response.status_code == 200

    event = none_throws(Event.get_by_id("2014casj"))
    db_matches = Match.query(Match.event == event.key).fetch()
    assert len(db_matches) == 1
    assert "2014casj_qm1" in [m.key.id() for m in db_matches]

    # add another match (a finals match with surrogates, DQs, and a breakdown);
    # the previously-written qm1 must be preserved
    matches = [
        {
            "comp_level": "f",
            "set_number": 1,
            "match_number": 1,
            "alliances": {
                "red": {
                    "teams": ["frc1", "frc2", "frc3"],
                    "score": 250,
                    "surrogates": ["frc1"],
                    "dqs": ["frc2"],
                },
                "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 260},
            },
            "score_breakdown": {
                "red": {
                    "auto": 20,
                    "assist": 40,
                    "truss+catch": 20,
                    "teleop_goal+foul": 20,
                },
                "blue": {
                    "auto": 40,
                    "assist": 60,
                    "truss+catch": 10,
                    "teleop_goal+foul": 40,
                },
            },
            "time_string": "10:00 AM",
            "time_utc": "2014-08-31T17:00:00",
        }
    ]
    request_body = json.dumps(matches)
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, request_body),
        data=request_body,
    )
    assert response.status_code == 200

    db_matches = Match.query(Match.event == event.key).fetch()
    assert len(db_matches) == 2
    assert "2014casj_qm1" in [m.key.id() for m in db_matches]
    assert "2014casj_f1m1" in [m.key.id() for m in db_matches]

    # verify match data
    match = Match.get_by_id("2014casj_f1m1")
    assert match is not None
    assert match.time == datetime.datetime(2014, 8, 31, 17, 0)
    assert match.time_string == "10:00 AM"
    assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
    assert match.alliances[AllianceColor.RED]["score"] == 250
    assert match.alliances[AllianceColor.RED]["surrogates"] == ["frc1"]
    # NOTE(review): the request DQ'd only frc2, but the stored value is the
    # whole alliance — the backend appears to expand DQs to all alliance
    # members for elimination matches; confirm this is the intended behavior.
    assert match.alliances[AllianceColor.RED]["dqs"] == ["frc1", "frc2", "frc3"]
    breakdown = match.score_breakdown
    assert breakdown is not None
    assert breakdown[AllianceColor.RED]["truss+catch"] == 20
    assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc6"]
    assert match.alliances[AllianceColor.BLUE]["score"] == 260
    # surrogates/dqs were omitted for blue; they default to empty lists
    assert match.alliances[AllianceColor.BLUE]["surrogates"] == []
    assert match.alliances[AllianceColor.BLUE]["dqs"] == []
def test_calculate_match_time(api_client: Client) -> None:
    """Match times should roll over to the next day when the local
    time_string decreases (9 AM after 4 PM implies day 2)."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    def qual_match(match_number: int, time_string: str) -> dict:
        # All matches share identical alliances; only number/time vary.
        return {
            "comp_level": "qm",
            "set_number": 1,
            "match_number": match_number,
            "alliances": {
                "red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
                "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
            },
            "time_string": time_string,
        }

    # Three matches on day 1, then one on day 2 (local time goes backwards).
    matches = [
        qual_match(1, "9:00 AM"),
        qual_match(2, "12:00 PM"),
        qual_match(3, "4:00 PM"),
        qual_match(4, "9:00 AM"),
    ]

    payload = json.dumps(matches)
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, payload),
        data=payload,
    )
    assert response.status_code == 200

    event = none_throws(Event.get_by_id("2014casj"))
    stored_matches = Match.query(Match.event == event.key).fetch()
    assert len(stored_matches) == 4

    # Stored times are UTC (9:00 AM local -> 16:00 UTC for this event;
    # presumably a US/Pacific timezone -- see setup_event).
    expected_times = {
        "2014casj_qm1": datetime.datetime(2014, 4, 2, 16, 0),
        "2014casj_qm2": datetime.datetime(2014, 4, 2, 19, 0),
        "2014casj_qm3": datetime.datetime(2014, 4, 2, 23, 0),
        "2014casj_qm4": datetime.datetime(2014, 4, 3, 16, 0),
    }
    for key, expected_time in expected_times.items():
        match = Match.get_by_id(key)
        assert match is not None
        assert match.time == expected_time
def test_calculate_match_time_bad_time(api_client: Client) -> None:
    """An unparseable time_string should be skipped, leaving match.time unset."""
    setup_event()
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    payload = json.dumps(
        [
            {
                "comp_level": "qm",
                "set_number": 1,
                "match_number": 1,
                "alliances": {
                    "red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
                    "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
                },
                "time_string": "blahhh",
            },
        ]
    )
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, payload),
        data=payload,
    )
    assert response.status_code == 200

    event = none_throws(Event.get_by_id("2014casj"))
    assert len(Match.query(Match.event == event.key).fetch()) == 1

    # The bad time_string must not have produced a stored time.
    match = Match.get_by_id("2014casj_qm1")
    assert match is not None
    assert match.time is None
def test_calculate_match_time_skip_no_timezone(api_client: Client) -> None:
    """Without an event timezone, match time computation is skipped entirely."""
    setup_event(timezone_id=None)
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    payload = json.dumps(
        [
            {
                "comp_level": "qm",
                "set_number": 1,
                "match_number": 1,
                "alliances": {
                    "red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
                    "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
                },
                "time_string": "9:00 AM",
            },
        ]
    )
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, payload),
        data=payload,
    )
    assert response.status_code == 200

    event = none_throws(Event.get_by_id("2014casj"))
    assert len(Match.query(Match.event == event.key).fetch()) == 1

    # A valid time_string without a timezone still yields no stored time.
    match = Match.get_by_id("2014casj_qm1")
    assert match is not None
    assert match.time is None
def test_add_match_remapteams(api_client: Client) -> None:
    """remap_teams should rewrite team keys (frc6 -> frc254B) on stored matches."""
    setup_event(remap_teams={"frc6": "frc254B"})
    setup_auth(access_types=[AuthType.EVENT_MATCHES])

    # Submit a single qualification match that mentions frc6.
    payload = json.dumps(
        [
            {
                "comp_level": "qm",
                "set_number": 1,
                "match_number": 1,
                "alliances": {
                    "red": {"teams": ["frc1", "frc2", "frc3"], "score": 25},
                    "blue": {"teams": ["frc4", "frc5", "frc6"], "score": 26},
                },
                "time_string": "9:00 AM",
            },
        ]
    )
    response = api_client.post(
        REQUEST_PATH,
        headers=get_auth_headers(REQUEST_PATH, payload),
        data=payload,
    )
    assert response.status_code == 200

    event = none_throws(Event.get_by_id("2014casj"))
    assert len(Match.query(Match.event == event.key).fetch()) == 1

    # frc6 should have been remapped to frc254B everywhere it appears.
    match = Match.get_by_id("2014casj_qm1")
    assert match is not None
    assert match.alliances[AllianceColor.RED]["teams"] == ["frc1", "frc2", "frc3"]
    assert match.alliances[AllianceColor.BLUE]["teams"] == ["frc4", "frc5", "frc254B"]
    assert match.team_key_names == ["frc1", "frc2", "frc3", "frc4", "frc5", "frc254B"]
| mit | 9d3bff5cc002e6359e7b079e38d95a67 | 29.664717 | 88 | 0.493929 | 3.513737 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/helpers/event_remapteams_helper.py | 1 | 5511 | import json
from typing import Dict, List, Optional
from google.appengine.ext import ndb
from pyre_extensions import none_throws
from backend.common.consts.alliance_color import ALLIANCE_COLORS
from backend.common.manipulators.award_manipulator import AwardManipulator
from backend.common.manipulators.event_details_manipulator import (
EventDetailsManipulator,
)
from backend.common.manipulators.match_manipulator import MatchManipulator
from backend.common.models.alliance import EventAlliance
from backend.common.models.award import Award
from backend.common.models.event import Event
from backend.common.models.event_ranking import EventRanking
from backend.common.models.keys import EventKey
from backend.common.models.match import Match
from backend.common.models.team import Team
class EventRemapTeamsHelper:
    """Applies an Event's ``remap_teams`` mapping to its stored data.

    ``remap_teams`` maps old team keys to new team keys (e.g.
    ``{"frc6": "frc254B"}``). Every ``remapteams_*`` helper mutates the
    passed-in models/dicts in place rather than returning copies.
    """

    @classmethod
    def remap_teams(cls, event_key: EventKey) -> None:
        """Remap matches, alliance selections, rankings, and awards for one event.

        No-op when the event does not exist or has no ``remap_teams`` configured.
        Persists the mutated models via their manipulators.
        """
        event: Optional[Event] = Event.get_by_id(event_key)
        if not event or not event.remap_teams:
            return None
        # NOTE(review): presumably kicks off async fetches of awards/matches/teams
        # so the accesses below are cheap -- confirm against the Event model.
        event.prep_awards_matches_teams()
        # Remap matches
        cls.remapteams_matches(event.matches, event.remap_teams)
        MatchManipulator.createOrUpdate(event.matches)
        # Remap alliance selections
        if event.alliance_selections:
            cls.remapteams_alliances(
                none_throws(event.alliance_selections), event.remap_teams
            )
        # Remap rankings
        if event.rankings:
            cls.remapteams_rankings2(event.rankings, event.remap_teams)
        EventDetailsManipulator.createOrUpdate(event.details)
        # Remap awards
        cls.remapteams_awards(event.awards, event.remap_teams)
        # auto_union=False so the remapped recipient lists *replace* the stored
        # ones instead of being unioned with the pre-remap values.
        AwardManipulator.createOrUpdate(event.awards, auto_union=False)

    @classmethod
    def remapteams_awards(
        cls, awards: List[Award], remap_teams: Dict[str, str]
    ) -> None:
        """
        Remaps teams in awards. Mutates in place.
        In `remap_teams` dictionary, key is the old team key, value is the new team key
        """
        for award in awards:
            new_recipient_json_list = []
            new_team_list = []
            # Compute new recipient list and team list
            for recipient in award.recipient_list:
                for old_team, new_team in remap_teams.items():
                    # Convert recipient `team_number` to string for safe comparision
                    if str(recipient["team_number"]) == old_team[3:]:
                        # _dirty flags the entity as needing a datastore write.
                        award._dirty = True
                        # [3:] strips the "frc" prefix; may leave a non-numeric
                        # suffix like "254B".
                        recipient["team_number"] = new_team[3:]
                # Convert `team_number` down to an int, if possible
                recipient_team_number = recipient["team_number"]
                if (
                    type(recipient_team_number) is str
                    and recipient_team_number.isdigit()
                ):
                    award._dirty = True
                    recipient["team_number"] = int(recipient_team_number)
                new_recipient_json_list.append(json.dumps(recipient))
                new_team_list.append(
                    ndb.Key(Team, "frc{}".format(recipient["team_number"]))
                )
            # Update
            award.recipient_json_list = new_recipient_json_list
            award.team_list = new_team_list

    @classmethod
    def remapteams_matches(
        cls, matches: List[Match], remap_teams: Dict[str, str]
    ) -> None:
        """
        Remaps teams in matches
        Mutates in place
        """
        for match in matches:
            for old_team, new_team in remap_teams.items():
                # Update alliances
                for color in ALLIANCE_COLORS:
                    for attr in ["teams", "surrogates", "dqs"]:
                        for i, key in enumerate(
                            match.alliances[color].get(attr, [])  # pyre-ignore[26]
                        ):
                            if key == old_team:
                                match._dirty = True
                                match.alliances[color][attr][  # pyre-ignore[26]
                                    i
                                ] = new_team
                                # Keep the serialized form in sync with the dict.
                                match.alliances_json = json.dumps(match.alliances)
            # Update team key names
            # Rebuilt from the (possibly remapped) "teams" lists of all alliances.
            match.team_key_names = []
            for alliance in match.alliances:
                match.team_key_names.extend(
                    none_throws(match.alliances[alliance].get("teams", None))
                )

    @classmethod
    def remapteams_alliances(
        cls, alliance_selections: List[EventAlliance], remap_teams: Dict[str, str]
    ) -> None:
        """
        Remaps teams in alliance selections
        Mutates in place
        """
        for row in alliance_selections:
            # Both the picked and the declining teams can be remapped.
            for choice in ["picks", "declines"]:
                for old_team, new_team in remap_teams.items():
                    for i, key in enumerate(row.get(choice, [])):  # pyre-ignore[26]
                        if key == old_team:
                            row[choice][i] = new_team  # pyre-ignore[26,6]

    @classmethod
    def remapteams_rankings2(
        cls, rankings2: List[EventRanking], remap_teams: Dict[str, str]
    ) -> None:
        """
        Remaps teams in rankings2
        Mutates in place
        """
        for ranking in rankings2:
            if ranking["team_key"] in remap_teams:
                ranking["team_key"] = remap_teams[ranking["team_key"]]
| mit | f2c690df115c1fa3c42ebf66e25e7774 | 37.809859 | 87 | 0.570677 | 4.149849 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/cache_clearing/get_affected_queries.py | 1 | 12018 | from typing import Any, List, Set, Tuple, Type
from google.appengine.ext import ndb
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.district_team import DistrictTeam
from backend.common.models.event import Event
from backend.common.models.event_team import EventTeam
from backend.common.queries import (
award_query,
district_query,
event_details_query,
event_query,
match_query,
media_query,
robot_query,
team_query,
)
from backend.common.queries.database_query import CachedDatabaseQuery
TCacheKeyAndQuery = Tuple[str, Type[CachedDatabaseQuery]]
def _queries_to_cache_keys_and_queries(
    queries: List[CachedDatabaseQuery],
) -> List[TCacheKeyAndQuery]:
    """Pair each query instance with its cache key and concrete query class."""
    return [(query.cache_key, type(query)) for query in queries]
def _filter(refs: Set[Any]) -> Set[Any]:
# Default filter() filters zeros, so we can't use it.
return {r for r in refs if r is not None}
def award_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached award queries invalidated by an Award write."""
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team_list"])
    years = _filter(affected_refs["year"])
    event_types = _filter(affected_refs["event_type_enum"])
    award_types = _filter(affected_refs["award_type_enum"])

    queries: List[CachedDatabaseQuery] = []
    # Per-event queries, plus per-(team, event) queries.
    for event_key in event_keys:
        queries.append(award_query.EventAwardsQuery(event_key.id()))
        queries.extend(
            award_query.TeamEventAwardsQuery(team_key.id(), event_key.id())
            for team_key in team_keys
        )
    # Per-team queries across years and event/award type combinations.
    for team_key in team_keys:
        queries.append(award_query.TeamAwardsQuery(team_key.id()))
        queries.extend(
            award_query.TeamYearAwardsQuery(team_key.id(), year) for year in years
        )
        queries.extend(
            award_query.TeamEventTypeAwardsQuery(team_key.id(), event_type, award_type)
            for event_type in event_types
            for award_type in award_types
        )
    return _queries_to_cache_keys_and_queries(queries)
def event_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached event queries invalidated by an Event write.

    Also invalidates per-team event queries (via the EventTeam index) and
    the parent event's divisions list when a division event changes.
    """
    event_keys = _filter(affected_refs["key"])
    years = _filter(affected_refs["year"])
    event_district_keys = _filter(affected_refs["district_key"])
    # Kick off both datastore lookups up front so they run concurrently
    # while the synchronous query list below is being built.
    event_team_keys_future = EventTeam.query(
        EventTeam.event.IN([event_key for event_key in event_keys])  # pyre-ignore[16]
    ).fetch_async(None, keys_only=True)
    events_future = ndb.get_multi_async(event_keys)
    queries: List[CachedDatabaseQuery] = []
    for event_key in event_keys:
        queries.append(event_query.EventQuery(event_key.id()))
        queries.append(event_query.EventDivisionsQuery(event_key.id()))
    for year in years:
        queries.append(event_query.EventListQuery(year))
    for event_district_key in event_district_keys:
        queries.append(event_query.DistrictEventsQuery(event_district_key.id()))
    # Per-team event queries for every team registered at an affected event.
    # Guarded because an IN() over an empty list would have raised above only
    # lazily; with no event keys there is nothing team-related to invalidate.
    if event_keys:
        for et_key in event_team_keys_future.get_result():
            # EventTeam ids look like "<event_key>_<team_key>", e.g.
            # "2014casj_frc254"; the first four characters are the year.
            team_key = et_key.id().split("_")[1]
            year = int(et_key.id()[:4])
            queries.append(event_query.TeamEventsQuery(team_key))
            queries.append(event_query.TeamYearEventsQuery(team_key, year))
            queries.append(event_query.TeamYearEventTeamsQuery(team_key, year))
    # A division event changing must also refresh its parent's divisions list.
    events_with_parents = filter(
        lambda e: e.get_result() is not None
        and e.get_result().parent_event is not None,
        events_future,
    )
    parent_keys = set([e.get_result().parent_event for e in events_with_parents])
    for parent_key in parent_keys:
        queries.append(event_query.EventDivisionsQuery(parent_key.id()))
    return _queries_to_cache_keys_and_queries(queries)
def event_details_updated(
    affected_refs: TAffectedReferences,
) -> List[TCacheKeyAndQuery]:
    """Return the cached EventDetails queries invalidated by an EventDetails write."""
    detail_keys = _filter(affected_refs["key"])
    queries: List[CachedDatabaseQuery] = [
        event_details_query.EventDetailsQuery(detail_key.id())
        for detail_key in detail_keys
    ]
    return _queries_to_cache_keys_and_queries(queries)
def match_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached match queries invalidated by a Match write."""
    match_keys = _filter(affected_refs["key"])
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team_keys"])
    years = _filter(affected_refs["year"])

    queries: List[CachedDatabaseQuery] = []
    for match_key in match_keys:
        queries.append(match_query.MatchQuery(match_key.id()))
        # queries.append(match_query.MatchGdcvDataQuery(match_key.id()))
    for event_key in event_keys:
        queries.append(match_query.EventMatchesQuery(event_key.id()))
        # queries.append(match_query.EventMatchesGdcvDataQuery(event_key.id()))
        queries.extend(
            match_query.TeamEventMatchesQuery(team_key.id(), event_key.id())
            for team_key in team_keys
        )
    queries.extend(
        match_query.TeamYearMatchesQuery(team_key.id(), year)
        for team_key in team_keys
        for year in years
    )
    return _queries_to_cache_keys_and_queries(queries)
def media_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached media queries invalidated by a Media write.

    Media can reference either Team or Event models; team-referencing media
    additionally invalidates event-level media rollups for the events those
    teams attended in the affected years.
    """
    reference_keys = _filter(affected_refs["references"])
    years = _filter(affected_refs["year"])
    media_tags = _filter(affected_refs["media_tag_enum"])
    team_keys = list(filter(lambda x: x.kind() == "Team", reference_keys))
    # Only issue the EventTeam lookup when team references exist; IN() with an
    # empty list is invalid. The future runs while the list below is built.
    event_team_keys_future = (
        EventTeam.query(EventTeam.team.IN(team_keys)).fetch_async(  # pyre-ignore[16]
            None, keys_only=True
        )
        if team_keys
        else None
    )
    queries: List[CachedDatabaseQuery] = []
    for reference_key in reference_keys:
        if reference_key.kind() == "Team":
            for year in years:
                queries.append(media_query.TeamYearMediaQuery(reference_key.id(), year))
                for media_tag in media_tags:
                    queries.append(
                        media_query.TeamYearTagMediasQuery(
                            reference_key.id(), year, media_tag
                        )
                    )
            for media_tag in media_tags:
                queries.append(
                    media_query.TeamTagMediasQuery(reference_key.id(), media_tag)
                )
            queries.append(media_query.TeamSocialMediaQuery(reference_key.id()))
        if reference_key.kind() == "Event":
            queries.append(media_query.EventMediasQuery(reference_key.id()))
    if event_team_keys_future:
        for event_team_key in event_team_keys_future.get_result():
            # EventTeam ids look like "<event_key>_<team_key>"; the event key's
            # first four characters are the year.
            event_key = event_team_key.id().split("_")[0]
            year = int(event_key[:4])
            # Only events in an affected year need their rollups cleared.
            if year in years:
                queries.append(media_query.EventTeamsMediasQuery(event_key))
                queries.append(media_query.EventTeamsPreferredMediasQuery(event_key))
    return _queries_to_cache_keys_and_queries(queries)
def robot_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached robot queries invalidated by a Robot write."""
    team_keys = _filter(affected_refs["team"])
    queries: List[CachedDatabaseQuery] = [
        robot_query.TeamRobotsQuery(team_key.id()) for team_key in team_keys
    ]
    return _queries_to_cache_keys_and_queries(queries)
def team_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached team queries invalidated by a Team write.

    Includes the paginated team lists the team appears on, plus event- and
    district-scoped team lists found via the EventTeam/DistrictTeam indexes.
    """
    team_keys = _filter(affected_refs["key"])
    # Kick off both index lookups concurrently before building the list.
    event_team_keys_future = EventTeam.query(
        EventTeam.team.IN([team_key for team_key in team_keys])  # pyre-ignore[16]
    ).fetch_async(None, keys_only=True)
    district_team_keys_future = DistrictTeam.query(
        DistrictTeam.team.IN([team_key for team_key in team_keys])
    ).fetch_async(None, keys_only=True)
    queries: List[CachedDatabaseQuery] = []
    for team_key in team_keys:
        queries.append(team_query.TeamQuery(team_key.id()))
        # Paginated all-teams list: page number is derived from the team number.
        page_num = team_query.get_team_page_num(team_key.id())
        queries.append(team_query.TeamListQuery(page_num))
    for et_key in event_team_keys_future.get_result():
        # EventTeam ids look like "<event_key>_<team_key>"; the first four
        # characters of the event key are the year.
        year = int(et_key.id()[:4])
        event_key = et_key.id().split("_")[0]
        page_num = team_query.get_team_page_num(et_key.id().split("_")[1])
        queries.append(team_query.TeamListYearQuery(year, page_num))
        queries.append(team_query.EventTeamsQuery(event_key))
        queries.append(team_query.EventEventTeamsQuery(event_key))
    for dt_key in district_team_keys_future.get_result():
        # DistrictTeam ids look like "<district_key>_<team_key>".
        district_key = dt_key.id().split("_")[0]
        queries.append(team_query.DistrictTeamsQuery(district_key))
    return _queries_to_cache_keys_and_queries(queries)
def eventteam_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached queries invalidated by an EventTeam write."""
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team"])
    years = _filter(affected_refs["year"])

    queries: List[CachedDatabaseQuery] = []
    # Team-side invalidations: participation, per-year events, team list pages.
    for team_key in team_keys:
        team_id = team_key.id()
        queries.append(event_query.TeamEventsQuery(team_id))
        queries.append(team_query.TeamParticipationQuery(team_id))
        page_num = team_query.get_team_page_num(team_id)
        for year in years:
            queries.append(event_query.TeamYearEventsQuery(team_id, year))
            queries.append(event_query.TeamYearEventTeamsQuery(team_id, year))
            queries.append(team_query.TeamListYearQuery(year, page_num))
    # Event-side invalidations: team lists and media rollups.
    for event_key in event_keys:
        event_id = event_key.id()
        queries.append(team_query.EventTeamsQuery(event_id))
        queries.append(team_query.EventEventTeamsQuery(event_id))
        queries.append(media_query.EventTeamsMediasQuery(event_id))
        queries.append(media_query.EventTeamsPreferredMediasQuery(event_id))
    return _queries_to_cache_keys_and_queries(queries)
def districtteam_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached queries invalidated by a DistrictTeam write."""
    district_keys = _filter(affected_refs["district_key"])
    team_keys = _filter(affected_refs["team"])

    queries: List[CachedDatabaseQuery] = []
    queries.extend(
        team_query.DistrictTeamsQuery(district_key.id())
        for district_key in district_keys
    )
    queries.extend(
        district_query.TeamDistrictsQuery(team_key.id()) for team_key in team_keys
    )
    return _queries_to_cache_keys_and_queries(queries)
def district_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return the cached queries invalidated by a District write.

    Because APIv3 Event payloads embed their District, this also cascades
    into event_updated() for every event in the affected districts.
    """
    years = _filter(affected_refs["year"])
    district_abbrevs = _filter(affected_refs["abbreviation"])
    district_keys = _filter(affected_refs["key"])
    # Start both index lookups concurrently while building the list below.
    district_team_keys_future = DistrictTeam.query(
        DistrictTeam.district_key.IN(list(district_keys))
    ).fetch_async(None, keys_only=True)
    district_event_keys_future = Event.query(
        Event.district_key.IN(list(district_keys))  # pyre-ignore[16]
    ).fetch_async(keys_only=True)
    queries: List[CachedDatabaseQuery] = []
    for year in years:
        queries.append(district_query.DistrictsInYearQuery(year))
    for abbrev in district_abbrevs:
        queries.append(district_query.DistrictHistoryQuery(abbrev))
    for key in district_keys:
        queries.append(district_query.DistrictQuery(key.id()))
    for dt_key in district_team_keys_future.get_result():
        # DistrictTeam ids look like "<district_key>_<team_key>".
        team_key = dt_key.id().split("_")[1]
        queries.append(district_query.TeamDistrictsQuery(team_key))
    # Necessary because APIv3 Event models include the District model
    # Build a synthetic affected_refs for event_updated() covering every event
    # in the affected districts; event keys start with their four-digit year.
    affected_event_refs = {
        "key": set(),
        "year": set(),
        "district_key": district_keys,
    }
    for event_key in district_event_keys_future.get_result():
        affected_event_refs["key"].add(event_key)
        affected_event_refs["year"].add(int(event_key.id()[:4]))
    return _queries_to_cache_keys_and_queries(queries) + event_updated(
        affected_event_refs
    )
| mit | 5a64b04ec9aa012ea3186c633bdad652 | 38.019481 | 88 | 0.662506 | 3.494621 | false | false | false | false |
the-blue-alliance/the-blue-alliance | old_py2/controllers/admin/admin_offseason_spreadsheet_controller.py | 11 | 3111 | import datetime
import logging
import os
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from controllers.base_controller import LoggedInHandler
from datafeeds.datafeed_offseason_spreadsheet import DatafeedOffseasonSpreadsheet
from consts.event_type import EventType
from helpers.event_manipulator import EventManipulator
from models.event import Event
class AdminOffseasonSpreadsheetController(LoggedInHandler):
    """
    View and add un-added offseasons from a Google Sheet:
    https://docs.google.com/spreadsheet/ccc?key=0ApfHZGrvPfuAdHVZcTczVnhQVnh1RlNEdVFmM2c3Y1E
    """
    SHEET_KEY = "0ApfHZGrvPfuAdHVZcTczVnhQVnh1RlNEdVFmM2c3Y1E"

    def get(self):
        """Render the spreadsheet events that aren't in the datastore yet."""
        self._require_admin()

        candidate_events = DatafeedOffseasonSpreadsheet().getEventList(self.SHEET_KEY)

        # Offseason events already stored for the current year, matched by name.
        existing_events = Event.query().filter(
            Event.event_type_enum == EventType.OFFSEASON).filter(
            Event.year == datetime.datetime.now().year).fetch(100)
        existing_names = [event.name for event in existing_events]
        unseen_events = [
            event for event in candidate_events if event.name not in existing_names
        ]

        self.template_values.update({
            "events": unseen_events,
            "event_key": self.request.get("event_key"),
            "success": self.request.get("success"),
        })

        path = os.path.join(os.path.dirname(__file__), '../../templates/admin/offseasons_spreadsheet.html')
        self.response.out.write(template.render(path, self.template_values))

    def post(self):
        """Create an offseason Event from the submitted form, then redirect."""
        self._require_admin()

        action = self.request.get("submit")
        if action == "duplicate":
            # how to do this?
            self.redirect("/admin/offseasons/spreadsheet?success=duplicate&event_key=%s" % self.request.get("duplicate_event_key"))
            return

        if action == "create":
            def parse_date(field):
                # Empty form fields mean "no date".
                raw = self.request.get(field)
                return datetime.datetime.strptime(raw, "%Y-%m-%d") if raw else None

            event_key = str(self.request.get("event_year")) + str.lower(str(self.request.get("event_short")))
            event = Event(
                id=event_key,
                event_type_enum=int(self.request.get("event_type_enum")),
                event_short=self.request.get("event_short"),
                name=self.request.get("event_name"),
                year=int(self.request.get("event_year")),
                start_date=parse_date("event_start_date"),
                end_date=parse_date("event_end_date"),
                location=self.request.get("event_location"),
                venue=self.request.get("event_venue"),
            )
            event = EventManipulator.createOrUpdate(event)
            self.redirect("/admin/offseasons/spreadsheet?success=create&event_key=%s" % event_key)
            return

        self.redirect("/admin/offseasons/spreadsheet")
| mit | ae814e72b435a06f092a8cd6f3441e46 | 36.481928 | 131 | 0.633558 | 3.681657 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/queries/dict_converters/award_converter.py | 1 | 2819 | import json
from typing import Dict, List, NewType
from google.appengine.ext import ndb
from backend.common.consts.api_version import ApiMajorVersion
from backend.common.models.award import Award
from backend.common.models.event import Event
from backend.common.models.team import Team
from backend.common.queries.dict_converters.converter_base import ConverterBase
AwardDict = NewType("AwardDict", Dict)
class AwardConverter(ConverterBase):
    """Converts Award models to and from their APIv3 dict representations."""

    # Increment every time a change to the dict is made
    SUBVERSIONS = {
        ApiMajorVersion.API_V3: 3,
    }

    @classmethod
    def _convert_list(
        cls, model_list: List[Award], version: ApiMajorVersion
    ) -> List[AwardDict]:
        """Dispatch to the converter for the requested API version."""
        converters = {
            ApiMajorVersion.API_V3: cls.awardsConverter_v3,
        }
        return converters[version](model_list)

    @classmethod
    def awardsConverter_v3(cls, awards: List[Award]) -> List[AwardDict]:
        """Convert a list of Award models to APIv3 dicts."""
        return [cls.awardConverter_v3(award) for award in awards]

    @classmethod
    def awardConverter_v3(cls, award: Award) -> AwardDict:
        """Convert one Award model to its APIv3 dict."""
        recipients = []
        for recipient in award.recipient_list:
            team_number = recipient["team_number"]
            recipients.append(
                {
                    "awardee": recipient["awardee"],
                    # Falsy team_number (missing/None) means a person-only award.
                    "team_key": "frc{}".format(team_number) if team_number else None,
                }
            )
        return AwardDict(
            {
                "name": award.name_str,
                "award_type": award.award_type_enum,
                "year": award.year,
                "event_key": award.event.id(),
                "recipient_list": recipients,
            }
        )

    @staticmethod
    def dictToModel_v3(data: Dict, event: Event) -> Award:
        """Build an Award model from an APIv3 dict for the given event."""
        award = Award(id=Award.render_key_name(data["event_key"], data["award_type"]))
        award.event = ndb.Key(Event, data["event_key"])
        award.award_type_enum = data["award_type"]
        award.year = data["year"]
        award.name_str = data["name"]
        award.event_type_enum = event.event_type_enum

        recipient_json_list = []
        team_keys = []
        for recipient in data["recipient_list"]:
            team_key = recipient["team_key"]
            if team_key:
                team_keys.append(ndb.Key(Team, team_key))
            recipient_json_list.append(
                json.dumps(
                    {
                        "awardee": recipient["awardee"],
                        # [3:] strips the "frc" prefix from the team key.
                        "team_number": team_key[3:] if team_key else None,
                    }
                )
            )
        award.recipient_json_list = recipient_json_list
        award.team_list = team_keys
        return award
| mit | 07b581b382f30ad8cfac198cd776b8d4 | 33.802469 | 86 | 0.564385 | 3.877579 | false | false | false | false |
the-blue-alliance/the-blue-alliance | old_py2/helpers/insight_manipulator.py | 12 | 1096 | from helpers.manipulator_base import ManipulatorBase
class InsightManipulator(ManipulatorBase):
    """
    Handle Insight database writes.
    """

    @classmethod
    def updateMerge(self, new_insight, old_insight, auto_union=True):
        """
        Given an "old" and a "new" Insight object, replace the fields in the
        "old" Insight that are present in the "new" Insight, but keep fields from
        the "old" Insight that are null in the "new" insight.

        Sets ``old_insight.dirty`` when any field actually changes so callers
        can skip no-op datastore writes. Returns the (mutated) old_insight.
        """
        attrs = [
            'name',
            'year',
            'data_json',
        ]
        for attr in attrs:
            new_value = getattr(new_insight, attr)
            # Use identity checks for None (PEP 8); the old code used == / !=.
            if new_value is not None:
                if new_value != getattr(old_insight, attr):
                    setattr(old_insight, attr, new_value)
                    old_insight.dirty = True
            # Legacy sentinel: the literal string "None" clears the field.
            if new_value == "None":
                if getattr(old_insight, attr, None) is not None:
                    setattr(old_insight, attr, None)
                    old_insight.dirty = True
        return old_insight
| mit | ccd0c931eca11c376e37534acb63190c | 33.25 | 81 | 0.556569 | 3.985455 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/helpers/event_short_name_helper.py | 1 | 2918 | import re
from typing import Optional, Set
from backend.common.decorators import memoize
from backend.common.models.district import District
class EventShortNameHelper:
    """
    A helper class to compute event short names
    """

    @staticmethod
    @memoize(timeout=86400)  # 1 day
    def _get_all_district_codes() -> Set[str]:
        # District keys look like "<year><code>", e.g. "2019fim"; [4:] strips
        # the year. Aliases are added for renamed/interchangeable codes.
        codes = set(
            [d.id()[4:].upper() for d in District.query().fetch(keys_only=True)]
        )
        if "MAR" in codes:  # MAR renamed to FMA in 2019
            codes.add("FMA")
        if "TX" in codes:  # TX and FIT used interchangeably
            codes.add("FIT")
        if "IN" in codes:  # IN and FIN used interchangeably
            codes.add("FIN")
        return codes

    @classmethod
    def get_short_name(cls, name_str: str, district_code: Optional[str] = None) -> str:
        """
        Extracts a short name like "Silicon Valley" from an event name like
        "Silicon Valley Regional sponsored by Google.org".

        Tries, in order: numbered 2015+ district events, named 2015+ district
        events, then a legacy (2014 and earlier) regex for districts/regionals.
        See https://github.com/the-blue-alliance/the-blue-alliance-android/blob/master/android/src/test/java/com/thebluealliance/androidclient/test/helpers/EventHelperTest.java
        """
        all_district_codes = cls._get_all_district_codes()
        if district_code is not None:
            all_district_codes.add(district_code)
        district_code_regex = "|".join(all_district_codes)
        # Account for 2020 suspensions
        if name_str.startswith("***SUSPENDED***"):
            name_str = name_str.replace("***SUSPENDED***", "")
        # 2015+ districts
        # Numbered events with no name
        # e.g. "FIM District Event #3" -> "FIM #3"
        re_string = r"({}) District Event (#\d+)".format(district_code_regex)
        match = re.match(re_string, name_str)
        if match:
            return "{} {}".format(match.group(1).strip(), match.group(2).strip())
        # The rest
        # e.g. "PNW District Auburn Event" -> "Auburn"; the re.sub drops the
        # trailing "Event" (and anything word-like after it).
        re_string = r"(?:{}) District -?(.+)".format(district_code_regex)
        match = re.match(re_string, name_str)
        if match:
            partial = match.group(1).strip()
            match2 = re.sub(r"(?<=[\w\s])Event\s*(?:[\w\s]*$)?", "", partial)
            return match2.strip()
        # 2014- districts
        # district championships, other districts, and regionals
        name_str = re.sub(r"\s?Event", "", name_str)
        match = re.match(
            r"\s*(?:MAR |PNW |)(?:FIRST Robotics|FRC|)(.+)(?:District|Regional|Region|Provincial|State|Tournament|FRC|Field)(?:\b)(?:[\w\s]+?(#\d*)*)?(Day \d+)?",
            name_str,
        )
        if match:
            short = "".join(match.groups(""))
            # Strip a trailing "FIRST Robotics"/"FRC" suffix if present.
            match = re.match(r"(.+)(?:FIRST Robotics|FRC)", short)
            if match:
                result = match.group(1).strip()
            else:
                result = short.strip()
            if result.startswith("FIRST"):
                result = result[5:]
            return result.strip()
        # No pattern matched; fall back to the (trimmed) original name.
        return name_str.strip()
| mit | 10a98eeec91a9400e86aadd0b208fce1 | 35.936709 | 176 | 0.566484 | 3.494611 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/consts/fcm/platform_type.py | 1 | 1528 | from __future__ import annotations
import enum
from typing import Dict
@enum.unique
class PlatformType(enum.IntEnum):
    """
    Constants for the type of FCM platforms.
    https://firebase.google.com/docs/reference/fcm/rest/v1/projects.messages
    """

    ANDROID = 0
    APNS = 1
    WEBPUSH = 2

    @staticmethod
    def validate(platform_type: PlatformType) -> None:
        """Validate that the platform_type is supported.

        Raises:
            ValueError: If platform_type is an unsupported platform type.
        """
        if platform_type not in list(PlatformType):
            raise ValueError("Unsupported platform_type: {}".format(platform_type))

    @staticmethod
    def collapse_key_key(platform_type: PlatformType) -> str:
        """Return the per-platform message key used for collapse IDs."""
        PlatformType.validate(platform_type)
        collapse_keys: Dict[PlatformType, str] = {
            PlatformType.ANDROID: "collapse_key",
            PlatformType.APNS: "apns-collapse-id",
            PlatformType.WEBPUSH: "Topic",
        }
        return collapse_keys[platform_type]

    @staticmethod
    def priority_key(platform_type: PlatformType) -> str:
        """Return the per-platform message key used for priority."""
        PlatformType.validate(platform_type)
        priority_keys: Dict[PlatformType, str] = {
            PlatformType.ANDROID: "priority",
            PlatformType.APNS: "apns-priority",
            PlatformType.WEBPUSH: "Urgency",
        }
        return priority_keys[platform_type]
| mit | b1a1e2203d2c526195ef2bf7bfa2c6cf | 29.56 | 83 | 0.64267 | 4.107527 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/api/handlers/match.py | 1 | 1566 | from typing import Optional
from flask import Response
from backend.api.handlers.decorators import api_authenticated, validate_keys
from backend.api.handlers.helpers.model_properties import (
filter_match_properties,
ModelType,
)
from backend.api.handlers.helpers.profiled_jsonify import profiled_jsonify
from backend.api.handlers.helpers.track_call import track_call_after_response
from backend.common.consts.api_version import ApiMajorVersion
from backend.common.decorators import cached_public
from backend.common.models.keys import MatchKey
from backend.common.models.zebra_motionworks import ZebraMotionWorks
from backend.common.queries.match_query import MatchQuery
@api_authenticated
@validate_keys
@cached_public
def match(match_key: MatchKey, model_type: Optional[ModelType] = None) -> Response:
    """
    Returns details about one match, specified by |match_key|.

    When |model_type| is given, the response dict is filtered down to that
    model type's property subset.
    """
    track_call_after_response("match", match_key, model_type)

    # Named match_dict to avoid shadowing this function's own name (and the
    # Python 3.10 soft keyword `match`).
    match_dict = MatchQuery(match_key=match_key).fetch_dict(ApiMajorVersion.API_V3)
    if model_type is not None:
        match_dict = filter_match_properties([match_dict], model_type)[0]
    return profiled_jsonify(match_dict)
@api_authenticated
@validate_keys
@cached_public
def zebra_motionworks(match_key: MatchKey) -> Response:
    """
    Returns Zebra Motionworks data for a given match.
    """
    track_call_after_response("zebra_motionworks_match", match_key)

    # Missing records serialize as JSON null.
    record = ZebraMotionWorks.get_by_id(match_key)
    if record is None:
        return profiled_jsonify(None)
    return profiled_jsonify(record.data)
| mit | 6e7cb8ecfa85da531aa45994ca57548f | 33.8 | 83 | 0.765645 | 3.591743 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/web/handlers/suggestions/tests/suggest_team_media_review_controller_test.py | 1 | 8438 | import re
from typing import List
from urllib.parse import urlparse
import pytest
from bs4 import BeautifulSoup
from google.appengine.ext import ndb
from werkzeug.test import Client
from backend.common.consts.account_permission import AccountPermission
from backend.common.consts.media_type import MediaType
from backend.common.consts.suggestion_state import SuggestionState
from backend.common.models.media import Media
from backend.common.models.suggestion import Suggestion
from backend.common.models.team import Team
from backend.common.suggestions.suggestion_creator import (
SuggestionCreationStatus,
SuggestionCreator,
)
@pytest.fixture
def login_user_with_permission(login_user):
    """A logged-in user fixture granted the REVIEW_MEDIA permission."""
    login_user.permissions = [AccountPermission.REVIEW_MEDIA]
    return login_user
def get_suggestion_queue(web_client: Client) -> List[str]:
    """
    Fetch the team media review page and return the pending suggestion keys.

    Also sanity-checks that each rendered suggestion has accept/reject buttons
    and a year input.
    """
    response = web_client.get("/suggest/team/media/review")
    assert response.status_code == 200

    soup = BeautifulSoup(response.data, "html.parser")
    review_form = soup.find(id="review_media")
    assert review_form is not None

    queue = []
    for item in review_form.find_all(class_="suggestion-item"):
        buttons = {}
        for verdict in ("accept", "reject"):
            buttons[verdict] = item.find(
                "input",
                attrs={
                    "name": re.compile("accept_reject-.*"),
                    "value": re.compile(f"{verdict}::.*"),
                },
            )
            assert buttons[verdict] is not None
        year_input = item.find("input", attrs={"name": re.compile("year-.*")})
        assert year_input is not None
        # Button values look like "accept::<suggestion_key>".
        queue.append(buttons["accept"]["value"].split("::")[1])
    return queue
def createSuggestion(logged_in_user) -> str:
    """Suggest an imgur media for frc1124/2016 and return the suggestion key."""
    creation_status = SuggestionCreator.createTeamMediaSuggestion(
        logged_in_user.account_key, "http://imgur.com/foobar", "frc1124", "2016"
    )
    assert creation_status[0] == SuggestionCreationStatus.SUCCESS
    return Suggestion.render_media_key_name(2016, "team", "frc1124", "imgur", "foobar")
def test_login_redirect(web_client: Client) -> None:
    """Anonymous users are redirected to the login page."""
    response = web_client.get("/suggest/team/media/review")
    assert response.status_code == 302
    redirect_path = urlparse(response.headers["Location"]).path
    assert redirect_path == "/account/login"
def test_no_permissions(login_user, web_client: Client) -> None:
    """A logged-in user without REVIEW_MEDIA gets a 401 from the review page."""
    response = web_client.get("/suggest/team/media/review")
    assert response.status_code == 401
def test_nothing_to_review(login_user_with_permission, web_client: Client) -> None:
    """With no pending suggestions, the review queue renders empty."""
    queue = get_suggestion_queue(web_client)
    assert queue == []
def test_accept_suggestion(
    login_user_with_permission,
    ndb_stub,
    web_client: Client,
    taskqueue_stub,
) -> None:
    """Accepting a queued suggestion marks it reviewed and creates the Media."""
    suggestion_id = createSuggestion(login_user_with_permission)
    assert get_suggestion_queue(web_client) == [suggestion_id]

    resp = web_client.post(
        "/suggest/team/media/review",
        data={f"accept_reject-{suggestion_id}": f"accept::{suggestion_id}"},
        follow_redirects=True,
    )
    assert resp.status_code == 200

    # The suggestion is now marked as accepted...
    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED

    # ...and a corresponding (non-preferred) Media exists for the team.
    media = Media.get_by_id(Media.render_key_name(MediaType.IMGUR, "foobar"))
    assert media is not None
    assert media.year == 2016
    assert media.foreign_key == "foobar"
    assert media.media_type_enum == MediaType.IMGUR
    assert ndb.Key(Team, "frc1124") in media.references
    assert media.preferred_references == []
def test_accept_suggestion_change_year(
    login_user_with_permission,
    ndb_stub,
    web_client: Client,
    taskqueue_stub,
) -> None:
    """Accepting with an overridden year stores the Media under the new year."""
    suggestion_id = createSuggestion(login_user_with_permission)
    assert get_suggestion_queue(web_client) == [suggestion_id]

    resp = web_client.post(
        "/suggest/team/media/review",
        data={
            f"accept_reject-{suggestion_id}": f"accept::{suggestion_id}",
            f"year-{suggestion_id}": "2017",
        },
        follow_redirects=True,
    )
    assert resp.status_code == 200

    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED

    # Media is created under the reviewer-supplied year (2017), not the
    # originally-suggested one (2016).
    media = Media.get_by_id(Media.render_key_name(MediaType.IMGUR, "foobar"))
    assert media is not None
    assert media.year == 2017
    assert media.foreign_key == "foobar"
    assert media.media_type_enum == MediaType.IMGUR
    assert ndb.Key(Team, "frc1124") in media.references
    assert media.preferred_references == []
def test_accept_suggestion_as_preferred(
    login_user_with_permission,
    ndb_stub,
    web_client: Client,
    taskqueue_stub,
) -> None:
    """Accepting with the preferred checkbox set marks the new Media preferred."""
    suggestion_id = createSuggestion(login_user_with_permission)
    assert get_suggestion_queue(web_client) == [suggestion_id]

    resp = web_client.post(
        "/suggest/team/media/review",
        data={
            f"accept_reject-{suggestion_id}": f"accept::{suggestion_id}",
            "preferred_keys[]": [f"preferred::{suggestion_id}"],
        },
        follow_redirects=True,
    )
    assert resp.status_code == 200

    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED

    # The created Media carries both a plain reference and a preferred one.
    media = Media.get_by_id(Media.render_key_name(MediaType.IMGUR, "foobar"))
    assert media is not None
    assert media.year == 2016
    assert media.foreign_key == "foobar"
    assert media.media_type_enum == MediaType.IMGUR
    assert ndb.Key(Team, "frc1124") in media.references
    assert ndb.Key(Team, "frc1124") in media.preferred_references
def test_accept_suggestion_as_preferred_and_replace(
    login_user_with_permission,
    ndb_stub,
    web_client: Client,
    taskqueue_stub,
) -> None:
    """Marking preferred with a replacement demotes the previous preferred Media."""
    # Seed an existing preferred media for the same team/year.
    existing_preferred = Media(
        id=Media.render_key_name(MediaType.IMGUR, "baz"),
        foreign_key="baz",
        media_type_enum=MediaType.IMGUR,
        year=2016,
        preferred_references=[ndb.Key(Team, "frc1124")],
    )
    existing_preferred.put()

    suggestion_id = createSuggestion(login_user_with_permission)
    assert get_suggestion_queue(web_client) == [suggestion_id]

    resp = web_client.post(
        "/suggest/team/media/review",
        data={
            f"accept_reject-{suggestion_id}": f"accept::{suggestion_id}",
            "preferred_keys[]": [f"preferred::{suggestion_id}"],
            f"replace-preferred-{suggestion_id}": existing_preferred.key_name,
        },
        follow_redirects=True,
    )
    assert resp.status_code == 200

    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED

    # The accepted media becomes the preferred one for the team.
    media = Media.get_by_id(Media.render_key_name(MediaType.IMGUR, "foobar"))
    assert media is not None
    assert media.year == 2016
    assert media.foreign_key == "foobar"
    assert media.media_type_enum == MediaType.IMGUR
    assert ndb.Key(Team, "frc1124") in media.references
    assert ndb.Key(Team, "frc1124") in media.preferred_references

    # The previously-preferred media loses its preferred reference.
    demoted = Media.get_by_id(existing_preferred.key_name)
    assert demoted is not None
    assert ndb.Key(Team, "frc1124") not in demoted.preferred_references
def test_reject_suggestion(
    login_user_with_permission, ndb_stub, web_client: Client
) -> None:
    """Rejecting a suggestion marks it rejected and creates no Media."""
    suggestion_id = createSuggestion(login_user_with_permission)
    queue = get_suggestion_queue(web_client)
    assert queue == [suggestion_id]
    response = web_client.post(
        "/suggest/team/media/review",
        data={
            f"accept_reject-{suggestion_id}": f"reject::{suggestion_id}",
        },
        follow_redirects=True,
    )
    assert response.status_code == 200
    suggestion = Suggestion.get_by_id(suggestion_id)
    assert suggestion is not None
    assert suggestion.review_state == SuggestionState.REVIEW_REJECTED
    # Rejection must not create any Media entities
    medias = Media.query().fetch()
    assert medias == []
| mit | 29580ed0048ed210ae737d4082346c7e | 32.61753 | 87 | 0.669471 | 3.595228 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/models/tba_video.py | 1 | 1501 | from typing import List, Optional
from backend.common.models.keys import EventKey, MatchKey
class TBAVideo(object):
    """
    Same interface as the retired TBAVideo class.

    Builds videos.thebluealliance.net URLs for a match's archived media,
    choosing the best available filetype for each use case.
    """

    # URL template filled with (event_key, match_key, filetype).
    TBA_NET_VID_PATTERN = "http://videos.thebluealliance.net/%s/%s.%s"

    # Filetype preference orders, best first.
    THUMBNAIL_FILETYPES = ["jpg", "jpeg"]
    STREAMABLE_FILETYPES = ["mp4", "flv"]
    DOWNLOADABLE_FILETYPES = ["mp4", "mov", "avi", "wmv", "flv"]

    event_key: EventKey
    match_key: MatchKey
    # A list of filetypes where videos exist
    match_tba_videos: List[str]

    def __init__(
        self, event_key: EventKey, match_key: MatchKey, match_tba_videos: List[str]
    ):
        self.event_key = event_key
        self.match_key = match_key
        self.match_tba_videos = match_tba_videos

    @property
    def thumbnail_path(self) -> Optional[str]:
        return self._best_path_of(self.THUMBNAIL_FILETYPES)

    @property
    def streamable_path(self) -> Optional[str]:
        return self._best_path_of(self.STREAMABLE_FILETYPES)

    @property
    def downloadable_path(self) -> Optional[str]:
        return self._best_path_of(self.DOWNLOADABLE_FILETYPES)

    def _best_path_of(self, consider_filetypes: List[str]) -> Optional[str]:
        """Return the URL for the first available filetype, or None."""
        available = (
            filetype
            for filetype in consider_filetypes
            if filetype in self.match_tba_videos
        )
        best = next(available, None)
        if best is None:
            return None
        return self.TBA_NET_VID_PATTERN % (self.event_key, self.match_key, best)
| mit | 47a8b1846cfb7e271b39952ca9f9f659 | 29.02 | 83 | 0.612258 | 3.458525 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/web/handlers/district.py | 1 | 5798 | import datetime
import logging
from datetime import timedelta
from operator import itemgetter
from typing import List, Optional, Tuple
from flask import abort
from google.appengine.ext import ndb
from werkzeug.wrappers import Response
from backend.common.decorators import cached_public
from backend.common.flask_cache import make_cached_response
from backend.common.helpers.event_helper import EventHelper
from backend.common.helpers.season_helper import SeasonHelper
from backend.common.helpers.team_helper import TeamHelper
from backend.common.models.event_team import EventTeam
from backend.common.models.keys import DistrictAbbreviation, Year
from backend.common.queries.district_query import (
DistrictHistoryQuery,
DistrictQuery,
DistrictsInYearQuery,
)
from backend.common.queries.event_query import DistrictEventsQuery
from backend.common.queries.team_query import DistrictTeamsQuery, EventTeamsQuery
from backend.web.profiled_render import render_template
@cached_public
def district_detail(
    district_abbrev: DistrictAbbreviation, year: Optional[Year]
) -> Response:
    """
    Render the district detail page for |district_abbrev| in |year|.

    Kicks off several datastore queries asynchronously up front (events,
    district teams, district history, districts-in-year) and joins on them
    as the page data is assembled. Aborts with 404 for invalid years or
    unknown districts.
    """
    explicit_year = year is not None
    if year is None:
        year = SeasonHelper.get_current_season()
    valid_years = list(reversed(SeasonHelper.get_valid_years()))
    if year not in valid_years:
        abort(404)
    # District keys are "<year><abbrev>", e.g. "2020fim".
    district = DistrictQuery("{}{}".format(year, district_abbrev)).fetch()
    if not district:
        abort(404)
    events_future = DistrictEventsQuery(district.key_name).fetch_async()
    # needed for district teams
    district_teams_future = DistrictTeamsQuery(district.key_name).fetch_async()
    # needed for valid_years
    history_future = DistrictHistoryQuery(district.abbreviation).fetch_async()
    # needed for valid_districts
    districts_in_year_future = DistrictsInYearQuery(district.year).fetch_async()
    # needed for active team statuses
    live_events = []
    live_eventteams_futures = []
    current_year = False
    if year == datetime.datetime.now().year:  # Only show active teams for current year
        current_year = True
        live_events = EventHelper.week_events()
        for event in live_events:
            live_eventteams_futures.append(EventTeamsQuery(event.key_name).fetch_async())
    events = EventHelper.sorted_events(events_future.get_result())
    events_by_key = {}
    for event in events:
        events_by_key[event.key.id()] = event
    week_events = EventHelper.group_by_week(events)
    # (display_name, abbreviation) pairs for the district picker, sorted by name.
    valid_districts: List[Tuple[str, DistrictAbbreviation]] = []
    districts_in_year = districts_in_year_future.get_result()
    for dist in districts_in_year:
        valid_districts.append((dist.display_name, dist.abbreviation))
    valid_districts = sorted(valid_districts, key=itemgetter(0))
    teams = TeamHelper.sort_teams(district_teams_future.get_result())
    team_keys = set([t.key.id() for t in teams])
    num_teams = len(teams)
    # Split the team list into two display columns; the first column gets the
    # extra team when the count is odd.
    middle_value = num_teams // 2
    if num_teams % 2 != 0:
        middle_value += 1
    teams_a, teams_b = teams[:middle_value], teams[middle_value:]
    # Currently Competing Team Status
    event_team_keys = []
    for event, teams_future in zip(live_events, live_eventteams_futures):
        for team in teams_future.get_result():
            if team.key.id() in team_keys:
                event_team_keys.append(
                    ndb.Key(EventTeam, "{}_{}".format(event.key.id(), team.key.id()))
                )  # Should be in context cache
    ndb.get_multi(event_team_keys)  # Warms context cache
    live_events_with_teams = []
    for event, teams_future in zip(live_events, live_eventteams_futures):
        teams_and_statuses = []
        has_teams = False
        for team in teams_future.get_result():
            if team.key.id() in team_keys:
                has_teams = True
                event_team = EventTeam.get_by_id(
                    "{}_{}".format(event.key.id(), team.key.id())
                )  # Should be in context cache
                if event_team is None:
                    logging.info(
                        "No EventTeam for {}_{}".format(event.key.id(), team.key.id())
                    )
                    continue
                teams_and_statuses.append(
                    (team, event_team.status, event_team.status_strings)
                )
        if has_teams:
            teams_and_statuses.sort(key=lambda x: x[0].team_number)
            live_events_with_teams.append((event, teams_and_statuses))
    # Stable sorts applied in reverse priority: name, then start date, then
    # end date (the last sort wins, earlier sorts break ties).
    live_events_with_teams.sort(key=lambda x: x[0].name)
    live_events_with_teams.sort(
        key=lambda x: EventHelper.start_date_or_distant_future(x[0])
    )
    live_events_with_teams.sort(
        key=lambda x: EventHelper.end_date_or_distant_future(x[0])
    )
    # Get valid years
    district_history = history_future.get_result()
    valid_years = map(lambda d: d.year, district_history)
    valid_years = sorted(valid_years)
    rankings = district.rankings
    # Do not show district rankings for 2021
    if district.year == 2021:
        rankings = None
    template_values = {
        "explicit_year": explicit_year,
        "year": year,
        "valid_years": valid_years,
        "valid_districts": valid_districts,
        "district_name": district.display_name,
        "district_abbrev": district_abbrev,
        "week_events": week_events,
        "events_by_key": events_by_key,
        "rankings": rankings,
        "advancement": district.advancement,
        "num_teams": num_teams,
        "teams_a": teams_a,
        "teams_b": teams_b,
        "live_events_with_teams": live_events_with_teams,
    }
    # Cache more aggressively for historical seasons, which no longer change.
    return make_cached_response(
        render_template("district_details.html", template_values),
        ttl=timedelta(minutes=15) if current_year else timedelta(days=1),
    )
| mit | 6646b748baccae14b2bf36284d38f94d | 35.696203 | 87 | 0.656951 | 3.653434 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/common/consts/district_point_values.py | 1 | 5542 | from backend.common.consts.award_type import AwardType
class DistrictPointValues:
    """
    A class that contains various district point constants over the years.

    Documents containing point systems:
    - 2016: same as 2015
    - 2015: http://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/archive/2015/AdminManual20150407.pdf
    - 2014: http://www.firstinmichigan.org/FRC_2014/District_Standard_Points_Ranking_System.pdf
    - 2013: http://www.firstinmichigan.org/FRC_2013/2013_Rules_Supplement.pdf
    - 2012: http://www.firstinmichigan.org/FRC_2012/2012_Rules_Supplement.pdf
    - 2011: http://www.firstinmichigan.org/FRC_2011/2011_Rules_Supplement.pdf
    - 2010: http://www.firstinmichigan.org/FRC_2010/2010_Update_3.pdf
    - 2009: https://www.chiefdelphi.com/forums/showpost.php?p=759453&postcount=67

    Dict-valued constants map a year to that year's value; the matching
    *_DEFAULT constant applies to any year not present in the dict.
    """
    STANDARD_MULTIPLIER = 1
    # Since 2014, points earned at District CMP has 3x bonus
    # NOTE(review): "MULIPLIER" [sic] — name kept for compatibility with callers.
    DISTRICT_CMP_MULIPLIER_DEFAULT = 3
    DISTRICT_CMP_MULTIPLIER = {2013: 1, 2012: 1, 2011: 1, 2010: 1, 2009: 1}
    # In years prior to 2015, teams get points for a win/tie in a qualification match
    MATCH_WIN = 2
    MATCH_TIE = 1
    # Used to determine alliance selection points
    # Captain/First pick get 17-alliance number, second pick gets 17 - draft order
    ALLIANCE_MAX_DEFAULT = 17
    # In 2009 - 2013 (except 2010), second pick teams got fewer elim round advancement points as captain/pick 1
    # TODO many of these events don't have alliance selection data, so we can't factor this in
    ELIM_SECOND_PICK_MULTIPLIER_DEFAULT = 1
    ELIM_SECOND_PICK_MULTIPLIER = {2013: 0.8, 2012: 0.8, 2011: 0.8, 2009: 0.8}
    # Used to determine elim/playoff points.
    # Teams on each round's winning alliance gets points per match won
    # For the 2015 game, these are awarded for participating in a qf/sf match, since there were no wins
    QF_WIN_DEFAULT = 5
    QF_WIN = {2015: 5.0}
    SF_WIN_DEFAULT = 5
    SF_WIN = {
        2015: 3.3,
    }
    F_WIN_DEFAULT = 5
    F_WIN = {2015: 5.0}
    # Chairman's Award
    CHAIRMANS_DEFAULT = 10
    CHAIRMANS = {2013: 0, 2012: 0, 2011: 0, 2009: 0}
    # Engineering Inspiration and Rookie All-Star
    EI_AND_RAS_DEFAULT = 8
    OTHER_AWARD_DEFAULT = 5
    # Points for playing your first two events as
    # back-to-back single day events
    BACK_TO_BACK_2022_BONUS = 2
    # Pre-2014 Awards, all worth either 5 or 2 points.
    # Keyed by year; each year lists the AwardTypes worth 5 points that season.
    LEGACY_5_PT_AWARDS = {
        2013: [
            AwardType.INDUSTRIAL_DESIGN,
            AwardType.QUALITY,
            AwardType.ENGINEERING_EXCELLENCE,
            AwardType.INNOVATION_IN_CONTROL,
            AwardType.CREATIVITY,
        ],
        2012: [
            AwardType.INDUSTRIAL_DESIGN,
            AwardType.QUALITY,
            AwardType.ENGINEERING_EXCELLENCE,
            AwardType.INNOVATION_IN_CONTROL,
            AwardType.CREATIVITY,
            AwardType.ENTREPRENEURSHIP,
            AwardType.COOPERTITION,
        ],
        2011: [
            AwardType.INDUSTRIAL_DESIGN,
            AwardType.QUALITY,
            AwardType.ENGINEERING_EXCELLENCE,
            AwardType.INNOVATION_IN_CONTROL,
            AwardType.CREATIVITY,
            AwardType.ENTREPRENEURSHIP,
            AwardType.COOPERTITION,
            AwardType.EXCELLENCE_IN_DESIGN,
        ],
        2010: [
            AwardType.INDUSTRIAL_DESIGN,
            AwardType.QUALITY,
            AwardType.ENGINEERING_EXCELLENCE,
            AwardType.INNOVATION_IN_CONTROL,
            AwardType.CREATIVITY,
            AwardType.ROOKIE_ALL_STAR,
            AwardType.ENGINEERING_INSPIRATION,
            AwardType.ENTREPRENEURSHIP,
            AwardType.COOPERTITION,
        ],
        2009: [
            AwardType.INDUSTRIAL_DESIGN,
            AwardType.QUALITY,
            AwardType.DRIVING_TOMORROWS_TECHNOLOGY,
            AwardType.INNOVATION_IN_CONTROL,
            AwardType.CREATIVITY,
        ],
    }
    # AwardTypes worth 2 points per season, keyed by year.
    LEGACY_2_PT_AWARDS = {
        2013: [
            AwardType.SPIRIT,
            AwardType.GRACIOUS_PROFESSIONALISM,
            AwardType.IMAGERY,
            AwardType.HIGHEST_ROOKIE_SEED,
            AwardType.SAFETY,
            AwardType.JUDGES,
            AwardType.ROOKIE_INSPIRATION,
        ],
        2012: [
            AwardType.SPIRIT,
            AwardType.GRACIOUS_PROFESSIONALISM,
            AwardType.IMAGERY,
            AwardType.HIGHEST_ROOKIE_SEED,
            AwardType.SAFETY,
            AwardType.JUDGES,
            AwardType.ROOKIE_INSPIRATION,
            AwardType.WEBSITE,
        ],
        2011: [
            AwardType.SPIRIT,
            AwardType.GRACIOUS_PROFESSIONALISM,
            AwardType.IMAGERY,
            AwardType.HIGHEST_ROOKIE_SEED,
            AwardType.SAFETY,
            AwardType.JUDGES,
            AwardType.ROOKIE_INSPIRATION,
            AwardType.WEBSITE,
        ],
        2010: [
            AwardType.SPIRIT,
            AwardType.GRACIOUS_PROFESSIONALISM,
            AwardType.IMAGERY,
            AwardType.HIGHEST_ROOKIE_SEED,
            AwardType.SAFETY,
            AwardType.JUDGES,
            AwardType.ROOKIE_INSPIRATION,
            AwardType.WEBSITE,
        ],
        2009: [
            AwardType.SPIRIT,
            AwardType.GRACIOUS_PROFESSIONALISM,
            AwardType.IMAGERY,
            AwardType.JUDGES,
            AwardType.ROOKIE_INSPIRATION,
            AwardType.SAFETY,
            AwardType.WSU_AIM_HIGHER,
            AwardType.WEBSITE,
        ],
    }
| mit | 21ef41b6102a37a612c23d8cff4d4acd | 33.42236 | 148 | 0.608625 | 3.216483 | false | false | false | false |
the-blue-alliance/the-blue-alliance | ops/dev/vagrant/create_auth_emulator_accounts.py | 1 | 2099 | import argparse
# Command-line interface for the emulator bootstrap script.
parser = argparse.ArgumentParser(
    # Fixed typo: "deafult" -> "default" in the user-facing description.
    description="Create default users in the Firebase authentication emulator."
)
parser.add_argument(
    "--project", dest="project", required=True, help="project ID for auth emulator"
)
parser.add_argument(
    "--emulator-host",
    dest="emulator_host",
    default="localhost:9099",
    help="hostname + port for auth emulator (default: localhost:9099)",
)
def build_import_user_record(auth, uid, email, display_name, custom_claims):
    """Build an ImportUserRecord linked to both google.com and apple.com providers.

    `auth` is the (already-imported) firebase_admin.auth module; it is passed
    in so this helper can run before/without the module-level firebase setup.
    """
    providers = [
        auth.UserProvider(
            uid,
            provider_id=provider_id,
            email=email,
            display_name=display_name,
        )
        for provider_id in ("google.com", "apple.com")
    ]
    return auth.ImportUserRecord(
        uid,
        email=email,
        display_name=display_name,
        provider_data=providers,
        custom_claims=custom_claims,
    )


def main():
    """Create a default admin and a default regular user in the auth emulator."""
    args = parser.parse_args()

    import os

    # Must be set before firebase_admin is imported so the SDK talks to the
    # emulator instead of production.
    os.environ["FIREBASE_AUTH_EMULATOR_HOST"] = args.emulator_host

    import firebase_admin

    firebase_admin.initialize_app(options={"projectId": args.project})

    from firebase_admin import auth

    admin_record = build_import_user_record(
        auth,
        uid="1",
        email="admin@thebluealliance.com",
        display_name="TBA Admin",
        custom_claims={"admin": True},
    )
    user_record = build_import_user_record(
        auth,
        uid="2",
        email="user@thebluealliance.com",
        display_name="TBA User",
        custom_claims={},
    )
    auth.import_users([admin_record, user_record])
if __name__ == "__main__":
main()
| mit | 3f1491e6a9c4e8d0953a738fbaca248a | 24.289157 | 83 | 0.546451 | 3.982922 | false | false | false | false |
the-blue-alliance/the-blue-alliance | old_py2/controllers/gcm/gcm.py | 5 | 11587 | ################################################################################
# gae_python_gcm/gcm.py
#
# In Python, for Google App Engine
# Originally ported from https://github.com/Instagram/node2dm
# Extended to support new GCM API.
# Greg Bayer <greg@gbayer.com>
#
# From: https://github.com/gregbayer/gae-python-gcm
#
###############################################################################
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from datetime import datetime, timedelta
import logging
import re
import json
import importlib
from google.appengine.api import taskqueue # Google App Engine specific
from helpers.push_helper import PushHelper
from models.sitevar import Sitevar
class GCMMessage:
    """A single GCM/FCM push message addressed to one or more device tokens."""
    device_tokens = None
    notification = None
    collapse_key = None
    delay_while_idle = None
    time_to_live = None
    priority = None
    def __init__(self, device_tokens, notification, collapse_key=None, delay_while_idle=None, time_to_live=None, priority=None):
        """Normalize device_tokens to a list and store the GCM send options."""
        if isinstance(device_tokens, list):
            self.device_tokens = device_tokens
        else:
            self.device_tokens = [device_tokens]
        self.notification = notification
        self.collapse_key = collapse_key
        self.delay_while_idle = delay_while_idle
        self.time_to_live = time_to_live
        self.priority = priority
    def __unicode__(self):
        # Python 2-style string representation (this is a legacy py2 module).
        # NOTE(review): priority is not included here.
        return "%s:%s:%s:%s:%s" % (repr(self.device_tokens), repr(self.notification), repr(self.collapse_key), repr(self.delay_while_idle), repr(self.time_to_live))
    def json_string(self):
        """Serialize this message to the GCM JSON request body.

        Raises if device_tokens is empty or not a list.
        """
        if not self.device_tokens or not isinstance(self.device_tokens, list):
            logging.error('FCMMessage generate_json_string error. Invalid device tokens: ' + repr(self))
            raise Exception('FCMMessage generate_json_string error. Invalid device tokens.')
        json_dict = {}
        json_dict['registration_ids'] = self.device_tokens
        # If message is a dict, send each key individually
        # Else, send entire message under data key
        if isinstance(self.notification, dict):
            json_dict['data'] = self.notification
        else:
            json_dict['data'] = {'data': self.notification}
        # Optional GCM fields are only included when set/truthy.
        if self.collapse_key:
            json_dict['collapse_key'] = self.collapse_key
        if self.delay_while_idle:
            json_dict['delay_while_idle'] = self.delay_while_idle
        if self.time_to_live:
            json_dict['time_to_live'] = self.time_to_live
        if self.priority:
            json_dict['priority'] = self.priority
        json_str = json.dumps(json_dict)
        return json_str
# Instantiate to send GCM message. No initialization required.
class GCMConnection:
    """Sends GCMMessages to the FCM legacy HTTP endpoint, with GAE-taskqueue
    based retries. Legacy Python 2 module (uses urllib2 and py2 except syntax).
    """
    ################################ Config ###############################
    # settings.py
    #
    # GCM_CONFIG = {'gcm_api_key': '',
    #               'delete_bad_token_callback_func': lambda x: x,
    #               'update_token_callback_func': lambda x: x}
    ##############################################################################
    def __init__(self):
        # Server API key is stored in the gcm.serverKey sitevar; fail fast if missing.
        self.LOCALHOST = False
        self.SERVER_KEY = Sitevar.get_by_id('gcm.serverKey')
        if self.SERVER_KEY is None:
            raise Exception("Missing sitevar: gcm.serverKey. Can't send FCM messages.")
        self.GCM_CONFIG = {'gcm_api_key': self.SERVER_KEY.contents['gcm_key']}
        self.GOOGLE_LOGIN_URL = 'https://www.google.com/accounts/ClientLogin'
        # Can't use https on localhost due to Google cert bug
        self.GOOGLE_GCM_SEND_URL = 'https://fcm.googleapis.com/fcm/send'
        self.GCM_QUEUE_NAME = 'gcm-retries'
        self.GCM_QUEUE_CALLBACK_URL = '/gae_python_gcm/send_request'
    # Call this to send a push notification
    def notify_device(self, message, deferred=False):
        """Send |message| now, or enqueue it for later when deferred=True."""
        self._submit_message(message, deferred=deferred)
    ##### Public Utils #####
    def debug(self, option):
        """Tiny debug command interface; supports 'help' and 'stats'."""
        if option == "help":
            return "Commands: help stats\n"
        elif option == "stats":
            output = ''
            # resp += "uptime: " + elapsed + " seconds\n"
            return output
        else:
            return "Invalid command\nCommands: help stats\n"
    # Hooks - Override to change functionality #####
    def delete_bad_token(self, bad_device_token):
        PushHelper.delete_bad_gcm_token(bad_device_token)
    def update_token(self, old_device_token, new_device_token):
        PushHelper.update_token(old_device_token, new_device_token)
    # Currently unused
    def login_complete(self):
        # Retries are handled by the gae task queue
        # self.retry_pending_messages()
        pass
    # Add message to queue
    def _requeue_message(self, message):
        # Enqueues a retry task; the taskqueue handler re-posts to
        # GCM_QUEUE_CALLBACK_URL with the original message parameters.
        taskqueue.add(queue_name=self.GCM_QUEUE_NAME, url=self.GCM_QUEUE_CALLBACK_URL, params={'device_token': message.device_tokens, 'collapse_key': message.collapse_key, 'notification': message.notification})
    # If send message now or add it to the queue
    def _submit_message(self, message, deferred=False):
        if deferred:
            self._requeue_message(message)
        else:
            self._send_request(message)
    # Try sending message now
    def _send_request(self, message):
        """POST |message| to FCM and process per-token results/errors."""
        import urllib2
        if message.device_tokens is None or message.notification is None:
            logging.error('Message must contain device_tokens and notification.')
            return False
        # Check for resend_after
        # NOTE(review): retry_after is always None here, so this branch is dead
        # code — presumably a leftover from a removed backoff mechanism.
        retry_after = None
        if retry_after is not None and retry_after > datetime.now():
            logging.warning('RETRY_AFTER: ' + repr(retry_after) + ', requeueing message: ' + repr(message))
            self._requeue_message(message)
            return
        # Build request
        headers = {
            'Authorization': 'key=' + self.GCM_CONFIG['gcm_api_key'],
            'Content-Type': 'application/json'
        }
        gcm_post_json_str = ''
        try:
            gcm_post_json_str = message.json_string()
        except:
            logging.exception('Error generating json string for message: ' + repr(message))
            return
        logging.info('Sending gcm_post_body: ' + repr(gcm_post_json_str))
        request = urllib2.Request(self.GOOGLE_GCM_SEND_URL, gcm_post_json_str, headers)
        # Post
        try:
            resp = urllib2.urlopen(request)
            resp_json_str = resp.read()
            resp_json = json.loads(resp_json_str)
            logging.info('_send_request() resp_json: ' + repr(resp_json))
            # multicast_id = resp_json['multicast_id']
            # success = resp_json['success']
            failure = resp_json['failure']
            canonical_ids = resp_json['canonical_ids']
            results = resp_json['results']
            # If the value of failure and canonical_ids is 0, it's not necessary to parse the remainder of the response.
            if failure == 0 and canonical_ids == 0:
                # Success, nothing to do
                return
            else:
                # Process result messages for each token (result index matches original token index from message)
                result_index = 0
                for result in results:
                    if 'message_id' in result and 'registration_id' in result:
                        # Update device token
                        try:
                            old_device_token = message.device_tokens[result_index]
                            new_device_token = result['registration_id']
                            self.update_token(old_device_token, new_device_token)
                        except:
                            logging.exception('Error updating device token')
                    elif 'error' in result:
                        # Handle GCM error
                        error_msg = result.get('error')
                        try:
                            device_token = message.device_tokens[result_index]
                            self._on_error(device_token, error_msg, message)
                        except:
                            logging.exception('Error handling GCM error: ' + repr(error_msg))
                    result_index += 1
        # NOTE(review): py2-only except syntax; must become "as e" for py3.
        except urllib2.HTTPError, e:
            if e.code == 400:
                logging.error('400, Invalid GCM JSON message: ' + repr(gcm_post_json_str))
                logging.exception(str(e.code) + " " + e.msg + " " + e.read())
            elif e.code == 401:
                logging.error('401, Error authenticating with GCM. Retrying message. Might need to fix auth key!')
                self._requeue_message(message)
            elif e.code == 500:
                logging.error('500, Internal error in the GCM server while trying to send message: ' + repr(gcm_post_json_str))
            elif e.code == 503:
                # NOTE(review): reads Retry-After from `resp`, which is unbound
                # when urlopen raised; retry_timestamp is also computed but unused.
                retry_seconds = int(resp.headers.get('Retry-After')) or 10
                logging.error('503, Throttled. Retry after delay. Requeuing message. Delay in seconds: ' + str(retry_seconds))
                retry_timestamp = datetime.now() + timedelta(seconds=retry_seconds)
                self._requeue_message(message)
            else:
                logging.exception('Unexpected HTTPError: ' + str(e.code) + " " + e.msg + " " + e.read())
    def _on_error(self, device_token, error_msg, message):
        """Dispatch on a per-token GCM error string from the send response."""
        if error_msg == "MissingRegistration":
            logging.error('ERROR: GCM message sent without device token. This should not happen!')
        elif error_msg == "InvalidRegistration":
            self.delete_bad_token(device_token)
        elif error_msg == "MismatchSenderId":
            logging.error('ERROR: Device token is tied to a different sender id: ' + repr(device_token))
            self.delete_bad_token(device_token)
        elif error_msg == "NotRegistered":
            self.delete_bad_token(device_token)
        elif error_msg == "MessageTooBig":
            logging.error("ERROR: GCM message too big (max 4096 bytes).")
        elif error_msg == "InvalidTtl":
            logging.error("ERROR: GCM Time to Live field must be an integer representing a duration in seconds between 0 and 2,419,200 (4 weeks).")
        # NOTE(review): duplicate branch — "MessageTooBig" is already handled
        # above, so this elif can never be reached.
        elif error_msg == "MessageTooBig":
            logging.error("ERROR: GCM message too big (max 4096 bytes).")
        elif error_msg == "Unavailable":
            retry_seconds = 10
            logging.error('ERROR: GCM Unavailable. Retry after delay. Requeuing message. Delay in seconds: ' + str(retry_seconds))
            # NOTE(review): retry_timestamp is computed but never used.
            retry_timestamp = datetime.now() + timedelta(seconds=retry_seconds)
            self._requeue_message(message)
        elif error_msg == "InternalServerError":
            logging.error("ERROR: Internal error in the GCM server while trying to send message: " + repr(message))
        else:
            logging.error("Unknown error: %s for device token: %s" % (repr(error_msg), repr(device_token)))
| mit | 9c97af8bf1394d52dc7fdb8be2433a5c | 39.799296 | 210 | 0.581427 | 4.192113 | false | false | false | false |
the-blue-alliance/the-blue-alliance | src/backend/tasks_io/datafeeds/parsers/parser_html.py | 1 | 2085 | import abc
# import re
from typing import TypeVar
from backend.tasks_io.datafeeds.parsers.parser_base import ParserBase
TParsedResponse = TypeVar("TParsedResponse")
class ParserHTML(ParserBase[TParsedResponse]):
    """
    Provides a basic structure for parsing pages.
    Parsers are not allowed to return Model objects, only dictionaries.
    """
    @abc.abstractmethod
    def parse(self, response: bytes) -> TParsedResponse:
        """
        Given a chunk of HTML, return a (result dictionary, more_pages) tuple
        """
        # Abstract: concrete subclasses implement the actual HTML parsing.
        ...
# @classmethod
# def _recurseUntilString(cls, node):
# """
# Digs through HTML that Word made worse.
# Written to deal with http://www2.usfirst.org/2011comp/Events/cmp/matchresults.html
# """
# from bs4 import NavigableString
# if node.string is not None:
# return re.sub('\s+', ' ', node.string.replace(u'\xa0', ' ')).strip() # remove multiple whitespaces
# if isinstance(node, NavigableString):
# return node
# if hasattr(node, 'contents'):
# results = []
# for content in node.contents:
# result = self._recurseUntilString(content)
# if result is not None:
# result = result.strip().replace('\r', '').replace('\n', '').replace(' ', ' ')
# if result is not None and result != "":
# results.append(result)
# if results != []:
# return ' '.join(results)
# return None
#
# @classmethod
# def _html_unescape(cls, html):
# import HTMLParser
# h = HTMLParser.HTMLParser()
# return h.unescape(html)
#
# @classmethod
# def _html_unescape_items(cls, d):
# """
# Unescapes HTML in a dict
# """
# import HTMLParser
# h = HTMLParser.HTMLParser()
# for key, value in d.items():
# try:
# d[key] = h.unescape(value)
# except TypeError:
# continue
| mit | d59ddfc20471e78b5761c90c847451cb | 31.076923 | 113 | 0.548201 | 3.979008 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.