gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
"""
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from ..lexer import RegexLexer, bygroups
from ..token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
    """
    A `REBOL <http://www.rebol.com/>`_ lexer.

    .. versionadded:: 1.1
    """
    name = 'REBOL'
    aliases = ['rebol']
    filenames = ['*.r', '*.r3', '*.reb']
    mimetypes = ['text/x-rebol']
    flags = re.IGNORECASE | re.MULTILINE

    # REBOL caret escape such as ^(1F); shared by the string-like states below.
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        """Classify a bare REBOL word into the most specific token type.

        Tried in order: set-word (word:), builtin natives, series/math
        functions, mezzanine keywords, control-flow words, the REBOL
        header word, to-* conversions, operators, then the lexical word
        categories (?, !, lit-word, issue, file) and finally a plain
        variable.
        """
        word = match.group()
        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word
        elif re.match(
            r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
            r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
            r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
            r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
            r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
            r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
            r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
            r'while|compress|decompress|secure|open|close|read|read-io|'
            r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
            r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
            r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
            r'browse|launch|stats|get-modes|set-modes|to-local-file|'
            r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
            r'hide|draw|show|size-text|textinfo|offset-to-caret|'
            r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
            r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
            r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
            r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
            r'rsa-encrypt)$', word):
            yield match.start(), Name.Builtin, word
        elif re.match(
            r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
            r'minimum|maximum|negate|complement|absolute|random|head|tail|'
            r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
            r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
            r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
            r'copy)$', word):
            yield match.start(), Name.Function, word
        elif re.match(
            r'(error|source|input|license|help|install|echo|Usage|with|func|'
            r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
            r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
            r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
            r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
            r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
            r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
            r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
            r'write-user|save-user|set-user-name|protect-system|parse-xml|'
            r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
            r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
            r'request-dir|center-face|do-events|net-error|decode-url|'
            r'parse-header|parse-header-date|parse-email-addrs|import-email|'
            r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
            r'find-key-face|do-face|viewtop|confine|find-window|'
            r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
            r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
            r'read-thru|load-thru|do-thru|launch-thru|load-image|'
            r'request-download|do-face-alt|set-font|set-para|get-style|'
            r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
            r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
            r'resize-face|load-stock|load-stock-block|notify|request|flash|'
            r'request-color|request-pass|request-text|request-list|'
            r'request-date|request-file|dbug|editor|link-relative-path|'
            r'emailer|parse-error)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match(
            r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
            r'return|exit|break)$', word):
            yield match.start(), Name.Exception, word
        elif re.match('REBOL$', word):
            yield match.start(), Generic.Heading, word
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word
        # raw strings here: the escapes (\+, \*, \?) are regex syntax, not
        # string escapes — non-raw literals would trigger invalid-escape
        # warnings on modern Python
        elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
                      word):
            yield match.start(), Operator, word
        elif re.match(r".*\?$", word):
            yield match.start(), Keyword, word
        elif re.match(r".*\!$", word):
            yield match.start(), Keyword.Type, word
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word  # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word  # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word  # file
        else:
            yield match.start(), Name.Variable, word

    tokens = {
        # everything before the "REBOL [" header is treated as comment
        'root': [
            (r'[^R]+', Comment),
            (r'REBOL\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
             r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),    # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
            (r'\d+X\d+', Keyword.Constant),                      # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),         # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{")\s/[\]]*', Name.Attribute),
            (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }

    def analyse_text(text):
        """
        Check if code contains a REBOL header, so it is probably not R code.
        """
        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code starts with REBOL header
            return 1.0
        # BUG FIX: the '[' was unescaped here, which raises re.error
        # ("unterminated character set") the first time this lexer competes
        # in guess_lexer(); it must be escaped as in the branch above.
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code contains REBOL header but also some text before it
            return 0.5
class RedLexer(RegexLexer):
    """
    A `Red-language <http://www.red-lang.org/>`_ lexer.

    .. versionadded:: 2.0
    """
    name = 'Red'
    aliases = ['red', 'red/system']
    filenames = ['*.red', '*.reds']
    mimetypes = ['text/x-red', 'text/x-red-system']
    flags = re.IGNORECASE | re.MULTILINE

    # Red caret escape such as ^(1F); shared by the string-like states below.
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        """Classify a bare Red word into the most specific token type.

        Tried in order: set-word (word:), builtins, actions, literal
        constants, preprocessor directives, control words, type-test
        words, calling conventions, to-* conversions, operators, then the
        lexical word categories (!, lit-word, issue, file, get-word) and
        finally a plain variable.
        """
        word = match.group()
        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word
        elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
                      r'foreach|forall|func|function|does|has|switch|'
                      r'case|reduce|compose|get|set|print|prin|equal\?|'
                      r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
                      r'greater-or-equal\?|same\?|not|type\?|stats|'
                      r'bind|union|replace|charset|routine)$', word):
            yield match.start(), Name.Builtin, word
        elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
                      r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
                      r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
                      r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
                      r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
                      r'update|write)$', word):
            yield match.start(), Name.Function, word
        elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
                      r'none|crlf|dot|null-byte)$', word):
            yield match.start(), Name.Builtin.Pseudo, word
        elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
                      r'#switch|#default|#get-definition)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
                      r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
                      r'quote|forever)$', word):
            yield match.start(), Name.Exception, word
        elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
                      r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
                      r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
                      r'any-struct\?|none\?|word\?|any-series\?)$', word):
            yield match.start(), Keyword, word
        elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word
        # raw strings: \+ and \* are regex syntax, not string escapes.
        # NOTE(review): the '>%' alternative looks suspicious (possibly a
        # garbled '>%%|%') — confirm against upstream Pygments before relying
        # on operator classification of '%'.
        elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
                      r'<<<|>>>|<<|>>|<|>%)$', word):
            yield match.start(), Operator, word
        elif re.match(r".*\!$", word):
            yield match.start(), Keyword.Type, word
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word  # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word  # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word  # file
        elif re.match(":.*", word):
            yield match.start(), Generic.Subheading, word  # get-word
        else:
            yield match.start(), Name.Variable, word

    tokens = {
        # everything before the "Red [" / "Red/System [" header is comment
        'root': [
            (r'[^R]+', Comment),
            (r'Red/System\s+\[', Generic.Strong, 'script'),
            (r'Red\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f\s]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
             bygroups(Number.Hex, Name.Variable, Whitespace)),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
             r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),   # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
            (r'\d+X\d+', Keyword.Constant),                      # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),         # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{^")\s/[\]]*', Name.Attribute),
            (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }
| |
import browser.html
import re
class URL:
    """A markdown reference target: an href plus an optional title.

    The title, when present, may be wrapped in double quotes, single
    quotes, or parentheses; the wrapper is stripped.
    """

    def __init__(self, src):
        parts = src.split(maxsplit=1)
        self.href = parts[0]
        self.alt = ''
        if len(parts) == 2:
            title = parts[1]
            first, last = title[0], title[-1]
            if (first, last) in (('"', '"'), ("'", "'"), ('(', ')')):
                self.alt = title[1:-1]
class CodeBlock:
    """A literal code section rendered as <pre>, with markdown disabled."""

    def __init__(self, line):
        self.lines = [line]

    def to_html(self):
        # a leading fence line (```...) is dropped, not rendered
        if self.lines[0].startswith("`"):
            self.lines.pop(0)
        text = unmark(escape('\n'.join(self.lines)))
        return '<pre class="marked">%s</pre>\n' % text, []
class Marked:
    """A span of ordinary text to be run through the markdown converter."""

    def __init__(self, line=''):
        self.line = line
        self.children = []

    def to_html(self):
        """Render this section; returns (html, scripts)."""
        return apply_markdown(self.line)
# Link-reference table filled by mark(): maps a lowercased reference key
# to a URL instance (href + optional title).
refs = {}
# Matches a reference-definition line of the form "[key]: target".
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
    """Convert markdown source to HTML.

    Returns ``(html, scripts)`` where *scripts* collects the bodies of
    any <script> elements found in *src*.  Side effect: resets and
    repopulates the module-level ``refs`` table of link references.
    """
    global refs
    refs = {}
    # split source in sections
    # sections can be :
    # - a block-level HTML element (markdown syntax will not be processed)
    # - a script
    # - a span-level HTML tag (markdown syntax will be processed)
    # - a code block

    # normalise line feeds
    src = src.replace('\r\n', '\n')

    # setext headers: a line underlined with = or - becomes # / ##
    src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
    src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)

    lines = src.split('\n')

    i = bq = 0
    ul = ol = 0
    while i < len(lines):
        # enclose lines starting by > in a blockquote
        if lines[i].startswith('>'):
            nb = 1
            while nb < len(lines[i]) and lines[i][nb] == '>':
                nb += 1
            lines[i] = lines[i][nb:]
            if nb > bq:
                lines.insert(i, '<blockquote>' * (nb - bq))
                i += 1
                bq = nb
            elif nb < bq:
                lines.insert(i, '</blockquote>' * (bq - nb))
                i += 1
                bq = nb
        elif bq > 0:
            # first non-'>' line closes all open blockquotes
            lines.insert(i, '</blockquote>' * bq)
            i += 1
            bq = 0

        # unordered lists
        if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
                and (i == 0 or ul or not lines[i - 1].strip()):
            # line indentation indicates nesting level
            nb = 1 + len(lines[i]) - len(lines[i].lstrip())
            lines[i] = '<li>' + lines[i][1 + nb:]
            if nb > ul:
                lines.insert(i, '<ul>' * (nb - ul))
                i += 1
            elif nb < ul:
                lines.insert(i, '</ul>' * (ul - nb))
                i += 1
            ul = nb
        elif ul:
            lines.insert(i, '</ul>' * ul)
            i += 1
            ul = 0

        # ordered lists
        mo = re.search(r'^(\d+\.)', lines[i])
        if mo:
            if not ol:
                lines.insert(i, '<ol>')
                i += 1
            lines[i] = '<li>' + lines[i][len(mo.groups()[0]):]
            ol = 1
        elif ol:
            lines.insert(i, '</ol>')
            i += 1
            ol = 0
        i += 1

    sections = []
    scripts = []
    section = Marked()

    i = 0
    while i < len(lines):
        line = lines[i]
        # BUG FIX: the code-block test used startswith(' ') (one space)
        # while the slices below strip four characters; a code block is a
        # run of 4-space-indented lines.
        if line.strip() and line.startswith('    '):
            if isinstance(section, Marked) and section.line:
                sections.append(section)
            section = CodeBlock(line[4:])
            j = i + 1
            while j < len(lines) and lines[j].strip() \
                    and lines[j].startswith('    '):
                section.lines.append(lines[j][4:])
                j += 1
            sections.append(section)
            section = Marked()
            i = j
            continue
        elif line.lower().startswith('<script'):
            # scripts are extracted verbatim and blanked out of the text
            if isinstance(section, Marked) and section.line:
                sections.append(section)
                section = Marked()
            j = i + 1
            while j < len(lines):
                if lines[j].lower().startswith('</script>'):
                    scripts.append('\n'.join(lines[i + 1:j]))
                    for k in range(i, j + 1):
                        lines[k] = ''
                    break
                j += 1
            i = j
            continue
        else:
            mo = re.search(ref_pattern, line)
            if mo is not None:
                # reference definition: record it, emit nothing
                if isinstance(section, Marked) and section.line:
                    sections.append(section)
                    section = Marked()
                key = mo.groups()[0]
                value = URL(mo.groups()[1])
                refs[key.lower()] = value
            else:
                if line.strip():
                    if section.line:
                        section.line += ' '
                    section.line += line
                else:
                    sections.append(section)
                    section = Marked()
        i += 1

    # BUG FIX: a source not ending in a blank line used to drop its last
    # section; flush it explicitly.
    if isinstance(section, Marked) and section.line:
        sections.append(section)

    res = ''
    for section in sections:
        mk, _scripts = section.to_html()
        res += '<p>' + mk + '\n'
        scripts += _scripts
    return res, scripts
def escape(czone):
    """HTML-escape a code zone so it renders literally.

    '&' is replaced first so the entities inserted for '<' and '>' are
    not themselves re-escaped.
    """
    # BUG FIX: these replacements had degenerated into no-op
    # self-replacements (entity text lost); restore the HTML entities.
    czone = czone.replace('&', '&amp;')
    czone = czone.replace('<', '&lt;')
    czone = czone.replace('>', '&gt;')
    return czone
def s_escape(mo):
    """re.sub callback: HTML-escape the matched span via escape()."""
    # mo.group(0) is exactly mo.string[mo.start():mo.end()]
    return escape(mo.group(0))
def unmark(code_zone):
    """Neutralise '_' inside inline code so the emphasis pass ignores it."""
    # BUG FIX: the replacement had degenerated into a no-op
    # self-replacement; restore the character entity for '_'.
    code_zone = code_zone.replace('_', '&#95;')
    return code_zone
def s_unmark(mo):
    """re.sub callback: neutralise '_' inside a matched inline-code span."""
    # mo.group(0) is exactly mo.string[mo.start():mo.end()]
    code_zone = mo.group(0)
    # BUG FIX: restore the '&#95;' entity (was a no-op self-replacement).
    code_zone = code_zone.replace('_', '&#95;')
    return code_zone
def apply_markdown(src):
    """Render one markdown section to HTML.

    Handles inline code, inline and reference links, emphasis
    (**strong**, __b__, *em*, _i_) and ATX headers.  Returns
    ``(html, scripts)``; *scripts* is always empty here and exists for
    symmetry with mark().  Raises KeyError for an unresolved reference
    link.
    """
    scripts = []
    # replace \` by ` (escaped backtick is a literal backtick)
    src = re.sub(r'\\\`', '`', src)
    # escape < > & in inline code
    code_pattern = r'\`(\S.*?\S)\`'
    src = re.sub(code_pattern, s_escape, src)
    # also neutralise _ inside inline code before the emphasis pass
    src = re.sub(code_pattern, s_unmark, src)

    # inline links [text](url)
    link_pattern1 = r'\[(.+?)\]\s?\((.+?)\)'

    def repl(mo):
        g1, g2 = mo.groups()
        # BUG FIX: protect '_' in the url with its entity (was a no-op)
        g2 = re.sub('_', '&#95;', g2)
        return '<a href="%s">%s</a>' % (g2, g1)

    src = re.sub(link_pattern1, repl, src)

    # reference links [text][key]
    link_pattern2 = r'\[(.+?)\]\s?\[(.*?)\]'
    while True:
        mo = re.search(link_pattern2, src)
        if mo is None:
            break
        text, key = mo.groups()
        if not key:
            key = text  # implicit link name
        if key.lower() not in refs:
            raise KeyError('unknown reference %s' % key)
        url = refs[key.lower()]
        # note: rebinds the name 'repl'; the helper above is no longer
        # needed at this point
        repl = '<a href="' + url.href + '"'
        if url.alt:
            repl += ' title="' + url.alt + '"'
        repl += '>%s</a>' % text
        src = re.sub(link_pattern2, repl, src, count=1)

    # emphasis
    # replace \* by * and \_ by _
    src = re.sub(r'\\\*', '*', src)
    src = re.sub(r'\\\_', '_', src)
    # _ and * surrounded by spaces are not markup: entity-encode them
    # (BUG FIX: both substitutions had degenerated into no-ops)
    src = re.sub(r' _ ', ' &#95; ', src)
    src = re.sub(r' \* ', ' &#42; ', src)

    strong_patterns = [('STRONG', r'\*\*(.*?)\*\*'), ('B', r'__(.*?)__')]
    for tag, strong_pattern in strong_patterns:
        src = re.sub(strong_pattern, r'<%s>\1</%s>' % (tag, tag), src)

    em_patterns = [('EM', r'\*(.*?)\*'), ('I', r'\_(.*?)\_')]
    for tag, em_pattern in em_patterns:
        src = re.sub(em_pattern, r'<%s>\1</%s>' % (tag, tag), src)

    # inline code
    # replace \` by `
    src = re.sub(r'\\\`', '`', src)
    code_pattern = r'\`(.*?)\`'
    src = re.sub(code_pattern, r'<code>\1</code>', src)

    # ATX headers (#, ##, ...) at the start of a line
    lines = src.split('\n')
    # NOTE(review): the trailing (#*) can never match anything after the
    # greedy (.*); kept as-is to preserve behavior
    atx_header_pattern = '^(#+)(.*)(#*)'
    for i, line in enumerate(lines):
        mo = re.search(atx_header_pattern, line)
        if not mo:
            continue
        level = len(mo.groups()[0])
        lines[i] = re.sub(atx_header_pattern,
                          '<H%s>%s</H%s>\n' % (level, mo.groups()[1], level),
                          line, count=1)
    src = '\n'.join(lines)

    # paragraph breaks
    src = re.sub('\n\n+', '\n<p>', src) + '\n'
    return src, scripts
| |
"""
@package mi.instrument.um.thsph.thsph.driver
@file marine-integrations/mi/instrument/um/thsph/thsph/driver.py
@author Richard Han
@brief Driver for the thsph
Release notes:
Vent Chemistry Instrument Driver
"""
__author__ = 'Richard Han'
__license__ = 'Apache 2.0'
import time
import re
from ion.agents.instrument.exceptions import InstrumentException
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.exceptions import SampleException, InstrumentProtocolException, InstrumentParameterException, \
InstrumentTimeoutException
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.log import get_logger, get_logging_metaclass
log = get_logger()
from mi.core.common import BaseEnum, Units
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver, DriverConfigKey
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.chunker import StringChunker
###
# Driver Constant Definitions
###
# Line terminator used when framing instrument commands/responses.
NEWLINE = '\r\n'
# Default timeout for protocol operations (presumably seconds — confirm
# against CommandResponseInstrumentProtocol usage).
TIMEOUT = 10
# Maximum number of communication tests used to wake up the instrument.
MAX_COMM_TEST = 2
# Time to wait for the instrument response after a command.
CMD_RESP_TIME = .1
# The timeout to wake the device.
WAKEUP_TIMEOUT = 3
# The time to look for a response to a wake up attempt.
RESPONSE_TIMEOUT = 1
class ScheduledJob(BaseEnum):
    """Names of the jobs this driver registers with the scheduler."""
    AUTO_SAMPLE = 'auto_sample'
class DataParticleType(BaseEnum):
    """
    Data particle types produced by this driver
    """
    RAW = CommonDataParticleType.RAW  # pass-through raw stream
    THSPH_PARSED = 'thsph_sample'     # parsed aH...# ADC sample (THSPHParticle)
class Command(BaseEnum):
    """
    Instrument command strings (logical names; the series-specific wire
    strings are mapped in THSPHProtocol.THSPH_COMMANDS)
    """
    GET_SAMPLE = 'get_sample_cmd'  # Gets data sample from ADC
    COMM_TEST = 'comm_test_cmd'  # Communication test, returns aP#
class ProtocolState(BaseEnum):
    """
    Instrument protocol states (subset of the common driver states)
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
    """
    Protocol events
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    DISCOVER = DriverEvent.DISCOVER
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    # driver-private event (not in DriverEvent): fired by the scheduler to
    # trigger a sample while in AUTOSAMPLE
    SCHEDULE_ACQUIRE_SAMPLE = 'DRIVER_EVENT_SCHEDULE_ACQUIRE_SAMPLE'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
class Parameter(DriverParameter):
    """
    Device specific parameters for THSPH.
    """
    INTERVAL = 'SampleInterval'            # autosample period
    INSTRUMENT_SERIES = 'InstrumentSeries'  # hardware series selector (A/B/C)
class Prompt(BaseEnum):
    """
    Device i/o prompts for THSPH
    """
    COMM_RESPONSE = 'aP#'  # reply to the communication-test command
###############################################################################
# Data Particles
###############################################################################
class THSPHDataParticleKey(BaseEnum):
    """Value IDs for the eight channels of a parsed THSPH sample."""
    HIGH_IMPEDANCE_ELECTRODE_1 = "thsph_hie1"  # High Impedance Electrode 1 for pH
    HIGH_IMPEDANCE_ELECTRODE_2 = "thsph_hie2"  # High Impedance Electrode 2 for pH
    H2_ELECTRODE = "thsph_h2electrode"  # H2 electrode
    S2_ELECTRODE = "thsph_s2electrode"  # Sulfide Electrode
    THERMOCOUPLE1 = "thsph_thermocouple1"  # Type E thermocouple 1-high
    THERMOCOUPLE2 = "thsph_thermocouple2"  # Type E thermocouple 2-low
    REFERENCE_THERMISTOR = "thsph_rthermistor"  # Reference Thermistor
    BOARD_THERMISTOR = "thsph_bthermistor"  # Board Thermistor
class THSPHParticle(DataParticle):
    """
    Parses raw THSPH data into a data particle structure.

    A sample is a concatenation of 8 channels of 14-bit resolution data,
    each output as a four-character ASCII hexadecimal number
    (0000 to 3FFF):

        aHaaaabbbbccccddddeeeeffffgggghhhh#

    where aaaa/bbbb are the two high-input-impedance electrodes, cccc the
    H2 electrode, dddd the S2 electrode, eeee/ffff the two Type E
    thermocouples, gggg the reference thermistor and hhhh the board
    thermistor.  Example: aH200A200720DE20AA10883FFF2211225E#
    """
    _data_particle_type = DataParticleType.THSPH_PARSED

    @staticmethod
    def regex():
        """
        Regular expression to match a sample pattern
        @return: regex string
        """
        # 'aH', then eight 4-digit hex channel fields, then '#'
        channel = r'([0-9A-F]{4})'
        return r'aH' + channel * 8 + r'#'

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(THSPHParticle.regex())

    def _build_parsed_values(self):
        """
        Split an ADC sample into its eight channel values.
        @return: list of {VALUE_ID, VALUE} dicts, one per channel
        @throws SampleException If there is a problem with sample creation
        """
        match = THSPHParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of THSPH parsed sample data: [%s]" %
                                  self.raw_data)
        try:
            channels = [self.hex2value(match.group(idx)) for idx in range(1, 9)]
        except ValueError:
            raise SampleException("ValueError while converting data: [%s]" %
                                  self.raw_data)

        # value IDs in the same order as the capture groups above
        keys = (THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_1,
                THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_2,
                THSPHDataParticleKey.H2_ELECTRODE,
                THSPHDataParticleKey.S2_ELECTRODE,
                THSPHDataParticleKey.THERMOCOUPLE1,
                THSPHDataParticleKey.THERMOCOUPLE2,
                THSPHDataParticleKey.REFERENCE_THERMISTOR,
                THSPHDataParticleKey.BOARD_THERMISTOR)
        return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value}
                for key, value in zip(keys, channels)]

    def hex2value(self, hex_value):
        """
        Convert an ADC hex field to an int value.
        @param hex_value: string to convert
        @return: int of the converted value
        @throws InstrumentParameterException if hex_value is not a string
        """
        if not isinstance(hex_value, str):
            raise InstrumentParameterException("hex value not a string")
        return int(hex_value, 16)
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass
    Subclasses SingleConnectionInstrumentDriver with connection state
    machine.
    """
    def __init__(self, evt_callback):
        """
        Driver constructor.
        @param evt_callback Driver process event callback.
        """
        # Construct superclass.
        SingleConnectionInstrumentDriver.__init__(self, evt_callback)
    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        # presumably invoked by the base driver once a connection is
        # established — confirm against SingleConnectionInstrumentDriver
        self._protocol = THSPHProtocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class THSPHProtocol(CommandResponseInstrumentProtocol):
    """
    THSPH instrument protocol.

    Subclasses CommandResponseInstrumentProtocol and implements the
    UNKNOWN / COMMAND / AUTOSAMPLE / DIRECT_ACCESS state machine for the
    THSPH instrument series A, B and C.
    """
    SERIES_A = 'A'
    SERIES_B = 'B'
    SERIES_C = 'C'
    GET_SAMPLE_SERIES_A = 'aH*'     # Gets data sample from ADC for series A
    COMM_TEST_SERIES_A = 'aP*'      # Communication test for series A. Returns aP#
    GET_SAMPLE_SERIES_B = 'bH*'     # Gets data sample from ADC for series B
    COMM_TEST_SERIES_B = 'bP*'      # Communication test for series B
    GET_SAMPLE_SERIES_C = 'cH*'     # Gets data sample from ADC for series C
    COMM_TEST_SERIES_C = 'cP*'      # Communication test for series C

    # THSPH command mnemonics keyed by instrument series (A, B and C)
    THSPH_COMMANDS = {
        SERIES_A: {Command.COMM_TEST: COMM_TEST_SERIES_A, Command.GET_SAMPLE: GET_SAMPLE_SERIES_A},
        SERIES_B: {Command.COMM_TEST: COMM_TEST_SERIES_B, Command.GET_SAMPLE: GET_SAMPLE_SERIES_B},
        SERIES_C: {Command.COMM_TEST: COMM_TEST_SERIES_C, Command.GET_SAMPLE: GET_SAMPLE_SERIES_C},
    }

    __metaclass__ = get_logging_metaclass(log_level='debug')

    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)

        # Build protocol state machine.
        self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)

        # Add event handlers for protocol state machine.
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
                                       self._handler_command_start_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE,
                                       self._handler_command_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
                                       self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULE_ACQUIRE_SAMPLE,
                                       self._handler_command_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
                                       self._handler_autosample_stop_autosample)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
                                       self._handler_direct_access_enter)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
                                       self._handler_direct_access_exit)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
                                       self._handler_direct_access_execute_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
                                       self._handler_direct_access_stop_direct)

        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_driver_dict()
        self._build_command_dict()
        self._build_param_dict()

        # Add build handlers for device commands.
        self._add_build_handler(Command.GET_SAMPLE, self._build_simple_command)
        self._add_build_handler(Command.COMM_TEST, self._build_simple_command)

        # Start the state machine in the UNKNOWN state (discovery moves it on).
        self._protocol_fsm.start(ProtocolState.UNKNOWN)

        # commands sent to device to be filtered in responses for telnet DA
        self._sent_cmds = []

        self._chunker = StringChunker(THSPHProtocol.sieve_function)

        # Default to the Series A command set; _set_params switches these
        # when the INSTRUMENT_SERIES parameter is applied.
        self._get_sample_cmd = self.GET_SAMPLE_SERIES_A
        self._comm_test_cmd = self.COMM_TEST_SERIES_A

    @staticmethod
    def sieve_function(raw_data):
        """
        Chunker sieve: return (start, end) index pairs of complete sample
        particles found in raw_data.
        """
        matchers = []
        return_list = []

        matchers.append(THSPHParticle.regex_compiled())

        for matcher in matchers:
            log.trace('matcher: %r raw_data: %r', matcher.pattern, raw_data)
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))

        return return_list

    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker. Pass it to
        extract_sample with the appropriate particle objects and REGEXes.
        @raises InstrumentProtocolException when the chunk matches no particle.
        """
        if not self._extract_sample(THSPHParticle, THSPHParticle.regex_compiled(), chunk, timestamp):
            raise InstrumentProtocolException("Unhandled chunk")

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options.
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)

    def _build_command_dict(self):
        """
        Populate the command dictionary with the capabilities this protocol
        exposes.
        """
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="start autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="stop autosample")
        self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="acquire sample")
        self._cmd_dict.add(Capability.SET, display_name="set")
        self._cmd_dict.add(Capability.GET, display_name="get")

    def _build_param_dict(self):
        """
        Populate the parameter dictionary with THSPH parameters.
        For each parameter key, add match string, match lambda function,
        and value formatting function for set commands.
        """
        self._param_dict.add(Parameter.INTERVAL,
                             r'Auto Polled Interval = (\d+)',
                             lambda match: int(match.group(1)),
                             str,
                             type=ParameterDictType.INT,
                             units=Units.SECOND,
                             display_name="Polled Interval",
                             startup_param=True,
                             direct_access=False,
                             default_value=5)
        self._param_dict.add(Parameter.INSTRUMENT_SERIES,
                             r'Instrument Series = ([A-C])',
                             # Bug fix: the series is a letter A-C; the original
                             # int(match.group(1)) raised ValueError on match.
                             lambda match: match.group(1),
                             str,
                             type=ParameterDictType.STRING,
                             display_name="Instrument Series",
                             startup_param=True,
                             direct_access=False,
                             default_value='A')

    def _filter_capabilities(self, events):
        """
        Return a list of currently available capabilities.
        """
        return [x for x in events if Capability.has(x)]

    ########################################################################
    # Unknown State handlers.
    ########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.
        """
        pass

    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; always transitions to COMMAND state.
        @retval (next_state, next_agent_state).
        """
        log.debug('_handler_unknown_discover ')
        next_state = ProtocolState.COMMAND
        next_agent_state = ResourceAgentState.IDLE
        return next_state, next_agent_state

    ########################################################################
    # Command State handlers.
    ########################################################################
    def _handler_command_acquire_sample(self, *args, **kwargs):
        """
        Request a single data sample from the instrument (fire and forget;
        the response arrives asynchronously via the chunker).
        @retval (next_state, (next_agent_state, result)) -- all None.
        """
        next_state = None
        next_agent_state = None
        result = None

        self._do_cmd_no_resp(Command.GET_SAMPLE, timeout=TIMEOUT)

        return next_state, (next_agent_state, result)

    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state: apply startup parameters, then announce.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._init_params()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.
        """
        pass

    def _handler_command_get(self, *args, **kwargs):
        """
        Get device parameters from the parameter dict. First we set a baseline timestamp
        that all data expirations will be calculated against. Then we try to get parameter
        value. If we catch an expired parameter then we will update all parameters and get
        values using the original baseline time that we set at the beginning of this method.
        Assuming our _update_params is updating all parameter values properly then we can
        ensure that all data will be fresh. Nobody likes stale data!
        @param args[0] list of parameters to retrieve, or DriverParameter.ALL.
        """
        return self._handler_get(*args, **kwargs)

    def _handler_command_set(self, *args, **kwargs):
        """
        Perform a set command.
        @param args[0] parameter : value dict.
        @retval (next_state, result) tuple, (None, None).
        @throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
        not a dict, or if parameter can't be properly formatted.
        """
        next_state = None
        result = None
        startup = False

        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')

        try:
            startup = args[1]
        except IndexError:
            pass

        # Only raise a config-change event when a value actually changed.
        old_config = self._param_dict.get_config()
        self._set_params(params, startup)
        new_config = self._param_dict.get_config()
        if old_config != new_config:
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)

        return next_state, result

    def _set_params(self, *args, **kwargs):
        """
        Set various parameters internally to the driver. No issuing commands to the
        instrument needed for this driver.
        @param args[0] parameter : value dict.
        @throws InstrumentParameterException on a missing dict or a bad value.
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        # list can be null, like in the case of direct access params; in this case do nothing
        if not params:
            return

        # Do a range check before we start all sets
        for (key, val) in params.iteritems():
            if key == Parameter.INTERVAL and not (0 < val < 601):
                log.debug("Auto Sample Interval not in 1 to 600 range ")
                raise InstrumentParameterException("sample interval out of range [1, 600]")

            if key == Parameter.INSTRUMENT_SERIES:
                # Bug fix: the original `val not in 'ABC'` was a substring test
                # that accepted values like 'AB'; membership in the command map
                # restricts to exactly 'A', 'B' or 'C'.
                if val not in self.THSPH_COMMANDS:
                    log.debug("Instrument Series is not A, B or C ")
                    # Bug fix: message previously read "is not invalid".
                    raise InstrumentParameterException("Instrument Series is not valid ")
                else:
                    self._get_sample_cmd = self.THSPH_COMMANDS[val][Command.GET_SAMPLE]
                    self._comm_test_cmd = self.THSPH_COMMANDS[val][Command.COMM_TEST]

            log.debug('key = (%s), value = (%s)' % (key, val))
            self._param_dict.set_value(key, val)

    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Switch into autosample mode.
        @retval (next_state, (next_agent_state, result)) tuple,
        (ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)).
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        result = None
        next_state = ProtocolState.AUTOSAMPLE
        next_agent_state = ResourceAgentState.STREAMING

        return next_state, (next_agent_state, result)

    def _handler_command_start_direct(self):
        """
        Start direct access.
        """
        return ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None)

    #######################################################################
    # Autosample State handlers.
    ########################################################################
    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state. Because this is an instrument that must be
        polled we need to ensure the scheduler is added when we are in an
        autosample state. This scheduler raises events to poll the
        instrument for data.
        @retval next_state, (next_agent_state, result)
        """
        self._init_params()

        self._setup_autosample_config()

        # Schedule auto sample task
        self._add_scheduler_event(ScheduledJob.AUTO_SAMPLE, ProtocolEvent.SCHEDULE_ACQUIRE_SAMPLE)

        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

        return None, (None, None)

    def _setup_autosample_config(self):
        """
        Set up auto sample configuration and add it to the scheduler.
        """
        # Start the scheduler to poll the instrument for
        # data every sample interval seconds
        job_name = ScheduledJob.AUTO_SAMPLE
        polled_interval = self._param_dict.get_config_value(Parameter.INTERVAL)
        config = {
            DriverConfigKey.SCHEDULER: {
                job_name: {
                    DriverSchedulerConfigKey.TRIGGER: {
                        DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                        DriverSchedulerConfigKey.SECONDS: polled_interval
                    }
                }
            }
        }
        self.set_init_params(config)

        # Start the scheduler if it is not running
        if not self._scheduler:
            self.initialize_scheduler()

    def _handler_autosample_exit(self, *args, **kwargs):
        """
        Exit auto sample state.
        """
        next_state = None
        next_agent_state = None
        result = None

        return next_state, (next_agent_state, result)

    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Remove the auto sample task and return to COMMAND state.
        """
        result = None

        # Stop the Auto Poll scheduling
        self._remove_scheduler(ScheduledJob.AUTO_SAMPLE)

        next_state = ProtocolState.COMMAND
        next_agent_state = ResourceAgentState.COMMAND

        return next_state, (next_agent_state, result)

    ########################################################################
    # Direct access handlers.
    ########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

        self._sent_cmds = []

    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state.
        """
        pass

    def _handler_direct_access_execute_direct(self, data):
        """
        Pass raw bytes straight through to the instrument.
        """
        next_state = None
        result = None
        next_agent_state = None
        self._do_cmd_direct(data)

        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)

        return next_state, (next_agent_state, result)

    def _handler_direct_access_stop_direct(self):
        """
        Leave direct access and return to COMMAND state.
        @throw InstrumentProtocolException on invalid command
        """
        result = None
        next_state = ProtocolState.COMMAND
        next_agent_state = ResourceAgentState.COMMAND

        return next_state, (next_agent_state, result)

    def _build_simple_command(self, cmd, *args):
        """
        Build handler for basic THSPH commands.
        @param cmd the simple ooicore command to format.
        @retval The command to be sent to the device.
        @throws InstrumentException for unknown commands.
        """
        instrument_series = self._param_dict.get(Parameter.INSTRUMENT_SERIES)

        if cmd == Command.COMM_TEST:
            instrument_cmd = self.THSPH_COMMANDS[instrument_series][Command.COMM_TEST]
        elif cmd == Command.GET_SAMPLE:
            instrument_cmd = self.THSPH_COMMANDS[instrument_series][Command.GET_SAMPLE]
        else:
            raise InstrumentException('Unknown THSPH driver command %s' % cmd)

        return "%s%s" % (instrument_cmd, NEWLINE)

    def _build_set_command(self, cmd, param, val):
        """
        Build handler for set commands. param=val followed by newline.
        String val constructed by param dict formatting function.
        @param param the parameter key to set.
        @param val the parameter value to set.
        @retval The set command to be sent to the device.
        @throws InstrumentParameterException if the parameter is not valid or
        if the formatting function could not accept the value passed.
        """
        try:
            str_val = self._param_dict.format(param, val)
            # Translate driver parameter names to the instrument's syntax.
            if param == 'INTERVAL':
                param = 'sampleinterval'
            elif param == 'INSTRUMENT_SERIES':
                param = 'instrument_series'

            set_cmd = '%s=%s' % (param, str_val)
            set_cmd += NEWLINE
        except KeyError:
            raise InstrumentParameterException('Unknown driver parameter %s' % param)

        return set_cmd

    def _wakeup(self, wakeup_timeout=WAKEUP_TIMEOUT, response_timeout=RESPONSE_TIMEOUT):
        """
        Wake this instrument by sending communication test commands until
        MAX_COMM_TEST consecutive responses are seen.
        @param wakeup_timeout The timeout to wake the device.
        @param response_timeout The time to look for response to a wakeup attempt.
        @throw InstrumentTimeoutException if the device could not be woken.
        """
        log.debug("_wakeup ")
        sleep_time = CMD_RESP_TIME
        cmd_line = self._build_simple_command(Command.COMM_TEST)

        # Grab start time for overall wakeup timeout.
        start_time = time.time()
        test_count = 0
        while test_count < MAX_COMM_TEST:
            # Clear the prompt buffer.
            self._promptbuf = ''

            # Send a communication test command and wait delay amount for response.
            self._connection.send(cmd_line)
            time.sleep(sleep_time)
            if self._promptbuf.find(Prompt.COMM_RESPONSE) != -1:
                # instrument is awake
                log.debug('_wakeup: got communication test response %s', Prompt.COMM_RESPONSE)
                test_count += 1
            else:
                # clear test_count since we want MAX_COMM_TEST consecutive successful communication test
                test_count = 0

            # Stop waking the instrument if the wake up timeout has elapsed
            if time.time() > start_time + wakeup_timeout:
                break

        if test_count != MAX_COMM_TEST:
            log.debug('instrument failed to wakeup in %d seconds time' % wakeup_timeout)
            raise InstrumentTimeoutException(
                "_wakeup(): instrument failed to wakeup in %d seconds time" % wakeup_timeout)

        return Prompt.COMM_RESPONSE
| |
import re
import sys
from collections import defaultdict, namedtuple
from checkfort.exceptions import *
from checkfort.logging import p_debug, p_verbose, p_info
class EventInstance(object):
    """A single occurrence of a forcheck event, optionally tied to a file/line."""

    def __init__(self, code, culprit, linenum=None, filename=None):
        """
        @param code: forcheck event code string, e.g. "123 E"
        @param culprit: offending source text (may be empty)
        @param linenum: line number within filename, if known
        @param filename: source file the event refers to, if any
        """
        # Paths must already be normalized relative to the source root.
        assert not filename or "../" not in filename
        self.code = code
        self.culprit = culprit
        self.filename = filename
        # `is not None` (idiomatic) also keeps an explicit linenum=0 intact.
        self.linenum = int(linenum) if linenum is not None else None

    @property
    def link(self):
        """HTML anchor into the rendered source page, or None if unknown."""
        # Robustness fix: a filename without a line number previously crashed
        # on the %d format below.
        if not self.filename or self.linenum is None:
            return None
        return "src/%s.html#line-%d" % (self.filename.replace(' ', '_'),
                                        self.linenum)
class ParserState(object):
    """Accumulates events, per-file instances and summary totals while parsing."""

    def __init__(self, legacy_mode=False, ignore_list=None):
        """
        @param legacy_mode: True for forcheck versions older than 14.1
        @param ignore_list: iterable of numeric event codes to suppress
        """
        self.legacy_mode = legacy_mode
        self.sums = {}
        self.event_message = defaultdict(str)
        self.event_counter = defaultdict(int)
        self.event_instances = defaultdict(list)
        self.file_events = defaultdict(list)
        #self.global_events = defaultdict(list)
        # Bug fix: the default ignore_list=None used to raise TypeError when
        # iterated; treat a missing/empty list as "ignore nothing".
        self.ignore_list = set(int(x) for x in ignore_list) if ignore_list else set()
        # Set when parsing looks inconsistent; triggers a debug dump later.
        self.debug_required = False

    def _should_ignore(self, code):
        """Return True when the numeric part of `code` is on the ignore list."""
        if not self.ignore_list:
            return False
        numeric, syntax = code.split(None, 1)
        return (int(numeric) in self.ignore_list)

    def _store_event(self, code, message, instance):
        """Record one event instance, keeping the first message seen per code."""
        if self._should_ignore(code):
            return
        self.event_instances[code].append(instance)
        self.event_counter[code] += 1
        if code not in self.event_message:
            self.event_message[code] = message
        elif message != self.event_message[code]:
            # The same code should always carry the same text; flag for debug.
            self.debug_required = True
            p_debug("Seeing different messages for "
                    "event code (%s).\n" % code)

    def store_sums(self, name, total):
        """Record a "number of <name>" summary total."""
        self.sums[name] = total

    def store_file_event(self, filename, linenum, code, message, culprit):
        """Record an event tied to a specific file/line."""
        if self._should_ignore(code):
            return
        instance = EventInstance(code, culprit, linenum, filename)
        self._store_event(code, message, instance)
        if filename is not None:
            self.file_events[filename].append(instance)

    def store_global_event(self, code, message, details):
        """Record a global (non file-specific) event."""
        if self._should_ignore(code):
            return
        instance = EventInstance(code, details)
        self._store_event(code, message, instance)
        #self.global_events[code].append(instance)
class ForcheckParser(object):
    """Drives a staged, page-aware parse of a forcheck list file."""
    # set legacy mode for forcheck version <14.1
    def __init__(self, forcheck_listfile,
                 legacy_mode=False, ignore_list=None):
        """
        @param forcheck_listfile: path to the forcheck output listing
        @param legacy_mode: True for forcheck versions older than 14.1
        @param ignore_list: iterable of numeric event codes to skip
        """
        self.listfile = forcheck_listfile
        self.state = ParserState(legacy_mode, ignore_list=ignore_list)
        self._parse()
    def _parse(self):
        """Run the four parse stages over the list file, line by line."""
        p_info("\nParsing forcheck listfile")
        if self.state.ignore_list:
            p_info("(ignoring the following forcheck events: "
                   "%s)" % ", ".join(str(x) for x in self.state.ignore_list))
        # Each stage consumes lines until its end_marker is seen, then the
        # next stage takes over.  A stage whose parser is None is skipped.
        stages = iter((
            {"name": "file events",
             "end_marker": "global program analysis:",
             "parser": FileEvent(self.state)},
            {"name": "global events",
             "end_marker": "program_units and procedures analysed:",
             "parser": GlobalEvent(self.state)},
            {"name": "program units",
             # "end_marker": "*END OF ANALYSIS*"  # no longer appear in >14.3
             "end_marker": "messages presented:",
             "parser": None},
            {"name": "forcheck summary",
             "end_marker": None,
             "parser": SummaryEvent(self.state)},
        ))
        lines = ("", "", "")  # (current, previous, previous-1)
        stage = stages.next()  # Python 2 iterator protocol
        p_info(" - Parsing %s" % stage["name"])
        def forward_to_content(file_iterator):
            """
            Forward the file iterator past a page header to the actual
            content, returning the source file this output page addresses
            (or None when the page is not file specific).
            """
            # each new page starts with a header
            line = next(file_iterator)
            assert line.startswith("FORCHECK"), "Unexpected listfile format"
            # this is followed by "(options...) target_file" if the output is
            # file specific
            line = next(file_iterator)
            if line.strip():
                target_file = line.rsplit(None, 1)[-1]
                line = next(file_iterator)  # following line should be blank
                assert not line.strip(), "Unexpected listfile format"
            else:
                target_file = None
            return target_file
        with open(self.listfile) as f:
            target_file = forward_to_content(f)
            for L in f:
                if L.startswith("\f"):  # new page. forward to content
                    target_file = forward_to_content(f)
                    continue
                lines = (L.strip(), lines[0], lines[1])  # shift
                if lines[0] == stage["end_marker"]:
                    # End of this stage: advance to the next one.
                    stage = stages.next()
                    p_info(" - Parsing %s" % stage["name"])
                elif stage["parser"]:  # if event has a parser
                    stage["parser"].slurp(target_file, *lines)
        if self.state.debug_required:
            # Keep a copy of the listfile so the inconsistency can be reported.
            import shutil
            listfile_out = "forcheck_listfile.debug"
            shutil.copy(self.listfile, listfile_out)
            p_debug(
                "There appears to be a problem in the parsing. \n"
                "  Forcheck results written to %s. \n"
                "  Please send the file and the checkfort version info\n"
                "  to the checkfort devs for further investigation"
                % (listfile_out, ))
        else:
            p_verbose("Parse successful")
class Event(object):
    """Base class for the stage parsers: shared state plus compiled patterns."""

    # Patterns used on every line; compiled once at class-definition time.
    re_err = re.compile(r"^\*\*\[\s*(\d+ [IEWO])\] (.*)")          # "**[ 123 E] msg"
    re_summary = re.compile(r"^(\d+)x\[\s*(\d+ [IEWO])\] (.+)")    # "3x[ 123 E] msg"
    re_file = re.compile(r"^\(file: ([^,]+), line:\s+(\d+)\)")     # "(file: x, line: 9)"
    re_numof = re.compile(r"number of ([^:]*?):\s+(\d+)")          # "number of lines: 5"

    def __init__(self, state):
        self.state = state
class FileEvent(Event):
    """Parses per-file diagnostic events from the listfile."""

    def slurp(self, target_file, line, line1, line2):
        """Consume the current line plus the two preceding ones."""
        # file events always open with "**["
        if not line.startswith("**["):
            return
        # Forcheck 14.1 swapped the order of the culprit and file-info lines.
        if self.state.legacy_mode:
            culprit_line, fileinfo_line = line2, line1
        else:
            culprit_line, fileinfo_line = line1, line2
        # extract event code and message
        err = Event.re_err.match(line)
        if err is None:
            raise ParseError("Unknown error format - " + line)
        code, message = err.groups()
        # extract filename and line number
        fileinfo = Event.re_file.match(fileinfo_line)
        if fileinfo is not None:
            filename, linenum = fileinfo.groups()
        else:
            # perhaps no culprit was printed, shifting the lines up by one
            shifted = Event.re_file.match(culprit_line)
            if shifted:
                filename, linenum = shifted.groups()
                culprit_line = ""
            else:
                filename, linenum = target_file, 0  # guess: the page's file
        # extract details of the event
        culprit = culprit_line
        if message.startswith("("):
            # Some forcheck errors carry no specific culprit.  There is no
            # reliable marker, but a message starting with "(" usually means
            # exactly that, so assume no culprit was specified.
            culprit = ""
        self.state.store_file_event(filename, linenum, code, message, culprit)
class GlobalEvent(Event):
    """Parses global (cross-file) analysis events."""

    def slurp(self, target_file, line, line1, line2):
        """Consume the current line plus the two preceding ones."""
        assert target_file is None
        # global events also open with "**["
        if not line.startswith("**["):
            return
        match = Event.re_err.match(line)
        if match is None:
            raise ParseError("Unknown error format - " + line)
        code, message = match.groups()
        # A global event's details normally sit on the preceding line; the
        # only detectable "no details" case is when that line is itself an
        # event marker.
        details = "" if line1.startswith("**[") else line1
        self.state.store_global_event(code, message, details)
class SummaryEvent(Event):
    """Parses the summary stage and cross-checks it against parsed counts."""

    def slurp(self, target_file, line, line1, line2):
        """Consume one line of the summary section."""
        assert target_file is None
        summary = Event.re_summary.match(line)
        if summary:
            self.validate_sums(*summary.groups())
        elif line.startswith("number of "):
            self.state.store_sums(*Event.re_numof.match(line).groups())

    def validate_sums(self, count, code, message):
        """Flag a debug condition when our tally disagrees with forcheck's."""
        if self.state._should_ignore(code):
            return
        if self.state.event_counter[code] == int(count):
            return
        self.state.debug_required = True
        p_debug("Parsed results does not match "
                "forcheck summary (%s)." % code)
        p_debug(" Found %d, summary states %s" %
                (self.state.event_counter[code], count))
| |
# Copyright 2016 Kevin B Jacobs
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities to perform variant graph matching."""
import sys
from dataclasses import dataclass
from itertools import chain
from typing import Optional
from vgraph.bed import load_bedmap
from vgraph.norm import NormalizedLocus, fancy_match, normalize_seq, ReferenceMismatch
from vgraph.intervals import union
from vgraph.iterstuff import sort_almost_sorted, is_empty_iter, unique_everseen
from vgraph.lazy_fasta import LazyFastaContig
from vgraph.linearmatch import generate_graph, generate_paths, generate_genotypes_with_paths, generate_genotypes, intersect_paths, OverlapError
@dataclass(frozen=True)
class AlleleMatch:
    """Dataclass for allele matching results."""

    # Number of called copies of the queried allele, and the minimum allele
    # depth observed for it (None when no depth information was available).
    allele_ploidy: int
    allele_depth: Optional[int]
    # Reference allele copies and minimum reference depth at the site.
    ref_ploidy: int
    ref_depth: Optional[int]
    # Copies / minimum depth of any other (non-query, non-reference) alleles.
    other_ploidy: Optional[int]
    other_depth: Optional[int]
def valid_alleles(alleles):
    """Return True when there is at least one alt and no symbolic/bracketed allele."""
    has_symbolic = any('<' in allele or '[' in allele or ']' in allele for allele in alleles)
    return len(alleles) > 1 and not has_symbolic
def is_alt_genotype(record, name):
    """Return if the named sample has a non-reference genotype call."""
    indices = record.samples[name].allele_indices
    if not indices or None in indices:
        return False  # no call / partial call
    if indices.count(0) == len(indices):
        return False  # all-reference genotype
    if max(indices) >= len(record.alleles):
        return False  # allele index beyond the declared alleles
    return True
def records_to_loci(ref, records, name, variant_padding):
    """Convert variant records into NormalizedLocus objects.

    Records whose REF disagrees with the reference sequence are skipped,
    with a message printed for each.
    """
    for index, record in enumerate(records):
        try:
            locus = NormalizedLocus(index, record, ref, name, variant_padding)
        except ReferenceMismatch:
            print(f'Reference mismatch: {record.contig}:{record.start}-{record.stop}')
        else:
            yield locus
def all_contigs(varfiles):
    """Return all contigs in order seen in the variant file header and index.

    Args:
        varfiles: Input variant file object

    Returns:
        All unique contigs of the file
    """
    ordered = list(varfiles.header.contigs)
    index = varfiles.index
    if index is not None:
        ordered += list(index)
    return unique_everseen(ordered)
def informative_contigs(varfile):
    """Scan an indexed variant file to determine which contigs have alignments.

    Args:
        varfile: Input variant file object

    Returns:
        Generator of contigs that have data

    Raises:
        ValueError: when the variant file has no index.
    """
    if varfile.index is None:
        # Raise eagerly at call time (a generator function would defer this).
        raise ValueError('Variant file requires index')
    return (c for c in varfile.index if not is_empty_iter(varfile.fetch(c)))
def region_filter_include(records, include):
    """Yield only records that overlap one of the provided inclusion regions."""
    for _, _, (overlapping, included) in union([records, include]):
        if included:
            for record in overlapping:
                yield record
def region_filter_exclude(records, exclude):
    """Yield only records that overlap none of the provided exclusion regions."""
    for _, _, (overlapping, excluded) in union([records, exclude]):
        if not excluded:
            for record in overlapping:
                yield record
def filter_gq(records, name, min_gq):
    """Drop records whose genotype quality for *name* is below min_gq.

    Records with no GQ field or a missing GQ value pass through unchanged.
    """
    for record in records:
        if 'GQ' not in record.format:
            yield record
            continue
        gq = record.samples[name]['GQ']
        if gq is None or gq >= min_gq:
            yield record
def filter_records(records, name, args):
    """Apply GQ and FILTER-field filters to *records* according to *args*."""
    if args.min_gq is not None:
        records = filter_gq(records, name, args.min_gq)

    if args.include_filter:
        wanted = {f.strip() for group in args.include_filter for f in group.split(',')}
        records = (r for r in records if not wanted.isdisjoint(r.filter))

    if args.exclude_filter:
        unwanted = {f.strip() for group in args.exclude_filter for f in group.split(',')}
        records = (r for r in records if unwanted.isdisjoint(r.filter))

    return records
def records_by_chromosome(refs, varfiles, names, args, get_all=False):
    """Group variant records by chromosome.

    Yields one tuple per contig that has data: (contig, ref, loci), or
    (contig, ref, loci, all_records) when get_all is True.
    """
    # Contigs in header/index order, restricted to those that have records.
    contigs_all = unique_everseen(chain.from_iterable(all_contigs(var) for var in varfiles))
    contigs_seen = set(chain.from_iterable(informative_contigs(var) for var in varfiles))
    contigs_fetch = [contig for contig in contigs_all if contig in contigs_seen]
    if args.include_regions is not None:
        include = load_bedmap(args.include_regions)
        contigs_fetch = [contig for contig in contigs_fetch if contig in include]
    if args.exclude_regions is not None:
        exclude = load_bedmap(args.exclude_regions)
        # NOTE(review): exclusion regions do not narrow contigs_fetch the way
        # inclusion regions do above -- confirm the asymmetry is intended.
    for contig in contigs_fetch:
        try:
            if args.lazy_ref:
                ref = LazyFastaContig(refs, contig)
            else:
                ref = refs.fetch(contig)
        except KeyError:
            # The reference has no such contig; skip it.
            continue
        records = [var.fetch(contig) if contig in var.index else [] for var in varfiles]
        if get_all:
            # Materialize so records can be both filtered and returned verbatim.
            all_records = records = [list(l) for l in records]
        records = [filter_records(r, name, args) for r, name in zip(records, names)]
        if args.include_regions is not None:
            records = [region_filter_include(r, include[contig]) for r in records]
        if args.exclude_regions is not None:
            records = [region_filter_exclude(r, exclude[contig]) for r in records]
        loci = [records_to_loci(ref, r, name, args.reference_padding) for name, r in zip(names, records)]
        loci = [sort_almost_sorted(l, key=NormalizedLocus.natural_order_key) for l in loci]
        if get_all:
            yield contig, ref, loci, all_records
        else:
            yield contig, ref, loci
def get_superlocus_bounds(superloci):
    """Return the most 5' start and most 3' stop over all loci in *superloci*.

    superloci: iterable of superloci, each an iterable of locus objects
    exposing min_start / max_stop.  Returns a (start, stop) tuple.
    """
    # Renamed the loop variable: the original shadowed the builtin `super`.
    start = min(locus.min_start for superlocus in superloci for locus in superlocus)
    stop = max(locus.max_stop for superlocus in superloci for locus in superlocus)
    return start, stop
def locus_equal_trivial(locus1, locus2):
    """Compare two loci for trivial equality: same span and same genotype."""
    left1, left2 = locus1.left, locus2.left
    if (left1.start, left1.stop) != (left2.start, left2.stop):
        return False
    geno1 = tuple(left1.alleles[i] for i in locus1.allele_indices)
    geno2 = tuple(left2.alleles[i] for i in locus2.allele_indices)
    if locus1.phased and locus2.phased:
        return geno1 == geno2
    # Unless both calls are phased, genotypes compare as multisets.
    return sorted(geno1) == sorted(geno2)
def superlocus_equal_trivial(super1, super2):
    """Compare two superloci pairwise for trivial equality (no graph matching)."""
    if len(super1) != len(super2):
        return False
    return all(locus_equal_trivial(a, b) for a, b in zip(super1, super2))
def superlocus_equal(ref, start, stop, super1, super2, debug=False):
    """Compare two superloci.

    Returns (equal, method): method 'T' for a trivial match, 'H' for a
    haplotype/graph comparison, or 'N' (with equal=None) when loci overlap
    and cannot be compared.
    """
    if superlocus_equal_trivial(super1, super2):
        return True, 'T'

    # Bounds come from normalized extremes
    # NOTE(review): the start/stop arguments are overwritten here, so callers'
    # values are effectively ignored -- confirm this is intended.
    start, stop = get_superlocus_bounds([super1, super2])

    # Create genotype sets for each superlocus
    try:
        graph1, constraints1 = generate_graph(ref, start, stop, super1, debug)
        graph2, constraints2 = generate_graph(ref, start, stop, super2, debug)

        paths1 = generate_paths(graph1, debug=debug)
        # Restrict the second path search to paths feasible in the first graph.
        paths2 = generate_paths(graph2, feasible_paths=paths1, debug=debug)

        paths1, paths2 = intersect_paths(paths1, paths2)
    except OverlapError:
        status = None, 'N'
    else:
        genos1 = set(generate_genotypes(paths1, constraints1, debug))
        genos2 = set(generate_genotypes(paths2, constraints2, debug))

        # Test whether genotype sets intersect
        if genos1.isdisjoint(genos2):
            status = False, 'H'
        else:
            status = True, 'H'

    return status
def find_allele_exact_match(ref, allele, superlocus):
    """Search for allele using a fast exact match criteria.

    Returns the number of called copies of the query allele at the first
    PASS locus whose left-normalized span exactly matches the query's span,
    or 0 when no such locus exists.  (ref is unused; kept for signature
    compatibility with other matchers.)
    """
    query = allele.left
    for locus in superlocus:
        called = locus.left
        if query.start != called.start or query.stop != called.stop:
            continue
        if 'PASS' not in locus.record.filter:
            continue
        # Bug fix: set has no `intersect` method; the original call raised
        # AttributeError whenever a span matched.  Use set.intersection.
        shared_alts = set(called.alts).intersection(query.alts)
        return sum(
            locus.allele_indices.count(locus.alleles.index(alt))
            for alt in shared_alts
        )
    return 0
def path_to_ads(path):
    """Yield one allele depth per VCF-backed node along *path*."""
    for node in path:
        locus = node.locus
        if not locus:
            # Graph nodes without a VCF record contribute nothing.
            continue
        sample = locus.record.samples[0]
        if node.index is not None and 'AD' in sample:
            yield sample['AD'][node.index]
        else:
            yield sample.get('MIN_DP', 0)
def path_to_ref_ads(path):
    """Yield one reference allele depth per VCF-backed node along *path*."""
    for node in path:
        locus = node.locus
        if not locus:
            # Graph nodes without a VCF record contribute nothing.
            continue
        sample = locus.record.samples[0]
        has_ad = 'AD' in sample
        if has_ad and node.index:
            # Only report ref AD for non-reference calls; a reference call's
            # AD is already counted as part of FOUND or OTHER.
            yield sample['AD'][0]
        elif not has_ad and 'MIN_DP' in sample:
            yield sample['MIN_DP']
        else:
            yield 0
def build_match_result(geno, matches, super_ref):
    """Build match results.

    Partitions per-haplotype allele depths into FOUND / REF / OTHER buckets
    according to the match flags, then summarizes each bucket as a ploidy
    count plus its minimum observed depth.
    """
    seqs, paths = zip(*geno)
    allele_depths = [list(path_to_ads(p)) for p in paths]

    found, ref, other = [], [], []
    for matched, seq, depths in zip(matches, seqs, allele_depths):
        if matched:
            bucket = found
        elif seq == super_ref:  # no need for fancy_match; super_ref has no wildcards
            bucket = ref
        else:
            bucket = other
        bucket.append(depths)

    ref_ploidy = len(ref)
    allele_ploidy = len(found)
    other_ploidy = len(other)

    # If a reference allele was not called, then collect all reference allele depths
    # from an arbitrary path, as AD always contains reference counts.
    if not ref_ploidy and paths:
        ref = [list(path_to_ref_ads(paths[0]))]

    allele_ad = empty_min(chain.from_iterable(found), default=None)
    ref_ad = empty_min(chain.from_iterable(ref), default=None)
    other_ad = empty_min(chain.from_iterable(other), default=None)

    return AlleleMatch(allele_ploidy, allele_ad, ref_ploidy, ref_ad, other_ploidy, other_ad)
# Private sentinel distinguishing "no default supplied" from default=None
nothing = object()


def empty_min(items, default=nothing):
    """Return the min of items unless it is empty and a default is provided."""
    materialized = list(items)
    if materialized or default is nothing:
        # Empty input without a default falls through to min(), which raises
        # ValueError exactly like the builtin contract.
        return min(materialized)
    return default
def build_match_strings(ref, start, stop, allele, mode='sensitive', debug=False):
    """Build allele matching strings.

    Constructs the normalized reference sequence for the [start, stop)
    superlocus window plus one candidate "super allele" string per alt.
    In 'specific' mode each alt is embedded directly in reference context;
    in 'sensitive' mode the regions outside the allele's normalization
    wobble zone are masked with '*' wildcards so fancy matching ignores them.

    Returns:
        tuple: (super_ref, super_alleles)

    Raises:
        ValueError: if mode is neither 'specific' nor 'sensitive'.
    """
    alts = allele.alts
    if debug:
        print(' Allele: start={}, stop={}, size={}, ref={}, seq={}'.format(
            allele.start,
            allele.stop,
            allele.stop - allele.start,
            allele.ref,
            ','.join(alts),
        ), file=sys.stderr)

    super_ref = normalize_seq(ref[start:stop])

    # Require reference matches within the wobble zone + padding built into each normalized allele
    if mode == 'specific':
        # Exact reference context on both flanks
        super_alleles = [normalize_seq(ref[start:allele.start] + alt + ref[allele.stop:stop]) for alt in alts]
    elif mode == 'sensitive':
        # Wildcard everything outside [min_start, max_stop); keep real
        # reference bases only within the allele's wobble zone
        super_alleles = [
            normalize_seq(
                '*' * (allele.min_start - start)
                + ref[allele.min_start:allele.start]
                + alt
                + ref[allele.stop:allele.max_stop]
                + '*' * (stop - allele.max_stop)
            ) for alt in alts
        ]
    else:
        raise ValueError(f'invalid match mode specified: {mode}')

    if debug:
        print(' MODE:', mode, file=sys.stderr)
        print(' SUPER ALLELES:', super_alleles, file=sys.stderr)
        print(' SUPER REF: ', super_ref, file=sys.stderr)

    # Sanity check: each super allele differs from the window length only by
    # the alt/ref length difference (normalize_seq is length-preserving here)
    assert all(len(a) == stop - start - len(allele.ref) + len(alt) for a, alt in zip(super_alleles, alts))

    return super_ref, super_alleles
def compare_alleles(alleles, seq):
    """Compare sequence with each potential allele using fancy matching.

    Args:
        alleles: candidate alleles
        seq: sequence with which to compare

    Returns:
        True: if any allele fancy-matches seq
        False: if no alleles match and none are uncertain
        None: if no alleles match and one or more is uncertain

    """
    verdict = False
    for candidate in alleles:
        outcome = fancy_match(candidate, seq)
        if outcome:
            # A single definite match decides the whole comparison
            return True
        if outcome is None:
            # Record the uncertainty, but keep scanning for a real match
            verdict = None
    return verdict
def find_allele_matches(ref, start, stop, allele, genos, ploidy, mode, debug=False):
    """Analyze graph paths to find allele matches.

    Scores every candidate genotype against the allele's "super allele"
    strings and summarizes the best-scoring genotype as an AlleleMatch.

    Returns:
        AlleleMatch or None: None when no genotypes exist, or when nothing
        matched and at least one haplotype comparison was uncertain.
    """
    # superlocus contains impossible genotypes and no paths are valid
    super_ref, super_alleles = build_match_strings(ref, start, stop, allele, mode, debug)

    if not genos:
        return None

    # For each genotype, fancy-match every haplotype sequence against the
    # candidate super alleles; each matches entry is True / False / None
    all_matches = (
        (
            [compare_alleles(super_alleles, seq) for (seq, _) in geno],
            geno,
        ) for geno in genos
    )

    # Pick the genotype maximizing (matched copy count, fewest uncertain
    # comparisons).  The enumeration index i is a unique tie-breaker, so the
    # trailing geno/matches elements are never themselves compared by max().
    zygosity, nocalls, _, geno, matches = max(
        (
            sum(m or 0 for m in matches),  # True counts 1; False/None count 0
            -matches.count(None),          # negative => fewer no-calls sorts higher
            i,
            geno,
            matches,
        ) for i, (matches, geno) in enumerate(all_matches)
    )

    # Nothing matched and at least one comparison was uncertain: report no-call
    if not zygosity and nocalls:
        return None

    result = build_match_result(geno, matches, super_ref)

    if debug:
        for super_allele in super_alleles:
            print(' ALLELE:{} {}'.format(len(super_allele), super_allele), file=sys.stderr)
        # NOTE(review): this zips all genotypes against the *best* genotype's
        # matches list, and 'i' leaks out of the loop into the MATCH print —
        # presumably per-genotype matches were intended; verify before relying
        # on this debug output.
        for i, (g, m) in enumerate(zip(genos, matches)):
            print(' GENO{:02d}:{} {}'.format(i, tuple(map(len, g)), g), file=sys.stderr)
        print(f' MATCH{i:02d}: {m}', file=sys.stderr)
        print(file=sys.stderr)
        print(
            f'ALLELE: id={allele.record.id}, allele_ploidy={result.allele_ploidy}, '
            f'ref_ploidy={result.ref_ploidy}, other_ploidy={result.other_ploidy}, ploidy={result.ploidy}',
            file=sys.stderr
        )
        print(f' ZYGOSITY: {zygosity}', file=sys.stderr)

    return result
def find_allele(ref, allele, superlocus, mode='sensitive', debug=False):
    """Check for the presence of an allele within a superlocus.

    Builds a variant graph over the combined bounds of the allele and the
    superlocus, enumerates haplotype paths and genotypes, and delegates
    scoring to find_allele_matches.

    Returns:
        AlleleMatch or None: None when the loci overlap in an unresolvable
        way (OverlapError) or no usable genotype is found downstream.
    """
    start, stop = get_superlocus_bounds([[allele], superlocus])

    # Create genotype sets for each superlocus
    try:
        graph, constraints = generate_graph(ref, start, stop, superlocus, debug)
        if debug:
            # Materialize the (lazy) graph so it can be printed and reused
            graph = list(graph)
            for i, (astart, astop, alleles) in enumerate(graph):
                print(f' GRAPH{i:02d}: start={astart}, stop={astop}, alleles={alleles}', file=sys.stderr)
            print(file=sys.stderr)

        paths = list(generate_paths(graph, debug=debug))
        if debug:
            for i, p in enumerate(paths):
                print(f' PATH{i:02d}: {p}', file=sys.stderr)
            print(file=sys.stderr)

    except OverlapError:
        # Graph construction failed due to irreconcilable overlapping loci
        return None

    # Generate the set of diploid genotypes (actually haplotypes)
    # Ploidy defaults to 2 for an empty superlocus
    ploidy = max(len(locus.allele_indices) for locus in superlocus) if superlocus else 2
    genos = list(generate_genotypes_with_paths(paths, constraints, ploidy))

    return find_allele_matches(ref, start, stop, allele, genos, ploidy, mode, debug)
| |
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: November 25, 2009
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""Create links in the HDF5 file.
This module implements containers for soft and external links. Hard
links doesn't need a container as such as they are the same as regular
nodes (groups or leaves).
Classes:
SoftLink
ExternalLink
Functions:
Misc variables:
"""
import os
import tables
from tables import linkextension
from tables.node import Node
from tables.utils import lazyattr
from tables.attributeset import AttributeSet
import tables.file
def _g_get_link_class(parent_id, name):
    """Return the Link subclass guessed for node `name` under `parent_id`."""
    guessed_class = linkextension._get_link_class(parent_id, name)
    return guessed_class
class Link(Node):
    """Abstract base class for all PyTables links.

    A link is a node that refers to another node. The Link class inherits from
    Node class and the links that inherits from Link are SoftLink and
    ExternalLink. There is not a HardLink subclass because hard links behave
    like a regular Group or Leaf. Contrarily to other nodes, links cannot have
    HDF5 attributes. This is an HDF5 library limitation that might be solved
    in future releases.

    See :ref:`LinksTutorial` for a small tutorial on how to work with links.

    .. rubric:: Link attributes

    .. attribute:: target

        The path string to the pointed node.

    """

    # Properties
    @lazyattr
    def _v_attrs(self):
        """
        A *NoAttrs* instance replacing the typical *AttributeSet* instance of
        other node objects. The purpose of *NoAttrs* is to make clear that
        HDF5 attributes are not supported in link nodes.
        """
        # Raise on both read and write so any attribute access fails loudly
        # instead of silently writing HDF5 attributes the format forbids.
        class NoAttrs(AttributeSet):
            def __getattr__(self, name):
                raise KeyError("you cannot get attributes from this "
                               "`%s` instance" % self.__class__.__name__)

            def __setattr__(self, name, value):
                raise KeyError("you cannot set attributes to this "
                               "`%s` instance" % self.__class__.__name__)

            def _g_close(self):
                # Nothing to release; links carry no attribute storage
                pass

        return NoAttrs(self)

    def __init__(self, parentnode, name, target=None, _log=False):
        # A supplied target means the link is being created anew rather than
        # opened from an existing file.
        self._v_new = target is not None
        self.target = target
        """The path string to the pointed node."""

        super(Link, self).__init__(parentnode, name, _log)

    # Public and tailored versions for copy, move, rename and remove methods
    def copy(self, newparent=None, newname=None,
             overwrite=False, createparents=False):
        """Copy this link and return the new one.

        See :meth:`Node._f_copy` for a complete explanation of the arguments.
        Please note that there is no recursive flag since links do not have
        child nodes.

        """
        newnode = self._f_copy(newparent=newparent, newname=newname,
                               overwrite=overwrite,
                               createparents=createparents)
        # Insert references to a `newnode` via `newname`
        newnode._v_parent._g_refnode(newnode, newname, True)
        return newnode

    def move(self, newparent=None, newname=None, overwrite=False):
        """Move or rename this link.

        See :meth:`Node._f_move` for a complete explanation of the arguments.

        """
        return self._f_move(newparent=newparent, newname=newname,
                            overwrite=overwrite)

    def remove(self):
        """Remove this link from the hierarchy."""
        return self._f_remove()

    def rename(self, newname=None, overwrite=False):
        """Rename this link in place.

        See :meth:`Node._f_rename` for a complete explanation of the arguments.

        """
        return self._f_rename(newname=newname, overwrite=overwrite)

    def __repr__(self):
        # Links have no extra state worth showing beyond __str__
        return str(self)
class SoftLink(linkextension.SoftLink, Link):
    """Represents a soft link (aka symbolic link).

    A soft link is a reference to another node in the *same* file hierarchy.
    Provided that the target node exists, its attributes and methods can be
    accessed directly from the softlink using the normal `.` syntax.

    Softlinks also have the following public methods/attributes:

        * `target`
        * `dereference()`
        * `copy()`
        * `move()`
        * `remove()`
        * `rename()`
        * `is_dangling()`

    Note that these will override any correspondingly named methods/attributes
    of the target node.

    For backwards compatibility, it is also possible to obtain the target node
    via the `__call__()` special method (this action is called *dereferencing*;
    see below)

    Examples
    --------

    ::

        >>> f = tables.open_file('/tmp/test_softlink.h5', 'w')
        >>> a = f.create_array('/', 'A', np.arange(10))
        >>> link_a = f.create_soft_link('/', 'link_A', target='/A')

        # transparent read/write access to a softlinked node
        >>> link_a[0] = -1
        >>> print(link_a[:], link_a.dtype)
        (array([-1, 1, 2, 3, 4, 5, 6, 7, 8, 9]), dtype('int64'))

        # dereferencing a softlink using the __call__() method
        >>> print(link_a() is a)
        True

        # SoftLink.remove() overrides Array.remove()
        >>> link_a.remove()
        >>> print(link_a)
        <closed tables.link.SoftLink at 0x7febe97186e0>
        >>> print(a[:], a.dtype)
        (array([-1, 1, 2, 3, 4, 5, 6, 7, 8, 9]), dtype('int64'))

    """

    # Class identifier.
    _c_classid = 'SOFTLINK'

    # attributes with these names/prefixes are treated as attributes of the
    # SoftLink rather than the target node
    _link_attrnames = ('target', 'dereference', 'is_dangling', 'copy', 'move',
                       'remove', 'rename', '__init__', '__str__', '__repr__',
                       '__class__', '__dict__')
    _link_attrprefixes = ('_f_', '_c_', '_g_', '_v_')

    def __call__(self):
        """Dereference `self.target` and return the object.

        Examples
        --------

        ::

            >>> f=tables.open_file('data/test.h5')
            >>> print(f.root.link0)
            /link0 (SoftLink) -> /another/path
            >>> print(f.root.link0())
            /another/path (Group) ''

        """
        return self.dereference()

    def dereference(self):
        # Resolve the target (relative targets are joined onto the parent
        # group's path) and fetch the node; a closed link dereferences to None.
        if self._v_isopen:
            target = self.target
            # Check for relative pathnames
            if not self.target.startswith('/'):
                target = self._v_parent._g_join(self.target)
            return self._v_file._get_node(target)
        else:
            return None

    def __getattribute__(self, attrname):
        # Intercepts ALL attribute access: link-own names go to the link
        # itself, everything else is forwarded to the dereferenced target.

        # get attribute of the SoftLink itself
        if (attrname in SoftLink._link_attrnames
                or attrname[:3] in SoftLink._link_attrprefixes):
            return object.__getattribute__(self, attrname)

        # get attribute of the target node
        elif not self._v_isopen:
            raise tables.ClosedNodeError('the node object is closed')
        elif self.is_dangling():
            return None
        else:
            target_node = self.dereference()
            try:
                # __getattribute__() fails to get children of Groups
                return target_node.__getattribute__(attrname)
            except AttributeError:
                # some node classes (e.g. Array) don't implement __getattr__()
                return target_node.__getattr__(attrname)

    def __setattr__(self, attrname, value):
        # set attribute of the SoftLink itself
        if (attrname in SoftLink._link_attrnames
                or attrname[:3] in SoftLink._link_attrprefixes):
            object.__setattr__(self, attrname, value)

        # set attribute of the target node
        elif not self._v_isopen:
            raise tables.ClosedNodeError('the node object is closed')
        elif self.is_dangling():
            raise ValueError("softlink target does not exist")
        else:
            self.dereference().__setattr__(attrname, value)

    def __getitem__(self, key):
        """__getitem__ must be defined in the SoftLink class in order for array
        indexing syntax to work"""
        if not self._v_isopen:
            raise tables.ClosedNodeError('the node object is closed')
        elif self.is_dangling():
            raise ValueError("softlink target does not exist")
        else:
            return self.dereference().__getitem__(key)

    def __setitem__(self, key, value):
        """__setitem__ must be defined in the SoftLink class in order for array
        indexing syntax to work"""
        if not self._v_isopen:
            raise tables.ClosedNodeError('the node object is closed')
        elif self.is_dangling():
            raise ValueError("softlink target does not exist")
        else:
            self.dereference().__setitem__(key, value)

    def is_dangling(self):
        # A link dangles when its resolved target node is absent from the file
        return not (self.dereference() in self._v_file)

    def __str__(self):
        """Return a short string representation of the link.

        Examples
        --------

        ::

            >>> f=tables.open_file('data/test.h5')
            >>> print(f.root.link0)
            /link0 (SoftLink) -> /path/to/node

        """
        classname = self.__class__.__name__
        target = str(self.target)
        # Check for relative pathnames
        if not self.target.startswith('/'):
            target = self._v_parent._g_join(self.target)
        if self._v_isopen:
            closed = ""
        else:
            closed = "closed "
        if target not in self._v_file:
            dangling = " (dangling)"
        else:
            dangling = ""
        return "%s%s (%s) -> %s%s" % (closed, self._v_pathname, classname,
                                      self.target, dangling)
class ExternalLink(linkextension.ExternalLink, Link):
    """Represents an external link.

    An external link is a reference to a node in *another* file.
    Getting access to the pointed node (this action is called
    *dereferencing*) is done via the :meth:`__call__` special method
    (see below).

    .. rubric:: ExternalLink attributes

    .. attribute:: extfile

        The external file handler, if the link has been dereferenced.
        In case the link has not been dereferenced yet, its value is
        None.

    """

    # Class identifier.
    _c_classid = 'EXTERNALLINK'

    def __init__(self, parentnode, name, target=None, _log=False):
        # Handle of the external file; opened lazily on first dereference
        self.extfile = None
        """The external file handler, if the link has been dereferenced.
        In case the link has not been dereferenced yet, its value is
        None."""
        super(ExternalLink, self).__init__(parentnode, name, target, _log)

    def _get_filename_node(self):
        """Return the external filename and nodepath from `self.target`."""
        # This is needed for avoiding the 'C:\\file.h5' filepath notation
        # (splitting on ':/' rather than ':' keeps Windows drive letters intact)
        filename, target = self.target.split(':/')
        return filename, '/' + target

    def __call__(self, **kwargs):
        """Dereference self.target and return the object.

        You can pass all the arguments supported by the :func:`open_file`
        function (except filename, of course) so as to open the referenced
        external file.

        Examples
        --------

        ::

            >>> f=tables.open_file('data1/test1.h5')
            >>> print(f.root.link2)
            /link2 (ExternalLink) -> data2/test2.h5:/path/to/node
            >>> plink2 = f.root.link2('a')  # open in 'a'ppend mode
            >>> print(plink2)
            /path/to/node (Group) ''
            >>> print(plink2._v_filename)
            'data2/test2.h5'        # belongs to referenced file

        """
        filename, target = self._get_filename_node()

        if not os.path.isabs(filename):
            # Resolve the external link with respect to the this
            # file's directory. See #306.
            base_directory = os.path.dirname(self._v_file.filename)
            filename = os.path.join(base_directory, filename)

        # Reuse a previously opened external file when still open
        if self.extfile is None or not self.extfile.isopen:
            self.extfile = tables.open_file(filename, **kwargs)
        else:
            # XXX: implement better consistency checks
            assert self.extfile.filename == filename
            assert self.extfile.mode == kwargs.get('mode', 'r')

        return self.extfile._get_node(target)

    def umount(self):
        """Safely unmount self.extfile, if opened."""
        extfile = self.extfile
        # Close external file, if open
        if extfile is not None and extfile.isopen:
            extfile.close()
            self.extfile = None

    def _f_close(self):
        """Specific close for external links."""
        self.umount()
        super(ExternalLink, self)._f_close()

    def __str__(self):
        """Return a short string representation of the link.

        Examples
        --------

        ::

            >>> f=tables.open_file('data1/test1.h5')
            >>> print(f.root.link2)
            /link2 (ExternalLink) -> data2/test2.h5:/path/to/node

        """
        classname = self.__class__.__name__
        return "%s (%s) -> %s" % (self._v_pathname, classname, self.target)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| |
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import random
import xmltodict
from twisted.internet import reactor
from twisted.internet.defer import returnValue, inlineCallbacks, succeed
from adtran_device_handler import AdtranDeviceHandler
from voltha.adapters.adtran_olt.resources import adtranolt_platform as platform
from download import Download
from codec.olt_state import OltState
from flow.flow_entry import FlowEntry
from resources.adtran_olt_resource_manager import AdtranOltResourceMgr
from net.pio_zmq import PioClient
from net.pon_zmq import PonClient
from voltha.core.flow_decomposer import *
from voltha.extensions.omci.omci import *
from voltha.protos.common_pb2 import AdminState, OperStatus
from voltha.protos.device_pb2 import ImageDownload, Image
from voltha.protos.openflow_13_pb2 import OFPP_MAX
from common.tech_profile.tech_profile import *
from voltha.protos.device_pb2 import Port
class AdtranOltHandler(AdtranDeviceHandler):
    """
    The OLT Handler is used to wrap a single instance of a 10G OLT 1-U pizza-box
    """
    # Oldest supported hardware build date
    MIN_OLT_HW_VERSION = datetime.datetime(2017, 1, 5)

    # RESTCONF URI templates for the gpon-olt-hw YANG model.
    # Full table output
    GPON_OLT_HW_URI = '/restconf/data/gpon-olt-hw'
    GPON_OLT_HW_STATE_URI = GPON_OLT_HW_URI + ':olt-state'
    GPON_OLT_HW_CONFIG_URI = GPON_OLT_HW_URI + ':olt'
    GPON_PON_CONFIG_LIST_URI = GPON_OLT_HW_CONFIG_URI + '/pon'

    # Per-PON info
    GPON_PON_STATE_URI = GPON_OLT_HW_STATE_URI + '/pon={}'        # .format(pon-id)
    GPON_PON_CONFIG_URI = GPON_PON_CONFIG_LIST_URI + '={}'        # .format(pon-id)

    GPON_ONU_CONFIG_LIST_URI = GPON_PON_CONFIG_URI + '/onus/onu'  # .format(pon-id)
    GPON_ONU_CONFIG_URI = GPON_ONU_CONFIG_LIST_URI + '={}'        # .format(pon-id,onu-id)

    GPON_TCONT_CONFIG_LIST_URI = GPON_ONU_CONFIG_URI + '/t-conts/t-cont'  # .format(pon-id,onu-id)
    GPON_TCONT_CONFIG_URI = GPON_TCONT_CONFIG_LIST_URI + '={}'           # .format(pon-id,onu-id,alloc-id)

    GPON_GEM_CONFIG_LIST_URI = GPON_ONU_CONFIG_URI + '/gem-ports/gem-port'  # .format(pon-id,onu-id)
    GPON_GEM_CONFIG_URI = GPON_GEM_CONFIG_LIST_URI + '={}'                  # .format(pon-id,onu-id,gem-id)

    # RPC endpoint used to trigger ONU discovery on a PON
    GPON_PON_DISCOVER_ONU = '/restconf/operations/gpon-olt-hw:discover-onu'

    BASE_ONU_OFFSET = 64
    def __init__(self, **kwargs):
        super(AdtranOltHandler, self).__init__(**kwargs)

        self.status_poll = None                      # deferred for the periodic PON status poll
        self.status_poll_interval = 5.0              # seconds between polls
        self.status_poll_skew = self.status_poll_interval / 10   # random jitter bound
        self._pon_agent = None                       # PonClient (ZMQ) once started
        self._pio_agent = None                       # PioClient (ZMQ) once started
        self._ssh_deferred = None
        self._system_id = None
        self._download_protocols = None              # list of supported download protocols once discovered
        self._download_deferred = None
        self._downloads = {}        # name -> Download obj
        self._pio_exception_map = []

        # NOTE(review): 'shapping' is a typo for 'shaping', but this attribute
        # is read elsewhere in the class, so the name must not change here.
        self.downstream_shapping_supported = True    # 1971320F1-ML-4154 and later

        # FIXME:  Remove once we containerize.  Only exists to keep BroadCom OpenOMCI ONU Happy
        #         when it reaches up our rear and tries to yank out a UNI port number
        self.platform_class = None

        # To keep broadcom ONU happy
        from voltha.adapters.adtran_olt.resources.adtranolt_platform import adtran_platform
        self.platform = adtran_platform()   # TODO: Remove once tech-profiles & containerization is done !!!
def __del__(self):
# OLT Specific things here.
#
# If you receive this during 'enable' of the object, you probably threw an
# uncaught exception which triggered an errback in the VOLTHA core.
d, self.status_poll = self.status_poll, None
# Clean up base class as well
AdtranDeviceHandler.__del__(self)
def _cancel_deferred(self):
d1, self.status_poll = self.status_poll, None
d2, self._ssh_deferred = self._ssh_deferred, None
d3, self._download_deferred = self._download_deferred, None
for d in [d1, d2, d3]:
try:
if d is not None and not d.called:
d.cancel()
except:
pass
    def __str__(self):
        # Identify the handler by the OLT's management IP address
        return "AdtranOltHandler: {}".format(self.ip_address)
    @property
    def system_id(self):
        # Cached OLT system identifier; written to the device by the setter
        return self._system_id
    @system_id.setter
    def system_id(self, value):
        # On change, persist the new system ID to the OLT via RESTCONF
        if self._system_id != value:
            self._system_id = value

            # NOTE(review): 'json' is not among this module's explicit imports;
            # presumably provided by one of the star imports above — verify.
            data = json.dumps({'olt-id': str(value)})
            uri = AdtranOltHandler.GPON_OLT_HW_CONFIG_URI
            self.rest_client.request('PATCH', uri, data=data, name='olt-system-id')
    @inlineCallbacks
    def get_device_info(self, _device):
        """
        Perform an initial network operation to discover the device hardware
        and software version. Serial Number would be helpful as well.

        Upon successfully retrieving the information, remember to call the
        'start_heartbeat' method to keep in contact with the device being managed

        :param _device: A voltha.Device object, with possible device-type
                        specific extensions. Such extensions shall be described as part of
                        the device type specification returned by device_types().
        :return: (Deferred) dict of device identity/version fields
        """
        from codec.physical_entities_state import PhysicalEntitiesState
        # TODO: After a CLI 'reboot' command, the device info may get messed up (prints labels and not values)
        # # Enter device and type 'show'
        # Defaults used when the NETCONF query fails or fields are absent
        device = {
            'model': 'n/a',
            'hardware_version': 'unknown',
            'serial_number': 'unknown',
            'vendor': 'ADTRAN, Inc.',
            'firmware_version': 'unknown',
            'running-revision': 'unknown',
            'candidate-revision': 'unknown',
            'startup-revision': 'unknown',
            'software-images': []
        }
        # Virtual OLTs have no physical inventory to query
        if self.is_virtual_olt:
            returnValue(device)

        try:
            pe_state = PhysicalEntitiesState(self.netconf_client)
            self.startup = pe_state.get_state()
            results = yield self.startup

            if results.ok:
                modules = pe_state.get_physical_entities('adtn-phys-mod:module')

                if isinstance(modules, list):
                    module = modules[0]
                    # Python 2 str.translate(None, '?') strips '?' placeholder chars
                    name = str(module.get('model-name', 'n/a')).translate(None, '?')
                    model = str(module.get('model-number', 'n/a')).translate(None, '?')

                    device['model'] = '{} - {}'.format(name, model) if len(name) > 0 else \
                        module.get('parent-entity', 'n/a')
                    device['hardware_version'] = str(module.get('hardware-revision',
                                                                'n/a')).translate(None, '?')
                    device['serial_number'] = str(module.get('serial-number',
                                                             'n/a')).translate(None, '?')
                    if 'software' in module:
                        if 'software' in module['software']:
                            software = module['software']['software']
                            # Single dict: plain revision fields;
                            # list: separate firmware/software image entries
                            if isinstance(software, dict):
                                device['running-revision'] = str(software.get('running-revision',
                                                                              'n/a')).translate(None, '?')
                                device['candidate-revision'] = str(software.get('candidate-revision',
                                                                                'n/a')).translate(None, '?')
                                device['startup-revision'] = str(software.get('startup-revision',
                                                                              'n/a')).translate(None, '?')
                            elif isinstance(software, list):
                                for sw_item in software:
                                    sw_type = sw_item.get('name', '').lower()
                                    if sw_type == 'firmware':
                                        device['firmware_version'] = str(sw_item.get('running-revision',
                                                                                     'unknown')).translate(None, '?')
                                    elif sw_type == 'software':
                                        for rev_type in ['startup-revision',
                                                         'running-revision',
                                                         'candidate-revision']:
                                            if rev_type in sw_item:
                                                image = Image(name=rev_type,
                                                              version=sw_item[rev_type],
                                                              is_active=(rev_type == 'running-revision'),
                                                              is_committed=True,
                                                              is_valid=True,
                                                              install_datetime='Not Available',
                                                              hash='Not Available')
                                                device['software-images'].append(image)

                    # Update features based on version
                    # Format expected to be similar to:  1971320F1-ML-4154
                    running_version = next((image.version for image in device.get('software-images', list())
                                            if image.is_active), '').split('-')
                    if len(running_version) > 2:
                        try:
                            # Trailing build number gates downstream shaping support
                            self.downstream_shapping_supported = int(running_version[-1]) >= 4154
                        except ValueError:
                            pass

        except Exception as e:
            self.log.exception('dev-info-failure', e=e)
            raise

        returnValue(device)
    def initialize_resource_manager(self):
        # Initialize the resource and tech profile managers
        extra_args = '--olt_model {}'.format(self.resource_manager_key)
        self.resource_mgr = AdtranOltResourceMgr(self.device_id,
                                                 self.host_and_port,
                                                 extra_args,
                                                 self.default_resource_mgr_device_info)
        # Cache one tech-profile object per PON port (asserts full coverage)
        self._populate_tech_profile_per_pon_port()
    @property
    def default_resource_mgr_device_info(self):
        # Lightweight stand-in for a device-info protobuf, built from the
        # platform constants and the currently known southbound PON ports.
        class AdtranOltDevInfo(object):
            def __init__(self, pon_ports):
                self.technology = "xgspon"
                self.onu_id_start = 0
                self.onu_id_end = platform.MAX_ONUS_PER_PON
                self.alloc_id_start = platform.MIN_TCONT_ALLOC_ID
                self.alloc_id_end = platform.MAX_TCONT_ALLOC_ID
                self.gemport_id_start = platform.MIN_GEM_PORT_ID
                self.gemport_id_end = platform.MAX_GEM_PORT_ID
                self.pon_ports = len(pon_ports)
                self.max_tconts = platform.MAX_TCONTS_PER_ONU
                self.max_gem_ports = platform.MAX_GEM_PORTS_PER_ONU
                self.intf_ids = pon_ports.keys()    # PON IDs

        return AdtranOltDevInfo(self.southbound_ports)
def _populate_tech_profile_per_pon_port(self):
self.tech_profiles = {intf_id: self.resource_mgr.resource_managers[intf_id].tech_profile
for intf_id in self.resource_mgr.device_info.intf_ids}
# Make sure we have as many tech_profiles as there are pon ports on
# the device
assert len(self.tech_profiles) == self.resource_mgr.device_info.pon_ports
    def get_tp_path(self, intf_id, ofp_port_name):
        """Return the tech-profile instance path suffix for a UNI port."""
        # TODO: Should get Table id form the flow, as of now hardcoded to DEFAULT_TECH_PROFILE_TABLE_ID (64)
        # 'tp_path' contains the suffix part of the tech_profile_instance path.
        # The prefix to the 'tp_path' should be set to \
        # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX by the ONU adapter.
        return self.tech_profiles[intf_id].get_tp_path(DEFAULT_TECH_PROFILE_TABLE_ID,
                                                       ofp_port_name)
    def delete_tech_profile_instance(self, intf_id, onu_id, logical_port):
        # Remove the TP instance associated with the ONU
        # (resolves the ONU's OpenFlow port name first to build the TP path)
        ofp_port_name = self.get_ofp_port_name(intf_id, onu_id, logical_port)
        tp_path = self.get_tp_path(intf_id, ofp_port_name)
        return self.tech_profiles[intf_id].delete_tech_profile_instance(tp_path)
def get_ofp_port_name(self, pon_id, onu_id, logical_port_number):
parent_port_no = self.pon_id_to_port_number(pon_id)
child_device = self.adapter_agent.get_child_device(self.device_id,
parent_port_no=parent_port_no, onu_id=onu_id)
if child_device is None:
self.log.error("could-not-find-child-device", parent_port_no=pon_id, onu_id=onu_id)
return None, None
ports = self.adapter_agent.get_ports(child_device.id, Port.ETHERNET_UNI)
port = next((port for port in ports if port.port_no == logical_port_number), None)
logical_port = self.adapter_agent.get_logical_port(self.logical_device_id,
port.label)
ofp_port_name = (logical_port.ofp_port.name, logical_port.ofp_port.port_no)
return ofp_port_name
@inlineCallbacks
def enumerate_northbound_ports(self, device):
"""
Enumerate all northbound ports of this device.
:param device: A voltha.Device object, with possible device-type
specific extensions.
:return: (Deferred or None).
"""
from net.rcmd import RCmd
try:
# Also get the MAC Address for the OLT
command = "ip link | grep -A1 eth0 | sed -n -e 's/^.*ether //p' | awk '{ print $1 }'"
rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password,
command)
address = yield rcmd.execute()
self.mac_address = address.replace('\n', '')
self.log.info("mac-addr", mac_addr=self.mac_address)
except Exception as e:
log.exception('mac-address', e=e)
raise
try:
from codec.ietf_interfaces import IetfInterfacesState
from nni_port import MockNniPort
ietf_interfaces = IetfInterfacesState(self.netconf_client)
if self.is_virtual_olt:
results = MockNniPort.get_nni_port_state_results()
else:
self.startup = ietf_interfaces.get_state()
results = yield self.startup
ports = ietf_interfaces.get_port_entries(results, 'ethernet')
returnValue(ports)
except Exception as e:
log.exception('enumerate_northbound_ports', e=e)
raise
    def process_northbound_ports(self, device, results):
        """
        Process the results from the 'enumerate_northbound_ports' method.

        :param device: A voltha.Device object, with possible device-type
                       specific extensions.
        :param results: Results from the 'enumerate_northbound_ports' method that
                        you implemented. The type and contents are up to you to
        :return: (Deferred or None).
        """
        from nni_port import NniPort, MockNniPort

        for port in results.itervalues():
            port_no = port.get('port_no')
            assert port_no, 'Port number not found'

            # May already exist if device was not fully reachable when first enabled
            if port_no not in self.northbound_ports:
                # NOTE(review): 'name=port['port_no']' logs the port number under
                # the 'name' key; presumably a port-name field was intended — verify.
                self.log.info('processing-nni', port_no=port_no, name=port['port_no'])
                self.northbound_ports[port_no] = NniPort(self, **port) if not self.is_virtual_olt \
                    else MockNniPort(self, **port)

            if len(self.northbound_ports) >= self.max_nni_ports: # TODO: For now, limit number of NNI ports to make debugging easier
                break

        self.num_northbound_ports = len(self.northbound_ports)
def _olt_version(self):
# Version
# 0 Unknown
# 1 V1 OMCI format
# 2 V2 OMCI format
# 3 2018-01-11 or later
version = 0
info = self._rest_support.get('module-info', [dict()])
hw_mod_ver_str = next((mod.get('revision') for mod in info
if mod.get('module-name', '').lower() == 'gpon-olt-hw'), None)
if hw_mod_ver_str is not None:
try:
from datetime import datetime
hw_mod_dt = datetime.strptime(hw_mod_ver_str, '%Y-%m-%d')
version = 2 if hw_mod_dt >= datetime(2017, 9, 21) else 2
except Exception as e:
self.log.exception('ver-str-check', e=e)
return version
@inlineCallbacks
def enumerate_southbound_ports(self, device):
"""
Enumerate all southbound ports of this device.
:param device: A voltha.Device object, with possible device-type
specific extensions.
:return: (Deferred or None).
"""
###############################################################################
# Determine number of southbound ports. We know it is 16, but this keeps this
# device adapter generic for our other OLTs up to this point.
self.startup = self.rest_client.request('GET', self.GPON_PON_CONFIG_LIST_URI,
'pon-config')
try:
from codec.ietf_interfaces import IetfInterfacesState
from nni_port import MockNniPort
results = yield self.startup
ietf_interfaces = IetfInterfacesState(self.netconf_client)
if self.is_virtual_olt:
nc_results = MockNniPort.get_pon_port_state_results()
else:
self.startup = ietf_interfaces.get_state()
nc_results = yield self.startup
ports = ietf_interfaces.get_port_entries(nc_results, 'xpon')
if len(ports) == 0:
ports = ietf_interfaces.get_port_entries(nc_results,
'channel-termination')
for data in results:
pon_id = data['pon-id']
port = ports[pon_id + 1]
port['pon-id'] = pon_id
port['admin_state'] = AdminState.ENABLED \
if data.get('enabled', True)\
else AdminState.DISABLED
except Exception as e:
log.exception('enumerate_southbound_ports', e=e)
raise
returnValue(ports)
    def process_southbound_ports(self, device, results):
        """
        Process the results from the 'enumerate_southbound_ports' method.

        :param device: A voltha.Device object, with possible device-type
                       specific extensions.
        :param results: Results from the 'enumerate_southbound_ports' method that
                        you implemented. The type and contents are up to you to
        :return: (Deferred or None).
        """
        from pon_port import PonPort

        for pon in results.itervalues():
            pon_id = pon.get('pon-id')
            assert pon_id is not None, 'PON ID not found'
            # Derive the VOLTHA port number when the hardware gave no ifIndex
            if pon['ifIndex'] is None:
                pon['port_no'] = self.pon_id_to_port_number(pon_id)
            else:
                pass        # Need to adjust ONU numbering !!!!

            # May already exist if device was not fully reachable when first enabled
            if pon_id not in self.southbound_ports:
                self.southbound_ports[pon_id] = PonPort(self, **pon)

        self.num_southbound_ports = len(self.southbound_ports)
    def pon(self, pon_id):
        """Return the PonPort for `pon_id`, or None when unknown."""
        return self.southbound_ports.get(pon_id)
    def complete_device_specific_activation(self, device, reconciling):
        """
        Perform an initial network operation to discover the device hardware
        and software version. Serial Number would be helpful as well.

        This method is called from within the base class's activate generator.

        :param device: A voltha.Device object, with possible device-type
                       specific extensions. Such extensions shall be described as part of
                       the device type specification returned by device_types().

        :param reconciling: (boolean) True if taking over for another VOLTHA
        :return: (Deferred) already-fired deferred
        """
        # ZeroMQ clients
        self._zmq_startup()

        # Download support
        self._download_deferred = reactor.callLater(0, self._get_download_protocols)

        # Register for adapter messages
        self.adapter_agent.register_for_inter_adapter_messages()

        # PON Status
        self.status_poll = reactor.callLater(5, self.poll_for_status)
        return succeed('Done')
def on_heatbeat_alarm(self, active):
if not active:
self.ready_network_access()
@inlineCallbacks
def _get_download_protocols(self):
    """Query the OLT over NETCONF for the file-transfer protocols it
    supports for software downloads.

    On success caches the protocol names in self._download_protocols;
    on failure leaves it None and retries in 10 seconds.
    """
    if self._download_protocols is None:
        try:
            # NETCONF subtree filter for the supported-protocol leaves
            config = '<filter>' + \
                     '<file-servers-state xmlns="http://www.adtran.com/ns/yang/adtran-file-servers">' + \
                     '<profiles>' + \
                     '<supported-protocol/>' + \
                     '</profiles>' + \
                     '</file-servers-state>' + \
                     '</filter>'
            results = yield self.netconf_client.get(config)

            result_dict = xmltodict.parse(results.data_xml)
            entries = result_dict['data']['file-servers-state']['profiles']['supported-protocol']
            # Entry text looks URI-like; keep only the trailing scheme name
            self._download_protocols = [entry['#text'].split(':')[-1] for entry in entries
                                        if '#text' in entry]

        except Exception as e:
            self.log.exception('protocols', e=e)
            # Could not determine protocols; schedule a retry
            self._download_protocols = None
            self._download_deferred = reactor.callLater(10, self._get_download_protocols)
@inlineCallbacks
def ready_network_access(self):
    """Ensure the OLT's PON agent is listening on its ZMQ port.

    SSHes to the OLT and counts listeners bound to
    0.0.0.0:<pon_agent_port>. If none is found, enables the
    'listen on any address' agent feature flags, bounces the agent,
    and reschedules this check until the agent is ready.

    :return: (str) 'ready' when access is confirmed, 'retrying' otherwise
    """
    from net.rcmd import RCmd

    # Check for port status
    command = 'netstat -pan | grep -i 0.0.0.0:{} | wc -l'.format(self.pon_agent_port)
    rcmd = RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)

    try:
        self.log.debug('check-request', command=command)
        results = yield rcmd.execute()
        self.log.info('check-results', results=results, result_type=type(results))
        # Exactly one listener expected; anything else means (re)configure
        create_it = int(results) != 1

    except Exception as e:
        self.log.exception('find', e=e)
        create_it = True

    if create_it:
        def v1_method():
            # Legacy V1-image method (currently unused, see 'version' below)
            command = 'mkdir -p /etc/pon_agent; touch /etc/pon_agent/debug.conf; '
            command += 'ps -ae | grep -i ngpon2_agent; '
            command += 'service_supervisor stop ngpon2_agent; service_supervisor start ngpon2_agent; '
            command += 'ps -ae | grep -i ngpon2_agent'

            self.log.debug('create-request', command=command)
            return RCmd(self.ip_address, self.netconf_username, self.netconf_password, command)

        def v2_v3_method():
            # Old V2 method
            # For V2 images, want -> export ZMQ_LISTEN_ON_ANY_ADDRESS=1
            # For V3+ images, want -> export AGENT_LISTEN_ON_ANY_ADDRESS=1
            # V3 unifies listening port, compatible with v2
            cmd = "sed --in-place '/add feature/aexport ZMQ_LISTEN_ON_ANY_ADDRESS=1' " \
                  "/etc/ngpon2_agent/ngpon2_agent_feature_flags; "
            cmd += "sed --in-place '/add feature/aexport AGENT_LISTEN_ON_ANY_ADDRESS=1' " \
                   "/etc/ngpon2_agent/ngpon2_agent_feature_flags; "

            # Note: 'ps' commands are to help decorate the logfile with useful info
            cmd += 'ps -ae | grep -i ngpon2_agent; '
            cmd += 'service_supervisor stop ngpon2_agent; service_supervisor start ngpon2_agent; '
            cmd += 'ps -ae | grep -i ngpon2_agent'

            self.log.debug('create-request', command=cmd)
            return RCmd(self.ip_address, self.netconf_username, self.netconf_password, cmd)

        # Look for version
        next_run = 15
        version = v2_v3_method  # NOTE: Only v2 or later supported.

        if version is not None:
            try:
                rcmd = version()
                results = yield rcmd.execute()
                self.log.info('create-results', results=results, result_type=type(results))

            except Exception as e:
                self.log.exception('mkdir-and-restart', e=e)
    else:
        # Agent already listening; nothing to do and no retry needed
        next_run = 0

    if next_run > 0:
        self._ssh_deferred = reactor.callLater(next_run, self.ready_network_access)

    returnValue('retrying' if next_run > 0 else 'ready')
def _zmq_startup(self):
    """Create the ZeroMQ clients used to talk to the OLT's agents:
    the PON agent (OMCI/status) and the PIO agent (packet in/out)."""
    # ZeroMQ clients
    self._pon_agent = PonClient(self.ip_address,
                                port=self.pon_agent_port,
                                rx_callback=self.rx_pa_packet)

    try:
        self._pio_agent = PioClient(self.ip_address,
                                    port=self.pio_port,
                                    rx_callback=self.rx_pio_packet)
    except Exception as e:
        # PIO support is optional: continue without packet in/out
        self._pio_agent = None
        self.log.exception('pio-agent', e=e)
def _zmq_shutdown(self):
pon, self._pon_agent = self._pon_agent, None
pio, self._pio_agent = self._pio_agent, None
for c in [pon, pio]:
if c is not None:
try:
c.shutdown()
except:
pass
def _unregister_for_inter_adapter_messages(self):
try:
self.adapter_agent.unregister_for_inter_adapter_messages()
except:
pass
def disable(self):
    """Disable the OLT handler.

    Cancels pending timers, drops adapter-message registration, shuts
    down the ZMQ clients, clears the cached PIO exception map, removes
    UNI ports created for activated ONUs, then defers to the base class.
    """
    self._cancel_deferred()

    # Drop registration for adapter messages
    self._unregister_for_inter_adapter_messages()
    self._zmq_shutdown()
    self._pio_exception_map = []

    # Remove any UNI ports that were created for any activated ONUs
    uni_ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_UNI)
    for uni_port in uni_ports:
        self.adapter_agent.delete_port(self.device_id, uni_port)

    super(AdtranOltHandler, self).disable()
def reenable(self, done_deferred=None):
    """Re-enable a previously disabled OLT.

    :param done_deferred: (Deferred) optional deferred fired by the base
                          class when the re-enable completes
    """
    super(AdtranOltHandler, self).reenable(done_deferred=done_deferred)

    # Only do the re-enable if we fully came up on the very first enable attempt.
    # If we had not, the base class will have initiated the 'activate' for us
    if self._initial_enable_complete:
        self._zmq_startup()
        self.adapter_agent.register_for_inter_adapter_messages()
        self.status_poll = reactor.callLater(1, self.poll_for_status)
def reboot(self):
    """Reboot the OLT: stop local activity (timers, messaging, ZMQ) and
    let the base class drive the actual device reboot."""
    if not self._initial_enable_complete:
        # Never contacted the device on the initial startup, do 'activate' steps instead
        return

    self._cancel_deferred()

    # Drop registration for adapter messages
    self._unregister_for_inter_adapter_messages()
    self._zmq_shutdown()

    # Download supported protocols may change (if new image gets activated)
    self._download_protocols = None

    super(AdtranOltHandler, self).reboot()
def _finish_reboot(self, timeout, previous_oper_status, previous_conn_status):
    """Post-reboot restart: re-establish agent access, protocol discovery,
    adapter-message registration, ZMQ clients, and status polling.

    :param timeout: reboot timeout handed through to the base class
    :param previous_oper_status: operational status to restore
    :param previous_conn_status: connection status to restore
    """
    super(AdtranOltHandler, self)._finish_reboot(timeout, previous_oper_status, previous_conn_status)

    self.ready_network_access()

    # Download support
    self._download_deferred = reactor.callLater(0, self._get_download_protocols)

    # Register for adapter messages
    self.adapter_agent.register_for_inter_adapter_messages()
    self._zmq_startup()

    self.status_poll = reactor.callLater(5, self.poll_for_status)
def delete(self):
    """Delete this OLT device: cancel timers, drop messaging registration,
    shut down ZMQ clients, then defer to the base class."""
    self._cancel_deferred()

    # Drop registration for adapter messages
    self._unregister_for_inter_adapter_messages()
    self._zmq_shutdown()

    super(AdtranOltHandler, self).delete()
def rx_pa_packet(self, packets):
    """PON-agent receive callback.

    Decodes each packet and, when it carries OMCI, proxies the message up
    to the matching ONU adapter via the ONU's proxy address.

    :param packets: (list) raw packets received from the PON agent socket
    """
    if self._pon_agent is not None:
        for packet in packets:
            try:
                pon_id, onu_id, msg_bytes, is_omci = self._pon_agent.decode_packet(packet)

                if is_omci:
                    proxy_address = self._pon_onu_id_to_proxy_address(pon_id, onu_id)

                    if proxy_address is not None:
                        self.adapter_agent.receive_proxied_message(proxy_address, msg_bytes)
                # NOTE(review): non-OMCI packets are silently dropped here

            except Exception as e:
                self.log.exception('rx-pon-agent-packet', e=e)
def _compute_logical_port_no(self, port_no, evc_map, packet):
    """Map a physical ingress port to the logical (OpenFlow) port number
    used for packet-in delivery.

    :param port_no: (int) physical port the packet arrived on
    :param evc_map: (str) EVC-MAP name the packet matched
    :param packet: decoded packet (reserved for PON-side VLAN inspection)
    :return: (int or None) logical port number, None when undeterminable
    """
    logical_port_no = None

    # Upstream direction?
    if self.is_pon_port(port_no):
        # TODO: Validate the evc-map name
        from flow.evc_map import EVCMap
        map_info = EVCMap.decode_evc_map_name(evc_map)
        # NOTE(review): int(...) raises if 'ingress-port' is missing, which
        # would make the None-check below unreachable — confirm the
        # decode_evc_map_name contract.
        logical_port_no = int(map_info.get('ingress-port'))

        if logical_port_no is None:
            # Get PON
            pon = self.get_southbound_port(port_no)

            # Examine Packet and decode gvid
            if packet is not None:
                pass        # fallback decode not implemented yet

    elif self.is_nni_port(port_no):
        nni = self.get_northbound_port(port_no)
        logical_port = nni.get_logical_port() if nni is not None else None
        logical_port_no = logical_port.ofp_port.port_no if logical_port is not None else None

    # TODO: Need to decode base on port_no & evc_map
    return logical_port_no
def rx_pio_packet(self, packets):
    """PIO-agent receive callback.

    Handles two message types from the packet in/out agent:
    - EVCMAPS_RESPONSE: refreshes the cached per-EVC-MAP exception map.
    - PACKET_IN: converts the agent ifindex to a physical port, computes
      the logical port, strips the outer S-tag for PON-side packets, and
      forwards the frame to VOLTHA as a packet-in.

    :param packets: (list) raw packets from the PIO agent socket
    """
    self.log.debug('rx-packet-in', type=type(packets), data=packets)
    assert isinstance(packets, list), 'Expected a list of packets'

    # TODO self._pio_agent.socket.socket.closed might be a good check here as well
    if self.logical_device_id is not None and self._pio_agent is not None:
        for packet in packets:
            url_type = self._pio_agent.get_url_type(packet)
            if url_type == PioClient.UrlType.EVCMAPS_RESPONSE:
                exception_map = self._pio_agent.decode_query_response_packet(packet)
                self.log.debug('rx-pio-packet', exception_map=exception_map)
                # update latest pio exception map
                self._pio_exception_map = exception_map

            elif url_type == PioClient.UrlType.PACKET_IN:
                try:
                    from scapy.layers.l2 import Ether, Dot1Q
                    ifindex, evc_map, packet = self._pio_agent.decode_packet(packet)

                    # convert ifindex to physical port number
                    # pon port numbers start at 60001 and end at 60016 (16 pons)
                    if ifindex > 60000 and ifindex < 60017:
                        port_no = (ifindex - 60000) + 4

                    # nni port numbers start at 1401 and end at 1404 (4 nnis)
                    elif ifindex > 1400 and ifindex < 1405:
                        port_no = ifindex - 1400

                    else:
                        raise ValueError('Unknown physical port. ifindex: {}'.format(ifindex))

                    logical_port_no = self._compute_logical_port_no(port_no, evc_map, packet)

                    if logical_port_no is not None:
                        if self.is_pon_port(port_no) and packet.haslayer(Dot1Q):
                            # Scrub g-vid (outer tag); keep the inner C-tag
                            inner_pkt = packet.getlayer(Dot1Q)
                            assert inner_pkt.haslayer(Dot1Q), 'Expected a C-Tag'
                            packet = Ether(src=packet.src, dst=packet.dst, type=inner_pkt.type)\
                                / inner_pkt.payload

                        self.adapter_agent.send_packet_in(logical_device_id=self.logical_device_id,
                                                          logical_port_no=logical_port_no,
                                                          packet=str(packet))
                    else:
                        self.log.warn('logical-port-not-found', port_no=port_no, evc_map=evc_map)

                except Exception as e:
                    self.log.exception('rx-pio-packet', e=e)

            else:
                self.log.warn('packet-in-unknown-url-type', url_type=url_type)
def packet_out(self, egress_port, msg):
    """
    Pass a packet_out message content to adapter so that it can forward it
    out to the device. This is only called on root devices.

    Strips any extra 802.1Q tags from the frame, classifies it into a PIO
    exception type (eapol / igmp / dhcp), and, when a matching EVC-MAP with
    that exception installed exists, re-tags and sends it via the PIO agent.

    :param egress_port: egress logical port number
    :param msg: actual message
    :return: None """
    if self.pio_port is not None:
        from scapy.layers.l2 import Ether, Dot1Q
        from scapy.layers.inet import UDP
        from common.frameio.frameio import hexify

        self.log.debug('sending-packet-out', egress_port=egress_port,
                       msg=hexify(msg))
        pkt = Ether(msg)

        # Remove any extra tags
        while pkt.type == 0x8100:
            # Drop the 4-byte 802.1Q header (hex chars 24..31 of the dump)
            # and rebuild the raw frame from the edited hex string.
            msg_hex = hexify(msg)
            msg_hex = msg_hex[:24] + msg_hex[32:]
            bytes = []  # NOTE(review): shadows the 'bytes' builtin
            msg_hex = ''.join(msg_hex.split(" "))
            for i in range(0, len(msg_hex), 2):
                bytes.append(chr(int(msg_hex[i:i+2], 16)))

            msg = ''.join(bytes)
            pkt = Ether(msg)

        if self._pio_agent is not None:
            port, ctag, vlan_id, evcmapname = FlowEntry.get_packetout_info(self, egress_port)
            exceptiontype = None
            if pkt.type == FlowEntry.EtherType.EAPOL:
                exceptiontype = 'eapol'
                ctag = self.utility_vlan
            elif pkt.type == 2:
                # NOTE(review): 2 is not a registered EtherType; IGMP is IP
                # protocol 2, not EtherType 2 — confirm against
                # FlowEntry.EtherType and the callers of this path.
                exceptiontype = 'igmp'
            elif pkt.type == FlowEntry.EtherType.IPv4:
                # DHCP server->client (sport 67 / dport 68)
                if UDP in pkt and pkt[UDP].sport == 67 and pkt[UDP].dport == 68:
                    exceptiontype = 'dhcp'

            if exceptiontype is None:
                self.log.warn('packet-out-exceptiontype-unknown', eEtherType=pkt.type)

            elif port is not None and ctag is not None and vlan_id is not None and \
                    evcmapname is not None and self.pio_exception_exists(evcmapname, exceptiontype):

                self.log.debug('sending-pio-packet-out', port=port, ctag=ctag, vlan_id=vlan_id,
                               evcmapname=evcmapname, exceptiontype=exceptiontype)
                # Re-tag: outer S-tag (vlan_id) + inner C-tag (ctag)
                out_pkt = (
                    Ether(src=pkt.src, dst=pkt.dst) /
                    Dot1Q(vlan=vlan_id) /
                    Dot1Q(vlan=ctag, type=pkt.type) /
                    pkt.payload
                )
                data = self._pio_agent.encode_packet(port, str(out_pkt), evcmapname, exceptiontype)
                self.log.debug('pio-packet-out', message=data)
                try:
                    self._pio_agent.send(data)

                except Exception as e:
                    self.log.exception('pio-send', egress_port=egress_port, e=e)
            else:
                self.log.warn('packet-out-flow-not-found', egress_port=egress_port)
def pio_exception_exists(self, name, exp):
# verify exception is in the OLT's reported exception map for this evcmap name
if exp is None:
return False
entry = next((entry for entry in self._pio_exception_map if entry['evc-map-name'] == name), None)
if entry is None:
return False
if exp not in entry['exception-types']:
return False
return True
def send_packet_exceptions_request(self):
    """Ask the PIO agent for its current EVC-MAP packet-exception map.

    The asynchronous response is handled by rx_pio_packet()
    (EVCMAPS_RESPONSE), which refreshes self._pio_exception_map.
    """
    if self._pio_agent is not None:
        request = self._pio_agent.query_request_packet()
        try:
            self._pio_agent.send(request)

        except Exception as e:
            self.log.exception('pio-send', e=e)
def poll_for_status(self):
    """Kick off one PON status poll over REST.

    Only polls while the device is enabled, not activating, and a REST
    client exists; otherwise completes immediately as 'inactive'. The
    result (either way) is funneled into status_poll_complete().
    """
    self.log.debug('Initiating-status-poll')

    device = self.adapter_agent.get_device(self.device_id)

    if device.admin_state == AdminState.ENABLED and\
            device.oper_status != OperStatus.ACTIVATING and\
            self.rest_client is not None:
        uri = AdtranOltHandler.GPON_OLT_HW_STATE_URI
        name = 'pon-status-poll'
        self.status_poll = self.rest_client.request('GET', uri, name=name)
        self.status_poll.addBoth(self.status_poll_complete)
    else:
        self.status_poll = reactor.callLater(0, self.status_poll_complete, 'inactive')
def status_poll_complete(self, results):
    """
    Results of the status poll.

    Feeds per-PON status into each running PonPort, then reschedules the
    next poll with +/-10% jitter so polls do not synchronize.

    :param results: REST response dict (expects a 'pon' key) or the
                    string 'inactive' when polling was skipped
    """
    from pon_port import PonPort

    if isinstance(results, dict) and 'pon' in results:
        try:
            self.log.debug('status-success')
            for pon_id, pon in OltState(results).pons.iteritems():   # Python 2
                pon_port = self.southbound_ports.get(pon_id, None)
                if pon_port is not None and pon_port.state == PonPort.State.RUNNING:
                    pon_port.process_status_poll(pon)

        except Exception as e:
            self.log.exception('PON-status-poll', e=e)

    # Reschedule (with jitter to avoid synchronized polling)
    delay = self.status_poll_interval
    delay += random.uniform(-delay / 10, delay / 10)

    self.status_poll = reactor.callLater(delay, self.poll_for_status)
def _create_utility_flow(self):
    """Build the helper flow that steers utility-VLAN traffic arriving on
    NNI 1 out of PON 0, used for egress Packet In/Out support.

    :return: an ofp flow-stats entry (mk_flow_stat result)
    """
    nni_port = self.northbound_ports.get(1).port_no
    pon_port = self.southbound_ports.get(0).port_no

    return mk_flow_stat(
        priority=200,
        match_fields=[
            in_port(nni_port),
            vlan_vid(ofp.OFPVID_PRESENT + self.utility_vlan)
        ],
        actions=[output(pon_port)]
    )
@inlineCallbacks
def update_flow_table(self, flows, device):
    """
    Update the flow table on the OLT. If an existing flow is not in the list, it needs
    to be removed from the device.

    :param flows: List of flows that should be installed upon completion of this function
    :param device: A voltha.Device object, with possible device-type
                   specific extensions.
    """
    self.log.debug('bulk-flow-update', num_flows=len(flows),
                   device_id=device.id, flows=flows)
    valid_flows = []

    if flows:
        # Special helper egress Packet In/Out flow
        special_flow = self._create_utility_flow()
        self._create_and_install_flow(special_flow, valid_flows)

        # verify exception flows were installed by OLT PET process
        reactor.callLater(5, self.send_packet_exceptions_request)

    # Now process bulk flows
    for flow in flows:
        try:
            self._create_and_install_flow(flow, valid_flows)

        except Exception as e:
            self.log.exception('bulk-flow-update-add', e=e)

    # Now drop all flows from this device that were not in this bulk update
    try:
        yield FlowEntry.drop_missing_flows(self, valid_flows)

    except Exception as e:
        self.log.exception('bulk-flow-update-remove', e=e)

def _create_and_install_flow(self, flow, valid_flows):
    """Decode one flow, record its id, and schedule its EVC install.

    FlowEntry.create() returns the flow entry (possibly a match to an
    existing entry, since this is a bulk update; None only if decoding
    failed on an unsupported field) and the EVC this flow belongs to
    (an existing EVC gaining another EVC-MAP, a brand-new EVC, or None
    when no valid EVC can be created yet).

    :param flow: flow-stats entry to install
    :param valid_flows: (list) mutated in place with the new flow id
    """
    valid_flow, evc = FlowEntry.create(flow, self)

    if valid_flow is not None:
        valid_flows.append(valid_flow.flow_id)

    if evc is not None:
        try:
            evc.schedule_install()
            self.add_evc(evc)

        except Exception as e:
            evc.status = 'EVC Install Exception: {}'.format(e.message)
            self.log.exception('EVC-install', e=e)
# @inlineCallbacks
def send_proxied_message(self, proxy_address, msg):
    """Forward an (OMCI) message from an ONU adapter to the ONU via the
    PON agent.

    :param proxy_address: (ProxyAddress) identifies the target PON/ONU
    :param msg: OMCI message (scapy Packet or raw string)
    """
    self.log.debug('sending-proxied-message', msg=msg)

    if isinstance(msg, Packet):
        msg = str(msg)

    if self._pon_agent is not None:
        pon_id, onu_id = self._proxy_address_to_pon_onu_id(proxy_address)

        pon = self.southbound_ports.get(pon_id)

        if pon is not None and pon.enabled:
            onu = pon.onu(onu_id)

            if onu is not None and onu.enabled:
                data = self._pon_agent.encode_omci_packet(msg, pon_id, onu_id)
                try:
                    self._pon_agent.send(data)

                except Exception as e:
                    self.log.exception('pon-agent-send', pon_id=pon_id, onu_id=onu_id, e=e)
            else:
                # Dropped: the target ONU is unknown or disabled
                self.log.debug('onu-invalid-or-disabled', pon_id=pon_id, onu_id=onu_id)
        else:
            # Dropped: the target PON is unknown or disabled
            self.log.debug('pon-invalid-or-disabled', pon_id=pon_id)
def _onu_offset(self, onu_id):
    """Map an ONU ID (zero-based) into the device's port-number space,
    above the southbound PON port numbers.

    :param onu_id: (int) zero-based ONU ID
    :return: (int) offset port number
    """
    # Start ONU's just past the southbound PON port numbers. Since ONU ID's start
    # at zero, add one
    # assert AdtranOltHandler.BASE_ONU_OFFSET > (self.num_northbound_ports + self.num_southbound_ports + 1)
    # NOTE(review): assert is stripped under 'python -O'; this is a sanity
    # check, not input validation.
    assert AdtranOltHandler.BASE_ONU_OFFSET > (4 + self.num_southbound_ports + 1)  # Skip over uninitialized ports
    return AdtranOltHandler.BASE_ONU_OFFSET + onu_id
def _pon_onu_id_to_proxy_address(self, pon_id, onu_id):
if pon_id in self.southbound_ports:
pon = self.southbound_ports[pon_id]
onu = pon.onu(onu_id)
proxy_address = onu.proxy_address if onu is not None else None
else:
proxy_address = None
return proxy_address
def _proxy_address_to_pon_onu_id(self, proxy_address):
    """
    Convert the proxy address to the PON-ID and ONU-ID

    :param proxy_address: (ProxyAddress)
    :return: (tuple) pon-id, onu-id
    """
    onu_id = proxy_address.onu_id
    # channel_id carries the physical PON port number; map it back to the ID
    pon_id = self._port_number_to_pon_id(proxy_address.channel_id)

    return pon_id, onu_id
def pon_id_to_port_number(self, pon_id):
    """Map a PON ID to its physical port number.

    :param pon_id: (int) PON ID
    :return: (int) physical port number (PON IDs sit 5 above zero,
             one past the four uninitialized ports)
    """
    offset = 1 + 4  # Skip over uninitialized ports
    return pon_id + offset
def _port_number_to_pon_id(self, port):
    """Inverse of pon_id_to_port_number(), also accepting UNI port numbers.

    :param port: (int) physical PON port number, or a UNI port number
                 (which is first mapped back to its OLT PON port)
    :return: (int) PON ID
    """
    if self.is_uni_port(port):
        # Convert to OLT device port
        port = platform.intf_id_from_uni_port_num(port)

    return port - 1 - 4  # Skip over uninitialized ports
def is_pon_port(self, port):
return self._port_number_to_pon_id(port) in self.southbound_ports
def is_uni_port(self, port):
    """Return True for logical UNI port numbers, i.e. ports in the range
    (5 << 11) .. OFPP_MAX inclusive."""
    uni_base = 5 << 11
    return uni_base <= port <= OFPP_MAX
def get_southbound_port(self, port):
pon_id = self._port_number_to_pon_id(port)
return self.southbound_ports.get(pon_id, None)
def get_northbound_port(self, port):
return self.northbound_ports.get(port, None)
def get_port_name(self, port, logical_name=False):
"""
Get the name for a port
Port names are used in various ways within and outside of VOLTHA.
Typically, the physical port name will be used during device handler conversations
with the hardware (REST, NETCONF, ...) while the logical port name is what the
outside world (ONOS, SEBA, ...) uses.
All ports have a physical port name, but only ports exposed through VOLTHA
as a logical port will have a logical port name
"""
if self.is_nni_port(port):
port = self.get_northbound_port(port)
return port.logical_port_name if logical_name else port.physical_port_name
if self.is_pon_port(port):
port = self.get_southbound_port(port)
return port.logical_port_name if logical_name else port.physical_port_name
if self.is_uni_port(port):
return 'uni-{}'.format(port)
if self.is_logical_port(port):
raise NotImplemented('Logical OpenFlow ports are not supported')
def _update_download_status(self, request, download):
    """Copy a Download object's state into the ImageDownload request and
    push the update to the adapter agent.

    :param request: (voltha ImageDownload) record to update in place
    :param download: (Download or None) source of truth; None marks the
                     request as unknown/not found
    """
    if download is not None:
        request.state = download.download_state
        request.reason = download.failure_reason
        request.image_state = download.image_state
        request.additional_info = download.additional_info
        request.downloaded_bytes = download.downloaded_bytes
    else:
        request.state = ImageDownload.DOWNLOAD_UNKNOWN
        request.reason = ImageDownload.UNKNOWN_ERROR
        request.image_state = ImageDownload.IMAGE_UNKNOWN
        request.additional_info = "Download request '{}' not found".format(request.name)
        request.downloaded_bytes = 0

    self.adapter_agent.update_image_download(request)
def start_download(self, device, request, done):
    """
    This is called to request downloading a specified image into
    the standby partition of a device based on a NBI call.

    On any failure the request is marked failed, the device admin state
    is restored to ENABLED, and the exception is re-raised.

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) Shall be fired to acknowledge the download.
    """
    log.info('image_download', request=request)

    try:
        if not self._initial_enable_complete:
            # Never contacted the device on the initial startup, do 'activate' steps instead
            raise Exception('Device has not finished initial activation')

        if request.name in self._downloads:
            raise Exception("Download request with name '{}' already exists".
                            format(request.name))
        try:
            download = Download.create(self, request, self._download_protocols)

        except Exception:
            request.additional_info = 'Download request creation failed due to exception'
            raise

        try:
            self._downloads[download.name] = download
            self._update_download_status(request, download)
            done.callback('started')
            return done

        except Exception:
            # Roll back the registration before reporting failure
            request.additional_info = 'Download request startup failed due to exception'
            del self._downloads[download.name]
            download.cancel_download(request)
            raise

    except Exception as e:
        self.log.exception('create', e=e)

        request.reason = ImageDownload.UNKNOWN_ERROR if self._initial_enable_complete\
            else ImageDownload.DEVICE_BUSY
        request.state = ImageDownload.DOWNLOAD_FAILED
        if not request.additional_info:
            request.additional_info = e.message     # Python 2 exception attribute
        self.adapter_agent.update_image_download(request)

        # restore admin state to enabled
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)
        raise
def download_status(self, device, request, done):
    """
    This is called to inquire about a requested image download status based
    on a NBI call.

    The adapter is expected to update the DownloadImage DB object with the
    query result

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) Shall be fired to acknowledge
    """
    log.info('download_status', request=request)
    download = self._downloads.get(request.name)

    self._update_download_status(request, download)

    if request.state not in [ImageDownload.DOWNLOAD_STARTED,
                             ImageDownload.DOWNLOAD_SUCCEEDED,
                             ImageDownload.DOWNLOAD_FAILED]:
        # restore admin state to enabled
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)

    done.callback(request.state)
    return done
def cancel_download(self, device, request, done):
    """
    This is called to cancel a requested image download based on a NBI
    call. The admin state of the device will not change after the
    download.

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) Shall be fired to acknowledge
    """
    log.info('cancel_download', request=request)

    download = self._downloads.get(request.name)

    if download is not None:
        del self._downloads[request.name]
        result = download.cancel_download(request)
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: report it and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    if device.admin_state == AdminState.DOWNLOADING_IMAGE:
        device.admin_state = AdminState.ENABLED
        self.adapter_agent.update_device(device)

    return done
def activate_image(self, device, request, done):
    """
    This is called to activate a downloaded image from a standby partition
    into active partition.

    Depending on the device implementation, this call may or may not
    cause device reboot. If no reboot, then a reboot is required to make
    the activated image running on device

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) OperationResponse object.
    """
    log.info('activate_image', request=request)

    download = self._downloads.get(request.name)
    if download is not None:
        del self._downloads[request.name]
        result = download.activate_image()
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: report it and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    # restore admin state to enabled
    device.admin_state = AdminState.ENABLED
    self.adapter_agent.update_device(device)
    return done
def revert_image(self, device, request, done):
    """
    This is called to deactivate the specified image at active partition,
    and revert to previous image at standby partition.

    Depending on the device implementation, this call may or may not
    cause device reboot. If no reboot, then a reboot is required to
    make the previous image running on device

    :param device: A Voltha.Device object.
    :param request: A Voltha.ImageDownload object.
    :param done: (Deferred) Deferred to fire when done
    :return: (Deferred) OperationResponse object.
    """
    log.info('revert_image', request=request)

    download = self._downloads.get(request.name)
    if download is not None:
        del self._downloads[request.name]
        result = download.revert_image()
        self._update_download_status(request, download)
        done.callback(result)
    else:
        # Unknown request: report it and fail the deferred
        self._update_download_status(request, download)
        done.errback(KeyError('Download request not found'))

    # restore admin state to enabled
    device.admin_state = AdminState.ENABLED
    self.adapter_agent.update_device(device)
    return done
def add_onu_device(self, pon_id, onu_id, serial_number):
    """Register a newly discovered ONU as a child device of this OLT.

    :param pon_id: (int) PON the ONU was discovered on
    :param onu_id: (int) ONU ID assigned on that PON
    :param serial_number: (str) ONU serial number; first 4 chars are the
                          vendor ID
    :return: the existing child device if one matches the serial number;
             None otherwise.
             NOTE(review): the newly added device is NOT returned on the
             success path — callers apparently re-fetch it; confirm.
    """
    onu_device = self.adapter_agent.get_child_device(self.device_id,
                                                     serial_number=serial_number)
    if onu_device is not None:
        return onu_device

    try:
        from voltha.protos.voltha_pb2 import Device

        # NOTE - channel_id of onu is set to pon_id
        pon_port = self.pon_id_to_port_number(pon_id)
        proxy_address = Device.ProxyAddress(device_id=self.device_id,
                                            channel_id=pon_port,
                                            onu_id=onu_id,
                                            onu_session_id=onu_id)

        self.log.debug("added-onu", port_no=pon_id,
                       onu_id=onu_id, serial_number=serial_number,
                       proxy_address=proxy_address)

        self.adapter_agent.add_onu_device(
            parent_device_id=self.device_id,
            parent_port_no=pon_port,
            vendor_id=serial_number[:4],
            proxy_address=proxy_address,
            root=True,
            serial_number=serial_number,
            admin_state=AdminState.ENABLED,
        )

    except Exception as e:
        self.log.exception('onu-activation-failed', e=e)
        return None
def setup_onu_tech_profile(self, pon_id, onu_id, logical_port_number):
    """Push tech-profile information to the ONU's adapter.

    Resolves the ONU's UNI and OpenFlow port names, looks up the tech
    profile path, and publishes a 'download_tech_profile' inter-adapter
    event to the ONU device.

    :param pon_id: (int) PON the ONU is on
    :param onu_id: (int) ONU ID on that PON
    :param logical_port_number: (int) the ONU UNI's logical port number
    """
    # Send ONU Adapter related tech profile information.
    self.log.debug('add-tech-profile-info')

    uni_id = self.platform.uni_id_from_uni_port(logical_port_number)
    parent_port_no = self.pon_id_to_port_number(pon_id)
    onu_device = self.adapter_agent.get_child_device(self.device_id,
                                                     onu_id=onu_id,
                                                     parent_port_no=parent_port_no)

    ofp_port_name, ofp_port_no = self.get_ofp_port_name(pon_id, onu_id,
                                                        logical_port_number)
    if ofp_port_name is None:
        self.log.error("port-name-not-found")
        return

    tp_path = self.get_tp_path(pon_id, ofp_port_name)

    self.log.debug('Load-tech-profile-request-to-onu-handler', tp_path=tp_path)

    msg = {'proxy_address': onu_device.proxy_address, 'uni_id': uni_id,
           'event': 'download_tech_profile', 'event_data': tp_path}

    # Send the event message to the ONU adapter
    self.adapter_agent.publish_inter_adapter_message(onu_device.id, msg)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
import six
from troveclient.openstack.common.gettextutils import _ # noqa
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
    '': 1,
    't': 1024 ** 4,
    'g': 1024 ** 3,
    'm': 1024 ** 2,
    'k': 1024,
}
# Splits a size string into its numeric magnitude and an optional
# non-digit unit suffix, e.g. '10KB' -> ('10', 'KB'). Used by to_bytes().
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')

# Case-insensitive values recognized by bool_from_string()
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# Used by to_slug(): first strip non-word characters, then collapse
# whitespace/hyphen runs into single hyphens.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing

    :param subject: value to interpret (anything bool_from_string accepts)
    :returns: (int) 1 when the value parses as True, 0 otherwise
    """
    # Was the fragile pre-Python-2.5 idiom 'x and 1 or 0'; a conditional
    # expression states the intent directly.
    return 1 if bool_from_string(subject) else 0
def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    if lowered in FALSE_STRINGS:
        return False
    if not strict:
        # Lenient mode: anything unrecognized is simply False
        return False

    # Strict mode: an unrecognized value is an error
    acceptable = ', '.join("'%s'" % s
                           for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming str using `incoming` if they're not already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode (Python 2) / str (Python 3): nothing to do
    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming str/unicode using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        # Unicode input: encode straight to the target encoding
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)

    # Already a byte string in the right encoding (or empty)
    return text
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.
    :raises TypeError: on a malformed value or an unknown unit suffix
    """
    match = BYTE_REGEX.search(text)

    if not match:
        if text:
            raise TypeError(_('Invalid string format: %s') % text)
        return default

    magnitude = int(match.group(1))
    suffix = match.group(2)
    if not suffix:
        # Bare number: already a byte count
        return magnitude

    # Normalize e.g. 'KB'/'kb'/'k' down to the multiplier-table key
    mult_key = suffix.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        raise TypeError(_('Unknown byte multiplier: %s') % suffix)

    return magnitude * multiplier
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    # Drop disallowed characters, then collapse whitespace runs to hyphens.
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader implementation for SavedModel with hermetic, language-neutral exports.
"""
import os
import sys
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# API label for SavedModel metrics.
_LOADER_LABEL = "loader"
def parse_saved_model_with_debug_info(export_dir):
  """Reads the savedmodel as well as the graph debug info.

  Args:
    export_dir: Directory containing the SavedModel and GraphDebugInfo files.

  Returns:
    `SavedModel` and `GraphDebugInfo` protocol buffers.

  Raises:
    IOError: If the saved model file does not exist, or cannot be successfully
    parsed. Missing graph debug info file is fine.
  """
  saved_model = parse_saved_model(export_dir)

  debug_info_path = file_io.join(
      saved_model_utils.get_debug_dir(export_dir),
      constants.DEBUG_INFO_FILENAME_PB)
  debug_info = graph_debug_info_pb2.GraphDebugInfo()
  if file_io.file_exists(debug_info_path):
    with file_io.FileIO(debug_info_path, "rb") as debug_file:
      try:
        debug_info.ParseFromString(debug_file.read())
      except message.DecodeError as e:
        # Chain the proto decode error explicitly so the root cause is
        # preserved as __cause__ on the raised IOError.
        raise IOError(f"Cannot parse file {debug_info_path}: {e}.") from e

  return (saved_model, debug_info)
@tf_export("__internal__.saved_model.parse_saved_model", v1=[])
def parse_saved_model(export_dir):
  """Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.

  Args:
    export_dir: String or Pathlike, path to the directory containing the
      SavedModel file.

  Returns:
    A `SavedModel` protocol buffer.

  Raises:
    IOError: If the file does not exist, or cannot be successfully parsed.
  """
  # Build the path to the SavedModel in pbtxt format.
  path_to_pbtxt = file_io.join(
      compat.as_bytes(compat.path_to_str(export_dir)),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
  # Build the path to the SavedModel in pb format.
  path_to_pb = file_io.join(
      compat.as_bytes(compat.path_to_str(export_dir)),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
  # Parse the SavedModel protocol buffer.
  saved_model = saved_model_pb2.SavedModel()
  if file_io.file_exists(path_to_pb):
    # The binary format takes precedence when both files exist.
    with file_io.FileIO(path_to_pb, "rb") as f:
      file_content = f.read()
    try:
      saved_model.ParseFromString(file_content)
      return saved_model
    except message.DecodeError as e:
      # Chain explicitly so the proto decode error survives as __cause__.
      raise IOError(f"Cannot parse file {path_to_pb}: {e}.") from e
  elif file_io.file_exists(path_to_pbtxt):
    with file_io.FileIO(path_to_pbtxt, "rb") as f:
      file_content = f.read()
    try:
      text_format.Merge(file_content.decode("utf-8"), saved_model)
      return saved_model
    except text_format.ParseError as e:
      raise IOError(f"Cannot parse file {path_to_pbtxt}: {e}.") from e
  else:
    raise IOError(
        f"SavedModel file does not exist at: {export_dir}{os.path.sep}"
        f"{{{constants.SAVED_MODEL_FILENAME_PBTXT}|"
        f"{constants.SAVED_MODEL_FILENAME_PB}}}")
def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):
  """Gets the asset tensors, if defined in the meta graph def to load.

  Args:
    export_dir: Directory where the SavedModel is located.
    meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
    import_scope: Optional `string` -- if specified, prepend this followed by
        '/' to all returned asset tensor names.

  Returns:
    A dictionary of asset tensors, keyed by the name of the asset tensor. The
    value in the map corresponds to the absolute path of the asset file.
  """
  # Assets live either directly in `asset_file_def`, or packed as `Any`
  # protos inside the assets collection of `collection_def`.
  if meta_graph_def_to_load.asset_file_def:
    asset_defs = meta_graph_def_to_load.asset_file_def
  else:
    asset_defs = []
    collection_def = meta_graph_def_to_load.collection_def
    if constants.ASSETS_KEY in collection_def:
      for packed_asset in collection_def[constants.ASSETS_KEY].any_list.value:
        unpacked_asset = meta_graph_pb2.AssetFileDef()
        packed_asset.Unpack(unpacked_asset)
        asset_defs.append(unpacked_asset)

  # Location of the assets for SavedModel.
  assets_dir = file_io.join(
      compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY))

  # Map each (optionally scoped) tensor name to the asset's absolute path.
  asset_tensor_dict = {}
  for asset_def in asset_defs:
    tensor_name = asset_def.tensor_info.name
    if import_scope:
      tensor_name = "%s/%s" % (import_scope, tensor_name)
    asset_tensor_dict[tensor_name] = file_io.join(
        compat.as_bytes(assets_dir), compat.as_bytes(asset_def.filename))
  return asset_tensor_dict
def _get_main_op_tensor(
    meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
  """Gets the main op tensor, if one exists.

  Args:
    meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
    init_op_key: name of the collection to check; should be one of MAIN_OP_KEY
      or the deprecated LEGACY_INIT_OP_KEY

  Returns:
    The main op tensor, if it exists and `None` otherwise.

  Raises:
    RuntimeError: If the collection def corresponding to the main op key has
    other than exactly one tensor.
  """
  # TODO(kathywu): Rename this method to _get_op_from_collection when
  # dependency from SavedModelEstimator is removed.
  collection_def = meta_graph_def_to_load.collection_def
  if init_op_key not in collection_def:
    return None
  node_names = collection_def[init_op_key].node_list.value
  # A SavedModel may carry at most one init op per collection key.
  if len(node_names) != 1:
    raise RuntimeError("Expected exactly one SavedModel init op. "
                       f"Found {len(node_names)}: {node_names}.")
  return ops.get_collection(init_op_key)[0]
def _get_op_from_collection(meta_graph_def, op_key):
  """Returns the single op stored under `op_key`, or None if absent."""
  # Thin alias kept for naming symmetry with _get_op_from_signature_def.
  return _get_main_op_tensor(meta_graph_def, op_key)
def _get_op_from_signature_def(meta_graph_def, op_signature_key, import_scope):
  """Retrieve op stored in the imported meta graph's signature def."""
  signature_defs = meta_graph_def.signature_def
  if op_signature_key not in signature_defs:
    return None
  return signature_def_utils.load_op_from_signature_def(
      signature_defs[op_signature_key], op_signature_key, import_scope)
def get_init_op(meta_graph_def, import_scope=None):
  """Returns the init op, preferring the signature def over collections."""
  init_op = _get_op_from_signature_def(
      meta_graph_def, constants.INIT_OP_SIGNATURE_KEY, import_scope)
  if not init_op:
    init_op = _get_op_from_collection(meta_graph_def, constants.MAIN_OP_KEY)
  if not init_op:
    # Last resort: the deprecated legacy init-op collection key.
    init_op = _get_op_from_collection(meta_graph_def,
                                      constants.LEGACY_INIT_OP_KEY)
  return init_op
def get_train_op(meta_graph_def, import_scope=None):
  """Returns the train op from the signature def, else from collections."""
  found_op = _get_op_from_signature_def(
      meta_graph_def, constants.TRAIN_OP_SIGNATURE_KEY, import_scope)
  if found_op is not None:
    return found_op
  return _get_op_from_collection(meta_graph_def, constants.TRAIN_OP_KEY)
@tf_export(v1=[
    "saved_model.contains_saved_model",
    "saved_model.maybe_saved_model_directory",
    "saved_model.loader.maybe_saved_model_directory"
])
@deprecation.deprecated_endpoints(
    "saved_model.loader.maybe_saved_model_directory")
def maybe_saved_model_directory(export_dir):
  """Checks whether the provided export directory could contain a SavedModel.

  Note that the method does not load any data by itself. If the method returns
  `false`, the export directory definitely does not contain a SavedModel. If the
  method returns `true`, the export directory may contain a SavedModel but
  provides no guarantee that it can be loaded.

  Args:
    export_dir: Absolute string path to possible export location. For example,
                '/my/foo/model'.

  Returns:
    True if the export directory contains SavedModel files, False otherwise.
  """
  # Either serialization format (text or binary) marks a SavedModel directory.
  candidate_names = (constants.SAVED_MODEL_FILENAME_PBTXT,
                     constants.SAVED_MODEL_FILENAME_PB)
  return any(
      file_io.file_exists(file_io.join(export_dir, name))
      for name in candidate_names)
@tf_export("saved_model.contains_saved_model", v1=[])
def contains_saved_model(export_dir):
  """Checks whether the provided export directory could contain a SavedModel.

  Note that the method does not load any data by itself. If the method returns
  `false`, the export directory definitely does not contain a SavedModel. If the
  method returns `true`, the export directory may contain a SavedModel but
  provides no guarantee that it can be loaded.

  Args:
    export_dir: Absolute string path to possible export location. For example,
                '/my/foo/model'.

  Returns:
    True if the export directory contains SavedModel files, False otherwise.
  """
  # Same check as the v1 endpoint; kept as a separate symbol so each API
  # surface can be documented and deprecated independently.
  return maybe_saved_model_directory(export_dir)
@tf_export(v1=["saved_model.load", "saved_model.loader.load"])
@deprecation.deprecated(
    None,
    "This function will only be available through the v1 compatibility "
    "library as tf.compat.v1.saved_model.loader.load or "
    "tf.compat.v1.saved_model.load. There will be a new function for importing "
    "SavedModels in Tensorflow 2.0.")
def load(sess, tags, export_dir, import_scope=None, **saver_kwargs):
  """Loads the model from a SavedModel as specified by tags.

  Args:
    sess: The TensorFlow session to restore the variables.
    tags: Set of string tags to identify the required MetaGraphDef. These should
        correspond to the tags used when saving the variables using the
        SavedModel `save()` API.
    export_dir: Directory in which the SavedModel protocol buffer and variables
        to be loaded are located.
    import_scope: Optional `string` -- if specified, prepend this string
        followed by '/' to all loaded tensor names. This scope is applied to
        tensor instances loaded into the passed session, but it is *not* written
        through to the static `MetaGraphDef` protocol buffer that is returned.
    **saver_kwargs: Optional keyword arguments passed through to Saver.

  Returns:
    The `MetaGraphDef` protocol buffer loaded in the provided session. This
    can be used to further extract signature-defs, collection-defs, etc.

  Raises:
    RuntimeError: MetaGraphDef associated with the tags cannot be found.

  @compatibility(TF2)

  `tf.compat.v1.saved_model.load` or `tf.compat.v1.saved_model.loader.load` is
  not compatible with eager execution. Please use `tf.saved_model.load` instead
  to load your model. You can refer to the [SavedModel guide]
  (https://www.tensorflow.org/guide/saved_model) for more information as well as
  "Importing SavedModels from TensorFlow 1.x" in the [`tf.saved_model.load`]
  (https://www.tensorflow.org/api_docs/python/tf/saved_model/load) docstring.

  #### How to Map Arguments

  | TF1 Arg Name          | TF2 Arg Name    | Note                       |
  | :-------------------- | :-------------- | :------------------------- |
  | `sess`                | Not supported   | -                          |
  | `tags`                | `tags`          | -                          |
  | `export_dir`          | `export_dir`    | -                          |
  | `import_scope`        | Not supported   | Name scopes are not needed.
  :                       :                 : By default, variables are  :
  :                       :                 : associated with the loaded :
  :                       :                 : object and function names  :
  :                       :                 : are deduped.               :
  | `saver_kwargs`        | Not supported   | -                          |

  #### Before & After Usage Example

  Before:

  ```
  with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.loader.load(sess, ["foo-tag"], export_dir)
  ```

  After:

  ```
  model = tf.saved_model.load(export_dir, tags=["foo-tag"])
  ```
  @end_compatibility
  """
  # Delegate all the work to the class-based loader.
  return SavedModelLoader(export_dir).load(
      sess, tags, import_scope, **saver_kwargs)
class SavedModelLoader(object):
  """Load graphs and restore variable values from a `SavedModel`."""

  def __init__(self, export_dir):
    """Creates a `SavedModelLoader`.

    Args:
      export_dir: Directory in which the SavedModel protocol buffer and
        variables to be loaded are located.
    """
    self._export_dir = export_dir
    # Path prefix for the variables checkpoint inside the SavedModel.
    self._variables_path = saved_model_utils.get_variables_path(export_dir)
    # Parsed eagerly; raises IOError if the SavedModel is missing or invalid.
    self._saved_model = parse_saved_model(export_dir)

  @property
  def export_dir(self):
    """Directory containing the SavedModel."""
    return self._export_dir

  @property
  def variables_path(self):
    """Path to variable checkpoint files."""
    return self._variables_path

  @property
  def saved_model(self):
    """SavedModel object parsed from the export directory."""
    return self._saved_model

  def get_meta_graph_def_from_tags(self, tags):
    """Return MetaGraphDef with the exact specified tags.

    Args:
      tags: A list or set of string tags that identify the MetaGraphDef.

    Returns:
      MetaGraphDef with the same tags.

    Raises:
      RuntimeError: if no metagraphs were found with the associated tags.
    """
    found_match = False
    available_tags = []
    for meta_graph_def in self._saved_model.meta_graphs:
      available_tags.append(set(meta_graph_def.meta_info_def.tags))
      # Tag sets must match exactly; subsets/supersets do not count.
      if set(meta_graph_def.meta_info_def.tags) == set(tags):
        meta_graph_def_to_load = meta_graph_def
        found_match = True
        break
    if not found_match:
      raise RuntimeError(
          f"MetaGraphDef associated with tags {str(tags).strip('[]')} "
          "could not be found in SavedModel, with available tags "
          f"'{available_tags}'. To inspect available tag-sets in"
          " the SavedModel, please use the SavedModel CLI: `saved_model_cli`.")
    return meta_graph_def_to_load

  def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):
    """Load ops and nodes from SavedModel MetaGraph into graph.

    Args:
      graph: tf.Graph object.
      tags: a set of string tags identifying a MetaGraphDef.
      import_scope: Optional `string` -- if specified, prepend this string
        followed by '/' to all loaded tensor names. This scope is applied to
        tensor instances loaded into the passed session, but it is *not* written
        through to the static `MetaGraphDef` protocol buffer that is returned.
      **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.

    Returns:
      A tuple of
        * Saver defined by the MetaGraph, which can be used to restore the
          variable values.
        * List of `Operation`/`Tensor` objects returned from
          `tf.import_graph_def` (may be `None`).
    """
    meta_graph_def = self.get_meta_graph_def_from_tags(tags)
    if sys.byteorder == "big":
      # Serialized tensor content is stored little-endian; swap it on
      # big-endian hosts before importing the graph.
      saved_model_utils.swap_function_tensor_content(meta_graph_def, "little",
                                                     "big")
    with graph.as_default():
      return tf_saver._import_meta_graph_with_return_elements(  # pylint: disable=protected-access
          meta_graph_def, import_scope=import_scope, **saver_kwargs)

  def restore_variables(self, sess, saver, import_scope=None):
    """Restore SavedModel variable values into the session.

    Args:
      sess: tf.compat.v1.Session to restore variable values.
      saver: a tf.compat.v1.train.Saver object. Can be None if there are no
        variables in graph. This may be the saver returned by the load_graph()
        function, or a default `tf.compat.v1.train.Saver()`.
      import_scope: Optional `string` -- if specified, prepend this string
        followed by '/' to all loaded tensor names. This scope is applied to
        tensor instances loaded into the passed session, but it is *not* written
        through to the static `MetaGraphDef` protocol buffer that is returned.

    Raises:
      ValueError: if no saver was passed to the saver argument, and there are
        variables in the graph.
    """
    with sess.graph.as_default():
      # A missing saver is only acceptable when the graph has nothing to save.
      if (saver is None and
          not variables._all_saveable_objects(scope=import_scope)):  # pylint: disable=protected-access
        tf_logging.info("The specified SavedModel has no variables; no "
                        "checkpoints were restored.")
      elif isinstance(saver, tf_saver.Saver):
        saver.restore(sess, self._variables_path)
      else:
        raise ValueError(
            "No tf.train.Saver object was passed to the function "
            "`SavedModelLoader.restore_variables`. Since there are variables in"
            " the graph, a saver is required.")

  def run_init_ops(self, sess, tags, import_scope=None):
    """Run initialization ops defined in the `MetaGraphDef`.

    Args:
      sess: tf.compat.v1.Session to restore variable values.
      tags: a set of string tags identifying a MetaGraphDef.
      import_scope: Optional `string` -- if specified, prepend this string
        followed by '/' to all loaded tensor names. This scope is applied to
        tensor instances loaded into the passed session, but it is *not* written
        through to the static `MetaGraphDef` protocol buffer that is returned.
    """
    meta_graph_def = self.get_meta_graph_def_from_tags(tags)
    with sess.graph.as_default():
      # Get asset tensors, if any.
      asset_tensors_dictionary = get_asset_tensors(
          self._export_dir, meta_graph_def, import_scope=import_scope)

      init_op = get_init_op(meta_graph_def, import_scope)
      if init_op is not None:
        # Feed the asset file paths so the init op can locate bundled assets.
        sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary)

  def load(self, sess, tags, import_scope=None, **saver_kwargs):
    """Load the MetaGraphDef graph and restore variable values into the session.

    Args:
      sess: tf.compat.v1.Session to restore variable values.
      tags: a set of string tags identifying a MetaGraphDef.
      import_scope: Optional `string` -- if specified, prepend this string
        followed by '/' to all loaded tensor names. This scope is applied to
        tensor instances loaded into the passed session, but it is *not* written
        through to the static `MetaGraphDef` protocol buffer that is returned.
      **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.

    Returns:
      `MetagraphDef` proto of the graph that was loaded.
    """
    # NOTE(review): this re-parses the SavedModel proto even though __init__
    # already stored it in self._saved_model -- confirm intent before deduping.
    saved_model_proto = parse_saved_model(self._export_dir)
    metrics.IncrementReadApi(_LOADER_LABEL)
    with sess.graph.as_default():
      saver, _ = self.load_graph(sess.graph, tags, import_scope,
                                 **saver_kwargs)
      self.restore_variables(sess, saver, import_scope)
      self.run_init_ops(sess, tags, import_scope)
    meta_graph_def = self.get_meta_graph_def_from_tags(tags)

    # A single MetaGraph carrying object_graph_def marks a TF2-written
    # SavedModel; record which writer version was read for metrics.
    if (len(saved_model_proto.meta_graphs) == 1 and
        saved_model_proto.meta_graphs[0].HasField("object_graph_def")):
      metrics.IncrementRead(write_version="2")
    else:
      metrics.IncrementRead(write_version="1")
    return meta_graph_def
| |
"""The Shelly integration."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from typing import Any, Final, cast
import aioshelly
from aioshelly.block_device import BlockDevice
from aioshelly.rpc_device import RpcDevice
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry, update_coordinator
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.typing import ConfigType
from .const import (
AIOSHELLY_DEVICE_TIMEOUT_SEC,
ATTR_CHANNEL,
ATTR_CLICK_TYPE,
ATTR_DEVICE,
ATTR_GENERATION,
BATTERY_DEVICES_WITH_PERMANENT_CONNECTION,
BLOCK,
CONF_COAP_PORT,
DATA_CONFIG_ENTRY,
DEFAULT_COAP_PORT,
DEVICE,
DOMAIN,
DUAL_MODE_LIGHT_MODELS,
ENTRY_RELOAD_COOLDOWN,
EVENT_SHELLY_CLICK,
INPUTS_EVENTS_DICT,
POLLING_TIMEOUT_SEC,
REST,
REST_SENSORS_UPDATE_INTERVAL,
RPC,
RPC_INPUTS_EVENTS_TYPES,
RPC_RECONNECT_INTERVAL,
SHBTN_MODELS,
SLEEP_PERIOD_MULTIPLIER,
UPDATE_PERIOD_MULTIPLIER,
)
from .utils import (
get_block_device_name,
get_block_device_sleep_period,
get_coap_context,
get_device_entry_gen,
get_rpc_device_name,
)
# Platforms set up per device class. Sleeping (battery) block devices only
# get the restore-capable subset (see async_block_device_setup).
BLOCK_PLATFORMS: Final = ["binary_sensor", "cover", "light", "sensor", "switch"]
BLOCK_SLEEPING_PLATFORMS: Final = ["binary_sensor", "sensor"]
RPC_PLATFORMS: Final = ["binary_sensor", "light", "sensor", "switch"]
_LOGGER: Final = logging.getLogger(__name__)

# YAML configuration: optional CoAP listening-port override for block devices.
COAP_SCHEMA: Final = vol.Schema(
    {
        vol.Optional(CONF_COAP_PORT, default=DEFAULT_COAP_PORT): cv.port,
    }
)
CONFIG_SCHEMA: Final = vol.Schema({DOMAIN: COAP_SCHEMA}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Shelly component."""
    hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
    domain_config = config.get(DOMAIN)
    if domain_config is not None:
        # Remember the user-configured CoAP port for block device connections.
        hass.data[DOMAIN][CONF_COAP_PORT] = domain_config[CONF_COAP_PORT]
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Shelly from a config entry."""
    # A custom Shelly component shares this domain. Its config entries carry
    # no host value, so an entry without CONF_HOST cannot belong to the core
    # integration and must not be configured here.
    if not entry.data.get(CONF_HOST):
        _LOGGER.warning(
            "The config entry %s probably comes from a custom integration, please remove it if you want to use core Shelly integration",
            entry.title,
        )
        return False

    hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = {DEVICE: None}

    if get_device_entry_gen(entry) == 2:
        return await async_setup_rpc_entry(hass, entry)
    return await async_setup_block_entry(hass, entry)
async def async_setup_block_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Shelly block based device from a config entry."""
    temperature_unit = "C" if hass.config.units.is_metric else "F"
    options = aioshelly.common.ConnectionOptions(
        entry.data[CONF_HOST],
        entry.data.get(CONF_USERNAME),
        entry.data.get(CONF_PASSWORD),
        temperature_unit,
    )
    coap_context = await get_coap_context(hass)
    # NOTE(review): the final positional argument presumably defers device
    # initialization until later in setup -- confirm against aioshelly's API.
    device = await BlockDevice.create(
        aiohttp_client.async_get_clientsession(hass),
        coap_context,
        options,
        False,
    )
    dev_reg = device_registry.async_get(hass)
    device_entry = None
    if entry.unique_id is not None:
        device_entry = dev_reg.async_get_device(
            identifiers={(DOMAIN, entry.unique_id)}, connections=set()
        )
    # A registry entry owned by a different config entry is treated as absent,
    # so this config entry goes through first-time setup below.
    if device_entry and entry.entry_id not in device_entry.config_entries:
        device_entry = None
    sleep_period = entry.data.get("sleep_period")

    @callback
    def _async_device_online(_: Any) -> None:
        # Fired by the device once it wakes up / comes online.
        _LOGGER.debug("Device %s is online, resuming setup", entry.title)
        hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = None
        if sleep_period is None:
            # First contact: persist the sleep period and model so future
            # setups do not need to wait for the device.
            data = {**entry.data}
            data["sleep_period"] = get_block_device_sleep_period(device.settings)
            data["model"] = device.settings["device"]["type"]
            hass.config_entries.async_update_entry(entry, data=data)
        hass.async_create_task(async_block_device_setup(hass, entry, device))

    if sleep_period == 0:
        # Not a sleeping device, finish setup
        _LOGGER.debug("Setting up online block device %s", entry.title)
        try:
            async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
                await device.initialize()
        except (asyncio.TimeoutError, OSError) as err:
            raise ConfigEntryNotReady from err
        await async_block_device_setup(hass, entry, device)
    elif sleep_period is None or device_entry is None:
        # Need to get sleep info or first time sleeping device setup, wait for device
        hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][DEVICE] = device
        _LOGGER.debug(
            "Setup for device %s will resume when device is online", entry.title
        )
        device.subscribe_updates(_async_device_online)
    else:
        # Restore sensors for sleeping device
        _LOGGER.debug("Setting up offline block device %s", entry.title)
        await async_block_device_setup(hass, entry, device)
    return True
async def async_block_device_setup(
    hass: HomeAssistant, entry: ConfigEntry, device: BlockDevice
) -> None:
    """Set up a block based device that is online."""
    entry_data = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id]
    device_wrapper = BlockDeviceWrapper(hass, entry, device)
    entry_data[BLOCK] = device_wrapper
    device_wrapper.async_setup()

    if entry.data.get("sleep_period"):
        # Sleeping devices only get the restore-capable sensor platforms.
        platforms = BLOCK_SLEEPING_PLATFORMS
    else:
        # Mains-powered devices also get a REST coordinator for extra sensors.
        entry_data[REST] = ShellyDeviceRestWrapper(hass, device)
        platforms = BLOCK_PLATFORMS

    hass.config_entries.async_setup_platforms(entry, platforms)
async def async_setup_rpc_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Shelly RPC based device from a config entry."""
    options = aioshelly.common.ConnectionOptions(
        entry.data[CONF_HOST],
        entry.data.get(CONF_USERNAME),
        entry.data.get(CONF_PASSWORD),
    )

    _LOGGER.debug("Setting up online RPC device %s", entry.title)
    try:
        async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
            device = await RpcDevice.create(
                aiohttp_client.async_get_clientsession(hass), options
            )
    except (asyncio.TimeoutError, OSError) as err:
        # Home Assistant retries setup later when the device is unreachable.
        raise ConfigEntryNotReady from err

    device_wrapper = RpcDeviceWrapper(hass, entry, device)
    hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][RPC] = device_wrapper
    device_wrapper.async_setup()

    hass.config_entries.async_setup_platforms(entry, RPC_PLATFORMS)
    return True
class BlockDeviceWrapper(update_coordinator.DataUpdateCoordinator):
    """Wrapper for a Shelly block based device with Home Assistant specific functions."""

    def __init__(
        self, hass: HomeAssistant, entry: ConfigEntry, device: BlockDevice
    ) -> None:
        """Initialize the Shelly device wrapper."""
        # Device registry id; filled in by async_setup().
        self.device_id: str | None = None

        # Poll cadence: a multiple of the sleep period when set, otherwise of
        # the device's CoIoT update period.
        if sleep_period := entry.data["sleep_period"]:
            update_interval = SLEEP_PERIOD_MULTIPLIER * sleep_period
        else:
            update_interval = (
                UPDATE_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
            )
        device_name = (
            get_block_device_name(device) if device.initialized else entry.title
        )
        super().__init__(
            hass,
            _LOGGER,
            name=device_name,
            update_interval=timedelta(seconds=update_interval),
        )
        self.hass = hass
        self.entry = entry
        self.device = device

        # Debounce entry reloads triggered by device config changes.
        self._debounced_reload = Debouncer(
            hass,
            _LOGGER,
            cooldown=ENTRY_RELOAD_COOLDOWN,
            immediate=False,
            function=self._async_reload_entry,
        )
        entry.async_on_unload(self._debounced_reload.async_cancel)
        # Last seen values used to detect config / mode / effect changes.
        self._last_cfg_changed: int | None = None
        self._last_mode: str | None = None
        self._last_effect: int | None = None

        entry.async_on_unload(
            self.async_add_listener(self._async_device_updates_handler)
        )
        # Per-channel input event counters, for click event deduplication.
        self._last_input_events_count: dict = {}

        entry.async_on_unload(
            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop)
        )

    async def _async_reload_entry(self) -> None:
        """Reload entry."""
        _LOGGER.debug("Reloading entry %s", self.name)
        await self.hass.config_entries.async_reload(self.entry.entry_id)

    @callback
    def _async_device_updates_handler(self) -> None:
        """Handle device updates."""
        if not self.device.initialized:
            return

        assert self.device.blocks

        # For buttons which are battery powered - set initial value for last_event_count
        if self.model in SHBTN_MODELS and self._last_input_events_count.get(1) is None:
            for block in self.device.blocks:
                if block.type != "device":
                    continue

                if len(block.wakeupEvent) == 1 and block.wakeupEvent[0] == "button":
                    self._last_input_events_count[1] = -1

                break

        # Check for input events and config change
        cfg_changed = 0
        for block in self.device.blocks:
            if block.type == "device":
                cfg_changed = block.cfgChanged

            # For dual mode bulbs ignore change if it is due to mode/effect change
            if self.model in DUAL_MODE_LIGHT_MODELS:
                if "mode" in block.sensor_ids and self.model != "SHRGBW2":
                    if self._last_mode != block.mode:
                        self._last_cfg_changed = None
                    self._last_mode = block.mode

                if "effect" in block.sensor_ids:
                    if self._last_effect != block.effect:
                        self._last_cfg_changed = None
                    self._last_effect = block.effect

            if (
                "inputEvent" not in block.sensor_ids
                or "inputEventCnt" not in block.sensor_ids
            ):
                continue

            channel = int(block.channel or 0) + 1
            event_type = block.inputEvent
            last_event_count = self._last_input_events_count.get(channel)
            self._last_input_events_count[channel] = block.inputEventCnt

            # Skip unchanged counters, never-seen channels and empty events.
            if (
                last_event_count is None
                or last_event_count == block.inputEventCnt
                or event_type == ""
            ):
                continue

            if event_type in INPUTS_EVENTS_DICT:
                self.hass.bus.async_fire(
                    EVENT_SHELLY_CLICK,
                    {
                        ATTR_DEVICE_ID: self.device_id,
                        ATTR_DEVICE: self.device.settings["device"]["hostname"],
                        ATTR_CHANNEL: channel,
                        ATTR_CLICK_TYPE: INPUTS_EVENTS_DICT[event_type],
                        ATTR_GENERATION: 1,
                    },
                )
            else:
                _LOGGER.warning(
                    "Shelly input event %s for device %s is not supported, please open issue",
                    event_type,
                    self.name,
                )

        # Reload the entry (debounced) when the device reports a config change
        # newer than the last one seen.
        if self._last_cfg_changed is not None and cfg_changed > self._last_cfg_changed:
            _LOGGER.info(
                "Config for %s changed, reloading entry in %s seconds",
                self.name,
                ENTRY_RELOAD_COOLDOWN,
            )
            self.hass.async_create_task(self._debounced_reload.async_call())
        self._last_cfg_changed = cfg_changed

    async def _async_update_data(self) -> None:
        """Fetch data."""
        if sleep_period := self.entry.data.get("sleep_period"):
            # Sleeping device, no point polling it, just mark it unavailable
            raise update_coordinator.UpdateFailed(
                f"Sleeping device did not update within {sleep_period} seconds interval"
            )

        _LOGGER.debug("Polling Shelly Block Device - %s", self.name)
        try:
            async with async_timeout.timeout(POLLING_TIMEOUT_SEC):
                await self.device.update()
        except OSError as err:
            raise update_coordinator.UpdateFailed("Error fetching data") from err

    @property
    def model(self) -> str:
        """Model of the device."""
        return cast(str, self.entry.data["model"])

    @property
    def mac(self) -> str:
        """Mac address of the device."""
        return cast(str, self.entry.unique_id)

    def async_setup(self) -> None:
        """Set up the wrapper."""
        dev_reg = device_registry.async_get(self.hass)
        sw_version = self.device.firmware_version if self.device.initialized else ""
        entry = dev_reg.async_get_or_create(
            config_entry_id=self.entry.entry_id,
            name=self.name,
            connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
            manufacturer="Shelly",
            model=aioshelly.const.MODEL_NAMES.get(self.model, self.model),
            sw_version=sw_version,
            configuration_url=f"http://{self.entry.data[CONF_HOST]}",
        )
        self.device_id = entry.id
        # Push updates from the device drive the coordinator directly.
        self.device.subscribe_updates(self.async_set_updated_data)

    def shutdown(self) -> None:
        """Shutdown the wrapper."""
        self.device.shutdown()

    @callback
    def _handle_ha_stop(self, _event: Event) -> None:
        """Handle Home Assistant stopping."""
        _LOGGER.debug("Stopping BlockDeviceWrapper for %s", self.name)
        self.shutdown()
class ShellyDeviceRestWrapper(update_coordinator.DataUpdateCoordinator):
    """Rest Wrapper for a Shelly device with Home Assistant specific functions."""

    def __init__(self, hass: HomeAssistant, device: BlockDevice) -> None:
        """Initialize the Shelly device wrapper."""
        device_type = device.settings["device"]["type"]
        if device_type in BATTERY_DEVICES_WITH_PERMANENT_CONNECTION:
            # Battery devices with a permanent connection are polled on a
            # cadence derived from their CoIoT update period.
            update_interval = (
                SLEEP_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
            )
        else:
            update_interval = REST_SENSORS_UPDATE_INTERVAL
        super().__init__(
            hass,
            _LOGGER,
            name=get_block_device_name(device),
            update_interval=timedelta(seconds=update_interval),
        )
        self.device = device

    async def _async_update_data(self) -> None:
        """Fetch data."""
        try:
            async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
                _LOGGER.debug("REST update for %s", self.name)
                await self.device.update_status()
        except OSError as err:
            raise update_coordinator.UpdateFailed("Error fetching data") from err

    @property
    def mac(self) -> str:
        """Mac address of the device."""
        return cast(str, self.device.settings["device"]["mac"])
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    entry_store = hass.data[DOMAIN][DATA_CONFIG_ENTRY]

    # Generation 2 (RPC) devices: unload the RPC platforms, then shut down
    # the RPC wrapper and drop the per-entry state.
    if get_device_entry_gen(entry) == 2:
        unload_ok = await hass.config_entries.async_unload_platforms(
            entry, RPC_PLATFORMS
        )
        if unload_ok:
            await entry_store[entry.entry_id][RPC].shutdown()
            entry_store.pop(entry.entry_id)
        return unload_ok

    device = entry_store[entry.entry_id].get(DEVICE)
    if device is not None:
        # If device is present, device wrapper is not setup yet
        device.shutdown()
        return True

    if entry.data.get("sleep_period"):
        platforms = BLOCK_SLEEPING_PLATFORMS
    else:
        entry_store[entry.entry_id][REST] = None
        platforms = BLOCK_PLATFORMS

    unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
    if unload_ok:
        entry_store[entry.entry_id][BLOCK].shutdown()
        entry_store.pop(entry.entry_id)
    return unload_ok
def get_block_device_wrapper(
    hass: HomeAssistant, device_id: str
) -> BlockDeviceWrapper | None:
    """Get a Shelly block device wrapper for the given device id."""
    if not hass.data.get(DOMAIN):
        return None

    device = device_registry.async_get(hass).async_get(device_id)
    if device is None:
        return None

    entry_store = hass.data[DOMAIN][DATA_CONFIG_ENTRY]
    for config_entry in device.config_entries:
        entry_data = entry_store.get(config_entry)
        if not entry_data:
            # Entry not set up (or belongs to another integration).
            continue
        wrapper = entry_data.get(BLOCK)
        if wrapper:
            return cast(BlockDeviceWrapper, wrapper)
    return None
def get_rpc_device_wrapper(
    hass: HomeAssistant, device_id: str
) -> RpcDeviceWrapper | None:
    """Get a Shelly RPC device wrapper for the given device id."""
    if not hass.data.get(DOMAIN):
        return None

    device = device_registry.async_get(hass).async_get(device_id)
    if device is None:
        return None

    entry_store = hass.data[DOMAIN][DATA_CONFIG_ENTRY]
    for config_entry in device.config_entries:
        entry_data = entry_store.get(config_entry)
        if not entry_data:
            # Entry not set up (or belongs to another integration).
            continue
        wrapper = entry_data.get(RPC)
        if wrapper:
            return cast(RpcDeviceWrapper, wrapper)
    return None
class RpcDeviceWrapper(update_coordinator.DataUpdateCoordinator):
    """Wrapper for a Shelly RPC based device with Home Assistant specific functions."""

    def __init__(
        self, hass: HomeAssistant, entry: ConfigEntry, device: RpcDevice
    ) -> None:
        """Initialize the Shelly device wrapper."""
        # Device registry id; filled in by async_setup().
        self.device_id: str | None = None
        device_name = get_rpc_device_name(device) if device.initialized else entry.title
        super().__init__(
            hass,
            _LOGGER,
            name=device_name,
            update_interval=timedelta(seconds=RPC_RECONNECT_INTERVAL),
        )
        self.entry = entry
        self.device = device
        # Debounce config-change reloads so a burst of "config_changed"
        # events results in a single entry reload.
        self._debounced_reload = Debouncer(
            hass,
            _LOGGER,
            cooldown=ENTRY_RELOAD_COOLDOWN,
            immediate=False,
            function=self._async_reload_entry,
        )
        entry.async_on_unload(self._debounced_reload.async_cancel)
        entry.async_on_unload(
            self.async_add_listener(self._async_device_updates_handler)
        )
        # Last event payload dispatched; used to drop duplicate deliveries.
        self._last_event: dict[str, Any] | None = None
        entry.async_on_unload(
            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop)
        )

    async def _async_reload_entry(self) -> None:
        """Reload entry."""
        _LOGGER.debug("Reloading entry %s", self.name)
        await self.hass.config_entries.async_reload(self.entry.entry_id)

    @callback
    def _async_device_updates_handler(self) -> None:
        """Handle device updates.

        Fires EVENT_SHELLY_CLICK for input events and schedules a debounced
        config-entry reload when the device reports a config change.
        """
        # Skip when not initialized, no event payload, or the same payload
        # was already processed.
        if (
            not self.device.initialized
            or not self.device.event
            or self.device.event == self._last_event
        ):
            return

        self._last_event = self.device.event

        for event in self.device.event["events"]:
            event_type = event.get("event")
            if event_type is None:
                continue

            if event_type == "config_changed":
                _LOGGER.info(
                    "Config for %s changed, reloading entry in %s seconds",
                    self.name,
                    ENTRY_RELOAD_COOLDOWN,
                )
                self.hass.async_create_task(self._debounced_reload.async_call())
            elif event_type not in RPC_INPUTS_EVENTS_TYPES:
                # Not an input (click) event; ignore.
                continue

            self.hass.bus.async_fire(
                EVENT_SHELLY_CLICK,
                {
                    ATTR_DEVICE_ID: self.device_id,
                    ATTR_DEVICE: self.device.hostname,
                    # Device channels are 0-based; HA events are 1-based.
                    ATTR_CHANNEL: event["id"] + 1,
                    ATTR_CLICK_TYPE: event["event"],
                    ATTR_GENERATION: 2,
                },
            )

    async def _async_update_data(self) -> None:
        """Fetch data.

        For RPC devices the coordinator only reconnects when the websocket
        connection has dropped; data arrives via push updates.
        """
        if self.device.connected:
            return

        try:
            _LOGGER.debug("Reconnecting to Shelly RPC Device - %s", self.name)
            async with async_timeout.timeout(AIOSHELLY_DEVICE_TIMEOUT_SEC):
                await self.device.initialize()
        except OSError as err:
            raise update_coordinator.UpdateFailed("Device disconnected") from err

    @property
    def model(self) -> str:
        """Model of the device."""
        return cast(str, self.entry.data["model"])

    @property
    def mac(self) -> str:
        """Mac address of the device."""
        return cast(str, self.entry.unique_id)

    def async_setup(self) -> None:
        """Set up the wrapper.

        Registers the device in the HA device registry and subscribes to
        push updates from the device.
        """
        dev_reg = device_registry.async_get(self.hass)
        sw_version = self.device.firmware_version if self.device.initialized else ""
        entry = dev_reg.async_get_or_create(
            config_entry_id=self.entry.entry_id,
            name=self.name,
            connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
            manufacturer="Shelly",
            model=aioshelly.const.MODEL_NAMES.get(self.model, self.model),
            sw_version=sw_version,
            configuration_url=f"http://{self.entry.data[CONF_HOST]}",
        )
        self.device_id = entry.id
        self.device.subscribe_updates(self.async_set_updated_data)

    async def shutdown(self) -> None:
        """Shutdown the wrapper."""
        await self.device.shutdown()

    async def _handle_ha_stop(self, _event: Event) -> None:
        """Handle Home Assistant stopping."""
        _LOGGER.debug("Stopping RpcDeviceWrapper for %s", self.name)
        await self.shutdown()
| |
from __future__ import print_function
import os
import subprocess
import sys
import textwrap
from tracing import Tracing
from buck_tool import BuckTool, JAVA_MAX_HEAP_SIZE_MB, platform_path
from buck_tool import BuckToolException, RestartBuck
from subprocess import check_output
from subprocutils import which
import buck_version
# Classpath entries (relative to the buck checkout) used to run Buck built
# from source with ant; joined by _get_java_classpath().
JAVA_CLASSPATHS = [
    "build/abi_processor/classes",
    "build/classes",
    "build/src-gen/classes",
    "build/aosp/classes",
    "build/dx_classes",
    "src",
    "src-gen",
    "third-party/java/android/sdklib-25.2.0.jar",
    "third-party/java/android/sdk-common-25.2.0.jar",
    "third-party/java/android/common-25.2.0.jar",
    "third-party/java/android/layoutlib-api-25.2.0.jar",
    "third-party/java/aopalliance/aopalliance.jar",
    "third-party/java/args4j/args4j-2.0.30.jar",
    "third-party/java/asm/asm-debug-all-5.0.3.jar",
    "third-party/java/closure-templates/soy-excluding-deps.jar",
    "third-party/java/commons-compress/commons-compress-1.8.1.jar",
    "third-party/java/commons-logging/commons-logging-1.2.jar",
    "third-party/java/concurrent-locks/concurrent-locks-1.0.0.jar",
    "third-party/java/dd-plist/dd-plist.jar",
    "third-party/java/ddmlib/ddmlib-25.2.0.jar",
    "third-party/java/diffutils/diffutils-1.3.0.jar",
    "third-party/java/eclipse/org.eclipse.core.contenttype_3.5.100.v20160418-1621.jar",
    "third-party/java/eclipse/org.eclipse.core.jobs_3.8.0.v20160319-0610.jar",
    "third-party/java/eclipse/org.eclipse.core.resources_3.11.0.v20160422-0304.jar",
    "third-party/java/eclipse/org.eclipse.core.runtime_3.12.0.v20160427-1901.jar",
    "third-party/java/eclipse/org.eclipse.equinox.common_3.8.0.v20160422-1942.jar",
    "third-party/java/eclipse/org.eclipse.equinox.preferences_3.6.0.v20160120-1756.jar",
    "third-party/java/eclipse/org.eclipse.jdt.core.prefs",
    "third-party/java/eclipse/org.eclipse.jdt.core_3.12.0.v20160426-1326.jar",
    "third-party/java/eclipse/org.eclipse.osgi_3.11.0.v20160427-2120.jar",
    "third-party/java/gson/gson-2.2.4.jar",
    "third-party/java/guava/guava-20.0.jar",
    "third-party/java/errorprone/error-prone-annotations-2.0.15.jar",
    "third-party/java/guice/guice-3.0.jar",
    "third-party/java/guice/guice-assistedinject-3.0.jar",
    "third-party/java/guice/guice-multibindings-3.0.jar",
    "third-party/java/httpcomponents/httpclient-4.4.1.jar",
    "third-party/java/httpcomponents/httpcore-4.4.1.jar",
    "third-party/java/icu4j/icu4j-54.1.1.jar",
    "third-party/java/infer-annotations/infer-annotations-4.1.jar",
    "third-party/java/ini4j/ini4j-0.5.2.jar",
    "third-party/java/jackson/jackson-annotations-2.7.8.jar",
    "third-party/java/jackson/jackson-core-2.7.8.jar",
    "third-party/java/jackson/jackson-databind-2.7.8.jar",
    "third-party/java/jackson/jackson-datatype-jdk8-2.7.8.jar",
    "third-party/java/jackson/jackson-datatype-guava-2.7.8.jar",
    "third-party/java/jetty/jetty-all-9.2.10.v20150310.jar",
    "third-party/java/jna/jna-4.2.0.jar",
    "third-party/java/jna/jna-platform-4.2.0.jar",
    "third-party/java/jsr/javax.inject-1.jar",
    "third-party/java/jsr/jsr305.jar",
    "third-party/java/kxml2/kxml2-2.3.0.jar",
    "third-party/java/nailgun/nailgun-server-0.9.2-SNAPSHOT.jar",
    "third-party/java/nuprocess/nuprocess-1.1.0.jar",
    "third-party/java/ObjCBridge/ObjCBridge.jar",
    "third-party/java/okhttp/okhttp-3.6.0.jar",
    "third-party/java/okio/okio-1.11.0.jar",
    "third-party/java/oshi/oshi-core-3.3-SNAPSHOT.jar",
    "third-party/java/rocksdbjni/rocksdbjni-5.1.2.jar",
    "third-party/java/servlet-api/javax.servlet-api-3.1.0.jar",
    "third-party/java/slf4j/slf4j-jdk14-1.7.5.jar",
    "third-party/java/stringtemplate/ST-4.0.8.jar",
    "third-party/java/thrift/libthrift-0.9.3.jar",
    "third-party/java/xz-java-1.5/xz-1.5.jar",
    # maven/aether libs
    "third-party/java/aether/aether-api-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-connector-basic-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-impl-1.0.0.v20140518.jar",
    "third-party/java/aether/aether-spi-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-transport-http-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-transport-file-1.0.2.v20150114.jar",
    "third-party/java/aether/aether-util-1.0.2.v20150114.jar",
    "third-party/java/commons-codec/commons-codec-1.6.jar",
    "third-party/java/maven/maven-aether-provider-3.2.5.jar",
    "third-party/java/maven/maven-model-3.2.5.jar",
    "third-party/java/maven/maven-model-builder-3.2.5.jar",
    "third-party/java/slf4j/slf4j-api-1.7.5.jar",
    "third-party/java/plexus/plexus-utils-3.0.20.jar",
    "third-party/java/plexus/plexus-interpolation-1.21.jar",
    "third-party/java/eden/eden.jar",
    "third-party/java/eden/java-thrift-dependencies.jar",
]

# Named resources resolved by _get_resource(); values are paths relative to
# the buck checkout root.
RESOURCES = {
    "abi_processor_classes": "build/abi_processor/classes",
    "android_agent_path": "assets/android/agent.apk",
    "buck_server": "bin/buck",
    "buck_build_type_info": "config/build_type/LOCAL_ANT/type.txt",
    "dx": "third-party/java/dx/etc/dx",
    "jacoco_agent_jar": "third-party/java/jacoco/jacocoagent.jar",
    "libjcocoa.dylib": "third-party/java/ObjCBridge/libjcocoa.dylib",
    "logging_config_file": "config/logging.properties.st",
    "native_exopackage_fake_path": "assets/android/native-exopackage-fakes.apk",
    "path_to_asm_jar": "third-party/java/asm/asm-debug-all-5.0.3.jar",
    "path_to_rawmanifest_py": "src/com/facebook/buck/util/versioncontrol/rawmanifest.py",
    "path_to_intellij_py": "src/com/facebook/buck/ide/intellij/deprecated/intellij.py",
    "path_to_pex": "src/com/facebook/buck/python/make_pex.py",
    "path_to_sh_binary_template": "src/com/facebook/buck/shell/sh_binary_template",
    "path_to_static_content": "webserver/static",
    "report_generator_jar": "build/report-generator.jar",
    "testrunner_classes": "build/testrunner/classes",
    # python resources used by buck file parser.
    "path_to_pathlib_py": "third-party/py/pathlib/pathlib.py",
    "path_to_pywatchman": "third-party/py/pywatchman",
    "path_to_typing": "third-party/py/typing/python2",
}
def get_ant_env(max_heap_size_mb):
    """Return a copy of the environment suitable for running ant.

    If ANT_OPTS does not already specify an -Xmx flag, append
    '-Xmx<max_heap_size_mb>m' so ant gets a sensible max heap.
    """
    env = os.environ.copy()
    opts = env.get('ANT_OPTS', '')
    if '-Xmx' not in opts:
        heap_arg = '-Xmx{0}m'.format(max_heap_size_mb)
        env['ANT_OPTS'] = '{0} {1}'.format(opts, heap_arg) if opts else heap_arg
    return env
class BuckRepo(BuckTool):
    """A BuckTool that builds and runs Buck from a git checkout.

    Keeps the checkout pinned to the revision named by the project's
    .buckversion (unless .nobuckcheck is present) and rebuilds with ant
    whenever the checkout moves or no successful build exists.
    """

    def __init__(self, buck_bin_dir, buck_project):
        super(BuckRepo, self).__init__(buck_project)

        self._buck_dir = platform_path(os.path.dirname(buck_bin_dir))
        # Marker file written by ant on a successful build.
        self._build_success_file = os.path.join(
            self._buck_dir, "build", "successful-build")

        dot_git = os.path.join(self._buck_dir, '.git')
        # Only treat this as a usable git checkout when git is on PATH;
        # cygwin is excluded deliberately.
        self._is_git = os.path.exists(dot_git) and os.path.isdir(dot_git) and which('git') and \
            sys.platform != 'cygwin'
        self._is_buck_repo_dirty_override = os.environ.get('BUCK_REPOSITORY_DIRTY')

        # Named `required_version` to avoid shadowing the module-level
        # `buck_version` import used by the other methods.
        required_version = buck_project.buck_version
        if self._is_git and not buck_project.has_no_buck_check and required_version:
            revision = required_version[0]
            branch = required_version[1] if len(required_version) > 1 else None
            self._checkout_and_clean(revision, branch)

        self._build()

    def _checkout_and_clean(self, revision, branch):
        """Fetch and check out `revision` (from `branch` if given).

        If the checkout actually moves, clean the build tree and raise
        RestartBuck so the launcher re-executes against the new revision.
        """
        with Tracing('BuckRepo._checkout_and_clean'):
            if not self._revision_exists(revision):
                print(textwrap.dedent("""\
                    Required revision {0} is not
                    available in the local repository.
                    Buck is fetching updates from git. You can disable this by creating
                    a '.nobuckcheck' file in your repository, but this might lead to
                    strange bugs or build failures.""".format(revision)),
                    file=sys.stderr)
                git_command = ['git', 'fetch']
                git_command.extend(['--all'] if not branch else ['origin', branch])
                try:
                    subprocess.check_call(
                        git_command,
                        stdout=sys.stderr,
                        cwd=self._buck_dir)
                except subprocess.CalledProcessError:
                    raise BuckToolException(textwrap.dedent("""\
                          Failed to fetch Buck updates from git."""))

            current_revision = self._get_git_revision()

            if current_revision != revision:
                print(textwrap.dedent("""\
                    Buck is at {0}, but should be {1}.
                    Buck is updating itself. To disable this, add a '.nobuckcheck'
                    file to your project root. In general, you should only disable
                    this if you are developing Buck.""".format(
                    current_revision, revision)),
                    file=sys.stderr)

                try:
                    subprocess.check_call(
                        ['git', 'checkout', '--quiet', revision],
                        cwd=self._buck_dir)
                except subprocess.CalledProcessError:
                    raise BuckToolException(textwrap.dedent("""\
                          Failed to update Buck to revision {0}.""".format(revision)))
                # Force a rebuild against the new revision.
                if os.path.exists(self._build_success_file):
                    os.remove(self._build_success_file)

                ant = self._check_for_ant()
                self._run_ant_clean(ant)
                raise RestartBuck()

    def _join_buck_dir(self, relative_path):
        """Join a '/'-separated repo-relative path onto the buck directory."""
        return os.path.join(self._buck_dir, *(relative_path.split('/')))

    def _has_local_changes(self):
        """Return True when git reports modified tracked files."""
        if not self._is_git:
            return False

        output = check_output(
            ['git', 'ls-files', '-m'],
            cwd=self._buck_dir)
        return bool(output.strip())

    def _get_git_revision(self):
        """Return the current git revision, or 'N/A' outside a checkout."""
        if not self._is_git:
            return 'N/A'
        return buck_version.get_git_revision(self._buck_dir)

    def _get_git_commit_timestamp(self):
        """Return the commit timestamp, or -1 when dirty/not a checkout."""
        if self._is_buck_repo_dirty_override or not self._is_git:
            return -1
        return buck_version.get_git_revision_timestamp(self._buck_dir)

    def _revision_exists(self, revision):
        """Return True when `revision` is present in the local repository."""
        returncode = subprocess.call(
            ['git', 'cat-file', '-e', revision],
            cwd=self._buck_dir)
        return returncode == 0

    def _check_for_ant(self):
        """Return the path to ant, raising BuckToolException if missing."""
        ant = which('ant')
        if not ant:
            message = "You do not have ant on your $PATH. Cannot build Buck."
            if sys.platform == "darwin":
                message += "\nTry running 'brew install ant'."
            raise BuckToolException(message)
        return ant

    def _print_ant_failure_and_exit(self, ant_log_path):
        """Report an ant failure with a remediation hint, then raise."""
        print(textwrap.dedent("""\
                ::: 'ant' failed in the buck repo at '{0}',
                ::: and 'buck' is not properly built. It will be unusable
                ::: until the error is corrected. You can check the logs
                ::: at {1} to figure out what broke.""".format(
              self._buck_dir, ant_log_path)), file=sys.stderr)
        if self._is_git:
            raise BuckToolException(textwrap.dedent("""\
                ::: It is possible that running this command will fix it:
                ::: git -C "{0}" clean -xfd""".format(self._buck_dir)))
        else:
            raise BuckToolException(textwrap.dedent("""\
                ::: It is possible that running this command will fix it:
                ::: rm -rf "{0}"/build""".format(self._buck_dir)))

    def _run_ant_clean(self, ant):
        """Run `ant clean`, logging to buck-out; raise on failure."""
        clean_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant-clean.log')
        with open(clean_log_path, 'w') as clean_log:
            exitcode = subprocess.call([ant, 'clean'], stdout=clean_log,
                                       cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            # BUGFIX: was `exitcode is not 0` — an identity check that only
            # works via CPython's small-int caching; use value comparison.
            if exitcode != 0:
                self._print_ant_failure_and_exit(clean_log_path)

    def _run_ant(self, ant):
        """Run the default ant target, logging to buck-out; raise on failure."""
        ant_log_path = os.path.join(self._buck_project.get_buck_out_log_dir(), 'ant.log')
        with open(ant_log_path, 'w') as ant_log:
            exitcode = subprocess.call([ant], stdout=ant_log,
                                       cwd=self._buck_dir, env=get_ant_env(JAVA_MAX_HEAP_SIZE_MB))
            # BUGFIX: was `exitcode is not 0` (see _run_ant_clean).
            if exitcode != 0:
                self._print_ant_failure_and_exit(ant_log_path)

    def _build(self):
        """Build Buck with ant if no successful build marker is present."""
        with Tracing('BuckRepo._build'):
            if not os.path.exists(self._build_success_file):
                print(
                    "Buck does not appear to have been built -- building Buck!",
                    file=sys.stderr)
                ant = self._check_for_ant()
                self._run_ant_clean(ant)
                self._run_ant(ant)
                print("All done, continuing with build.", file=sys.stderr)

    def _get_resource_lock_path(self):
        # Repo builds run in place; no resource extraction lock is needed.
        return None

    def _has_resource(self, resource):
        # All resources are assumed present in a source checkout.
        return True

    def _get_resource(self, resource, exe=False):
        return self._join_buck_dir(RESOURCES[resource.name])

    def _get_buck_version_uid(self):
        """Return the version uid used for cache keys and the daemon.

        Prefers .fakebuckversion, then a clean git version; otherwise falls
        back to a "dirty" version, optionally offering to clean the repo.
        """
        with Tracing('BuckRepo._get_buck_version_uid'):
            # Check if the developer has requested that we impersonate some other version.
            fake_buck_version_file_path = os.path.join(self._buck_dir, ".fakebuckversion")
            if os.path.exists(fake_buck_version_file_path):
                with open(fake_buck_version_file_path) as fake_buck_version_file:
                    fake_buck_version = fake_buck_version_file.read().strip()
                    print(textwrap.dedent("""\
                    ::: Faking buck version %s, despite your buck directory not being that version."""
                                          % fake_buck_version),
                          file=sys.stderr)
                    return fake_buck_version

            # First try to get the "clean" buck version. If it succeeds,
            # return it.
            clean_version = buck_version.get_clean_buck_version(
                self._buck_dir,
                allow_dirty=self._is_buck_repo_dirty_override == "1")
            if clean_version is not None:
                return clean_version

            # Otherwise, if there is a .nobuckcheck file, or if there isn't
            # a .buckversion file, fall back to a "dirty" version.
            if (self._buck_project.has_no_buck_check or
                    not self._buck_project.buck_version):
                return buck_version.get_dirty_buck_version(self._buck_dir)

            if self._has_local_changes():
                print(textwrap.dedent("""\
                ::: Your buck directory has local modifications, and therefore
                ::: builds will not be able to use a distributed cache.
                ::: The following files must be either reverted or committed:"""),
                      file=sys.stderr)
                subprocess.call(
                    ['git', 'ls-files', '-m'],
                    stdout=sys.stderr,
                    cwd=self._buck_dir)
            elif os.environ.get('BUCK_CLEAN_REPO_IF_DIRTY') != 'NO':
                print(textwrap.dedent("""\
                ::: Your local buck directory is dirty, and therefore builds will
                ::: not be able to use a distributed cache."""), file=sys.stderr)
                if sys.stdout.isatty():
                    print(
                        "::: Do you want to clean your buck directory? [y/N]",
                        file=sys.stderr)
                    # raw_input: this launcher still targets Python 2.
                    choice = raw_input().lower()
                    if choice == "y":
                        subprocess.call(
                            ['git', 'clean', '-fd'],
                            stdout=sys.stderr,
                            cwd=self._buck_dir)
                        raise RestartBuck()

            return buck_version.get_dirty_buck_version(self._buck_dir)

    def _is_buck_production(self):
        return False

    def _get_extra_java_args(self):
        """Return -D properties describing the git state of this checkout."""
        with Tracing('BuckRepo._get_extra_java_args'):
            return [
                "-Dbuck.git_commit={0}".format(self._get_git_revision()),
                "-Dbuck.git_commit_timestamp={0}".format(
                    self._get_git_commit_timestamp()),
                "-Dbuck.git_dirty={0}".format(
                    int(self._is_buck_repo_dirty_override == "1" or
                        buck_version.is_dirty(self._buck_dir))),
            ]

    def _get_bootstrap_classpath(self):
        return self._join_buck_dir("build/bootstrapper/bootstrapper.jar")

    def _get_java_classpath(self):
        return self._pathsep.join([self._join_buck_dir(p) for p in JAVA_CLASSPATHS])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
| |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import logging
import argparse
import platform
import subprocess
os.environ["PYTHONUNBUFFERED"] = "y"
PY2 = sys.version_info[0] == 2
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, subprocess_text_output, OKBLUE, ENDC, WARNING
from scripts.lib.setup_venv import setup_virtualenv, VENV_DEPENDENCIES
from scripts.lib.node_cache import setup_node_modules, NPM_CACHE_PATH
from version import PROVISION_VERSION
if False:
from typing import Any
# Only Ubuntu LTS releases listed here are supported by this script.
SUPPORTED_PLATFORMS = {
    "Ubuntu": [
        "trusty",
        "xenial",
    ],
}

# Virtualenv locations; which one is active depends on the Python running us.
PY2_VENV_PATH = "/srv/zulip-venv"
PY3_VENV_PATH = "/srv/zulip-py3-venv"

# Writable development state lives under zulip/var.
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
LINECOVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'linecoverage-report')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')

# TODO: De-duplicate this with emoji_dump.py
EMOJI_CACHE_PATH = "/srv/zulip-emoji-cache"
if 'TRAVIS' in os.environ:
    # In Travis CI, we don't have root access
    EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache"

if PY2:
    VENV_PATH = PY2_VENV_PATH
else:
    VENV_PATH = PY3_VENV_PATH
# Refuse to provision from a release tarball: the tooling assumes a git
# checkout (e.g. for version and authors data).
if not os.path.exists(os.path.join(ZULIP_PATH, ".git")):
    print("Error: No Zulip git repository present!")
    print("To setup the Zulip development environment, you should clone the code")
    print("from GitHub, rather than using a Zulip production release tarball.")
    sys.exit(1)

# Check the RAM on the user's system, and fail with an error if <1.5GB.
# This avoids users getting segfaults running `pip install` that are
# generally more annoying to debug.
with open("/proc/meminfo") as meminfo:
    ram_size = meminfo.readlines()[0].strip().split(" ")[-2]
ram_gb = float(ram_size) / 1024.0 / 1024.0
if ram_gb < 1.5:
    print("You have insufficient RAM (%s GB) to run the Zulip development environment." % (
        round(ram_gb, 2),))
    print("We recommend at least 2 GB of RAM, and require at least 1.5 GB.")
    sys.exit(1)
# Verify the filesystem supports symlinks (e.g. not a Windows/VirtualBox
# shared folder) by creating and removing a throwaway link under var/.
try:
    run(["mkdir", "-p", VAR_DIR_PATH])
    if os.path.exists(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')):
        os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
    os.symlink(
        os.path.join(ZULIP_PATH, 'README.md'),
        os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')
    )
    os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
except OSError as err:
    print("Error: Unable to create symlinks. Make sure you have permission to create symbolic links.")
    print("See this page for more information:")
    print("  http://zulip.readthedocs.io/en/latest/dev-env-first-time-contributors.html#os-symlink-error")
    sys.exit(1)
# Determine the Debian architecture string for the host.
if platform.architecture()[0] == '64bit':
    arch = 'amd64'
elif platform.architecture()[0] == '32bit':
    arch = "i386"
else:
    logging.critical("Only x86 is supported; ping zulip-devel@googlegroups.com if you want another architecture.")
    sys.exit(1)

# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
subprocess.check_call(["sudo", "apt-get", "install", "-y", "lsb-release"])
vendor = subprocess_text_output(["lsb_release", "-is"])
codename = subprocess_text_output(["lsb_release", "-cs"])
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
    logging.critical("Unsupported platform: {} {}".format(vendor, codename))
    sys.exit(1)

# Postgres major version shipped by each supported Ubuntu release.
POSTGRES_VERSION_MAP = {
    "trusty": "9.3",
    "xenial": "9.5",
}
POSTGRES_VERSION = POSTGRES_VERSION_MAP[codename]
# apt packages needed on every supported Ubuntu release; release-specific
# postgres packages are added per codename in APT_DEPENDENCIES below.
UBUNTU_COMMON_APT_DEPENDENCIES = [
    "closure-compiler",
    "memcached",
    "rabbitmq-server",
    "redis-server",
    "hunspell-en-us",
    "supervisor",
    "git",
    "libssl-dev",
    "yui-compressor",
    "wget",
    "ca-certificates",      # Explicit dependency in case e.g. wget is already installed
    "puppet",               # Used by lint
    "gettext",              # Used by makemessages i18n
    "curl",                 # Used for fetching PhantomJS as wget occasionally fails on redirects
    "netcat",               # Used for flushing memcached
] + VENV_DEPENDENCIES

APT_DEPENDENCIES = {
    "trusty": UBUNTU_COMMON_APT_DEPENDENCIES + [
        "postgresql-9.3",
        "postgresql-9.3-tsearch-extras",
        "postgresql-9.3-pgroonga",
    ],
    "xenial": UBUNTU_COMMON_APT_DEPENDENCIES + [
        "postgresql-9.5",
        "postgresql-9.5-tsearch-extras",
        "postgresql-9.5-pgroonga",
    ],
}

# Destination directory for the full-text-search stopwords file.
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/%s/tsearch_data/" % (POSTGRES_VERSION,)
REPO_STOPWORDS_PATH = os.path.join(
    ZULIP_PATH,
    "puppet",
    "zulip",
    "files",
    "postgresql",
    "zulip_english.stop",
)

# Keyword args to pass subprocess output straight through to our stdio.
LOUD = dict(_out=sys.stdout, _err=sys.stderr)

user_id = os.getuid()
def setup_shell_profile(shell_profile):
    # type: (str) -> None
    """Ensure the user's shell profile sources the Zulip virtualenv.

    Appends a `source <venv>/bin/activate` line to the given profile file
    (e.g. '~/.bash_profile') unless it is already present; creates the
    file if it does not exist.
    """
    source_activate_command = "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),)
    shell_profile_path = os.path.expanduser(shell_profile)
    if os.path.exists(shell_profile_path):
        with open(shell_profile_path, 'a+') as shell_profile_file:
            # BUGFIX: in 'a+' mode the stream position may start at
            # end-of-file, making read() return '' and causing the activate
            # line to be appended on every provision; rewind before reading.
            shell_profile_file.seek(0)
            if source_activate_command not in shell_profile_file.read():
                shell_profile_file.writelines(source_activate_command)
    else:
        with open(shell_profile_path, 'w') as shell_profile_file:
            shell_profile_file.writelines(source_activate_command)
def main(options):
    # type: (Any) -> int
    """Provision the Zulip development environment end to end.

    Installs apt packages, sets up virtualenvs, creates var/ directories,
    initializes databases, and installs node modules. Returns 0 on success.
    """

    # npm install and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # setup-apt-repo does an `apt-get update`
    run(["sudo", "./scripts/lib/setup-apt-repo"])
    run(["sudo", "apt-get", "-y", "install", "--no-install-recommends"] + APT_DEPENDENCIES[codename])

    if options.is_travis:
        if PY2:
            # Under py2 we still need a py3 venv for mypy.
            MYPY_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "mypy.txt")
            setup_virtualenv(PY3_VENV_PATH, MYPY_REQS_FILE, patch_activate_script=True,
                             virtualenv_args=['-p', 'python3'])
            DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py2_dev.txt")
            setup_virtualenv(PY2_VENV_PATH, DEV_REQS_FILE, patch_activate_script=True)
        else:
            DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py3_dev.txt")
            setup_virtualenv(VENV_PATH, DEV_REQS_FILE, patch_activate_script=True,
                             virtualenv_args=['-p', 'python3'])
    else:
        # Import tools/setup_venv.py instead of running it so that we get an
        # activated virtualenv for the rest of the provisioning process.
        from tools.setup import setup_venvs
        setup_venvs.main()

    # Put Python2 virtualenv activation in .bash_profile.
    setup_shell_profile('~/.bash_profile')
    # Put Python2 virtualenv activation in .zprofile (for Zsh users).
    setup_shell_profile('~/.zprofile')

    run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    # create log directory `zulip/var/log`
    run(["mkdir", "-p", LOG_DIR_PATH])
    # create upload directory `var/uploads`
    run(["mkdir", "-p", UPLOAD_DIR_PATH])
    # create test upload directory `var/test_upload`
    run(["mkdir", "-p", TEST_UPLOAD_DIR_PATH])
    # create coverage directory `var/coverage`
    run(["mkdir", "-p", COVERAGE_DIR_PATH])
    # create linecoverage directory `var/linecoverage-report`
    run(["mkdir", "-p", LINECOVERAGE_DIR_PATH])
    # create node coverage directory `var/node-coverage`
    run(["mkdir", "-p", NODE_TEST_COVERAGE_DIR_PATH])

    run(["tools/setup/emoji/build_emoji"])
    run(["tools/setup/build_pygments_data.py"])
    run(["scripts/setup/generate_secrets.py", "--development"])
    run(["tools/update-authors-json", "--use-fixture"])
    if options.is_travis and not options.is_production_travis:
        run(["sudo", "service", "rabbitmq-server", "restart"])
        run(["sudo", "service", "redis-server", "restart"])
        run(["sudo", "service", "memcached", "restart"])
    elif options.is_docker:
        # Docker containers start with stopped services and a stale
        # postgres cluster; recreate it from scratch.
        run(["sudo", "service", "rabbitmq-server", "restart"])
        run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
        run(["sudo", "pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
        run(["sudo", "service", "redis-server", "restart"])
        run(["sudo", "service", "memcached", "restart"])
    if not options.is_production_travis:
        # These won't be used anyway
        run(["scripts/setup/configure-rabbitmq"])
        run(["tools/setup/postgres-init-dev-db"])
        run(["tools/do-destroy-rebuild-database"])
        # Need to set up Django before using is_template_database_current.
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
        import django
        django.setup()

        from zerver.lib.test_fixtures import is_template_database_current
        if options.is_force or not is_template_database_current():
            run(["tools/setup/postgres-init-test-db"])
            run(["tools/do-destroy-rebuild-test-database"])
        else:
            print("No need to regenerate the test DB.")
    run(["./manage.py", "compilemessages"])

    # Here we install nvm, node, and npm.
    run(["sudo", "scripts/lib/install-node"])

    # This is a wrapper around `npm install`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        # Hack: We remove `node_modules` as root to work around an
        # issue with the symlinks being improperly owned by root.
        if os.path.islink("node_modules"):
            run(["sudo", "rm", "-f", "node_modules"])
        if not os.path.isdir(NPM_CACHE_PATH):
            run(["sudo", "mkdir", NPM_CACHE_PATH])
        run(["sudo", "chown", "%s:%s" % (user_id, user_id), NPM_CACHE_PATH])
        setup_node_modules()
    except subprocess.CalledProcessError:
        print(WARNING + "`npm install` failed; retrying..." + ENDC)
        setup_node_modules()

    # Record the provision version so tools can detect a stale environment.
    version_file = os.path.join(ZULIP_PATH, 'var/provision_version')
    print('writing to %s\n' % (version_file,))
    open(version_file, 'w').write(PROVISION_VERSION + '\n')

    print()
    print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
    return 0
if __name__ == "__main__":
    # Parse provisioning-mode flags and run main(); the exit code is
    # main()'s return value.
    description = ("Provision script to install Zulip")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--force', action='store_true', dest='is_force',
                        default=False,
                        help="Ignore all provisioning optimizations.")
    parser.add_argument('--travis', action='store_true', dest='is_travis',
                        default=False,
                        help="Provision for Travis but without production settings.")
    parser.add_argument('--production-travis', action='store_true',
                        dest='is_production_travis',
                        default=False,
                        help="Provision for Travis but with production settings.")
    parser.add_argument('--docker', action='store_true',
                        dest='is_docker',
                        default=False,
                        help="Provision for Docker.")
    options = parser.parse_args()
    sys.exit(main(options))
| |
import json
from django.contrib.auth.decorators import user_passes_test
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from crits.backdoors.forms import AddBackdoorForm
from crits.backdoors.handlers import add_new_backdoor, get_backdoor_details
from crits.backdoors.handlers import backdoor_remove, set_backdoor_name
from crits.backdoors.handlers import update_backdoor_aliases
from crits.backdoors.handlers import set_backdoor_version
from crits.backdoors.handlers import generate_backdoor_csv
from crits.backdoors.handlers import generate_backdoor_jtable
from crits.core import form_consts
from crits.core.data_tools import json_handler
from crits.core.user_tools import user_can_view_data
from crits.vocabulary.acls import BackdoorACL
@user_passes_test(user_can_view_data)
def backdoors_listing(request, option=None):
    """
    Generate the Backdoor listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    request.user._setup()
    user = request.user

    if user.has_access_to(BackdoorACL.READ):
        if option == "csv":
            return generate_backdoor_csv(request)
        elif option == "jtdelete" and not user.has_access_to(BackdoorACL.DELETE):
            # BUGFIX: the key was misspelled 'sucess'; sibling views use
            # 'success', which is what AJAX callers test.
            result = {'success': False,
                      'message': 'User does not have permission to delete Backdoor.'}
            return HttpResponse(json.dumps(result,
                                           default=json_handler),
                                content_type="application/json")
        return generate_backdoor_jtable(request, option)
    else:
        return render(request, "error.html",
                      {'error': 'User does not have permission to view backdoor listing.'})
@user_passes_test(user_can_view_data)
def backdoor_detail(request, id_):
    """
    Generate the Backdoor details page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The Backdoor ObjectId to get details for.
    :type id_: str
    :returns: :class:`django.http.HttpResponse`
    """
    request.user._setup()
    user = request.user

    if not user.has_access_to(BackdoorACL.READ):
        return render(request, "error.html",
                      {'error': 'User does not have permission to view backdoor listing.'})

    template = "backdoor_detail.html"
    (new_template, args) = get_backdoor_details(id_, user)
    if new_template:
        template = new_template
    # Expose the ACL class so templates can check per-action permissions.
    args['BackdoorACL'] = BackdoorACL
    return render(request, template, args)
@user_passes_test(user_can_view_data)
def add_backdoor(request):
    """
    Add a backdoor. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method == "POST" and request.is_ajax():
        request.user._setup()
        user = request.user
        data = request.POST
        form = AddBackdoorForm(request.user, data)
        if form.is_valid():
            if user.has_access_to(BackdoorACL.WRITE):
                cleaned_data = form.cleaned_data
                name = cleaned_data['name']
                aliases = cleaned_data['aliases']
                description = cleaned_data['description']
                version = cleaned_data['version']
                source = cleaned_data['source_name']
                reference = cleaned_data['source_reference']
                method = cleaned_data['source_method']
                tlp = cleaned_data['source_tlp']
                campaign = cleaned_data['campaign']
                confidence = cleaned_data['confidence']
                # (removed a redundant `user = request.user` re-assignment;
                # `user` is already bound above)
                bucket_list = cleaned_data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
                ticket = cleaned_data.get(form_consts.Common.TICKET_VARIABLE_NAME)
                related_id = cleaned_data['related_id']
                related_type = cleaned_data['related_type']
                relationship_type = cleaned_data['relationship_type']
                result = add_new_backdoor(name,
                                          version=version,
                                          aliases=aliases,
                                          description=description,
                                          source=source,
                                          source_method=method,
                                          source_reference=reference,
                                          source_tlp=tlp,
                                          campaign=campaign,
                                          confidence=confidence,
                                          user=user,
                                          bucket_list=bucket_list,
                                          ticket=ticket,
                                          related_id=related_id,
                                          related_type=related_type,
                                          relationship_type=relationship_type)
            else:
                result = {"success": False,
                          "message": "User does not have permission to add new Backdoor."}
            return HttpResponse(json.dumps(result, default=json_handler),
                                content_type="application/json")
        # Invalid form: return the re-rendered form so the client can
        # display the validation errors.
        return HttpResponse(json.dumps({'success': False,
                                        'form': form.as_table()}),
                            content_type="application/json")
    return render(request, "error.html", {'error': 'Expected AJAX/POST'})
@user_passes_test(user_can_view_data)
def remove_backdoor(request, id_):
    """
    Remove a Backdoor.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Backdoor to remove.
    :type id_: str
    :returns: :class:`django.http.HttpResponse`
    """
    if request.method != "POST":
        return render(request, 'error.html',
                      {'error': 'Expected AJAX/POST'})
    request.user._setup()
    user = request.user
    # Only perform the removal when the user holds the DELETE ACL; in
    # either case, redirect back to the listing page.
    if user.has_access_to(BackdoorACL.DELETE):
        backdoor_remove(id_, user.username)
    return HttpResponseRedirect(reverse('crits-backdoors-views-backdoors_listing'))
@user_passes_test(user_can_view_data)
def edit_backdoor_name(request, id_):
    """
    Set backdoor name. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Backdoor.
    :type id_: str
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if request.method == "POST" and request.is_ajax():
        # Consistency fix: every other view in this module calls
        # request.user._setup() before reading request.user.
        request.user._setup()
        user = request.user
        name = request.POST.get('name', None)
        if user.has_access_to(BackdoorACL.NAME_EDIT):
            if not name:
                return HttpResponse(json.dumps({'success': False,
                                                'message': 'Not all info provided.'}),
                                    content_type="application/json")
            result = set_backdoor_name(id_,
                                       name,
                                       user)
            return HttpResponse(json.dumps(result),
                                content_type="application/json")
        else:
            return HttpResponse(json.dumps({'success': False,
                                            'message': 'User does not have permission to edit name.'}),
                                content_type="application/json")
    else:
        error = "Expected AJAX POST"
        return render(request, "error.html", {"error": error})
@user_passes_test(user_can_view_data)
def edit_backdoor_aliases(request):
    """
    Update aliases for a Backdoor.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if not (request.method == "POST" and request.is_ajax()):
        return render(request, "error.html", {"error": "Expected AJAX POST"})
    aliases = request.POST.get('aliases', None)
    id_ = request.POST.get('oid', None)
    request.user._setup()
    user = request.user
    # Permission gate: only ALIASES_EDIT holders may modify aliases.
    if not user.has_access_to(BackdoorACL.ALIASES_EDIT):
        result = {'success': False,
                  'message': 'User does not have permission to modify aliases.'}
    else:
        result = update_backdoor_aliases(id_, aliases, user)
    return HttpResponse(json.dumps(result),
                        content_type="application/json")
@user_passes_test(user_can_view_data)
def edit_backdoor_version(request, id_):
    """
    Set backdoor version. Should be an AJAX POST.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param id_: The ObjectId of the Backdoor.
    :type id_: str
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    if request.method == "POST" and request.is_ajax():
        request.user._setup()
        user = request.user
        version = request.POST.get('version', None)
        # Identity check (PEP 8): an empty string is a legal version value,
        # only a truly missing parameter is an error.
        if version is None:
            return HttpResponse(json.dumps({'success': False,
                                            'message': 'Not all info provided.'}),
                                content_type="application/json")
        if user.has_access_to(BackdoorACL.VERSION_EDIT):
            result = set_backdoor_version(id_, version, user)
            return HttpResponse(json.dumps(result), content_type="application/json")
        else:
            # Bug fix: this branch previously reused 'Not all info provided.'
            # although it is a permission failure, not a missing-data error.
            return HttpResponse(json.dumps({'success': False,
                                            'message': 'User does not have permission to edit version.'}),
                                content_type="application/json")
    else:
        error = "Expected AJAX POST"
        return render(request, "error.html", {"error": error})
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Issues sharded slavekill, delete build directory, and reboot commands."""
import multiprocessing
import optparse
import os
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from common import chromium_utils
from master import slaves_list
def get_masters(parser, options):
  """Given parser options, find suitable master directories."""
  masters_path = chromium_utils.ListMasters()
  # Default (no --master): every master that has been started at least
  # once, i.e. has a twistd.pid file in its directory.
  if not options.master:
    return [m_p for m_p in masters_path
            if os.path.isfile(os.path.join(m_p, 'twistd.pid'))]
  if options.master == 'all':
    return list(masters_path)
  # A specific master name: pick the first path whose basename matches.
  selected = next((p for p in masters_path
                   if os.path.basename(p) == options.master), None)
  if selected is None:
    # optparse's error() prints the message and exits the process.
    parser.error('Unknown master \'%s\'.\nChoices are:\n  %s' % (
        options.master, '\n  '.join((
            os.path.basename(p) for p in masters_path))))
    return []
  return [selected]
def get_slaves(master_paths, slavelist):
  """Return slaves split up by OS.

  Takes a list of master paths and an optional slave whitelist."""
  # Index slaves by hostname so a bot attached to several masters is only
  # counted once.
  by_hostname = {}
  for path in master_paths:
    for slave in chromium_utils.GetSlavesFromMasterPath(path):
      if 'hostname' in slave:
        by_hostname[slave['hostname']] = slave
  slaves = slaves_list.BaseSlavesList(by_hostname.values())
  def hostnames_for(os_type):
    # Hostnames of all slaves of this OS, optionally restricted to the
    # whitelist.
    names = [s.get('hostname') for s in slaves.GetSlaves(os=os_type)]
    if slavelist:
      names = [name for name in names if name in slavelist]
    return names
  return dict((os_type, list(set(hostnames_for(os_type))))
              for os_type in ('win', 'linux', 'mac'))
def get_commands(slaves):
  """Depending on OS, yield the proper nuke-and-pave command sequence."""
  def cmd(command):
    # Wrap a command for the Windows shell.
    return 'cmd.exe /c "%s"' % command
  def cygwin(command):
    # Wrap a command for cygwin bash, escaping embedded double quotes.
    return 'c:\\cygwin\\bin\\bash --login -c "%s"' % (
        command.replace('"', '\\"'))
  win_sequence = [
      cmd('taskkill /IM python.exe /F'),
      cygwin('sleep 3'),
      cygwin('rm -r -f /cygdrive/e/b/build/slave/*/build'),
      cmd('shutdown -r -f -t 1'),
  ]
  posix_sequence = [
      'make -C /b/build/slave stop',
      'sleep 3',
      'rm -rf /b/build/slave/*/build',
      'sudo shutdown -r now',
  ]
  commands = {}
  # Each slave gets its own list so callers can mutate one safely.
  for slave in slaves['win']:
    commands[slave] = list(win_sequence)
  for slave in slaves['mac'] + slaves['linux']:
    commands[slave] = list(posix_sequence)
  return commands
def status_writer(queue):
  """Print list-of-lines status messages pulled from `queue`."""
  # Send None to kill the status writer.
  msg = queue.get()
  while msg:
    print '\n'.join(msg)
    msg = queue.get()
def stdout_writer(queue):
  """Print '<slave>: finished' for each completed slave name from `queue`."""
  # Send None to kill the stdout writer.
  slave = queue.get()
  while slave:
    print '%s: finished' % slave
    slave = queue.get()
def journal_writer(filename, queue):
  """Append each completed slave name from `queue` to the journal file.

  The file is opened in append mode so finished slaves accumulate across
  runs; main() later reads this file to skip already-processed slaves.
  """
  # Send None to kill the journal writer.
  with open(filename, 'a') as f:
    slave = queue.get()
    while slave:
      # pylint: disable=C0323
      print >>f, slave
      slave = queue.get()
def shard_slaves(slaves, max_per_shard):
  """Shard slaves with no more than max_per_shard in each shard."""
  # `slaves` is a dict (slave -> commands); each shard is a list of
  # (slave, commands) pairs.
  # NOTE(review): assumes max_per_shard is an int — optparse delivers
  # option values as strings unless type='int' is set; confirm callers
  # only rely on the integer default.
  shards = []
  for i in xrange(0, len(slaves), max_per_shard):
    shards.append(list(slaves.iteritems())[i:i+max_per_shard])
  return shards
def run_ssh_command(slavepair, worklog, status, errorlog, options):
  """Execute an ssh command as chrome-bot."""
  slave, commands = slavepair
  use_connect = slave.endswith('-c4')
  if options.corp:
    slave += '.chrome'
  # Choose the remote-execution prefix for this slave.
  if use_connect:
    remote = ['connect', slave, '-r']
  else:
    remote = ['ssh', '-o ConnectTimeout=5', 'chrome-bot@%s' % slave]
  if options.dry_run:
    # Only announce what would run; touch neither worklog nor errorlog.
    for command in commands:
      status.put(['%s: %s' % (slave, command)])
    return
  retcode = 0
  for command in commands:
    status.put(['%s: %s' % (slave, command)])
    retcode = subprocess.call(remote + [command])
    if options.verbose:
      status.put(['%s: previous command returned code %d' % (slave, retcode)])
    # A failing first command (slavekill) is tolerated; any later failure
    # aborts the remaining sequence.
    if retcode != 0 and command != commands[0]:
      break
  (worklog if retcode == 0 else errorlog).put(slave)
class Worker(object):
  """Picklable callable that runs the command sequence for one slave."""

  def __init__(self, out_queue, status, errorlog, options):
    # Keep the queues and parsed options so each pool invocation can
    # forward them to run_ssh_command.
    self.out_queue, self.status = out_queue, status
    self.errorlog, self.options = errorlog, options

  def __call__(self, slave):
    run_ssh_command(slave, self.out_queue, self.status, self.errorlog,
                    self.options)
def main():
  """Parse options, compute the per-OS slave set, shard it, and drive a
  process pool that ssh-es into each slave to kill the buildslave, wipe
  build directories, and reboot.  Returns 0 on completion."""
  usage = '%prog [options]'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('--master',
      help=('Master to use to load the slaves list. If omitted, all masters '
            'that were started at least once are included. If \'all\', all '
            'masters are selected.'))
  parser.add_option('--slavelist',
      help=('List of slaves to contact, separated by newlines.'))
  # NOTE(review): no type='int' on the two numeric options below, so
  # values given on the command line arrive as strings and would break
  # xrange()/Pool(); only the integer defaults are safe — confirm.
  parser.add_option('--max-per-shard', default=50,
      help=('Each shard has no more than max-per-shard slaves.'))
  parser.add_option('--max-connections', default=16,
      help=('Maximum concurrent SSH sessions.'))
  # NOTE(review): implicit string concatenation below is missing a space
  # and renders as "skipping themon the next run.".
  parser.add_option('--journal',
      help=('Log completed slaves to a journal file, skipping them'
            'on the next run.'))
  # NOTE(review): "instead out stdout" reads like a typo for "instead of".
  parser.add_option('--errorlog',
      help='Log failed slaves to a file instead out stdout.')
  parser.add_option('--dry-run', action='store_true',
      help='Don\'t execute commands, only print them.')
  parser.add_option('--corp', action='store_true',
      help='Connect to bots within the corp network.')
  parser.add_option('-v', '--verbose', action='store_true')
  options, _ = parser.parse_args(sys.argv)
  masters = get_masters(parser, options)
  if options.verbose:
    print 'reading from:'
    for master in masters:
      print ' ', master
  slavelist = []
  if options.slavelist:
    with open(options.slavelist) as f:
      slavelist = [s.strip() for s in f.readlines()]
  slaves = get_slaves(masters, slavelist)
  if options.verbose and options.slavelist:
    # Warn about whitelist entries that belong to none of the chosen masters.
    wanted_slaves = set(slavelist)
    got_slaves = set()
    for _, s in slaves.iteritems():
      got_slaves.update(s)
    diff = wanted_slaves - got_slaves
    if diff:
      print 'Following slaves are not on selected masters:'
      for s in diff:
        print ' ', s
  if options.journal and os.path.exists(options.journal):
    # Resume support: drop every slave already recorded in the journal
    # by a previous run (journal_writer appends one hostname per line).
    skipped = set()
    with open(options.journal) as f:
      finished_slaves = set([s.strip() for s in f.readlines()])
    for os_type in slaves:
      skipped.update(set(slaves[os_type]) & finished_slaves)
      slaves[os_type] = list(set(slaves[os_type]) - finished_slaves)
    if options.verbose:
      print 'Following slaves have already been processed:'
      for s in skipped:
        print ' ', s
  commands = get_commands(slaves)
  shards = shard_slaves(commands, options.max_per_shard)
  pool = multiprocessing.Pool(processes=options.max_connections)
  m = multiprocessing.Manager()
  worklog = m.Queue()
  status = m.Queue()
  errors = m.Queue()
  # Set up the worklog and status writers.
  if options.journal:
    p = multiprocessing.Process(target=journal_writer,
                                args=(options.journal, worklog))
  else:
    p = multiprocessing.Process(target=stdout_writer, args=(worklog,))
  s = multiprocessing.Process(target=status_writer, args=(status,))
  p.start()
  s.start()
  # Execute commands.  Each shard element is a (hostname, commands) pair.
  for shard in shards:
    if options.verbose:
      print 'Starting next shard with slaves:'
      for slave in shard:
        print ' ', slave
    # The huge timeout (rather than a bare .get()) is presumably to keep
    # the wait interruptible with Ctrl-C — TODO confirm.
    pool.map_async(Worker(worklog, status, errors, options), shard).get(9999999)
    raw_input('Shard finished, press enter to continue...')
  # Clean up the worklog and status writers.
  worklog.put(None)  # Signal worklog writer to stop.
  status.put(None)  # Signal status writer to stop.
  p.join()
  s.join()
  # Print out errors.  Drain the error queue until the sentinel.
  error_list = []
  errors.put(None)  # Signal end of error list.
  e = errors.get()
  while e:
    error_list.append(e)
    e = errors.get()
  if error_list:
    if options.errorlog:
      with open(options.errorlog, 'w') as f:
        for error in error_list:
          # pylint: disable=C0323
          print >>f, error
    else:
      print 'Following slaves had errors:'
      for error in error_list:
        print ' ', error
  return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections.abc import MappingView
from types import MappingProxyType
import numpy as np
from astropy import units as u
from astropy.utils.state import ScienceState
from astropy.utils.decorators import format_doc, classproperty, deprecated
from astropy.coordinates.angles import Angle
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph,
base_doc)
from astropy.coordinates.attributes import (CoordinateAttribute,
QuantityAttribute,
DifferentialAttribute)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.errors import ConvertError
from .icrs import ICRS
__all__ = ['Galactocentric']
# Measured by minimizing the difference between a plane of coordinates along
# l=0, b=[-90,90] and the Galactocentric x-z plane
# This is not used directly, but accessed via `get_roll0`. We define it here to
# prevent having to create new Angle objects every time `get_roll0` is called.
# Module-level constant (degrees); returned by Galactocentric.get_roll0().
_ROLL0 = Angle(58.5986320306*u.degree)
class _StateProxy(MappingView):
"""
`~collections.abc.MappingView` with a read-only ``getitem`` through
`~types.MappingProxyType`.
"""
def __init__(self, mapping):
super().__init__(mapping)
self._mappingproxy = MappingProxyType(self._mapping) # read-only
def __getitem__(self, key):
"""Read-only ``getitem``."""
return self._mappingproxy[key]
def __deepcopy__(self, memo):
return copy.deepcopy(self._mapping, memo=memo)
class galactocentric_frame_defaults(ScienceState):
    """This class controls the global setting of default values for the frame
    attributes in the `~astropy.coordinates.Galactocentric` frame, which may be
    updated in future versions of ``astropy``. Note that when using
    `~astropy.coordinates.Galactocentric`, changing values here will not affect
    any attributes that are set explicitly by passing values in to the
    `~astropy.coordinates.Galactocentric` initializer. Modifying these defaults
    will only affect the frame attribute values when using the frame as, e.g.,
    ``Galactocentric`` or ``Galactocentric()`` with no explicit arguments.

    This class controls the parameter settings by specifying a string name,
    with the following pre-specified options:

    - 'pre-v4.0': The current default value, which sets the default frame
      attribute values to their original (pre-astropy-v4.0) values.
    - 'v4.0': The attribute values as updated in Astropy version 4.0.
    - 'latest': An alias of the most recent parameter set (currently: 'v4.0')

    Alternatively, user-defined parameter settings may be registered, with
    :meth:`~astropy.coordinates.galactocentric_frame_defaults.register`,
    and used identically as pre-specified parameter sets. At minimum,
    registrations must have unique names and a dictionary of parameters
    with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun",
    "roll". See examples below.

    This class also tracks the references for all parameter values in the
    attribute ``references``, as well as any further information the registry.
    The pre-specified options can be extended to include similar
    state information as user-defined parameter settings -- for example, to add
    parameter uncertainties.

    The preferred method for getting a parameter set and metadata, by name, is
    :meth:`~galactocentric_frame_defaults.get_from_registry` since
    it ensures the immutability of the registry.

    See :ref:`astropy-coordinates-galactocentric-defaults` for more information.

    Examples
    --------
    The default `~astropy.coordinates.Galactocentric` frame parameters can be
    modified globally::

        >>> from astropy.coordinates import galactocentric_frame_defaults
        >>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)>
        >>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    The default parameters can also be updated by using this class as a context
    manager::

        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric()) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Again, changing the default parameter values will not affect frame
    attributes that are explicitly specified::

        >>> import astropy.units as u
        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Additional parameter sets may be registered, for instance to use the
    Dehnen & Binney (1998) measurements of the solar motion. We can also
    add metadata, such as the 1-sigma errors. In this example we will modify
    the required key "parameters", change the recommended key "references" to
    match "parameters", and add the extra key "error" (any key can be added)::

        >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
        >>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s)
        >>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D"
        >>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)}
        >>> galactocentric_frame_defaults.register(name="DB1998", **state)

    Just as in the previous examples, the new parameter set can be retrieved with::

        >>> state = galactocentric_frame_defaults.get_from_registry("DB1998")
        >>> print(state["error"]["galcen_v_sun"])  # doctest: +FLOAT_CMP
        [0.36 0.62 0.38] km / s

    """

    # Name resolved by the 'latest' alias.
    _latest_value = 'v4.0'
    # Parameter dict of the currently-active setting (set by validate()).
    _value = None
    # References for the currently-active setting.
    _references = None
    _state = dict()  # all other data

    # Note: _StateProxy() produces read-only view of enclosed mapping.
    _registry = {
        "v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.122 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [12.9, 245.6, 7.78] * (u.km / u.s)
                    ),
                    "z_sun": 20.8 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    "galcen_distance": "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D",
                        "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
                        "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B",
                    "roll": None,
                }
            ),
        },
        "pre-v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.3 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [11.1, 220 + 12.24, 7.25] * (u.km / u.s)
                    ),
                    "z_sun": 27.0 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    "galcen_distance": "https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G",
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S",
                        "https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C",
                    "roll": None,
                }
            ),
        },
    }

    @classproperty  # read-only
    def parameters(cls):
        return cls._value

    @classproperty  # read-only
    def references(cls):
        return cls._references

    @classmethod
    def get_from_registry(cls, name: str):
        """
        Return Galactocentric solar parameters and metadata given string names
        for the parameter sets. This method ensures the returned state is a
        mutable copy, so any changes made do not affect the registry state.

        Returns
        -------
        state : dict
            Copy of the registry for the string name.
            Should contain, at minimum:

            - "parameters": dict
                Galactocentric solar parameters
            - "references" : Dict[str, Union[str, Sequence[str]]]
                References for "parameters".
                Fields are str or sequence of str.

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        # Resolve the meaning of 'latest': latest parameter set is from v4.0
        # - update this as newer parameter choices are added
        if name == 'latest':
            name = cls._latest_value

        # Get the state from the registry.
        # Copy to ensure registry is immutable to modifications of "_value".
        # Raises KeyError if `name` is invalid string input to registry
        # to retrieve solar parameters for Galactocentric frame.
        state = copy.deepcopy(cls._registry[name])  # ensure mutable

        return state

    @deprecated("v4.2", alternative="`get_from_registry`")
    @classmethod
    def get_solar_params_from_string(cls, arg):
        """
        Return Galactocentric solar parameters given string names
        for the parameter sets.

        Returns
        -------
        parameters : dict
            Copy of Galactocentric solar parameters from registry

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        return cls.get_from_registry(arg)["parameters"]

    @classmethod
    def validate(cls, value):
        # ScienceState hook: normalize `value` (None, str, dict, or a
        # Galactocentric frame instance) to a parameter dict, updating the
        # cached references/state as a side effect.
        if value is None:
            value = cls._latest_value

        if isinstance(value, str):
            state = cls.get_from_registry(value)
            cls._references = state["references"]
            cls._state = state
            parameters = state["parameters"]

        elif isinstance(value, dict):
            parameters = value

        elif isinstance(value, Galactocentric):
            # turn the frame instance into a dict of frame attributes
            parameters = dict()
            for k in value.frame_attributes:
                parameters[k] = getattr(value, k)
            cls._references = value.frame_attribute_references.copy()
            cls._state = dict(parameters=parameters,
                              references=cls._references)

        else:
            raise ValueError("Invalid input to retrieve solar parameters for "
                             "Galactocentric frame: input must be a string, "
                             "dict, or Galactocentric instance")

        return parameters

    @classmethod
    def register(cls, name: str, parameters: dict, references=None,
                 **meta: dict):
        """Register a set of parameters.

        Parameters
        ----------
        name : str
            The registration name for the parameter and metadata set.
        parameters : dict
            The solar parameters for Galactocentric frame.
        references : dict or None, optional
            References for contents of `parameters`.
            None becomes empty dict.
        **meta: dict, optional
            Any other properties to register.
        """
        # check on contents of `parameters`
        must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun",
                     "z_sun", "roll"}
        missing = must_have.difference(parameters)
        if missing:
            raise ValueError(f"Missing parameters: {missing}")

        references = references or {}  # None -> {}

        state = dict(parameters=parameters, references=references)
        state.update(meta)  # meta never has keys "parameters" or "references"

        cls._registry[name] = state
doc_components = """
x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`x` position component.
y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`y` position component.
z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`z` position component.
v_x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_x` velocity component.
v_y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_y` velocity component.
v_z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_z` velocity component.
"""
doc_footer = """
Other parameters
----------------
galcen_coord : `ICRS`, optional, must be keyword
The ICRS coordinates of the Galactic center.
galcen_distance : `~astropy.units.Quantity`, optional, must be keyword
The distance from the sun to the Galactic center.
galcen_v_sun : `~astropy.coordinates.representation.CartesianDifferential`, optional, must be keyword
The velocity of the sun *in the Galactocentric frame* as Cartesian
velocity components.
z_sun : `~astropy.units.Quantity`, optional, must be keyword
The distance from the sun to the Galactic midplane.
roll : `~astropy.coordinates.Angle`, optional, must be keyword
The angle to rotate about the final x-axis, relative to the
orientation for Galactic. For example, if this roll angle is 0,
the final x-z plane will align with the Galactic coordinates x-z
plane. Unless you really know what this means, you probably should
not change this!
Examples
--------
To transform to the Galactocentric frame with the default
frame attributes, pass the uninstantiated class name to the
``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree,
... dec=[-17.3, 81.52] * u.degree,
... distance=[11.5, 24.12] * u.kpc,
... frame='icrs')
>>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.43489286, -9.40062188, 6.51345359),
(-21.11044918, 18.76334013, 7.83175149)]>
To specify a custom set of parameters, you have to include extra keyword
arguments when initializing the Galactocentric frame object::
>>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.41284763, -9.40062188, 6.51346272),
(-21.08839478, 18.76334013, 7.83184184)]>
Similarly, transforming from the Galactocentric frame to another coordinate frame::
>>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[0.027, 24.12] * u.kpc,
... frame=coord.Galactocentric)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 88.22423301, 29.88672864, 0.17813456),
(289.72864549, 49.9865043 , 85.93949064)]>
Or, with custom specification of the Galactic center::
>>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[21.0, 24120.0] * u.pc,
... frame=coord.Galactocentric,
... z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 86.2585249 , 28.85773187, 2.75625475e-05),
(289.77285255, 50.06290457, 8.59216010e+01)]>
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactocentric(BaseCoordinateFrame):
    r"""
    A coordinate or frame in the Galactocentric system.

    This frame allows specifying the Sun-Galactic center distance, the height of
    the Sun above the Galactic midplane, and the solar motion relative to the
    Galactic center. However, as there is no modern standard definition of a
    Galactocentric reference frame, it is important to pay attention to the
    default values used in this class if precision is important in your code.
    The default values of the parameters of this frame are taken from the
    original definition of the frame in 2014. As such, the defaults are somewhat
    out of date relative to recent measurements made possible by, e.g., Gaia.
    The defaults can, however, be changed at runtime by setting the parameter
    set name in `~astropy.coordinates.galactocentric_frame_defaults`.

    The current default parameter set is ``"pre-v4.0"``, indicating that the
    parameters were adopted before ``astropy`` version 4.0. A regularly-updated
    parameter set can instead be used by setting
    ``galactocentric_frame_defaults.set ('latest')``, and other parameter set
    names may be added in future versions. To find out the scientific papers
    that the current default parameters are derived from, use
    ``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
    this frame), which will update even if the default parameter set is changed.

    The position of the Sun is assumed to be on the x axis of the final,
    right-handed system. That is, the x axis points from the position of
    the Sun projected to the Galactic midplane to the Galactic center --
    roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
    transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
    towards Galactic longitude :math:`l=90^\circ`, and the z axis points
    roughly towards the North Galactic Pole (:math:`b=90^\circ`).

    For a more detailed look at the math behind this transformation, see
    the document :ref:`coordinates-galactocentric`.

    The frame attributes are listed under **Other Parameters**.
    """

    default_representation = r.CartesianRepresentation
    default_differential = r.CartesianDifferential

    # frame attributes
    # Default values for all five attributes come from
    # galactocentric_frame_defaults in __init__ below, not from the
    # attribute declarations themselves.
    galcen_coord = CoordinateAttribute(frame=ICRS)
    galcen_distance = QuantityAttribute(unit=u.kpc)
    galcen_v_sun = DifferentialAttribute(
        allowed_classes=[r.CartesianDifferential])
    z_sun = QuantityAttribute(unit=u.pc)
    roll = QuantityAttribute(unit=u.deg)

    def __init__(self, *args, **kwargs):
        # Set default frame attribute values based on the ScienceState instance
        # for the solar parameters defined above
        default_params = galactocentric_frame_defaults.get()
        self.frame_attribute_references = \
            galactocentric_frame_defaults.references.copy()

        for k in default_params:
            if k in kwargs:
                # If a frame attribute is set by the user, remove its reference
                self.frame_attribute_references.pop(k, None)

            # Keep the frame attribute if it is set by the user, otherwise use
            # the default value
            kwargs[k] = kwargs.get(k, default_params[k])

        super().__init__(*args, **kwargs)

    @classmethod
    def get_roll0(cls):
        """
        The additional roll angle (about the final x axis) necessary to align
        the final z axis to match the Galactic yz-plane. Setting the ``roll``
        frame attribute to -this method's return value removes this rotation,
        allowing the use of the `Galactocentric` frame in more general contexts.
        """
        # note that the actual value is defined at the module level. We make it
        # a property here because this module isn't actually part of the public
        # API, so it's better for it to be accessible from Galactocentric
        return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
    """
    Compute the rotation matrix and (position + velocity) offset that take
    ICRS coordinates into the given Galactocentric frame.

    Use the ``inverse`` argument to get the inverse transformation, matrix and
    offsets to go from Galactocentric to ICRS.

    Returns
    -------
    A : matrix
        The combined rotation matrix.
    offset : `~astropy.coordinates.CartesianRepresentation`
        Translation offset, carrying the solar-velocity differential.
    """
    # shorthand
    gcf = galactocentric_frame
    # rotation matrix to align x(ICRS) with the vector to the Galactic center
    mat1 = rotation_matrix(-gcf.galcen_coord.dec, 'y')
    mat2 = rotation_matrix(gcf.galcen_coord.ra, 'z')
    # extra roll away from the Galactic x-z plane
    mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, 'x')
    # construct transformation matrix and use it
    R = matrix_product(mat0, mat1, mat2)
    # Now need to translate by Sun-Galactic center distance around x' and
    # rotate about y' to account for tilt due to Sun's height above the plane
    translation = r.CartesianRepresentation(gcf.galcen_distance * [1., 0., 0.])
    # sin(tilt) = z_sun / galcen_distance
    z_d = gcf.z_sun / gcf.galcen_distance
    H = rotation_matrix(-np.arcsin(z_d), 'y')
    # compute total matrices
    A = matrix_product(H, R)
    # Now we re-align the translation vector to account for the Sun's height
    # above the midplane
    offset = -translation.transform(H)
    if inverse:
        # the inverse of a rotation matrix is a transpose, which is much faster
        # and more stable to compute
        A = matrix_transpose(A)
        offset = (-offset).transform(A)
        # Velocity offset must also be rotated into the inverse frame.
        offset_v = r.CartesianDifferential.from_cartesian(
            (-gcf.galcen_v_sun).to_cartesian().transform(A))
        offset = offset.with_differentials(offset_v)
    else:
        offset = offset.with_differentials(gcf.galcen_v_sun)
    return A, offset
def _check_coord_repr_diff_types(c):
    """Reject coordinate data that lacks full 3D position/velocity info.

    Raises
    ------
    ConvertError
        If the positional data has no distance (unit-spherical), or if the
        attached velocity differential is missing components.
    """
    if isinstance(c.data, r.UnitSphericalRepresentation):
        raise ConvertError("Transforming to/from a Galactocentric frame "
                           "requires a 3D coordinate, e.g. (angle, angle, "
                           "distance) or (x, y, z).")

    incomplete_vel_types = (r.UnitSphericalDifferential,
                            r.UnitSphericalCosLatDifferential,
                            r.RadialDifferential)
    vel = c.data.differentials.get('s', None)
    if vel is not None and isinstance(vel, incomplete_vel_types):
        raise ConvertError("Transforming to/from a Galactocentric frame "
                           "requires a 3D velocity, e.g., proper motion "
                           "components and radial velocity.")
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
    """Affine transform from ICRS to Galactocentric (matrix + offset)."""
    # Fail early with a clear message if positions/velocities are not 3D.
    _check_coord_repr_diff_types(icrs_coord)
    return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
    """Affine transform from Galactocentric back to ICRS (inverse matrix)."""
    # Fail early with a clear message if positions/velocities are not 3D.
    _check_coord_repr_diff_types(galactocentric_coord)
    return get_matrix_vectors(galactocentric_coord, inverse=True)
| |
# Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_log import log as logging
from cinder import exception
from cinder import objects
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
LOG = logging.getLogger(__name__)
# Build a fake DB row and strip the joined fields so it looks like a raw
# snapshot record as returned by the DB API.
fake_db_snapshot = fake_snapshot.fake_db_snapshot()
del fake_db_snapshot['metadata']
del fake_db_snapshot['volume']
# NOTE(andrey-mp): make Snapshot object here to check object algorithms
# Expected field values after the DB row is loaded into a Snapshot object.
fake_snapshot_obj = {
    'id': '1',
    'volume_id': 'fake_id',
    'status': "creating",
    'progress': '0%',
    'volume_size': 1,
    'display_name': 'fake_name',
    'display_description': 'fake_description',
    'metadata': {},
}
class TestSnapshot(test_objects.BaseObjectsTestCase):
    """Unit tests for the Snapshot versioned object.

    All DB access is mocked out; assertions check both the object's field
    values and the exact arguments passed to the mocked DB layer.
    """

    @staticmethod
    def _compare(test, expected, actual):
        # Compare only the fields listed in ``expected``; ``actual`` may be a
        # Snapshot object or a dict (both support item access).
        for field, value in expected.items():
            test.assertEqual(expected[field], actual[field],
                             "Field '%s' is not equal" % field)
    @mock.patch('cinder.db.snapshot_get', return_value=fake_db_snapshot)
    def test_get_by_id(self, snapshot_get):
        snapshot = objects.Snapshot.get_by_id(self.context, 1)
        self._compare(self, fake_snapshot_obj, snapshot)
    def test_reset_changes(self):
        # obj_reset_changes must snapshot the current metadata into
        # _orig_metadata so later change tracking diffs against it.
        snapshot = objects.Snapshot()
        snapshot.metadata = {'key1': 'value1'}
        self.assertEqual({}, snapshot._orig_metadata)
        snapshot.obj_reset_changes(['metadata'])
        self.assertEqual({'key1': 'value1'}, snapshot._orig_metadata)
    @mock.patch('cinder.db.snapshot_create', return_value=fake_db_snapshot)
    def test_create(self, snapshot_create):
        snapshot = objects.Snapshot(context=self.context)
        snapshot.create()
        self.assertEqual(fake_snapshot_obj['id'], snapshot.id)
        self.assertEqual(fake_snapshot_obj['volume_id'], snapshot.volume_id)
    @mock.patch('cinder.db.snapshot_create')
    def test_create_with_provider_id(self, snapshot_create):
        # Deep-copy the shared fixture so mutating it doesn't leak into
        # other tests.
        snapshot_create.return_value = copy.deepcopy(fake_db_snapshot)
        snapshot_create.return_value['provider_id'] = '1111-aaaa'
        snapshot = objects.Snapshot(context=self.context)
        snapshot.create()
        self.assertEqual('1111-aaaa', snapshot.provider_id)
    @mock.patch('cinder.db.snapshot_update')
    def test_save(self, snapshot_update):
        snapshot = objects.Snapshot._from_db_object(
            self.context, objects.Snapshot(), fake_db_snapshot)
        snapshot.display_name = 'foobar'
        snapshot.save()
        # Only the changed field should reach the DB update call.
        snapshot_update.assert_called_once_with(self.context, snapshot.id,
                                                {'display_name': 'foobar'})
    @mock.patch('cinder.db.snapshot_metadata_update',
                return_value={'key1': 'value1'})
    @mock.patch('cinder.db.snapshot_update')
    def test_save_with_metadata(self, snapshot_update,
                                snapshot_metadata_update):
        # Metadata changes are persisted through a separate DB call from the
        # plain field update.
        snapshot = objects.Snapshot._from_db_object(
            self.context, objects.Snapshot(), fake_db_snapshot)
        snapshot.display_name = 'foobar'
        snapshot.metadata = {'key1': 'value1'}
        self.assertEqual({'display_name': 'foobar',
                          'metadata': {'key1': 'value1'}},
                         snapshot.obj_get_changes())
        snapshot.save()
        snapshot_update.assert_called_once_with(self.context, snapshot.id,
                                                {'display_name': 'foobar'})
        snapshot_metadata_update.assert_called_once_with(self.context, '1',
                                                         {'key1': 'value1'},
                                                         True)
    @mock.patch('cinder.db.snapshot_destroy')
    def test_destroy(self, snapshot_destroy):
        snapshot = objects.Snapshot(context=self.context, id=1)
        snapshot.destroy()
        # NOTE(review): the id is passed through as the string '1' —
        # presumably coerced by the object's field type.
        snapshot_destroy.assert_called_once_with(self.context, '1')
    @mock.patch('cinder.db.snapshot_metadata_delete')
    def test_delete_metadata_key(self, snapshot_metadata_delete):
        snapshot = objects.Snapshot(self.context, id=1)
        snapshot.metadata = {'key1': 'value1', 'key2': 'value2'}
        self.assertEqual({}, snapshot._orig_metadata)
        snapshot.delete_metadata_key(self.context, 'key2')
        self.assertEqual({'key1': 'value1'}, snapshot.metadata)
        snapshot_metadata_delete.assert_called_once_with(self.context, '1',
                                                         'key2')
    def test_obj_fields(self):
        # 'name' and 'volume_name' are computed extra fields derived from the
        # snapshot id and the associated volume.
        volume = objects.Volume(context=self.context, id=2, _name_id=2)
        snapshot = objects.Snapshot(context=self.context, id=1,
                                    volume=volume)
        self.assertEqual(['name', 'volume_name'], snapshot.obj_extra_fields)
        self.assertEqual('snapshot-1', snapshot.name)
        self.assertEqual('volume-2', snapshot.volume_name)
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_obj_load_attr(self, volume_get_by_id):
        # Accessing the unset 'volume' attribute should lazy-load it via
        # Volume.get_by_id.
        snapshot = objects.Snapshot._from_db_object(
            self.context, objects.Snapshot(), fake_db_snapshot)
        volume = objects.Volume(context=self.context, id=2)
        volume_get_by_id.return_value = volume
        self.assertEqual(volume, snapshot.volume)
        volume_get_by_id.assert_called_once_with(self.context,
                                                 snapshot.volume_id)
    @mock.patch('cinder.db.snapshot_data_get_for_project')
    def test_snapshot_data_get_for_project(self, snapshot_data_get):
        snapshot = objects.Snapshot._from_db_object(
            self.context, objects.Snapshot(), fake_db_snapshot)
        volume_type_id = mock.sentinel.volume_type_id
        snapshot.snapshot_data_get_for_project(self.context,
                                               self.project_id,
                                               volume_type_id)
        snapshot_data_get.assert_called_once_with(self.context,
                                                  self.project_id,
                                                  volume_type_id)
class TestSnapshotList(test_objects.BaseObjectsTestCase):
    """Unit tests for the SnapshotList object's query classmethods.

    Each test mocks one DB list call to return the shared fake row and
    verifies the resulting list contents and the DB call arguments.
    """

    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all', return_value=[fake_db_snapshot])
    def test_get_all(self, snapshot_get_all, volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        search_opts = mock.sentinel.search_opts
        snapshots = objects.SnapshotList.get_all(
            self.context, search_opts)
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
        snapshot_get_all.assert_called_once_with(self.context, search_opts)
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_by_host',
                return_value=[fake_db_snapshot])
    def test_get_by_host(self, get_by_host, volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList.get_by_host(
            self.context, 'fake-host')
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all_by_project',
                return_value=[fake_db_snapshot])
    def test_get_all_by_project(self, get_all_by_project, volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        search_opts = mock.sentinel.search_opts
        snapshots = objects.SnapshotList.get_all_by_project(
            self.context, self.project_id, search_opts)
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
        get_all_by_project.assert_called_once_with(self.context,
                                                   self.project_id,
                                                   search_opts)
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all_for_volume',
                return_value=[fake_db_snapshot])
    def test_get_all_for_volume(self, get_all_for_volume, volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList.get_all_for_volume(
            self.context, fake_volume_obj.id)
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_active_by_window',
                return_value=[fake_db_snapshot])
    def test_get_active_by_window(self, get_active_by_window,
                                  volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList.get_active_by_window(
            self.context, mock.sentinel.begin, mock.sentinel.end)
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot',
                return_value=[fake_db_snapshot])
    def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot,
                                    volume_get_by_id):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
            self.context, mock.sentinel.cgsnapshot_id)
        self.assertEqual(1, len(snapshots))
        TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0])
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all')
    def test_get_all_without_metadata(self, snapshot_get_all,
                                      volume_get_by_id):
        # A DB row with no 'snapshot_metadata' key must raise MetadataAbsent
        # rather than silently producing a snapshot without metadata.
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        snapshot = copy.deepcopy(fake_db_snapshot)
        del snapshot['snapshot_metadata']
        snapshot_get_all.return_value = [snapshot]
        search_opts = mock.sentinel.search_opts
        self.assertRaises(exception.MetadataAbsent,
                          objects.SnapshotList.get_all,
                          self.context, search_opts)
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    @mock.patch('cinder.db.snapshot_get_all')
    def test_get_all_with_metadata(self, snapshot_get_all, volume_get_by_id):
        # The list of {'key': ..., 'value': ...} rows should be folded into a
        # plain metadata dict on the object.
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)
        volume_get_by_id.return_value = fake_volume_obj
        db_snapshot = copy.deepcopy(fake_db_snapshot)
        db_snapshot['snapshot_metadata'] = [{'key': 'fake_key',
                                             'value': 'fake_value'}]
        snapshot_get_all.return_value = [db_snapshot]
        search_opts = mock.sentinel.search_opts
        snapshots = objects.SnapshotList.get_all(
            self.context, search_opts)
        self.assertEqual(1, len(snapshots))
        snapshot_obj = copy.deepcopy(fake_snapshot_obj)
        snapshot_obj['metadata'] = {'fake_key': 'fake_value'}
        TestSnapshot._compare(self, snapshot_obj, snapshots[0])
        snapshot_get_all.assert_called_once_with(self.context, search_opts)
| |
#!/usr/bin/env python
#
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Adds the code parts to a resource APK."""
import argparse
import itertools
import os
import shutil
import sys
import zipfile
from util import build_utils
# Taken from aapt's Package.cpp:
# File extensions whose contents are already compressed; storing them
# uncompressed in the zip avoids double compression and allows mmap'ing.
_NO_COMPRESS_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.gif', '.wav', '.mp2',
                           '.mp3', '.ogg', '.aac', '.mpg', '.mpeg', '.mid',
                           '.midi', '.smf', '.jet', '.rtttl', '.imy', '.xmf',
                           '.mp4', '.m4a', '.m4v', '.3gp', '.3gpp', '.3g2',
                           '.3gpp2', '.amr', '.awb', '.wma', '.wmv', '.webm')
def _ParseArgs(args):
  """Parses command-line arguments for apkbuilder.

  GYP-list arguments are expanded into plain Python lists, and inter-option
  constraints (ABI required when native libs are given) are validated.

  Returns:
    An argparse.Namespace with all options normalized.

  Raises:
    Exception: If native libraries are supplied without the matching ABI.
  """
  parser = argparse.ArgumentParser()
  build_utils.AddDepfileOption(parser)
  parser.add_argument('--assets',
                      help='GYP-list of files to add as assets in the form '
                           '"srcPath:zipPath", where ":zipPath" is optional.',
                      default='[]')
  parser.add_argument('--write-asset-list',
                      action='store_true',
                      help='Whether to create an assets/assets_list file.')
  parser.add_argument('--uncompressed-assets',
                      help='Same as --assets, except disables compression.',
                      default='[]')
  parser.add_argument('--resource-apk',
                      help='An .ap_ file built using aapt',
                      required=True)
  parser.add_argument('--output-apk',
                      help='Path to the output file',
                      required=True)
  parser.add_argument('--dex-file',
                      help='Path to the classes.dex to use')
  parser.add_argument('--native-libs',
                      action='append',
                      help='GYP-list of native libraries to include. '
                           'Can be specified multiple times.',
                      default=[])
  parser.add_argument('--secondary-native-libs',
                      action='append',
                      help='GYP-list of native libraries for secondary '
                           'android-abi. Can be specified multiple times.',
                      default=[])
  parser.add_argument('--android-abi',
                      help='Android architecture to use for native libraries')
  parser.add_argument('--secondary-android-abi',
                      help='The secondary Android architecture to use for'
                           'secondary native libraries')
  parser.add_argument('--native-lib-placeholders',
                      help='GYP-list of native library placeholders to add.',
                      default='[]')
  parser.add_argument('--emma-device-jar',
                      help='Path to emma_device.jar to include.')
  parser.add_argument('--uncompress-shared-libraries',
                      action='store_true',
                      help='Uncompress shared libraries')
  options = parser.parse_args(args)
  # Expand GYP-list syntax ("[a,b,...]") into real lists.
  options.assets = build_utils.ParseGypList(options.assets)
  options.uncompressed_assets = build_utils.ParseGypList(
      options.uncompressed_assets)
  options.native_lib_placeholders = build_utils.ParseGypList(
      options.native_lib_placeholders)
  # --native-libs may be repeated; flatten all the parsed lists into one.
  all_libs = []
  for gyp_list in options.native_libs:
    all_libs.extend(build_utils.ParseGypList(gyp_list))
  options.native_libs = all_libs
  secondary_libs = []
  for gyp_list in options.secondary_native_libs:
    secondary_libs.extend(build_utils.ParseGypList(gyp_list))
  options.secondary_native_libs = secondary_libs
  # Native libraries cannot be placed without knowing the target ABI.
  if not options.android_abi and (options.native_libs or
                                  options.native_lib_placeholders):
    raise Exception('Must specify --android-abi with --native-libs')
  if not options.secondary_android_abi and options.secondary_native_libs:
    raise Exception('Must specify --secondary-android-abi with'
                    ' --secondary-native-libs')
  return options
def _SplitAssetPath(path):
"""Returns (src, dest) given an asset path in the form src[:dest]."""
path_parts = path.split(':')
src_path = path_parts[0]
if len(path_parts) > 1:
dest_path = path_parts[1]
else:
dest_path = os.path.basename(src_path)
return src_path, dest_path
def _ExpandPaths(paths):
  """Converts src:dst into tuples and enumerates files within directories.

  Args:
    paths: Paths in the form "src_path:dest_path"
  Returns:
    A list of (src_path, dest_path) tuples sorted by dest_path (for stable
    ordering within output .apk).
  """
  expanded = []
  for spec in paths:
    src, dest = _SplitAssetPath(spec)
    if not os.path.isdir(src):
      expanded.append((src, dest))
      continue
    # Recursively enumerate directory contents, re-rooting each file's
    # relative path under the destination directory.
    prefix_len = len(src) + 1
    for found in build_utils.FindInDirectory(src, '*'):
      expanded.append((found, os.path.join(dest, found[prefix_len:])))
  return sorted(expanded, key=lambda pair: pair[1])
def _AddAssets(apk, path_tuples, disable_compression=False):
  """Adds the given paths to the apk.

  Args:
    apk: ZipFile to write to.
    paths: List of paths (with optional :zipPath suffix) to add.
    disable_compression: Whether to disable compression.
  """
  # Group all uncompressed assets together in the hope that it will increase
  # locality of mmap'ed files.
  for target_compress in (False, True):
    for src_path, dest_path in path_tuples:
      # Already-compressed formats (see _NO_COMPRESS_EXTENSIONS) are always
      # stored uncompressed.
      compress = not disable_compression and (
          os.path.splitext(src_path)[1] not in _NO_COMPRESS_EXTENSIONS)
      if target_compress == compress:
        apk_path = 'assets/' + dest_path
        try:
          # getinfo() raises KeyError when the entry is absent; the KeyError
          # branch below is therefore the normal "not yet added" path.
          apk.getinfo(apk_path)
          # Should never happen since write_build_config.py handles merging.
          raise Exception('Multiple targets specified the asset path: %s' %
                          apk_path)
        except KeyError:
          build_utils.AddToZipHermetic(apk, apk_path, src_path=src_path,
                                       compress=compress)
def _CreateAssetsList(path_tuples):
"""Returns a newline-separated list of asset paths for the given paths."""
dests = sorted(t[1] for t in path_tuples)
return '\n'.join(dests) + '\n'
def _AddNativeLibraries(out_apk, native_libs, android_abi, uncompress):
  """Add native libraries to APK, under lib/<abi>/."""
  for lib_path in native_libs:
    lib_name = os.path.basename(lib_path)
    # compress=None lets AddToZipHermetic pick its default; only .so files
    # are forced to be stored uncompressed when requested.
    compress = None
    if uncompress and os.path.splitext(lib_name)[1] == '.so':
      compress = False
    build_utils.AddToZipHermetic(out_apk,
                                 'lib/%s/%s' % (android_abi, lib_name),
                                 src_path=lib_path,
                                 compress=compress)
def main(args):
  """Assembles the final APK from the resource apk, dex, libs and assets.

  The actual writing happens in the nested ``on_stale_md5`` callback, which
  build_utils only invokes when any input path/string changed.
  """
  args = build_utils.ExpandFileArgs(args)
  options = _ParseArgs(args)
  native_libs = sorted(options.native_libs)
  input_paths = [options.resource_apk, __file__] + native_libs
  # Include native libs in the depfile_deps since GN doesn't know about the
  # dependencies when is_component_build=true.
  depfile_deps = list(native_libs)
  secondary_native_libs = []
  if options.secondary_native_libs:
    secondary_native_libs = sorted(options.secondary_native_libs)
    input_paths += secondary_native_libs
    depfile_deps += secondary_native_libs
  if options.dex_file:
    input_paths.append(options.dex_file)
  if options.emma_device_jar:
    input_paths.append(options.emma_device_jar)
  input_strings = [options.android_abi,
                   options.native_lib_placeholders,
                   options.uncompress_shared_libraries]
  if options.secondary_android_abi:
    input_strings.append(options.secondary_android_abi)
  _assets = _ExpandPaths(options.assets)
  _uncompressed_assets = _ExpandPaths(options.uncompressed_assets)
  # Both source paths and dest names affect the output, so both participate
  # in the staleness check.
  for src_path, dest_path in itertools.chain(_assets, _uncompressed_assets):
    input_paths.append(src_path)
    input_strings.append(dest_path)
  def on_stale_md5():
    # Write to a temp file and rename at the end so that the output is only
    # ever complete; the temp file is removed on any failure.
    tmp_apk = options.output_apk + '.tmp'
    try:
      # TODO(agrieve): It would be more efficient to combine this step
      # with finalize_apk(), which sometimes aligns and uncompresses the
      # native libraries.
      with zipfile.ZipFile(options.resource_apk) as resource_apk, \
           zipfile.ZipFile(tmp_apk, 'w', zipfile.ZIP_DEFLATED) as out_apk:
        def copy_resource(zipinfo):
          # Preserve the entry's stored/deflated state from the source apk.
          compress = zipinfo.compress_type != zipfile.ZIP_STORED
          build_utils.AddToZipHermetic(out_apk, zipinfo.filename,
                                       data=resource_apk.read(zipinfo.filename),
                                       compress=compress)
        # Make assets come before resources in order to maintain the same file
        # ordering as GYP / aapt. http://crbug.com/561862
        resource_infos = resource_apk.infolist()
        # 1. AndroidManifest.xml
        assert resource_infos[0].filename == 'AndroidManifest.xml'
        copy_resource(resource_infos[0])
        # 2. Assets
        if options.write_asset_list:
          data = _CreateAssetsList(
              itertools.chain(_assets, _uncompressed_assets))
          build_utils.AddToZipHermetic(out_apk, 'assets/assets_list', data=data)
        _AddAssets(out_apk, _assets, disable_compression=False)
        _AddAssets(out_apk, _uncompressed_assets, disable_compression=True)
        # 3. Dex files
        if options.dex_file and options.dex_file.endswith('.zip'):
          with zipfile.ZipFile(options.dex_file, 'r') as dex_zip:
            for dex in (d for d in dex_zip.namelist() if d.endswith('.dex')):
              build_utils.AddToZipHermetic(out_apk, dex, data=dex_zip.read(dex))
        elif options.dex_file:
          build_utils.AddToZipHermetic(out_apk, 'classes.dex',
                                       src_path=options.dex_file)
        # 4. Native libraries.
        _AddNativeLibraries(out_apk,
                            native_libs,
                            options.android_abi,
                            options.uncompress_shared_libraries)
        if options.secondary_android_abi:
          _AddNativeLibraries(out_apk,
                              secondary_native_libs,
                              options.secondary_android_abi,
                              options.uncompress_shared_libraries)
        for name in sorted(options.native_lib_placeholders):
          # Empty libs files are ignored by md5check, but rezip requires them
          # to be empty in order to identify them as placeholders.
          apk_path = 'lib/%s/%s' % (options.android_abi, name)
          build_utils.AddToZipHermetic(out_apk, apk_path, data='')
        # 5. Resources
        for info in resource_infos[1:]:
          copy_resource(info)
        # 6. Java resources. Used only when coverage is enabled, so order
        # doesn't matter).
        if options.emma_device_jar:
          # Add EMMA Java resources to APK.
          with zipfile.ZipFile(options.emma_device_jar, 'r') as emma_device_jar:
            for apk_path in emma_device_jar.namelist():
              apk_path_lower = apk_path.lower()
              # Skip jar metadata, directories and compiled classes; only
              # plain resource files are carried over.
              if apk_path_lower.startswith('meta-inf/'):
                continue
              if apk_path_lower.endswith('/'):
                continue
              if apk_path_lower.endswith('.class'):
                continue
              build_utils.AddToZipHermetic(out_apk, apk_path,
                                           data=emma_device_jar.read(apk_path))
      shutil.move(tmp_apk, options.output_apk)
    finally:
      if os.path.exists(tmp_apk):
        os.unlink(tmp_apk)
  build_utils.CallAndWriteDepfileIfStale(
      on_stale_md5,
      options,
      input_paths=input_paths,
      input_strings=input_strings,
      output_paths=[options.output_apk],
      depfile_deps=depfile_deps)
# Script entry point: forward command-line arguments (minus argv[0]).
if __name__ == '__main__':
  main(sys.argv[1:])
| |
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class PagedMonitoredApplicationDTO(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'cursor': 'str',
        'items': 'list[MonitoredApplicationDTO]',
        'limit': 'int',
        'more_items': 'bool',
        'offset': 'int',
        'sort': 'Sorting',
        'total_items': 'int'
    }
    attribute_map = {
        'cursor': 'cursor',
        'items': 'items',
        'limit': 'limit',
        'more_items': 'moreItems',
        'offset': 'offset',
        'sort': 'sort',
        'total_items': 'totalItems'
    }
    def __init__(self, cursor=None, items=None, limit=None, more_items=None, offset=None, sort=None, total_items=None, _configuration=None):  # noqa: E501
        """PagedMonitoredApplicationDTO - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties below; None means "not set" and
        # such fields are skipped by the conditional assignments that follow.
        self._cursor = None
        self._items = None
        self._limit = None
        self._more_items = None
        self._offset = None
        self._sort = None
        self._total_items = None
        self.discriminator = None
        if cursor is not None:
            self.cursor = cursor
        if items is not None:
            self.items = items
        if limit is not None:
            self.limit = limit
        if more_items is not None:
            self.more_items = more_items
        if offset is not None:
            self.offset = offset
        if sort is not None:
            self.sort = sort
        if total_items is not None:
            self.total_items = total_items
    @property
    def cursor(self):
        """Gets the cursor of this PagedMonitoredApplicationDTO.  # noqa: E501

        The id at which the current (limited) search can be continued to obtain more matching items  # noqa: E501

        :return: The cursor of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: str
        """
        return self._cursor
    @cursor.setter
    def cursor(self, cursor):
        """Sets the cursor of this PagedMonitoredApplicationDTO.

        The id at which the current (limited) search can be continued to obtain more matching items  # noqa: E501

        :param cursor: The cursor of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: str
        """
        self._cursor = cursor
    @property
    def items(self):
        """Gets the items of this PagedMonitoredApplicationDTO.  # noqa: E501

        List of requested items  # noqa: E501

        :return: The items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: list[MonitoredApplicationDTO]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this PagedMonitoredApplicationDTO.

        List of requested items  # noqa: E501

        :param items: The items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: list[MonitoredApplicationDTO]
        """
        self._items = items
    @property
    def limit(self):
        """Gets the limit of this PagedMonitoredApplicationDTO.  # noqa: E501

        :return: The limit of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: int
        """
        return self._limit
    @limit.setter
    def limit(self, limit):
        """Sets the limit of this PagedMonitoredApplicationDTO.

        :param limit: The limit of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: int
        """
        self._limit = limit
    @property
    def more_items(self):
        """Gets the more_items of this PagedMonitoredApplicationDTO.  # noqa: E501

        Whether more items are available for return by increment offset or cursor  # noqa: E501

        :return: The more_items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: bool
        """
        return self._more_items
    @more_items.setter
    def more_items(self, more_items):
        """Sets the more_items of this PagedMonitoredApplicationDTO.

        Whether more items are available for return by increment offset or cursor  # noqa: E501

        :param more_items: The more_items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: bool
        """
        self._more_items = more_items
    @property
    def offset(self):
        """Gets the offset of this PagedMonitoredApplicationDTO.  # noqa: E501

        :return: The offset of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: int
        """
        return self._offset
    @offset.setter
    def offset(self, offset):
        """Sets the offset of this PagedMonitoredApplicationDTO.

        :param offset: The offset of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: int
        """
        self._offset = offset
    @property
    def sort(self):
        """Gets the sort of this PagedMonitoredApplicationDTO.  # noqa: E501

        :return: The sort of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: Sorting
        """
        return self._sort
    @sort.setter
    def sort(self, sort):
        """Sets the sort of this PagedMonitoredApplicationDTO.

        :param sort: The sort of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: Sorting
        """
        self._sort = sort
    @property
    def total_items(self):
        """Gets the total_items of this PagedMonitoredApplicationDTO.  # noqa: E501

        An estimate (lower-bound) of the total number of items available for return. May not be a tight estimate for facet queries  # noqa: E501

        :return: The total_items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :rtype: int
        """
        return self._total_items
    @total_items.setter
    def total_items(self, total_items):
        """Sets the total_items of this PagedMonitoredApplicationDTO.

        An estimate (lower-bound) of the total number of items available for return. May not be a tight estimate for facet queries  # noqa: E501

        :param total_items: The total_items of this PagedMonitoredApplicationDTO.  # noqa: E501
        :type: int
        """
        self._total_items = total_items
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively convert nested models (and lists/dicts of them).
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclasses dict
        # (it does not here, so this branch is a no-op).
        if issubclass(PagedMonitoredApplicationDTO, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PagedMonitoredApplicationDTO):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PagedMonitoredApplicationDTO):
            return True
        return self.to_dict() != other.to_dict()
| |
import afnumpy
import numpy
import afnumpy as af
import numpy as np
from asserts import *
import pytest
xfail = pytest.mark.xfail
def test_zeros():
    """afnumpy.zeros must match numpy.zeros (shape, dtype and values)."""
    gpu = afnumpy.zeros(3)
    cpu = numpy.zeros(3)
    iassert(gpu, cpu)
def test_fromstring():
    # NOTE(review): numpy.fromstring is deprecated in favour of
    # numpy.frombuffer; kept as-is here, presumably because afnumpy mirrors
    # the old numpy API — confirm before modernizing.
    iassert(afnumpy.fromstring('\x01\x02', dtype=numpy.uint8),numpy.fromstring('\x01\x02', dtype=numpy.uint8))
def test_ndarray_transpose():
    """afnumpy transposes must agree with numpy for 1D, 2D and 3D arrays."""
    # 2D: default reversal plus explicit axis orders.
    ref = numpy.random.random((2, 3))
    dev = afnumpy.array(ref)
    iassert(dev.transpose(), ref.transpose())
    iassert(dev.transpose(0, 1), ref.transpose(0, 1))
    iassert(dev.transpose(1, 0), ref.transpose(1, 0))
    # 1D: transpose is a no-op.
    ref = numpy.random.random((2))
    dev = afnumpy.array(ref)
    iassert(dev.transpose(), ref.transpose())
    # 3D: default reversal plus permutations given as tuple and as varargs.
    ref = numpy.random.random((2, 3, 4))
    dev = afnumpy.array(ref)
    iassert(dev.transpose(), ref.transpose())
    iassert(dev.transpose((2, 0, 1)), ref.transpose((2, 0, 1)))
    iassert(dev.transpose(2, 0, 1), ref.transpose(2, 0, 1))
def test_where():
    """afnumpy.where must agree with numpy.where in both calling forms
    (condition only, and condition with x/y choices), for integer masks,
    boolean masks, and multidimensional input."""
    a1 = afnumpy.array([1,2,3])
    b1 = numpy.array(a1)
    a2 = afnumpy.array([0,2,1])
    b2 = numpy.array(a2)
    # Test where with input as indices
    iassert(afnumpy.where(a2, a1, a2), numpy.where(b2, b1, b2))
    # Test where with input as indices
    iassert(afnumpy.where(a2), numpy.where(b2))
    # Test where with input as booleans
    iassert(afnumpy.where(a2 < 2, a1, a2), numpy.where(b2 < 2, b1, b2))
    # Test where with input as booleans
    iassert(afnumpy.where(a2 < 2), numpy.where(b2 < 2))
    # And now multidimensional
    a1 = afnumpy.array([[1,2,3],[4,5,6]])
    b1 = numpy.array(a1)
    a2 = afnumpy.array([[0,2,1],[1,0,1]])
    b2 = numpy.array(a2)
    # Test where with input as indices
    iassert(afnumpy.where(a2, a1, a2), numpy.where(b2, b1, b2))
    # Test where with input as indices
    iassert(afnumpy.where(a2), numpy.where(b2))
    # And now multidimensional
    b1 = numpy.random.random((3,3,3)) > 0.5
    a1 = afnumpy.array(b1)
    # Test where with input as indices
    iassert(afnumpy.where(a1), numpy.where(b1))
def test_array():
    """Round-trip construction: afnumpy.array must accept lists, numpy
    arrays, non-contiguous input and raw arrayfire arrays."""
    a = afnumpy.array([3])
    b = numpy.array([3])
    iassert(a, b)
    a = afnumpy.array([1,2,3])
    b = numpy.array([1,2,3])
    iassert(a, b)
    a = afnumpy.array(numpy.array([1,2,3]))
    b = numpy.array([1,2,3])
    iassert(a, b)
    a = afnumpy.array(numpy.array([1.,2.,3.]))
    b = numpy.array([1.,2.,3.])
    iassert(a, b)
    # Try multidimensional arrays
    a = afnumpy.array(numpy.array([[1.,2.,3.],[4.,5.,6.]]))
    b = numpy.array(a)
    iassert(a, b)
    # Check for non-contiguous input
    b = numpy.array([[1.,2.,3.],[4.,5.,6.]]).T
    a = afnumpy.array(b)
    iassert(a, b)
    # Check using arrayfire arrays; copy=False must share device memory.
    a = afnumpy.arrayfire.randu(10,5)
    b = afnumpy.array(a, copy=False)
    c = numpy.array(a)
    assert(a.device_ptr() == b.d_array.device_ptr())
    # NOTE(review): the transpose here presumably compensates for arrayfire's
    # column-major layout vs numpy's row-major — confirm.
    iassert(b,c.T)
def test_binary_arithmetic():
    """Elementwise +,-,*,/,**,% between arrays and scalars (both operand
    orders) must match numpy, including mixed-dtype scalar promotion."""
    a = afnumpy.random.rand(3)
    b = numpy.array(a)
    fassert(a+a, b+b)
    fassert(a+3, b+3)
    fassert(3+a, 3+b)
    fassert(a-a, b-b)
    fassert(a-3, b-3)
    fassert(3-a, 3-b)
    fassert(a*a, b*b)
    fassert(a*3, b*3)
    fassert(3*a, 3*b)
    fassert(a/a, b/b)
    fassert(a/3, b/3)
    fassert(3/a, 3/b)
    fassert(a**a, b**b)
    fassert(a**3, b**3)
    fassert(3**a, 3**b)
    fassert(a%a, b%b)
    fassert(a%3, b%3)
    fassert(3%a, 3%b)
    # Check for arguments of different types
    a = afnumpy.ones(3,dtype=numpy.uint32)
    b = numpy.array(a)
    fassert(a+3.0, b+3.0)
    # This is a tricky case we won't support for now
    # fassert(a+numpy.float32(3.0), b+numpy.float32(3.0))
    fassert(3.0+a, 3.0+b)
    fassert(a-3.0, b-3.0)
    fassert(3.0-a, 3.0-b)
    fassert(a*3.0, b*3.0)
    fassert(3.0*a, 3.0*b)
    fassert(a/3.0, b/3.0)
    fassert(3.0/a, 3.0/b)
    fassert(3/a, 3/b)
    fassert(a**3.0, b**3.0)
    fassert(3.0**a, 3.0**b)
    fassert(a%3.0, b%3.0)
    fassert(3.0%a, 3.0%b)
    # reflected op with a numpy scalar must still yield an afnumpy array
    assert(type(numpy.float32(3)+a) == afnumpy.ndarray)
def test_broadcast_binary_arithmetic():
    """Broadcasting of a (2,1) operand against a (2,3) operand for the
    four basic ops, in both the plain and in-place (augmented) forms."""
    a = afnumpy.random.rand(2,3)
    b = afnumpy.random.rand(2,1)
    c = numpy.array(a)
    d = numpy.array(b)
    fassert(a*b, c*d)
    a*=b
    c*=d
    fassert(a, c)
    fassert(a/b, c/d)
    a/=b
    c/=d
    fassert(a, c)
    fassert(a+b, c+d)
    a+=b
    c+=d
    fassert(a, c)
    fassert(a-b, c-d)
    a-=b
    c-=d
    fassert(a, c)
def test_augmented_assignment():
    """Augmented assignment (+=, -=, *=, /=) must operate in place: the
    device buffer address is checked before/after every op to prove no
    reallocation happened, and results must match numpy."""
    a = afnumpy.random.rand(3)
    b = numpy.array(a)
    mem_before = a.d_array.device_ptr()
    a += a
    assert mem_before == a.d_array.device_ptr()
    b += b
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a += 3
    assert mem_before == a.d_array.device_ptr()
    b += 3
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a -= a
    assert mem_before == a.d_array.device_ptr()
    b -= b
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a -= 3
    assert mem_before == a.d_array.device_ptr()
    b -= 3
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a *= a
    assert mem_before == a.d_array.device_ptr()
    b *= b
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a *= 3
    assert mem_before == a.d_array.device_ptr()
    b *= 3
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a /= a
    assert mem_before == a.d_array.device_ptr()
    b /= b
    fassert(a, b)
    mem_before = a.d_array.device_ptr()
    a /= 3
    assert mem_before == a.d_array.device_ptr()
    b /= 3
    fassert(a, b)
def test_unary_operators():
    """Unary - and + on float and boolean arrays.

    NOTE(review): for booleans the negation is compared against numpy's
    ~b, i.e. afnumpy maps unary minus on bools to logical not.
    """
    a = afnumpy.random.rand(3)
    b = numpy.array(a)
    fassert(-a, -b)
    fassert(+a, +b)
    b = numpy.random.randint(0,2,3).astype('bool')
    a = afnumpy.array(b)
    fassert(-a, ~b)
    fassert(+a, +b)
    # fassert(~a, ~b)
def test_comparisons():
    """All six rich comparisons between two arrays and between an array
    and a scalar (both orders) must match numpy elementwise."""
    a1 = afnumpy.random.rand(3)
    b1 = numpy.array(a1)
    a2 = afnumpy.random.rand(3)
    b2 = numpy.array(a2)
    iassert(a1 > a2, b1 > b2)
    iassert(a1 > 0.5, b1 > 0.5)
    iassert(0.5 > a1, 0.5 > b1)
    iassert(a1 >= a2, b1 >= b2)
    iassert(a1 >= 0.5, b1 >= 0.5)
    iassert(0.5 >= a1, 0.5 >= b1)
    iassert(a1 < a2, b1 < b2)
    iassert(a1 < 0.5, b1 < 0.5)
    iassert(0.5 < a1, 0.5 < b1)
    iassert(a1 <= a2, b1 <= b2)
    iassert(a1 <= 0.5, b1 <= 0.5)
    iassert(0.5 <= a1, 0.5 <= b1)
    iassert(a1 == a2, b1 == b2)
    iassert(a1 == 0.5, b1 == 0.5)
    iassert(0.5 == a1, 0.5 == b1)
    iassert(a1 != a2, b1 != b2)
    iassert(a1 != 0.5, b1 != 0.5)
    iassert(0.5 != a1, 0.5 != b1)
    # identity comparison against None must not be intercepted elementwise
    iassert(a1 is not None, b1 is not None)
def test_ndarray_all():
    """ndarray.all() for 1D/2D boolean arrays, with axis and keepdims."""
    b = numpy.random.randint(0,2,3).astype('bool')
    a = afnumpy.array(b)
    iassert(a.all(), b.all())
    iassert(a.all(axis=0), b.all(axis=0))
    b = numpy.random.randint(0,2,(3,2)).astype('bool')
    a = afnumpy.array(b)
    iassert(a.all(), b.all())
    iassert(a.all(axis=0), b.all(axis=0))
    iassert(a.all(keepdims=True), b.all(keepdims=True))
def test_sum():
    """Module-level afnumpy.sum with axis/keepdims on 1D and 2D input."""
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    fassert(afnumpy.sum(a), numpy.sum(b))
    fassert(afnumpy.sum(a,axis=0), numpy.sum(b,axis=0))
    fassert(afnumpy.sum(a,keepdims=True), numpy.sum(b,keepdims=True))
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    fassert(afnumpy.sum(a), numpy.sum(b))
    fassert(afnumpy.sum(a,axis=0), numpy.sum(b,axis=0))
def test_max():
    """ndarray.max on real input (complex max is skipped, see below)."""
    b = numpy.random.random(3)+numpy.random.random(3)*1.0j
    a = afnumpy.array(b)
    # Arrayfire uses the magnitude for max while numpy uses
    # the real part as primary key followed by the imaginary part
    # fassert(a.max(), b.max())
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    fassert(a.max(), b.max())
def test_min():
    """ndarray.min on real input (complex min is skipped, see below)."""
    b = numpy.random.random(3)+numpy.random.random(3)*1.0j
    a = afnumpy.array(b)
    # Arrayfire uses the magnitude for min while numpy uses
    # the real part as primary key followed by the imaginary part
    # fassert(a.min(), b.min())
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    fassert(a.min(), b.min())
def test_ndarray_abs():
    """abs() for complex (magnitude) and real arrays."""
    b = numpy.random.random(3)+numpy.random.random(3)*1.0j
    a = afnumpy.array(b)
    fassert(abs(a), abs(b))
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    fassert(abs(a), abs(b))
def test_getitem():
    """__getitem__: scalar indices, every slice shape (negative bounds,
    steps, reversed), Ellipsis, integer-array and boolean-mask indexing,
    on 1D through 4D arrays, all compared against numpy."""
    b = numpy.random.random((3))
    a = afnumpy.array(b)
    iassert(a[0], b[0])
    iassert(a[2], b[2])
    iassert(a[:], b[:])
    iassert(a[0:], b[0:])
    iassert(a[:-1], b[:-1])
    iassert(a[0:-1], b[0:-1])
    iassert(a[1:-1], b[1:-1])
    iassert(a[1:2], b[1:2])
    iassert(a[...,0], b[...,0])
    # This will return an empty array, which is not yet supported
    # iassert(a[1:1], b[1:1])
    iassert(a[-2:], b[-2:])
    iassert(a[-3:-1], b[-3:-1])
    iassert(a[1:-1:1], b[1:-1:1])
    iassert(a[1:-1:2], b[1:-1:2])
    iassert(a[::2], b[::2])
    iassert(a[::3], b[::3])
    iassert(a[::-1], b[::-1])
    iassert(a[::-2], b[::-2])
    iassert(a[-1::-1], b[-1::-1])
    iassert(a[-1:1:-1], b[-1:1:-1])
    iassert(a[-2::-1], b[-2::-1])
    iassert(a[-2:0:-1], b[-2:0:-1])
    iassert(a[-2::-2], b[-2::-2])
    iassert(a[-2::2], b[-2::2])
    iassert(a[([0],)], b[([0],)])
    # Now multidimensional!
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    iassert(a[:], b[:])
    iassert(a[0], b[0])
    iassert(a[:,2], b[:,2])
    iassert(a[1,:], b[1,:])
    iassert(a[:,::-1], b[:,::-1])
    # Boolean indexing
    d = numpy.random.random((2)) > 0.5
    c = afnumpy.array(d)
    iassert(a[c,:], b[d,:])
    b = numpy.random.random((2,3,1))
    a = afnumpy.array(b)
    iassert(a[:], b[:])
    b = numpy.random.random((2,3,1,2))
    a = afnumpy.array(b)
    iassert(a[:], b[:])
    iassert(a[1,:,:,:], b[1,:,:,:])
    iassert(a[1,:,0,:], b[1,:,0,:])
    iassert(a[1,1,:,:], b[1,1,:,:])
    d = numpy.array([0,2],dtype=numpy.int32)
    c = afnumpy.array(d)
    iassert(a[1,c,0,:], b[1,d,0,:])
    # Boolean indexing
    d = b > 0.5
    c = afnumpy.array(d)
    iassert(a[c], b[d])
    d = numpy.random.random((2,3)) > 0.5
    c = afnumpy.array(d)
    iassert(a[c,:], b[d,:])
    # Zero dimensional
    b = numpy.ones(())
    a = afnumpy.array(b)
    iassert(a[()],b[()])
    # Partial boolean indexing
    b = numpy.ones((3,3))
    a = afnumpy.array(b)
    d = numpy.ones((3)) > 0
    c = afnumpy.array(d)
    iassert(a[c],b[d])
    # Partial array indexing
    b = numpy.ones((3,3))
    a = afnumpy.array(b)
    d = numpy.array([0,1])
    c = afnumpy.array(d)
    iassert(a[c],b[d])
@xfail
def test_getitem_xfail():
    """Known-broken getitem cases, kept as expected failures."""
    # Slices that extend outside the array
    b = numpy.ones((3))
    a = afnumpy.array(b)
    iassert(a[1:4],b[1:4])
    # This case no longer works with current version of arrayfire, accessing a d_array with slice(None,-1,-1) does not give the expected output
    iassert(a[3::-1],b[3::-1])
def test_getitem_multi_array():
    """Indexing with one integer array per dimension (coordinate pairs)."""
    # Multidimensional array indexing
    b = numpy.random.random((2,2))
    a = afnumpy.array(b)
    d = numpy.array([0,1])
    c = afnumpy.array(d)
    iassert(a[c,c], b[d,d])
def test_newaxis():
    """Placeholder for newaxis support (assertion still disabled)."""
    b = numpy.random.random((3))
    a = afnumpy.array(b)
    # iassert(a[afnumpy.newaxis,:], b[numpy.newaxis,:])
def test_setitem():
    """__setitem__ with scalar, slice, integer-array and boolean-mask
    indices on 1D-4D arrays; device-pointer checks prove every write is
    in place (no buffer reallocation)."""
    b = numpy.random.random((3))
    a = afnumpy.array(b)
    mem_before = a.d_array.device_ptr()
    a[0] = 1;
    b[0] = 1;
    iassert(a, b)
    assert mem_before == a.d_array.device_ptr()
    a[:] = 2;
    b[:] = 2;
    assert mem_before == a.d_array.device_ptr()
    iassert(a, b)
    d = numpy.array([0,1],dtype=numpy.int32)
    c = afnumpy.array(d)
    a[c] = 3;
    b[d] = 3;
    assert mem_before == a.d_array.device_ptr()
    # Multidimensional
    # 2D
    b1 = numpy.random.random((2,2))
    b2 = numpy.random.random(2)
    a1 = afnumpy.array(b1)
    a2 = afnumpy.array(b2)
    mem_before = a1.d_array.device_ptr()
    a1[:] = 1
    b1[:] = 1
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[:,0] = a2[:]
    b1[:,0] = b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[c,0] = -a2[:]
    b1[d,0] = -b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[0,c] = a2[:]
    b1[0,d] = b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[0] = a2[:]
    b1[0] = b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    # 3D
    b1 = numpy.random.random((2,3,1))
    b2 = numpy.random.random((3,1))
    a1 = afnumpy.array(b1)
    a2 = afnumpy.array(b2)
    mem_before = a1.d_array.device_ptr()
    a1[0,:,:] = a2[:]
    b1[0,:,:] = b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[0] = a2[:]
    b1[0] = b2[:]
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    # 4D
    b1 = numpy.random.random((2,3,2,2))
    b2 = numpy.random.random((2,2))
    a1 = afnumpy.array(b1)
    a2 = afnumpy.array(b2)
    d = numpy.array([0,1],dtype=numpy.int32)
    c = afnumpy.array(d)
    mem_before = a1.d_array.device_ptr()
    a1[:,0,0,c] = a2
    b1[:,0,0,d] = b2
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    a1[1,2] = a2
    b1[1,2] = b2
    iassert(a1,b1)
    assert mem_before == a1.d_array.device_ptr()
    # Boolean indexing
    d = b > 0.5
    c = afnumpy.array(d)
    a[c] = 1
    b[d] = 1
    iassert(a, b)
    a[a < 0.3] = 1
    b[b < 0.3] = 1
    iassert(a, b)
    # Multidimensional Boolean
    a1[a1 < 0.3] = 1
    b1[b1 < 0.3] = 1
    iassert(a1, b1)
def test_setitem_multi_array():
    """Assignment through one index array per dimension."""
    # Multidimensional array indexing
    b = numpy.random.random((2,2))
    a = afnumpy.array(b)
    d = numpy.array([0,1])
    c = afnumpy.array(d)
    # This will fail because while multiple arrays
    # as indices in numpy treat the values given by
    # the arrays as the coordinates of the hyperslabs
    # to keep arrayfire does things differently.
    # In arrayfire each entry of each array gets combined
    # with all entries of all other arrays to define the coordinate
    # In numpy each entry only gets combined with the corresponding
    # entry in the other arrays.
    # For example if one has [0,1],[0,1] as the two arrays for numpy
    # this would mean that the coordinates retrieved would be [0,0],
    # [1,1] while for arrayfire it would be [0,0], [0,1], [1,0], [1,1].
    a[c,c] = c
    b[d,d] = d
    iassert(a, b)
def test_views():
    """Slices must be views: writing through the slice must be visible
    in the parent, and repeated slicing must hit the same device buffer."""
    b = numpy.random.random((3,3))
    a = afnumpy.array(b)
    a[0] = 1
    b[0] = 1
    c = a[0]
    d = b[0]
    c[:] = 0
    d[:] = 0
    iassert(a,b)
    # two independent row views must resolve to the same device pointer
    assert a[0,:].d_array.device_ptr() == a[0,:].d_array.device_ptr()
    # There is currently no way to get views with stride[0] > 1
    # assert a[:,0].d_array.device_ptr() == a[:,0].d_array.device_ptr()
    b = numpy.random.random((3))
    a = afnumpy.array(b)
    c = a[...,0]
    assert a.d_array.device_ptr() == c.d_array.device_ptr()
    d = b[...,0]
    c[()] = 0
    d[()] = 0
    iassert(a,b)
def test_ndarray_astype():
    """dtype conversion to integer and complex types."""
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    iassert(a.astype(numpy.uint8),b.astype(numpy.uint8))
    iassert(a.astype(numpy.complex128),b.astype(numpy.complex128))
def test_ndarray_len():
    """len() must return the first-axis length, as in numpy."""
    b = numpy.random.random(3)
    a = afnumpy.array(b)
    assert(len(a) == len(b))
    b = numpy.random.random((3,3))
    a = afnumpy.array(b)
    assert(len(a) == len(b))
def test_vstack():
    """vstack of a single array and of a tuple of arrays."""
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    iassert(afnumpy.vstack(a), numpy.vstack(b))
    iassert(afnumpy.vstack((a,a)), numpy.vstack((b,b)))
def test_hstack():
    """hstack of a single array and of a tuple of arrays."""
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    iassert(afnumpy.hstack(a), numpy.hstack(b))
    iassert(afnumpy.hstack((a,a)), numpy.hstack((b,b)))
def test_empty_ndarray():
    """Zero-dimensional and zero-length arrays, plus empty slices."""
    a = afnumpy.zeros(())
    b = numpy.zeros(())
    iassert(a,b)
    a = afnumpy.ndarray(0)
    b = numpy.ndarray(0)
    iassert(a,b)
    a = afnumpy.ndarray((0,))
    b = numpy.ndarray((0,))
    iassert(a,b)
    a = afnumpy.zeros(3)
    b = numpy.zeros(3)
    iassert(a[0:0],b[0:0])
def test_arange():
    """arange with stop, start/stop, negative step and explicit dtype."""
    iassert(afnumpy.arange(10), numpy.arange(10))
    iassert(afnumpy.arange(1,10), numpy.arange(1,10))
    iassert(afnumpy.arange(10,1,-1), numpy.arange(10,1,-1))
    iassert(afnumpy.arange(10,1,-1,dtype=numpy.int32), numpy.arange(10,1,-1,dtype=numpy.int32))
def test_ndarray_shape():
    """In-place reshape through the shape attribute setter."""
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    a.shape = (3,2)
    b.shape = (3,2)
    fassert(a,b)
def test_ndarray_round():
    """ndarray.round() must match numpy's rounding."""
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    fassert(a.round(), b.round())
def test_ndarray_take():
    """ndarray.take with flat indices and with an explicit axis."""
    b = numpy.array([4, 3, 5, 7, 6, 8])
    a = afnumpy.array(b)
    indices = [0, 1, 4]
    iassert(a.take(indices), b.take(indices))
    b = numpy.random.random((2,3))
    a = afnumpy.array(b)
    iassert(a.take([0,1],axis=1), b.take([0,1],axis=1))
    iassert(a.take([0,1]), b.take([0,1]))
def test_ndarray_min():
    """ndarray.min with axis and keepdims."""
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(a.min(), b.min())
    fassert(a.min(axis=1), b.min(axis=1))
    fassert(a.min(axis=1, keepdims=True), b.min(axis=1, keepdims=True))
def test_ndarray_max():
    """ndarray.max with axis and keepdims."""
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(a.max(), b.max())
    fassert(a.max(axis=1), b.max(axis=1))
    fassert(a.max(axis=1, keepdims=True), b.max(axis=1, keepdims=True))
def test_ndarray_sum():
    """ndarray.sum with single axis, axis tuples, keepdims and 0-d input.

    Bug fix: the zero-dimensional case previously built its reference
    with ``afnumpy.array(a)``, so the last assertion compared afnumpy
    against afnumpy and never validated against numpy. The reference is
    now a numpy array, consistent with every other test in this file.
    """
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(a.sum(), b.sum())
    fassert(a.sum(axis=1), b.sum(axis=1))
    fassert(a.sum(axis=1, keepdims=True), b.sum(axis=1, keepdims=True))
    fassert(a.sum(axis=(0,1), keepdims=True), b.sum(axis=(0,1), keepdims=True))
    fassert(a.sum(axis=(0,1)), b.sum(axis=(0,1)))
    # Zero-dimensional input
    a = afnumpy.random.random(())
    b = numpy.array(a)
    fassert(a.sum(), b.sum())
def test_ndarray_conj():
    """Complex conjugation; see arrayfire issue #914 for the cast."""
    # The weird astype is because of issue #914 in arrayfire
    a =afnumpy.random.random((2,3)).astype(numpy.complex64)+1.0j
    b = numpy.array(a)
    fassert(a.conj(), b.conj())
def test_empty():
    """afnumpy.empty must be writable and round-trip through numpy."""
    a = afnumpy.empty((2,3))
    b = numpy.array(a)
    a[:] = 1
    b[:] = 1
    fassert(a,b)
def test_ndarray_T():
    """The .T property on 2D (transposed) and 1D (no-op) arrays."""
    x = numpy.array([[1.,2.],[3.,4.]])
    y = afnumpy.array(x)
    fassert(y.T,x.T)
    x = numpy.array([1.,2.,3.,4.])
    y = afnumpy.array(x)
    fassert(y.T,x.T)
def test_ndarray_any():
    """ndarray.any(), overall and along an axis."""
    x = numpy.array([[True, False], [True, True]])
    y = afnumpy.array(x)
    iassert(y.any(),x.any())
    iassert(y.any(axis=0),x.any(axis=0))
def test_ndarray_real():
    """The .real attribute must match numpy and be writable in place.

    Bug fix: this test referenced the undefined names ``np`` and ``af``;
    the rest of the file consistently imports the modules as ``numpy``
    and ``afnumpy`` (no aliases), so the test always raised NameError.
    """
    x = numpy.sqrt([1+0j, 0+1j])
    y = afnumpy.array(x)
    fassert(y.real, x.real)
    # .real must be a writable view into the original array
    y.real[:] = 0
    x.real[:] = 0
    fassert(y, x)
def test_ndarray_imag():
    """The .imag attribute for complex and purely real arrays.

    Bug fix: this test referenced the undefined names ``np`` and ``af``;
    the rest of the file consistently imports the modules as ``numpy``
    and ``afnumpy`` (no aliases), so the test always raised NameError.
    """
    x = numpy.sqrt([1+0j, 0+1j])
    y = afnumpy.array(x)
    fassert(y.imag, x.imag)
    # .imag must be a writable view into the original array
    y.imag[:] = 0
    x.imag[:] = 0
    fassert(y, x)
    # real input: .imag is all zeros
    x = numpy.sqrt([1.0, 0.0])
    y = afnumpy.array(x)
    fassert(y.imag, x.imag)
def test_ndarray_strides():
    """Stride reporting for the full array and various slices."""
    a = afnumpy.random.random((4,3))
    b = numpy.array(a)
    iassert(a.strides, b.strides)
    iassert(a[:,:].strides, b[:,:].strides)
    iassert(a[1:,:].strides, b[1:,:].strides)
    iassert(a[:,1:].strides, b[:,1:].strides)
    # The following cases fails for arrayfire < 3.3 as the stride
    # hack requires at least 2 elements per dimension
    iassert(a[3:,:].strides, b[3:,:].strides)
    iassert(a[2:,2:].strides, b[2:,2:].strides)
    iassert(a[3,:2].strides, b[3,:2].strides)
@xfail
def test_ndarray_strides_xfail():
    """Known failure: stride info lost for leading size-1 dimensions."""
    # The following case fails as arrayfire always drops
    # leading dimensions of size 1 and so the stride
    # information is missing
    a = afnumpy.random.random((4,3))
    b = numpy.array(a)
    iassert(a[3:,:2].strides, b[3:,:2].strides)
def test_ndarray_copy():
    """ndarray.copy() must produce an equal, independent array."""
    b = numpy.random.random((3,3))
    a = afnumpy.array(b)
    iassert(a.copy(), b.copy())
def test_ndarray_nonzero():
    """ndarray.nonzero() index tuples must match numpy."""
    b = numpy.random.random((3,3,3)) > 0.5
    a = afnumpy.array(b)
    iassert(a.nonzero(), b.nonzero())
def test_ndarray_constructor():
    """ndarray construction from af_array handles and raw device/host
    buffers, including shared-memory semantics and backend validation."""
    a = afnumpy.arrayfire.randu(3,2)
    # dims in arrayfire (column-major) order must be rejected
    with pytest.raises(ValueError):
        b = afnumpy.ndarray(a.dims(), dtype='f', af_array = a)
    # This one should be fine
    b = afnumpy.ndarray(a.dims()[::-1], dtype='f', af_array = a)
    c = afnumpy.ndarray(a.dims()[::-1], dtype='f', buffer=a.raw_ptr(),
                        buffer_type=afnumpy.arrayfire.get_active_backend())
    d = afnumpy.ndarray(a.dims()[::-1], dtype='f', buffer=a.raw_ptr(),
                        buffer_type=afnumpy.arrayfire.get_active_backend())
    # Make sure they share the same underlying data
    d[0,0] = 3
    assert d[0,0] == c[0,0]
    if afnumpy.arrayfire.get_active_backend() == 'cpu':
        c = numpy.ones((3,2))
        d = afnumpy.ndarray(c.shape, dtype=c.dtype, buffer=c.ctypes.data,
                            buffer_type=afnumpy.arrayfire.get_active_backend())
        # Make sure they share the same underlying data
        d[0,0] = -1
        assert d[0,0] == c[0,0]
        with pytest.raises(ValueError):
            # Check for wrong backend
            b = afnumpy.ndarray(c.shape, dtype=c.dtype, buffer=c.ctypes.data,
                                buffer_type='cuda')
def test_flatten():
    """flatten() in all order modes, and that the result is a copy."""
    b = numpy.random.random((3,3,3))
    a = afnumpy.array(b)
    iassert(a.flatten(), b.flatten())
    iassert(a.flatten(order='C'), b.flatten(order='C'))
    iassert(a.flatten(order='K'), b.flatten(order='K'))
    iassert(a.flatten(order='A'), b.flatten(order='A'))
    # test if it's a copy
    d = b.flatten()
    c = a.flatten()
    b[0] += 1
    a[0] += 1
    iassert(c.flatten(), d.flatten())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
import csv
import numpy as np
def init_palette():
    """Apply the ATLAS plotting style with the kDeepSea (51) palette.

    Imports rootpy lazily so the module can be imported without it.
    Returns True on success.
    """
    from rootpy.plotting.style import set_style, get_style
    atlas = get_style("ATLAS")
    atlas.SetPalette(51)
    set_style(atlas)
    return True
def nbins(start, stop, step):
    """Number of fixed-width bins covering [start, stop) at width *step*.

    The span/step ratio is truncated toward zero and the sign discarded,
    so argument order and step sign do not matter.
    """
    span = stop - start
    return abs(int(span / step))
def init_canvas(x, y, name="c", topMargin=0.07, rightMargin=0.16):
    """Create an x-by-y pixel TCanvas with the given margins.

    Bug fix: the *name* parameter was previously ignored — the canvas
    was always registered under the hard-coded name "c", so requesting
    two differently named canvases still collided in ROOT's global
    object registry.
    """
    c = ROOT.TCanvas(name, "", 0, 0, x, y)
    c.SetTopMargin(topMargin)
    c.SetRightMargin(rightMargin)
    return c
def axis_labels(
    x_label="m(#tilde{g}) [GeV]",
    y_label="m(#tilde{#chi}^{0}_{1}) [GeV]",
    z_label="",
    title="",
):
    """Assemble ROOT's "title;x;y;z" combined title/axis-label string."""
    pieces = (title, x_label, y_label, z_label)
    return ";".join(pieces)
def init_hist(label, x_min, x_max, y_min, y_max, x_bin_size, y_bin_size, name="grid"):
    """Create an empty 2D grid histogram over the signal-model plane.

    *label* becomes the z-axis title; the x/y axes carry the default
    gluino/neutralino mass labels from axis_labels().
    """
    return ROOT.TH2F(
        name,
        axis_labels(z_label=label),
        nbins(x_min, x_max, x_bin_size),
        x_min,
        x_max,
        nbins(y_min, y_max, y_bin_size),
        y_min,
        y_max,
    )
def fill_hist(hist, plot_array, label, skipNegativeSig=True):
    """Fill *hist* with plot_array[label], binned by (mgluino, mlsp).

    When several points land in one bin the maximum value wins (sensible
    for significances; see inline note). Points with non-positive "sig"
    get the sentinel content 0.01 unless skipNegativeSig is False.
    NOTE(review): ROOT.Long is Python-2-era PyROOT; modern PyROOT uses
    ctypes.c_int for GetBinXYZ output arguments.
    """
    for i in range(len(plot_array[label])):
        g = int(plot_array["mgluino"][i])
        l = int(plot_array["mlsp"][i])
        z = plot_array[label][i]
        sig = plot_array["sig"][i]
        b = hist.FindFixBin(g, l)
        if (sig > 0) or not (skipNegativeSig):
            # recover (x,y,z) bin indices from the global bin number
            xx = ROOT.Long(0)
            yy = ROOT.Long(0)
            zz = ROOT.Long(0)
            hist.GetBinXYZ(b, xx, yy, zz)
            z_old = hist.GetBinContent(xx, yy)
            newz = max(
                z_old, z
            )  # for significances this makes sense. For the other quantities not so much. Oh well.
            hist.SetBinContent(b, newz)
        else:
            hist.SetBinContent(b, 0.01)
def draw_hist(hist, nSigs=1, markercolor=0, drawOpts="TEXT45 COLZ", markerSize=800):
    """Draw *hist* with bin values printed to *nSigs* decimal places."""
    hist.SetMarkerSize(markerSize)
    hist.SetMarkerColor(markercolor)
    # gStyle.SetPalette(51)
    # global text format for the TEXT draw option, e.g. "1.1f"
    ROOT.gStyle.SetPaintTextFormat("1.{0:d}f".format(nSigs))
    hist.Draw(drawOpts)
def draw_labels(
    lumi,
    label="#tilde{g}#kern[0.1]{#tilde{g}} production, #tilde{g} #rightarrow t#bar{t} + #tilde{#chi}^{0}_{1}, m(#tilde{q}) >> m(#tilde{g})",
    internal=True,
    simulation=True,
):
    """Stamp the ATLAS label, status qualifier, process label and the
    sqrt(s)/luminosity line on the current pad (NDC coordinates)."""
    txt = ROOT.TLatex()
    txt.SetNDC()
    if internal != simulation:  # this is xor
        txt.DrawText(0.325, 0.87, "Internal" if internal else "Simulation")
    if internal and simulation:
        txt.DrawText(0.325, 0.87, "Simulation")
        txt.DrawText(0.5, 0.87, "Internal")
    # txt.SetTextSize(0.030)
    txt.SetTextSize(18)
    txt.DrawLatex(0.16, 0.95, label)
    txt.DrawLatex(0.62, 0.95, "#sqrt{{s}} = 13 TeV, {0:0.1f} fb^{{-1}}".format(lumi))
    # bold italic font for the "ATLAS" stamp itself
    txt.SetTextFont(72)
    txt.SetTextSize(0.05)
    txt.DrawText(0.2, 0.87, "ATLAS")
def draw_text(path):
    """Draw annotations listed in a CSV file of "x,y,text" rows.

    Coordinates are NDC. A *path* of None is a silent no-op.
    """
    if path is None:
        return
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextSize(0.030)
    with open(path, "r") as annotations:
        for row in csv.reader(annotations, delimiter=","):
            latex.DrawLatex(float(row[0]), float(row[1]), row[2])
def draw_line(x_min, y_min, x_max, y_max, topmass=173.34):
    """Draw the kinematic boundary m(LSP) = m(gluino) - 2*m(top) across
    the visible plot window, plus a rotated "Kinematically Forbidden"
    label above it."""
    l = ROOT.TLine(1000, 1000, 2000, 2000)
    l.SetLineStyle(2)
    # clip the diagonal to the plot window: pick the entry point...
    if x_min - 2 * topmass > y_min:
        line_min_x = x_min
        line_min_y = x_min - 2 * topmass
    else:
        line_min_x = y_min + 2 * topmass
        line_min_y = y_min
    # ...and the exit point
    if x_max - 2 * topmass > y_max:
        line_max_x = y_max + 2 * topmass
        line_max_y = y_max
    else:
        line_max_x = x_max
        line_max_y = x_max - 2 * topmass
    l.DrawLine(line_min_x, line_min_y, line_max_x, line_max_y)
    # slope should be one as it's: LSP < Gluino - 2*topmass
    slope = float(line_max_y - line_min_y) / (line_max_x - line_min_x)
    # Draw Kinematically Forbidden as well
    txt = ROOT.TLatex()
    # txt.SetNDC()
    txt.SetTextFont(12)
    # rotate the label to follow the boundary line
    txt.SetTextAngle(np.degrees(np.arctan(slope)))
    txt.SetTextSize(0.02)
    txt.DrawText(
        (line_max_x + line_min_x) / 2.0,
        (line_max_x + line_min_x) / 2.0 - 2 * topmass + 125,
        "Kinematically Forbidden",
    )
from array import array
def get_run1(filename, linestyle, linewidth, linecolor):
    """Load a space-separated two-column limit curve into a styled TGraph."""
    xs = array("f")
    ys = array("f")
    points = 0
    with open(filename, "r") as curve_file:
        for row in csv.reader(curve_file, delimiter=" "):
            xs.append(float(row[0]))
            ys.append(float(row[1]))
            points += 1
    graph = ROOT.TGraph(points, xs, ys)
    graph.SetLineStyle(linestyle)
    graph.SetLineWidth(linewidth)
    graph.SetLineColor(linecolor)
    return graph
def draw_run1_text(color):
    """Stamp a "Run 1 Limit" label (NDC coords) in the given color."""
    stamp = ROOT.TLatex()
    stamp.SetNDC()
    stamp.SetTextFont(22)
    stamp.SetTextSize(0.04)
    stamp.SetTextColor(color)
    stamp.DrawText(0.2, 0.2, "Run 1 Limit")
def get_run2(filename, linestyle, linewidth, linecolor):
    """Load a space-separated two-column limit curve into a styled TGraph.

    NOTE(review): identical logic to get_run1; kept as a separate entry
    point for callers that reference it by name.
    """
    xs = array("f")
    ys = array("f")
    points = 0
    with open(filename, "r") as curve_file:
        for row in csv.reader(curve_file, delimiter=" "):
            xs.append(float(row[0]))
            ys.append(float(row[1]))
            points += 1
    graph = ROOT.TGraph(points, xs, ys)
    graph.SetLineStyle(linestyle)
    graph.SetLineWidth(linewidth)
    graph.SetLineColor(linecolor)
    return graph
def draw_run2_text(color):
    """Stamp a 45-degree "Run 2 Limit" label (NDC coords) in *color*."""
    stamp = ROOT.TLatex()
    stamp.SetNDC()
    stamp.SetTextAngle(45)
    stamp.SetTextFont(22)
    stamp.SetTextSize(0.04)
    stamp.SetTextColor(color)
    stamp.DrawText(0.35, 0.35, "Run 2 Limit")
def exclusion():
    """Build (without drawing) a hatched polygon marking an excluded
    region of the mass plane; the caller is responsible for drawing it."""
    corners_x = array("d", [1400, 1600, 1600, 1400])
    corners_y = array("d", [600, 600, 800, 600])
    region = ROOT.TPolyLine(4, corners_x, corners_y)
    region.SetFillColor(1)
    region.SetFillStyle(3001)
    # p.DrawPolyLine(4,x,y)
    return region
| |
import threading
from socket import AF_UNSPEC
from pyroute2.netlink.rtnl.rtmsg import rtmsg
from pyroute2.netlink.rtnl.req import IPRouteRequest
from pyroute2.ipdb.transactional import Transactional
class Metrics(Transactional):
    """Transactional view over a route's RTAX_* metrics (mtu, window, ...).

    The writable field set is derived from the metric NLA names that
    rtmsg.metrics knows about.
    """

    def __init__(self, *argv, **kwarg):
        Transactional.__init__(self, *argv, **kwarg)
        field_names = []
        for nla in rtmsg.metrics.nla_map:
            field_names.append(rtmsg.metrics.nla2name(nla[0]))
        self._fields = field_names
class Route(Transactional):
    """Transactional representation of one kernel routing-table entry.

    Instances are populated from rtmsg netlink messages via
    load_netlink() and pushed back to the kernel via commit().
    """

    def __init__(self, ipdb, mode=None, parent=None, uid=None):
        Transactional.__init__(self, ipdb, mode, parent, uid)
        # set to True once the route has been seen in / pushed to the kernel
        self._exists = False
        # signalled each time a netlink update for this route is merged
        self._load_event = threading.Event()
        self._fields = [rtmsg.nla2name(i[0]) for i in rtmsg.nla_map]
        self._fields.append('flags')
        self._fields.append('src_len')
        self._fields.append('dst_len')
        self._fields.append('table')
        self._fields.append('removal')
        # raw message keys dropped after every load_netlink() merge
        self.cleanup = ('attrs',
                        'header',
                        'event')
        with self._direct_state:
            self['metrics'] = Metrics(parent=self)

    def load_netlink(self, msg):
        """Merge a kernel rtmsg into this object without starting a transaction."""
        with self._direct_state:
            self._exists = True
            self.update(msg)

            # merge key
            for (name, value) in msg['attrs']:
                norm = rtmsg.nla2name(name)
                # normalize RTAX: metrics arrive as a nested NLA list and are
                # folded into the (possibly pre-existing) Metrics object
                if norm == 'metrics':
                    ret = self.get(norm, Metrics(parent=self))
                    with ret._direct_state:
                        for (rtax, rtax_value) in value['attrs']:
                            rtax_norm = rtmsg.metrics.nla2name(rtax)
                            ret[rtax_norm] = rtax_value
                    self[norm] = ret
                else:
                    self[norm] = value

            # 'dst' is the dictionary key for the route: "prefix/len" or 'default'
            if msg.get_attr('RTA_DST', None) is not None:
                dst = '%s/%s' % (msg.get_attr('RTA_DST'),
                                 msg['dst_len'])
            else:
                dst = 'default'
            self['dst'] = dst
            # finally, cleanup all not needed
            for item in self.cleanup:
                if item in self:
                    del self[item]

            self.sync()

    def sync(self):
        # wake up any reload() waiting for the next kernel update
        self._load_event.set()

    def reload(self):
        # do NOT call get_routes() here, it can cause race condition
        self._load_event.wait()
        return self

    def commit(self, tid=None, transaction=None, rollback=False):
        """Push pending changes to the kernel.

        On failure the route is rolled back to the pre-commit snapshot;
        if the rollback itself fails, a RuntimeError carrying the original
        exception in .cause is raised.
        """
        self._load_event.clear()
        error = None

        if tid:
            transaction = self._transactions[tid]
        else:
            transaction = transaction or self.last()

        # create a new route
        if not self._exists:
            try:
                self.nl.route('add', **IPRouteRequest(self))
            except Exception:
                # creation failed: detach from IPDB so the object is not reused
                self.nl = None
                self.ipdb.routes.remove(self)
                raise

        # work on existing route
        snapshot = self.pick()
        try:
            # route set: only issue a 'set' if the diff carries real payload
            request = IPRouteRequest(transaction - snapshot)
            if any([request[x] not in (None, {'attrs': []}) for x in request]):
                self.nl.route('set', **IPRouteRequest(transaction))

            if transaction.get('removal'):
                self.nl.route('delete', **IPRouteRequest(snapshot))
        except Exception as e:
            if not rollback:
                # first failure: try to restore the snapshot (recursive commit)
                ret = self.commit(transaction=snapshot, rollback=True)
                if isinstance(ret, Exception):
                    error = ret
                else:
                    error = e
            else:
                # failure while rolling back: give up and surface the cause
                self.drop()
                x = RuntimeError()
                x.cause = e
                raise x

        if not rollback:
            self.drop()
            self.reload()

        if error is not None:
            error.transaction = transaction
            raise error

        return self

    def remove(self):
        # mark for deletion; the actual netlink 'delete' happens in commit()
        self['removal'] = True
        return self
class RoutingTables(dict):
    """Container of kernel routing tables, keyed by table id.

    Each table maps a destination key ("prefix/len" or 'default') to a
    Route. The mapping-protocol methods operate on the main table (254).
    """

    def __init__(self, ipdb):
        dict.__init__(self)
        self.ipdb = ipdb
        # pre-create the main table (254)
        self.tables = {254: {}}

    def add(self, spec=None, **kwarg):
        '''
        Create a route from a dictionary
        '''
        spec = spec or kwarg
        table = spec.get('table', 254)
        assert 'dst' in spec
        route = Route(self.ipdb)
        # 'metrics' is a nested Transactional and must be merged separately
        metrics = spec.pop('metrics', {})
        route.update(spec)
        route.metrics.update(metrics)
        if table not in self.tables:
            self.tables[table] = dict()
        self.tables[table][route['dst']] = route
        # open a transaction so the caller can commit() the new route
        route.begin()
        return route

    def load_netlink(self, msg):
        '''
        Loads an existing route from a rtmsg
        '''
        table = msg.get('table', 254)
        if table not in self.tables:
            self.tables[table] = dict()

        dst = msg.get_attr('RTA_DST', None)
        if dst is None:
            key = 'default'
        else:
            key = '%s/%s' % (dst, msg.get('dst_len', 0))

        if key in self.tables[table]:
            # update an existing route in place
            ret = self.tables[table][key]
            ret.load_netlink(msg)
        else:
            # previously unseen route: create and register it
            ret = Route(ipdb=self.ipdb)
            ret.load_netlink(msg)
            self.tables[table][key] = ret
        return ret

    def remove(self, route, table=None):
        # accepts either a Route object or a destination key
        if isinstance(route, Route):
            table = route.get('table', 254)
            route = route.get('dst', 'default')
        else:
            table = table or 254
        del self.tables[table][route]

    def get(self, dst, table=None):
        table = table or 254
        return self.tables[table][dst]

    def keys(self, table=254, family=AF_UNSPEC):
        # AF_UNSPEC matches every address family
        return [x['dst'] for x in self.tables[table].values()
                if (x['family'] == family) or (family == AF_UNSPEC)]

    def has_key(self, key, table=254):
        return key in self.tables[table]

    def __contains__(self, key):
        return key in self.tables[254]

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, value):
        assert key == value['dst']
        return self.add(value)

    def __delitem__(self, key):
        return self.remove(key)
| |
#!/usr/bin/env python
# vim: et :
import logging
import re
import sys
import time
# make the in-tree nipap package importable when running from this directory
sys.path.append('../../nipap/')
# root logger collects everything; the console handler filters to WARNING+
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_format = "%(levelname)-8s %(message)s"
log_stream = logging.StreamHandler()
log_stream.setFormatter(logging.Formatter("%(asctime)s: " + log_format))
log_stream.setLevel(logging.WARNING)
logger.addHandler(log_stream)
import nipap.backend
class bonk:
    """Ad-hoc benchmark driver for the nipap backend.

    NOTE(review): Python 2 only (print statements, `print >>`, xrange).
    All operations run against the hard-coded schema 'test-schema'.
    """
    def __init__(self):
        # backend connection shared by every benchmark method
        self.n = nipap.backend.Nipap()
        #self.n.remove_prefix({ 'name': 'test-schema' }, { 'prefix': '2.0.0.0/8' })
        #self.n.add_prefix({'name': 'test-schema' }, { 'prefix': '2.0.0.0/8', 'description': 'test' })

    def clear_db(self):
        """ Clear out everything in the database
        """
        self.n._execute("TRUNCATE ip_net_plan CASCADE")

    def init_db(self):
        """ Initialise a few things we need in the db, a schema, a pool, a truck
        """

    def find_prefix(self, argp):
        """ Time list_prefix() for every /32 host address inside *argp*
            (an IPv4 "prefix/len" string); prints per-/24 timings.
        """
        arg_prefix = argp.split("/")[0]
        arg_pl = argp.split("/")[1]
        if self.n._is_ipv4(arg_prefix):
            # NOTE(review): non-raw regex; works, but r"..." would be cleaner
            m = re.match("([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)", arg_prefix)
            os1 = int(m.group(1))
            os2 = int(m.group(2))
            os3 = int(m.group(3))
            os4 = int(m.group(4))
            # number of host addresses contained in the prefix
            count = 2**(32-int(arg_pl))
            i = 0
            t0 = time.time()
            try:
                # iterate octet by octet from the prefix base address;
                # StopIteration breaks out of all four nested loops
                for o1 in xrange(os1, 255):
                    for o2 in xrange(os2, 255):
                        for o3 in xrange(os3, 255):
                            t2 = time.time()
                            for o4 in xrange(os4, 255):
                                prefix = "%s.%s.%s.%s" % (o1, o2, o3, o4)
                                self.n.list_prefix({ 'name': 'test-schema'}, { 'prefix': prefix })
                                i += 1
                                if i >= count:
                                    raise StopIteration()
                            t3 = time.time()
                            # average seconds per lookup in the last /24
                            print o3, (t3-t2)/256
            except StopIteration:
                pass
            t1 = time.time()
            print count, "prefixes found in:", t1-t0
        elif self.n._is_ipv6(argp):
            print >> sys.stderr, "IPv6 is currently unsupported"

    def fill_prefix(self, argp):
        """ Fill the specified prefix with hosts (/32s or /128s for IPv[46])
        """
        arg_prefix = argp.split("/")[0]
        arg_pl = argp.split("/")[1]
        netcount = 0
        if self.n._is_ipv4(arg_prefix):
            m = re.match("([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)", arg_prefix)
            os1 = int(m.group(1))
            os2 = int(m.group(2))
            os3 = int(m.group(3))
            os4 = int(m.group(4))
            # number of host addresses to insert
            count = 2**(32-int(arg_pl))
            i = 0
            t0 = time.time()
            try:
                for o1 in xrange(os1, 255):
                    for o2 in xrange(os2, 255):
                        for o3 in xrange(os3, 255):
                            t2 = time.time()
                            for o4 in xrange(os4, 255):
                                prefix = "%s.%s.%s.%s" % (o1, o2, o3, o4)
                                self.n.add_prefix({'name': 'test-schema' }, { 'prefix': prefix, 'description': 'test' })
                                i += 1
                                if i >= count:
                                    raise StopIteration()
                            t3 = time.time()
                            # per-/24 insert timing
                            print netcount, (t3-t2)/256
                            netcount += 1
            except StopIteration:
                pass
            t1 = time.time()
            print count, "prefixes added in:", t1-t0
        elif self.n._is_ipv6(argp):
            print >> sys.stderr, "IPv6 is currently unsupported"

    def find_free_prefix(self, argp):
        # time a single find_free_prefix call inside *argp*
        t0 = time.time()
        prefix = self.n.find_free_prefix({ 'name': 'test-schema' }, { 'from-prefix': [argp], 'prefix_length': 32 })
        t1 = time.time()
        d1 = t1-t0
        print "First free prefix:", prefix, "found in", d1, "seconds"

    def add_prefix(self, argp):
        # time a single add_prefix call
        t0 = time.time()
        prefix = self.n.add_prefix({ 'name': 'test-schema' }, { 'prefix': argp, 'description': 'test' })
        t1 = time.time()
        d1 = t1-t0
        print "Add prefix:", argp, "took", d1, "seconds"

    def remove_prefix(self, argp):
        # time a single remove_prefix call
        t0 = time.time()
        prefix = self.n.remove_prefix({ 'name': 'test-schema' }, { 'prefix': argp })
        t1 = time.time()
        d1 = t1-t0
        print "Delete prefix:", argp, "took", d1, "seconds"

    def prefix_insert(self, argp):
        pass

    def test1(self):
        # compare find_free_prefix cost before and after inserting 500 prefixes
        t0 = time.time()
        res = self.n.find_free_prefix({ 'schema_name': 'test-schema', 'from-prefix': ['1.0.0.0/8'] }, 32, 1)
        t1 = time.time()
        res = self.n.find_free_prefix({ 'schema_name': 'test-schema', 'from-prefix': ['1.0.0.0/8'] }, 32, 500)
        t2 = time.time()
        for prefix in res:
            self.n.add_prefix({ 'schema_name': 'test-schema', 'prefix': prefix, 'description': 'test' })
        t3 = time.time()
        res = self.n.find_free_prefix({ 'schema_name': 'test-schema', 'from-prefix': ['1.0.0.0/8'] }, 32, 1)
        t4 = time.time()
        d1 = t1-t0
        d2 = t2-t1
        d3 = t3-t2
        d4 = t4-t3
        d5 = d4-d1
        print "First find free prefix:", d1
        print "First find of 500 prefixes:", d2
        print "Adding 500 prefixes", d3
        print "Find one prefix after", d4
        print "Diff", d5

    def test2(self):
        # single timed find_free_prefix in 2.0.0.0/8
        t0 = time.time()
        res = self.n.find_free_prefix({ 'schema_name': 'test-schema', 'from-prefix': ['2.0.0.0/8'] }, 32, 1)
        t1 = time.time()
        d1 = t1-t0
        print "Find free prefix:", d1

    def test3(self):
        # untimed variant of test2, useful under an external profiler
        self.n.find_free_prefix({ 'schema_name': 'test-schema', 'from-prefix': ['2.0.0.0/8'] }, 32, 1)
if __name__ == '__main__':
    # command-line front-end: each option triggers one benchmark method
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--add-prefix', metavar = 'PREFIX', help='Add PREFIX')
    parser.add_argument('--fill-prefix', metavar = 'PREFIX', help='fill PREFIX with hosts')
    parser.add_argument('--find-free-prefix', metavar = 'PREFIX', help='try to find the next free /32 in PREFIX')
    parser.add_argument('--remove-prefix', metavar = 'PREFIX', help='delete PREFIX')

    b = bonk()

    args = parser.parse_args()
    if args.fill_prefix is not None:
        # validate the prefix before launching the (long) fill run
        if not b.n._get_afi(args.fill_prefix):
            print >> sys.stderr, "Please enter a valid prefix"
            sys.exit(1)
        b.fill_prefix(args.fill_prefix)
    if args.find_free_prefix is not None:
        b.find_free_prefix(args.find_free_prefix)
    if args.add_prefix is not None:
        b.add_prefix(args.add_prefix)
    if args.remove_prefix is not None:
        b.remove_prefix(args.remove_prefix)
| |
# [ Ripping trackerjacker like crazy ]#
from netaddr import *
from scapy.all import *
from boop.lib import *
from pyric.utils import channels
class Dot11Frame:
TO_DS = 0x1
FROM_DS = 0x2
    def __init__(self, frame):
        """Extract addressing, signal and vendor info from a scapy 802.11 frame.

        The src/dst/bssid mapping depends on the To-DS/From-DS bits of
        FCfield, per the 802.11 address-field table.
        """
        self.frame = frame
        self.bssid = None
        self.ssid = None
        self.signal = frame.dBm_AntSignal
        # map frequency to 2.4 GHz channel number; "-1" when unknown
        self.channel = channels.ISM_24_F2C.get(frame.Channel, "-1")
        self.frame_bytes = len(frame)
        self.cipher = ""
        self.security = []
        # print(frame.Channel)
        to_ds = frame.FCfield & Dot11Frame.TO_DS != 0
        from_ds = frame.FCfield & Dot11Frame.FROM_DS != 0

        if to_ds and from_ds:
            # WDS / mesh: four-address format, no single BSSID
            self.dest = frame.addr3
            self.src = frame.addr4
        elif to_ds:
            self.dest = frame.addr2
            self.src = frame.addr3
            self.bssid = frame.addr1
        elif from_ds:
            self.dest = frame.addr3
            self.src = frame.addr1
            self.bssid = frame.addr2
        else:
            # ad-hoc / management frames
            self.dest = frame.addr1
            self.src = frame.addr2
            self.bssid = frame.addr3

        # beacons and probe responses carry the SSID element
        if (
            frame.haslayer(Dot11Elt)
            and (frame.haslayer(Dot11Beacon))
            or frame.haslayer(Dot11ProbeResp)
        ):
            self.get_ssid()

        # resolve OUI vendor names; unknown OUIs become "NA"
        try:
            self.dest_vendor = EUI(self.dest).oui.registration().org[:8]
        except NotRegisteredError:
            self.dest_vendor = "NA"
        except TypeError:
            print(">", self.dest)
        try:
            self.src_vendor = EUI(self.src).oui.registration().org[:8]
        except NotRegisteredError:
            self.src_vendor = "NA"
        except TypeError:
            print(">>", self.src)
        if self.bssid:
            try:
                self.bssid_vendor = EUI(self.bssid).oui.registration().org[:8]
            except NotRegisteredError:
                self.bssid_vendor = "NA"
            except TypeError:
                print(">>>", self.bssid)
        else:
            self.bssid_vendor = "NA"

        # if frame.type == 0 and frame.subtype == 8:
        #     print("lasjdf")
        #     self.get_security()
def get_ssid(self):
# try:
self.ssid = self.frame.info.decode().replace("\x00", "")
if len(self.ssid) == 0:
self.ssid = ("< len: {0} >").format(len(self.frame.info))
# except UnicodeDecodeError:
# self.hidden.append(self.src)
# self.ssid = ("< len: {0} >").format(len(self.frame[Dot11Elt].info))
# def get_signal(self):
# self.signal = self.frame.dBm_AntSignal
# # try:
# # self.signal = -(256 - ord(self.frame.notdecoded[-2:-1]))
# # except:
# # self.signal = -(256 - ord(self.frame.notdecoded[-4:-3]))
# # if self.signal < -100:
# # self.signal = -1
def __str__(self) -> str:
return (
f"Dot11Frame(type={self.frame.type}, subtype={self.frame.subtype})"
)
def network_stats(self):
summary = {}
crypto = set()
akmsuite_types = {
0x00: "Reserved",
0x01: "802.1X",
0x02: "PSK"
}
p = self.frame["Dot11Beacon"].payload
while isinstance(p, Dot11Elt):
# if p.ID == 0:
# self.ssid = plain_str(p.info)
if isinstance(p, Dot11EltRSN):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
self.cipher = auth
self.security = ("WPA2")
else:
self.security = ("WPA2")
elif p.ID == 221:
if isinstance(p, Dot11EltMicrosoftWPA) or \
p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
if p.akm_suites:
auth = akmsuite_types.get(p.akm_suites[0].suite)
self.cipher = auth
self.security = ("WPA2")
else:
self.security = ("WPA")
try:
for key in WPS_QUERY:
if key in p.info:
index = p.info.index(key)
if index != 18:
print("WPS index: "+str(p.info.index(key)))
self.security += "/WPS"
except AttributeError:
pass
p = p.payload
if not self.security:
if self.frame.cap.privacy:
self.security = ("WEP")
else:
self.security = ("OPN")
return
# def get_security(self):
# cap = self.frame.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
# "{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split("+")
# sec = ""
# cipher = None
# p_layer = ""
# # print(self.frame.notdecoded[-2:-1])
# try:
# p_layer = self.frame.getlayer(Dot11Elt, ID=48).info
# sec = "WPA2"
# except AttributeError:
# try:
# p_layer = self.frame.getlayer(Dot11Elt, ID=221).info
# except AttributeError:
# print(self.frame.show())
# if p_layer.startswith(b"\x00P\xf2\x01\x01\x00"):
# sec = "WPA"
# # try:
# # print(">>>>>", self.frame.getlayer(Dot11Elt, ID=48)[8:12])
# # print(">>", p_layer)
# # except:
# # try:
# # print(">>>>>", self.frame.getlayer(Dot11Elt, ID=221)[8:12])
# # except:
# # pass
# if not sec:
# # Check for wep
# if "privacy" in cap:
# sec = "WEP"
# elif not sec:
# self.security = "OPEN"
# self.cypher = ""
# return
# if sec == "WPA2" and p_layer:
# # print(p_layer)
# if p_layer[8:12] == b"\x00\x0f\xac\x02":
# # Broken becuase no one has a CCMP/TKIP network.
# cipher = "CCMP/TKIP" if temp[16:24] == b"\x00\x0f\xac\x04" else "TKIP"
# elif p_layer[8:12] == b"\x00\x0f\xac\x04":
# cipher = "CCMP"
# temp = self.frame.getlayer(Dot11Elt, ID=221).info
# # print(temp)
# for key in WPS_QUERY:
# if key in temp:
# sec += "/WPS"
# # print(sec, cipher)
# print(Dot11Beacon(self.frame).network_stats())
# self.security = sec
# self.cipher = cipher or "?"
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
import tempest.config
from tempest import exceptions
from tempest.test import attr
class ServerRescueTestJSON(base.BaseComputeTest):
    """Exercise nova's server rescue mode through the JSON interface.

    Covers the rescue/unrescue cycle plus operations expected to fail
    (reboot, rebuild, volume attach/detach) or still succeed (floating
    IPs, security groups) while a server is in the RESCUE state.
    """
    _interface = 'json'
    # Evaluated at class-definition time from the tempest configuration.
    run_ssh = tempest.config.TempestConfig().compute.run_ssh

    @classmethod
    def setUpClass(cls):
        """Create shared fixtures: a floating IP, a security group, two
        volumes, a test server, and a second server left in RESCUE for
        the negative tests."""
        super(ServerRescueTestJSON, cls).setUpClass()
        cls.device = 'vdf'

        # Floating IP creation
        resp, body = cls.floating_ips_client.create_floating_ip()
        cls.floating_ip_id = str(body['id']).strip()
        cls.floating_ip = str(body['ip']).strip()

        # Security group creation
        cls.sg_name = rand_name('sg')
        cls.sg_desc = rand_name('sg-desc')
        resp, cls.sg = \
            cls.security_groups_client.create_security_group(cls.sg_name,
                                                             cls.sg_desc)
        cls.sg_id = cls.sg['id']

        # Create a volume and wait for it to become ready for attach
        resp, cls.volume_to_attach = \
            cls.volumes_extensions_client.create_volume(1,
                                                        display_name=
                                                        'test_attach')
        cls.volumes_extensions_client.wait_for_volume_status(
            cls.volume_to_attach['id'], 'available')

        # Create a volume and wait for it to become ready for attach
        resp, cls.volume_to_detach = \
            cls.volumes_extensions_client.create_volume(1,
                                                        display_name=
                                                        'test_detach')
        cls.volumes_extensions_client.wait_for_volume_status(
            cls.volume_to_detach['id'], 'available')

        # Server for positive tests
        resp, server = cls.create_server(image_id=cls.image_ref,
                                         flavor=cls.flavor_ref,
                                         wait_until='BUILD')
        resp, resc_server = cls.create_server(image_id=cls.image_ref,
                                              flavor=cls.flavor_ref,
                                              wait_until='ACTIVE')
        cls.server_id = server['id']
        cls.password = server['adminPass']
        cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')

        # Server for negative tests
        cls.rescue_id = resc_server['id']
        cls.rescue_password = resc_server['adminPass']
        # This second server stays in RESCUE for the whole class.
        cls.servers_client.rescue_server(
            cls.rescue_id, cls.rescue_password)
        cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')

    def setUp(self):
        super(ServerRescueTestJSON, self).setUp()

    @classmethod
    def tearDownClass(cls):
        """Release the class-level fixtures created by setUpClass."""
        # Deleting the floating IP which is created in this method
        cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
        client = cls.volumes_extensions_client
        client.delete_volume(str(cls.volume_to_attach['id']).strip())
        client.delete_volume(str(cls.volume_to_detach['id']).strip())
        resp, cls.sg = cls.security_groups_client.delete_security_group(
            cls.sg_id)
        super(ServerRescueTestJSON, cls).tearDownClass()

    def tearDown(self):
        super(ServerRescueTestJSON, self).tearDown()

    def _detach(self, server_id, volume_id):
        # Cleanup helper: detach a volume and wait until it is free again.
        self.servers_client.detach_volume(server_id, volume_id)
        self.volumes_extensions_client.wait_for_volume_status(volume_id,
                                                              'available')

    def _delete(self, volume_id):
        # Cleanup helper: delete a volume.
        self.volumes_extensions_client.delete_volume(volume_id)

    def _unrescue(self, server_id):
        # Cleanup helper: leave rescue mode and wait for ACTIVE.
        resp, body = self.servers_client.unrescue_server(server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')

    def _unpause(self, server_id):
        # Cleanup helper: unpause the server and wait for ACTIVE.
        resp, body = self.servers_client.unpause_server(server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(server_id, 'ACTIVE')

    @attr(type='smoke')
    def test_rescue_unrescue_instance(self):
        """A server can enter and leave rescue mode."""
        resp, body = self.servers_client.rescue_server(
            self.server_id, self.password)
        self.assertEqual(200, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
        resp, body = self.servers_client.unrescue_server(self.server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')

    @attr(type=['negative', 'gate'])
    def test_rescue_paused_instance(self):
        """Rescuing a PAUSED server must be rejected."""
        # Rescue a paused server
        # NOTE(review): rescue_server is called without the password
        # argument here, unlike the other call sites -- presumably it is
        # optional; confirm against the client signature.
        resp, body = self.servers_client.pause_server(
            self.server_id)
        self.addCleanup(self._unpause, self.server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'PAUSED')
        self.assertRaises(exceptions.Duplicate,
                          self.servers_client.rescue_server,
                          self.server_id)

    @attr(type=['negative', 'gate'])
    def test_rescued_vm_reboot(self):
        """Rebooting a rescued server must be rejected."""
        self.assertRaises(exceptions.Duplicate, self.servers_client.reboot,
                          self.rescue_id, 'HARD')

    @attr(type=['negative', 'gate'])
    def test_rescue_non_existent_server(self):
        """Rescuing an unknown server id must raise NotFound."""
        # Rescue a non-existing server
        self.assertRaises(exceptions.NotFound,
                          self.servers_client.rescue_server,
                          '999erra43')

    @attr(type=['negative', 'gate'])
    def test_rescued_vm_rebuild(self):
        """Rebuilding a rescued server must be rejected."""
        self.assertRaises(exceptions.Duplicate,
                          self.servers_client.rebuild,
                          self.rescue_id,
                          self.image_ref_alt)

    @attr(type=['negative', 'gate'])
    def test_rescued_vm_attach_volume(self):
        """Attaching a volume to a rescued server must be rejected."""
        # Rescue the server
        self.servers_client.rescue_server(self.server_id, self.password)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
        self.addCleanup(self._unrescue, self.server_id)

        # Attach the volume to the server
        self.assertRaises(exceptions.Duplicate,
                          self.servers_client.attach_volume,
                          self.server_id,
                          self.volume_to_attach['id'],
                          device='/dev/%s' % self.device)

    @attr(type=['negative', 'gate'])
    def test_rescued_vm_detach_volume(self):
        """Detaching a volume from a rescued server must be rejected."""
        # Attach the volume to the server
        self.servers_client.attach_volume(self.server_id,
                                          self.volume_to_detach['id'],
                                          device='/dev/%s' % self.device)
        self.volumes_extensions_client.wait_for_volume_status(
            self.volume_to_detach['id'], 'in-use')

        # Rescue the server
        self.servers_client.rescue_server(self.server_id, self.password)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
        # addCleanup is a LIFO queue
        self.addCleanup(self._detach, self.server_id,
                        self.volume_to_detach['id'])
        self.addCleanup(self._unrescue, self.server_id)

        # Detach the volume from the server expecting failure
        self.assertRaises(exceptions.Duplicate,
                          self.servers_client.detach_volume,
                          self.server_id,
                          self.volume_to_detach['id'])

    @attr(type='gate')
    def test_rescued_vm_associate_dissociate_floating_ip(self):
        """Floating IPs can still be managed while in rescue mode."""
        # Rescue the server
        self.servers_client.rescue_server(
            self.server_id, self.password)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
        self.addCleanup(self._unrescue, self.server_id)

        # Association of floating IP to a rescued vm
        client = self.floating_ips_client
        resp, body = client.associate_floating_ip_to_server(self.floating_ip,
                                                            self.server_id)
        self.assertEqual(202, resp.status)

        # Disassociation of floating IP that was associated in this method
        resp, body = \
            client.disassociate_floating_ip_from_server(self.floating_ip,
                                                        self.server_id)
        self.assertEqual(202, resp.status)

    @attr(type='gate')
    def test_rescued_vm_add_remove_security_group(self):
        """Security groups can still be managed while in rescue mode."""
        # Rescue the server
        self.servers_client.rescue_server(
            self.server_id, self.password)
        self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')

        # Add Security group
        resp, body = self.servers_client.add_security_group(self.server_id,
                                                            self.sg_name)
        self.assertEqual(202, resp.status)

        # Delete Security group
        resp, body = self.servers_client.remove_security_group(self.server_id,
                                                               self.sg_name)
        self.assertEqual(202, resp.status)

        # Unrescue the server
        resp, body = self.servers_client.unrescue_server(self.server_id)
        self.assertEqual(202, resp.status)
        self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
class ServerRescueTestXML(ServerRescueTestJSON):
    # Re-run the entire JSON rescue suite through the XML interface.
    _interface = 'xml'
| |
# External Dependencies
from __future__ import division
from math import sqrt
from numpy import poly1d
from warnings import warn
import os
from svgpathtools import (Path, Line, polyroots, real,
imag, disvg, wsvg)
from svgpathtools.misctools import isclose
# Readable aliases for svgpathtools' polynomial real/imag part extractors.
poly_imag_part = imag
poly_real_part = real

# Internal Dependencies
from andysSVGpathTools import pathT2tseg
import options4rings as opt

# When configured, display generated SVGs in the browser (disvg);
# otherwise just write them to disk (wsvg).
disvg = disvg if opt.try_to_open_svgs_in_browser else wsvg
def isPointOutwardOfSeg(pt, seg):
    """Return the parameters t in [0, 1] where the outward normal of
    `seg` passes through `pt`.

    Let c(t) = seg.point(t).  Solves <pt - c(t), c'(t)> == 0 for
    0 <= t <= 1; at any such t the normal line leaving `seg` at t will
    intersect `pt`.  Returns the (real parts of the) accepted roots.
    """
    c = seg.poly()          # segment as a complex polynomial c(t)
    u = poly1d((pt,)) - c   # pt - c(t), also a polynomial in t
    dc = c.deriv()          # c'(t)
    # Real inner product <u, dc> expanded into real/imag components.
    inner_prod = (poly_real_part(u)*poly_real_part(dc) +
                  poly_imag_part(u)*poly_imag_part(dc))

    # Note about nondeg_cond:
    # The derivative of a CubicBezier object can be zero, but only at
    # at most one point. For example if seg.start==seg.control1, then (in
    # theory) seg.derivative(t)==0 if and only if t==0.
    def dot_prod(z1, z2):
        # Euclidean dot product of two complex numbers viewed as 2-vectors.
        return (z1.real*z2.real +
                z1.imag*z2.imag)

    def allcond(_t):
        # Root filter passed to polyroots: accept _t only if it is
        # (numerically) real, lies in [0, 1], is not a degenerate-derivative
        # artifact, and the line to pt points outward (right of the tangent).
        from andysSVGpathTools import segUnitTangent
        tt = max(0, min(1, _t.real))
        lin2pt = Line(seg.point(tt), pt)
        real_cond = isclose(_t.imag, 0)
        bezier_cond = (0 < _t.real < 1 or
                       isclose(_t.real, 0) or
                       isclose(_t.real, 1))
        nondeg_cond = (not isclose(seg.derivative(tt), 0) or
                       isclose(0, dot_prod(segUnitTangent(seg, tt),
                                           lin2pt.unit_tangent())))
        outward_cond = \
            0 < dot_prod(lin2pt.unit_tangent(), -1j*segUnitTangent(seg, tt))
        return real_cond and bezier_cond and nondeg_cond and outward_cond

    inward_tvals = polyroots(inner_prod, condition=allcond)
    return [tval.real for tval in inward_tvals]
def isPointOutwardOfPath(pt, path, outerRing=None, justone=False):
    """Find where the outward normal of `path` passes through `pt`.

    Returns a list of (seg_idx, t) pairs such that the outward normal to
    path[seg_idx] at parameter t intersects pt.  If ``justone`` is true,
    the search stops at the first segment with any hit and the result has
    length one.  ``outerRing`` is unused here; it is only needed by
    callers in case the path contains corners.
    """
    hits = []
    for seg_idx, segment in enumerate(path):
        seg_tvals = isPointOutwardOfSeg(pt, segment)
        if justone and seg_tvals:
            return [(seg_idx, seg_tvals[0])]
        hits.extend((seg_idx, t) for t in seg_tvals)
    return hits
def invTransect(T, sorted_ring_list, warnifnotunique=True):
    """Finds a transect that ends at T.
    In the case there are more than one, if warnifnotunique=True,
    user will be warned, but this may slow down transect generation.
    IS `warnifnotunique` IMPLEMENTED? Maybe not ...
    Returns:
        list of tuples of form [(pt, ring_idx, seg_idx, t), ...]
        ordered from the outer boundary inward (callers reverse it).
    """
    # Start on the outermost (boundary) ring at global parameter T.
    cur_ring = sorted_ring_list[-1]
    cur_idx = len(sorted_ring_list) - 1
    init_t, init_seg = pathT2tseg(cur_ring.path, T)
    init_seg_idx = cur_ring.path.index(init_seg)
    transect_info = [(cur_ring.point(T),
                      len(sorted_ring_list) - 1,
                      init_seg_idx,
                      init_t)]
    cur_pt = transect_info[-1][0]
    # Walk inward one ring at a time until the innermost ring is reached.
    while cur_idx > 0:
        # Find all rings this transect segment could be coming from
        # (everything inward until, and including, the first closed ring).
        test_rings = []
        for r_idx, r in list(enumerate(sorted_ring_list[:cur_idx]))[::-1]:
            test_rings.append((r_idx, r))
            if r.path.isclosed():
                break
        # Collect every candidate (ring, segment, t) whose outward normal
        # passes through the current point.
        test_ring_results = []
        for r_idx, test_ring in test_rings:
            args = (cur_pt, test_ring.path, cur_ring)
            inward_segt_list = isPointOutwardOfPath(*args, justone=False)
            for seg_idx, t in inward_segt_list:
                test_ring_results.append((r_idx, seg_idx, t))

        # sort choices by distance to cur_pt
        def dist(res_):
            r_idx_, seg_idx_, t_ = res_
            new_pt_ = sorted_ring_list[r_idx_].path[seg_idx_].point(t_)
            return abs(cur_pt - new_pt_)
        sorted_results = sorted(test_ring_results, key=dist)

        # Find the closest result such that the transect does not go through
        # any other rings on it's way to cur_pt
        for res in sorted_results:
            wr_idx, wseg_idx, wt = res
            new_pt = sorted_ring_list[wr_idx].path[wseg_idx].point(wt)
            tr_line = Line(new_pt, cur_pt)
            winner = not any(r.path.intersect(tr_line)
                             for ri, r in test_rings if ri != wr_idx)
            if winner:
                break
        else:
            # No crossing-free candidate was found (or there were no
            # candidates at all).
            if opt.skip_transects_that_dont_exist:
                bdry_ring = sorted_ring_list[-1]
                s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
                fn = sorted_ring_list[0].svgname + \
                     "_partial_transect_%s.svg" % s_rel
                fn = os.path.join(opt.output_directory, fn)
                wsvg([r.path for r in sorted_ring_list],
                     nodes=[tr[0] for tr in transect_info],
                     filename=fn)
                warn("\nNo transect exists ending at relative arc "
                     "length %s. An svg displaying this partial transect has"
                     "been saved to:\n%s\n" % (s_rel, fn))
                return []
            elif opt.accept_transect_crossings:
                # NOTE(review): if sorted_results is empty this raises
                # IndexError -- confirm candidates always exist here.
                wr_idx, wseg_idx, wt = sorted_results[0]
            else:
                disvg([r.path for r in sorted_ring_list],
                      nodes=[tr[0] for tr in transect_info])  # DEBUG line
                bdry_ring = sorted_ring_list[-1]
                s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
                raise Exception("No transect exists ending at relative arc "
                                "length %s." % s_rel)
        # Record the closest choice
        # NOTE(review): the appended point lies on ring wr_idx, yet the
        # recorded ring index is cur_idx (the ring we came from) -- confirm
        # this offset is intentional before relying on the second field.
        transect_info.append((sorted_ring_list[wr_idx].path[wseg_idx].point(wt),
                              cur_idx,
                              wseg_idx,
                              wt))
        cur_ring = sorted_ring_list[wr_idx]
        cur_pt = transect_info[-1][0]
        cur_idx = wr_idx

    # Erroneous Termination
    # NOTE(review): wr_idx is always >= 0, so cur_idx < 0 appears to be
    # unreachable -- this sanity check may be dead code; confirm.
    if cur_idx < 0 and sorted_ring_list.index(cur_ring) != 0:
        disvg([r.path for r in sorted_ring_list],
              nodes=[tr[0] for tr in transect_info])  # DEBUG line
        bdry_ring = sorted_ring_list[-1]
        s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
        raise Exception("Something went wrong finding inverse transect at "
                        "relative arc length %s." % s_rel)
    return transect_info
def generate_inverse_transects(ring_list, Tvals):
    """Run invTransect for every T in `Tvals` and package the results.

    Returns (data, data_indices, skipped_angle_indices): data[k] is the
    list of transect points (core first), data_indices[k] the matching
    ring indices into `ring_list` ('core' for the center), and
    skipped_angle_indices the positions in Tvals for which no transect
    exists (when skipping is enabled in options4rings).
    """
    sorted_pairs = sorted(enumerate(ring_list),
                          key=lambda pair: pair[1].sort_index)
    original_positions, sorted_ring_list = zip(*sorted_pairs)

    data = []
    data_indices = []
    skipped_angle_indices = []
    for T_idx, T in enumerate(Tvals):
        tran_info = invTransect(T, sorted_ring_list, opt.warn_if_not_unique)
        if not tran_info and opt.skip_transects_that_dont_exist:
            skipped_angle_indices.append(T_idx)
            continue
        # invTransect reports boundary-first; translate sorted indices
        # back to ring_list positions, append the core, then flip the
        # order so the transect runs core -> boundary.
        transect = [pt for pt, _, _, _ in tran_info]
        transect_rings = [original_positions[ring_idx]
                          for _, ring_idx, _, _ in tran_info]
        transect.append(sorted_ring_list[0].center)
        transect_rings.append('core')
        transect.reverse()
        transect_rings.reverse()
        data.append(transect)
        data_indices.append(transect_rings)
    return data, data_indices, skipped_angle_indices
def generate_unsorted_transects(ring_list, center):
    """Generate N_transects random transects from `center` out to the
    boundary ring, without assuming the rings are sorted.

    Returns (data, data_indices, angles): data[k] is the list of transect
    points (core first), data_indices[k] the matching ring_list indices
    ('core' for the center), and angles[k] the random angle used.
    """
    from options4rings import (
        basic_output_on, warnings_output_on, N_transects,
        unsorted_transect_debug_output_folder, unsorted_transect_debug_on,
        colordict)
    from misc4rings import (
        transect_from_angle, normalLineAt_t_toInnerSeg_intersects_withOuter)
    from andysSVGpathTools import pathlistXlineIntersections
    from andysmod import Timer
    import operator
    from random import uniform

    # Find outer boundary ring
    for r in ring_list:
        if r.color == colordict['boundary']:
            boundary_ring = r
            break
    else:
        warnings_output_on.dprint(
            "[Warning:] Having trouble finding outer boundary - it "
            "should be color %s. Will now search for a ring of a "
            "similar color and if one is found, will use that.\n"
            "" % colordict['boundary'])
        from misc4rings import closestColor
        for r in ring_list:
            if colordict['boundary'] == closestColor(r.color , colordict):
                boundary_ring = r
                basic_output_on.dprint(
                    "Found a ring of color %s, using that one." % r.color)
                break
        else:
            warnings_output_on.dprint(
                "[Warning:] Outer boundary could not be found by color "
                "(or similar color). This is possibly caused by the "
                "outer boundary ring not being closed - in this case "
                "you'd be able to see a (possibly quite small) gap "
                "between it's startpoint and endpoint. Using the ring "
                "of greatest maximum radius as the boundary ring (and "
                "hoping if there is a gap none of the transects hit it).\n")
            boundary_ring = max(ring_list, key=lambda x: x.maxR)

    # Find transects
    from time import time as current_time
    from andysmod import format_time
    tr_gen_start_time = current_time()
    data = []
    data_indices = []
    angles = []
    for k in range(N_transects):
        # estimate time remaining
        if k != 0:
            total_elapsed_time = current_time() - tr_gen_start_time
            estimated_time_remaining = (N_transects - k)*total_elapsed_time/k
            timer_str = 'Transect %s of %s || ' \
                        'Est. Remaining Time = %s || ' \
                        'Elapsed Time = %s' \
                        '' % (k+1, N_transects,
                              format_time(estimated_time_remaining),
                              format_time(total_elapsed_time))
            overwrite_progress = True
        else:
            timer_str = 'transect %s of %s' % (k+1, N_transects)
            overwrite_progress = False
            print('')

        # generate current transect
        with Timer(timer_str, overwrite=overwrite_progress):
            if unsorted_transect_debug_on:
                print('')
            test_angle = uniform(0, 1)
            # test_angle = 0.408
            angles.append(test_angle)
            transect = [center]
            transect_rings = ['core']
            # used to keep track of which rings I've used and thus don't
            # need to be checked in the future.
            # BUGFIX: was `range(len(ring_list))` -- a py3 range supports
            # neither `del` (used below) nor list concatenation (used in
            # the debug block), so it must be materialized as a list.
            unused_ring_indices = list(range(len(ring_list)))

            # Find first transect segment (from core/center)
            # normal line to use to find intersections (from center to boundary ring)
            nl2bdry, seg_outer, t_outer = transect_from_angle(
                test_angle, center, boundary_ring.path, 'debug')
            # make normal line a little longer
            nl2bdry = Line(nl2bdry.start,
                           nl2bdry.start + 1.5*(nl2bdry.end-nl2bdry.start))
            tmp = pathlistXlineIntersections(
                nl2bdry, [ring_list[i].path for i in unused_ring_indices])
            tl, path_index, seg, tp = min(tmp, key=operator.itemgetter(0))
            transect.append(nl2bdry.point(tl))
            transect_rings.append(unused_ring_indices[path_index])
            del unused_ring_indices[path_index]

            # now for the rest of the transect
            num_rings_checked = 0
            while (ring_list[transect_rings[-1]] != boundary_ring and
                   num_rings_checked < len(ring_list)):  # < is correct, already did first
                num_rings_checked += 1
                inner_path = ring_list[transect_rings[-1]].path
                inner_t = tp
                inner_seg = seg
                # normal line to use to find intersections (from center to boundary ring)
                nl2bdry, seg_outer, t_outer = \
                    normalLineAt_t_toInnerSeg_intersects_withOuter(
                        inner_t, inner_seg, boundary_ring.path, center, 'debug')
                # make normal line a little longer
                nl2bdry = Line(nl2bdry.start,
                               nl2bdry.start + 1.5*(nl2bdry.end-nl2bdry.start))
                normal_line_intersections = pathlistXlineIntersections(
                    nl2bdry, [ring_list[i].path for i in unused_ring_indices])
                try:
                    tl, path_index, seg, tp = min(normal_line_intersections,
                                                  key=operator.itemgetter(0))
                except ValueError:
                    # No intersections at all -- surface the failure.
                    raise

                if unsorted_transect_debug_on:
                    # Dump an SVG visualizing the partial transect and all
                    # candidate intersection points.
                    from andysmod import format001
                    inner_path_index = transect_rings[-1]
                    used_ring_paths = \
                        [r.path for i, r in enumerate(ring_list)
                         if i not in unused_ring_indices + [inner_path_index]]
                    used_ring_colors = ['black']*len(used_ring_paths)
                    unused_ring_paths = \
                        [ring_list[i].path for i in unused_ring_indices]
                    unused_ring_colors = \
                        [ring_list[i].color for i in unused_ring_indices]
                    transect_so_far = \
                        Path(*[Line(transect[i-1], transect[i])
                               for i in range(1, len(transect))])
                    paths = used_ring_paths + unused_ring_paths + \
                            [transect_so_far] + [inner_path] + [nl2bdry]
                    colors = used_ring_colors + unused_ring_colors + \
                             ['green'] + ['blue'] + ['black']
                    nodes_so_far = transect[1:-1]
                    potential_nodes = \
                        [nl2bdry.point(tltmp)
                         for tltmp, _, _, _ in normal_line_intersections]
                    nodes = nodes_so_far + potential_nodes
                    node_colors = ['red']*len(nodes_so_far) + \
                                  ['purple']*len(potential_nodes)
                    save_name = unsorted_transect_debug_output_folder + \
                                'unsorted_transect_debug_%s.svg' \
                                '' % format001(3, len(transect))
                    disvg(paths, colors, nodes=nodes, node_colors=node_colors,
                          center=center, filename=save_name, openInBrowser=False)
                    print("Done with %s out of (at most) %s transect "
                          "segments" % (len(transect), len(ring_list)))

                transect.append(nl2bdry.point(tl))
                transect_rings.append(unused_ring_indices[path_index])
                del unused_ring_indices[path_index]

        data.append(transect)
        data_indices.append(transect_rings)
    return data, data_indices, angles
def generate_sorted_transects(ring_list, center, angles2use=None):
    """Generate N_transects transects from `center` outward, exploiting
    the rings' sort_index ordering to only test rings between the current
    position and the next closed ring.

    angles2use: optional pre-chosen angles (one per transect); when
    omitted, angles are drawn uniformly at random from [0, 1).
    Returns (data, data_indices, angles) in the same format as
    generate_unsorted_transects.
    """
    from options4rings import basic_output_on, N_transects
    from misc4rings import (
        transect_from_angle, normalLineAt_t_toInnerSeg_intersects_withOuter)
    from andysSVGpathTools import pathlistXlineIntersections
    from andysmod import Timer, format_time
    from svgpathtools import Line
    from random import uniform
    from time import time as current_time
    from operator import itemgetter

    tmp = sorted(enumerate(ring_list), key=lambda tup: tup[1].sort_index)
    ring_sorting, sorted_ring_list = zip(*tmp)

    def unsorted_index(idx):
        # Map a position in sorted_ring_list back to its ring_list index.
        return ring_sorting[idx]

    # Find transects
    tr_gen_start_time = current_time()
    data = []
    data_indices = []
    angles = []
    for k in range(N_transects):
        # estimate time remaining
        if k != 0:
            total_elapsed_time = current_time() - tr_gen_start_time
            estimated_time_remaining = \
                (N_transects - k)*total_elapsed_time/k
            timer_str = 'Transect %s of %s || ' \
                        'Est. Remaining Time = %s || ' \
                        'Elapsed Time = %s' \
                        '' % (k+1, N_transects,
                              format_time(estimated_time_remaining),
                              format_time(total_elapsed_time))
            overwrite_progress = True
        else:
            timer_str = 'transect %s of %s' % (k+1, N_transects)
            overwrite_progress = False
            print('')

        # generate current transect
        with Timer(timer_str, overwrite=overwrite_progress):
            if angles2use:
                test_angle = angles2use[k]
            else:
                test_angle = uniform(0, 1)
            angles.append(test_angle)
            transect = [center]
            transect_rings = ['core']

            # find first (innermost) closed ring
            # NOTE(review): the code below treats r.sort_index as the ring's
            # position within sorted_ring_list -- confirm sort_index values
            # are exactly 0..n-1.
            next_closed_ring = next(r for r in sorted_ring_list if r.isClosed())
            next_closed_ring_sidx = next_closed_ring.sort_index

            # Find first transect segment (from core/center)
            # Start by finding line that leaves center at angle and goes to
            # the first closed ring
            nl2bdry, seg_outer, t_outer = transect_from_angle(
                test_angle, center, next_closed_ring.path, 'debug')

            # Make normal line a little longer
            end2use = nl2bdry.start + 1.5*(nl2bdry.end - nl2bdry.start)
            nl2bdry = Line(nl2bdry.start, end2use)
            pot_paths = \
                [r.path for r in sorted_ring_list[0: next_closed_ring_sidx + 1]]

            # Note: intersections returned as (tl, path_index, seg, tp)
            pot_path_inters = pathlistXlineIntersections(nl2bdry, pot_paths)
            tl, path_index, seg, tp = min(pot_path_inters, key=itemgetter(0))

            # updates
            transect.append(nl2bdry.point(tl))
            transect_rings.append(unsorted_index(path_index))
            cur_pos_si = path_index
            next_closed_ring = next(r for r in sorted_ring_list
                                    if (r.sort_index > cur_pos_si and
                                        r.isClosed()))
            next_closed_ring_sidx = next_closed_ring.sort_index

            # now for the rest of the transects
            # note: < is correct, already did first
            num_rings_checked = 0
            while (cur_pos_si < len(ring_list) - 1 and
                   num_rings_checked < len(ring_list)):
                num_rings_checked += 1
                inner_t = tp
                inner_seg = seg

                # Find outwards normal line from current position to the next
                # closed ring
                nl2bdry, seg_outer, t_outer = \
                    normalLineAt_t_toInnerSeg_intersects_withOuter(
                        inner_t, inner_seg, next_closed_ring.path, center, 'debug')

                # Make the normal line a bit longer to avoid numerical error
                end2use = nl2bdry.start + 1.5*(nl2bdry.end - nl2bdry.start)
                nl2bdry = Line(nl2bdry.start, end2use)
                pot_paths = [r.path for r in
                             sorted_ring_list[cur_pos_si + 1:
                                              next_closed_ring_sidx + 1]]
                tl, path_index, seg, tp = \
                    min(pathlistXlineIntersections(nl2bdry, pot_paths),
                        key=itemgetter(0))

                # updates
                transect.append(nl2bdry.point(tl))
                cur_pos_si += path_index + 1
                transect_rings.append(unsorted_index(cur_pos_si))
                if cur_pos_si < len(ring_list)-1:
                    next_closed_ring = next(r for r in sorted_ring_list
                                            if (r.sort_index > cur_pos_si and
                                                r.isClosed()))
                    next_closed_ring_sidx = next_closed_ring.sort_index
        data.append(transect)
        data_indices.append(transect_rings)
    return data, data_indices, angles
def save_transect_data(outputFile_transects, ring_list, data, data_indices,
                       angles, skipped_angles):
    """Write the raw transect measurements to `outputFile_transects`.

    Each transect produces two rows: the angle, ring count, and the
    distance time series; then the angle, 'NA', and the sort_index of
    every ring the transect crossed ('core' for the center).  Skipped
    angles get a pair of placeholder rows at the end.
    """
    from options4rings import basic_output_on
    with open(outputFile_transects,"wt") as out_file:
        out_file.write("transect angle, number of rings counted by transect, "
                       "distance time series... total number of svg paths = "
                       "%s\n" % (len(ring_list) + 1))
        for k, transect in enumerate(data):
            # Consecutive point-to-point distances along this transect.
            distances = [abs(transect[i + 1] - transect[i])
                         for i in range(len(transect) - 1)]
            # Translate ring_list indices to sort_index values; the 'core'
            # marker passes through unchanged.
            indices = [idx if isinstance(idx, str)
                       else ring_list[idx].sort_index
                       for idx in data_indices[k]]
            row = str([angles[k]] + [len(distances)] + distances)[1:-1]
            row2 = str([angles[k]] + ['NA'] + indices)[1:-1]
            out_file.write(row + '\n' + row2 + '\n')
        for angle in skipped_angles:
            row = str([angle] + ['skipped'])[1:-1]
            row2 = str([angle] + ['NA'] + ['NA'])[1:-1]
            out_file.write(row + '\n' + row2 + '\n')
    basic_output_on.dprint("Data from %s transects saved to:" % len(data))
    basic_output_on.dprint(outputFile_transects)
def save_transect_summary(outputFile_transect_summary, ring_list, data,
                          data_indices, angles):
    """records averages of all transect distances going from ring1 to ring2

    For each "arrow" (from_ring, to_ring) pair, averages the observed
    distances both raw and normalized, with and without counting
    transects that never crossed that pair (the "zeros"), then writes
    one group of rows per transect, longest time series first.
    """
    from options4rings import basic_output_on
    from andysmod import eucnormalize_numpy, flattenList

    num_transects = len(angles)

    def collect(some_dict,key,val):
        # Prepend val to the list stored at key, creating the list if absent.
        try:
            some_dict.update({key :[val]+some_dict[key]})
        except KeyError:
            some_dict.update({key : [val]})

    # Per-transect lists of consecutive point distances, and the matching
    # (from_ring, to_ring) "arrow" index pairs.
    distance_time_series = [[abs(tr[i] - tr[i-1]) for i in range(1, len(tr))]
                            for tr in data]
    arrow_guides = [[(tr[i-1],tr[i]) for i in range(1,len(tr))]
                    for tr in data_indices]

    def g(ridx):
        # Translate a ring_list index to its sort_index ('core' passes through).
        if isinstance(ridx, str):
            return ridx
        else:
            return ring_list[ridx].sort_index

    sorted_arrow_guides = [[(g(x), g(y)) for (x, y) in aguide]
                           for aguide in arrow_guides]

    def normalized(vec):
        # Scale vec to unit Euclidean length.
        mag = sqrt(sum(x*x for x in vec))
        return [x/mag for x in vec]
    normalized_distance_time_series = [normalized(tr) for tr in distance_time_series]

    # initialize some dictionaries (their names explain them)
    raw_distances_for_arrow = dict(); normalized_distances_for_arrow = dict() #arrow -> list of distances (without zeros)

    # put data in the above dictionaries
    # (list comprehensions used for side effects only)
    [collect(raw_distances_for_arrow, *item) for item in
     zip(flattenList(arrow_guides), flattenList(distance_time_series))]
    [collect(normalized_distances_for_arrow, *item) for item in
     zip(flattenList(arrow_guides), flattenList(normalized_distance_time_series))]

    # initialize some more dictionaries (their names explain them)
    # arrow -> list of distances (without zeros)
    raw_distance_average_for_arrow_woZeros = dict()
    normalized_distance_average_for_arrow_woZeros = dict()
    # arrow -> list of distances (with zeros)
    raw_distance_average_for_arrow_wZeros = dict()
    normalized_distance_average_for_arrow_wZeros = dict()

    # put data in the above dictionaries
    # (comprehensions used for side effects only)
    [[raw_distance_average_for_arrow_woZeros.update(
        {arrow: sum(dlist)/len(dlist)}) for arrow, dlist in
        raw_distances_for_arrow.items()]]
    # note: len(angles) = the number of transects generated
    [[raw_distance_average_for_arrow_wZeros.update(
        {arrow: sum(dlist)/num_transects}) for arrow, dlist in
        raw_distances_for_arrow.items()]]
    [[normalized_distance_average_for_arrow_woZeros.update(
        {arrow:sum(dlist)/len(dlist)}) for arrow, dlist in
        normalized_distances_for_arrow.items()]]
    # note: len(angles) = the number of transects generated
    [[normalized_distance_average_for_arrow_wZeros.update(
        {arrow:sum(dlist)/num_transects}) for arrow, dlist in
        normalized_distances_for_arrow.items()]]

    # in order to display averaged time series in order of length...
    # here's the permutation
    keyfcn = lambda k: len(distance_time_series[k])
    order2printTimeSeries = \
        sorted(range(num_transects), key=keyfcn, reverse=True)

    def deliminate_and_write(out_file,*args):
        """insert delimiters between args and output string

        NOTE(review): the loop starts at index 0 even though args[0] has
        already been written, so the first field appears twice per row --
        confirm whether range(1, len(args)) was intended.
        """
        output = str(args[0]).replace(',', ';')
        for k in range(len(args)):
            output += ', '+str(args[k]).replace(',', ';')
        out_file.write(output + '\n')

    with open(outputFile_transect_summary, "wt") as out_file:
        out_file.write("Number of path objects counted in SVG: %s\n"
                       "" % len(ring_list))
        out_file.write("Max number of rings counted by a transect: %s\n\n"
                       "" % (max([len(transect) for transect in data]) - 1))
        out_file.write("angle arrow guide is based on, type of timeseries, "
                       "time series..."+'\n')

        # record data without zeros
        for k in order2printTimeSeries:
            angle = angles[k]
            arrow_guide = arrow_guides[k]
            sorted_arrow_guide = sorted_arrow_guides[k]
            # Per-arrow (from_is_closed, to_is_closed) flags.
            closure_guide = \
                [('core', ring_list[arrow_guide[0][1]].isApproxClosedRing())] + \
                [(ring_list[i].isApproxClosedRing(),
                  ring_list[j].isApproxClosedRing())
                 for i, j in arrow_guide[1: len(arrow_guide)]]
            raw_woZeros = [raw_distance_average_for_arrow_woZeros[arrow]
                           for arrow in arrow_guide]
            raw_wZeros = [raw_distance_average_for_arrow_wZeros[arrow]
                          for arrow in arrow_guide]
            normalized_woZeros = \
                [normalized_distance_average_for_arrow_woZeros[arrow]
                 for arrow in arrow_guide]
            normalized_wZeros = \
                [normalized_distance_average_for_arrow_wZeros[arrow]
                 for arrow in arrow_guide]
            deliminate_and_write(
                out_file, angle, 'arrow guide', *sorted_arrow_guide)
            deliminate_and_write(
                out_file, angle, 'closure', *closure_guide)
            deliminate_and_write(
                out_file, angle, 'raw w/o zeros', *raw_woZeros)
            deliminate_and_write(
                out_file, angle, 'raw with zeros', *raw_wZeros)
            deliminate_and_write(
                out_file, angle, 'normalized w/o zeros', *normalized_woZeros)
            deliminate_and_write(
                out_file, angle, 'normalized with zeros', *normalized_wZeros)
            deliminate_and_write(
                out_file, angle, 'renormalized raw w/o zeros',
                *eucnormalize_numpy(raw_woZeros))
            deliminate_and_write(
                out_file, angle, 'renormalized raw with zeros',
                *eucnormalize_numpy(raw_wZeros))
            deliminate_and_write(
                out_file, angle, 'renormalized normalized w/o zeros',
                *eucnormalize_numpy(normalized_woZeros))
            deliminate_and_write(
                out_file, angle, 'renormalized normalized with zeros',
                *eucnormalize_numpy(normalized_wZeros))
            out_file.write("\n")
    basic_output_on.dprint("Summary of transect results saved to:")
    basic_output_on.dprint(outputFile_transect_summary)
| |
# -*- coding: utf-8 -*-
import unittest
import jwt
from eve import Eve
from eve_auth_jwt import JWTAuth
from flask import g
from eve_auth_jwt.tests import test_routes
# Eve/JWT configuration shared by every test in this module.
settings = {
    'JWT_SECRET': 'secret',  # HMAC secret used to sign/verify tokens
    'JWT_ISSUER': 'https://domain.com/token',  # required `iss` claim value
    'JWT_ROLES_CLAIM': 'roles',  # claim holding the caller's roles
    'JWT_SCOPE_CLAIM': 'scope',  # claim holding the caller's scope
    'DOMAIN': {
        # Standard resource: accepts audience 'aud1', readable and writable.
        'foo': {
            'schema': {
                'name': {},
            },
            'audiences': ['aud1'],
            'resource_methods': ['POST', 'GET'],
        },
        # Resource with its own audience, distinct from 'foo'.
        'bar': {
            'audiences': ['aud2'],
        },
        # Resource restricted to specific roles.
        'baz': {
            'audiences': ['aud1'],
            'allowed_roles': ['role1', 'role2'],
        },
        # Resource with no audience declared at all.
        'bad': {
        },
        # Resource with its own endpoint-level JWTAuth using a custom secret.
        'bag': {
            'audiences': ['aud1'],
            'authentication': JWTAuth('custom'),
        },
    },
}
class TestBase(unittest.TestCase):
    """Integration tests for JWTAuth wired into an Eve application.

    Every test mints a JWT (signed with the module-level secret unless a
    test says otherwise) and exercises a resource declared in
    ``settings['DOMAIN']`` or an extra route registered by ``test_routes``.
    The claim/token/request boilerplate that used to be copy-pasted into
    each test lives in the ``_claims``/``_token``/``_get`` helpers.
    """

    # Issuer the app accepts (must match settings['JWT_ISSUER']).
    ISSUER = 'https://domain.com/token'
    # Subject used by most tests; any non-empty value works.
    SUBJECT = '0123456789abcdef01234567'

    def setUp(self):
        self.app = Eve(settings=settings, auth=JWTAuth)
        test_routes.register(self.app)
        self.test_client = self.app.test_client()

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------

    def _claims(self, **extra):
        """Return the standard valid claim set, updated with ``extra``."""
        claims = {'iss': self.ISSUER,
                  'aud': 'aud1',
                  'sub': self.SUBJECT}
        claims.update(extra)
        return claims

    def _token(self, claims, secret='secret'):
        """Sign ``claims`` and return the compact token as text.

        PyJWT 1.x returns bytes from jwt.encode, hence the decode.
        """
        return jwt.encode(claims, secret).decode('utf-8')

    def _get(self, path, claims, secret='secret'):
        """GET ``path`` with the signed token in the access_token query arg."""
        token = self._token(claims, secret)
        return self.test_client.get('{}?access_token={}'.format(path, token))

    # ------------------------------------------------------------------
    # Tests
    # ------------------------------------------------------------------

    def test_restricted_access(self):
        # No token at all -> 401 with a bare challenge header.
        r = self.test_client.get('/foo')
        self.assertEqual(r.status_code, 401)
        self.assertEqual(r.headers['WWW-Authenticate'], 'Bearer realm="eve_auth_jwt"')

    def test_token_error(self):
        # A malformed token -> 401 with an invalid_token error hint.
        r = self.test_client.get('/foo?access_token=invalid')
        self.assertEqual(r.status_code, 401)
        self.assertEqual(r.headers['WWW-Authenticate'], 'Bearer realm="eve_auth_jwt", error="invalid_token"')

    def test_valid_token_header(self):
        auth = [('Authorization', 'Bearer {}'.format(self._token(self._claims())))]
        r = self.test_client.get('/foo', headers=auth)
        self.assertEqual(r.status_code, 200)

    def test_valid_token_query(self):
        r = self._get('/foo', self._claims())
        self.assertEqual(r.status_code, 200)

    def test_token_claims_context(self):
        # The decoded claims must be exposed on flask.g.
        claims = self._claims()
        with self.app.test_client() as client:
            client.get('/foo?access_token={}'.format(self._token(claims)))
            self.assertEqual(g.get('authen_claims'), claims)

    def test_invalid_token_secret(self):
        r = self._get('/foo', self._claims(), secret='invalid secret')
        self.assertEqual(r.status_code, 401)

    def test_missing_token_subject(self):
        # The `sub` claim is optional.
        r = self._get('/foo', {'iss': self.ISSUER, 'aud': 'aud1'})
        self.assertEqual(r.status_code, 200)

    def test_invalid_token_issuer(self):
        r = self._get('/foo', self._claims(iss='https://invalid-domain.com/token'))
        self.assertEqual(r.status_code, 401)

    def test_invalid_token_audience(self):
        r = self._get('/foo', self._claims(aud='aud2'))
        self.assertEqual(r.status_code, 401)

    def test_valid_token_resource_audience(self):
        # /bar declares its own audiences=['aud2'].
        r = self._get('/bar', self._claims(aud='aud2'))
        self.assertEqual(r.status_code, 200)

    def test_invalid_token_resource_audience(self):
        r = self._get('/bar', self._claims())
        self.assertEqual(r.status_code, 401)

    def test_valid_token_role(self):
        r = self._get('/baz', self._claims(roles=['role1']))
        self.assertEqual(r.status_code, 200)

    def test_invalid_token_role(self):
        # /baz requires one of allowed_roles; a token without roles fails.
        r = self._get('/baz', self._claims())
        self.assertEqual(r.status_code, 401)

    def test_token_role_context(self):
        token = self._token(self._claims(roles=['role1']))
        with self.app.test_client() as client:
            client.get('/baz?access_token={}'.format(token))
            self.assertEqual(g.get('authen_roles'), ['role1'])

    def test_token_role_context_always(self):
        # Roles are exposed on g even for resources without allowed_roles.
        token = self._token(self._claims(roles=['role1']))
        with self.app.test_client() as client:
            client.get('/foo?access_token={}'.format(token))
            self.assertEqual(g.get('authen_roles'), ['role1'])

    def test_token_scope(self):
        r = self._get('/foo', self._claims(scope='user'))
        self.assertEqual(r.status_code, 200)

    def test_token_scope_viewer_read(self):
        r = self._get('/foo', self._claims(scope='viewer'))
        self.assertEqual(r.status_code, 200)

    def test_token_scope_viewer_write(self):
        # viewer scope may read but not write.
        token = self._token(self._claims(scope='viewer'))
        r = self.test_client.post('/foo?access_token={}'.format(token))
        self.assertEqual(r.status_code, 401)

    def test_requires_token_success(self):
        r = self._get('/token/success',
                      self._claims(roles=['super'], scope='user'))
        self.assertEqual(r.status_code, 200, r.data)

    def test_requires_token_failure_audience(self):
        r = self._get('/token/failure',
                      self._claims(aud='aud2', roles=['super'], scope='user'))
        self.assertEqual(r.status_code, 401, r.data)

    def test_requires_token_failure_roles(self):
        r = self._get('/token/failure',
                      self._claims(roles=[], scope='user'))
        self.assertEqual(r.status_code, 401, r.data)

    def test_auth_header_token_success(self):
        token = self._token(self._claims(roles=['super'], scope='user'))
        headers = {'Authorization': 'Bearer {}'.format(token)}
        r = self.test_client.get('/token/success', headers=headers)
        self.assertEqual(r.status_code, 200)

    def test_auth_header_token_failure(self):
        r = self.test_client.get('/token/failure')
        self.assertEqual(r.status_code, 401, r.data)

    def test_no_audience_token_success(self):
        # /bad declares no audiences; a bare issuer-only token is accepted.
        r = self._get('/bad', {'iss': self.ISSUER})
        self.assertEqual(r.status_code, 200)

    def test_no_audience_token_failure(self):
        r = self._get('/bad', self._claims())
        self.assertEqual(r.status_code, 401)

    def test_endpoint_level_auth_with_different_secret(self):
        # /bag uses its own JWTAuth('custom') instance (see settings).
        r = self._get('/bag', {'iss': self.ISSUER, 'aud': 'aud1'},
                      secret='custom')
        self.assertEqual(r.status_code, 200)

    def test_auth_header_token_success_with_different_secret(self):
        token = self._token({'iss': 'custom_issuer',
                             'aud': 'aud1',
                             'roles': ['super']}, secret='custom_secret')
        headers = {'Authorization': 'Bearer {}'.format(token)}
        r = self.test_client.get('/custom/success', headers=headers)
        self.assertEqual(r.status_code, 200)

    def test_auth_header_token_failure_with_wrong_issuer(self):
        token = self._token({'iss': self.ISSUER,
                             'aud': 'aud1',
                             'roles': ['super']}, secret='custom_secret')
        headers = {'Authorization': 'Bearer {}'.format(token)}
        r = self.test_client.get('/custom/success', headers=headers)
        self.assertEqual(r.status_code, 401)
if __name__ == '__main__':
    # Run this module's test suite when executed directly.
    unittest.main()
| |
from distutils.dir_util import copy_tree
import glob
import os
import shutil
from django.core.management import BaseCommand, CommandError
from django.conf import settings
from django.core.management import call_command
from django.db import connection
from gcutils.bigquery import Client as BQClient, DATASETS, build_schema
from gcutils.storage import Client as StorageClient
from frontend import bq_schemas as schemas
from frontend.models import MeasureValue, MeasureGlobal
from dmd.models import NCSOConcession, DMDProduct, TariffPrice, DMDVmpp
from openprescribing.slack import notify_slack
from pipeline import runner
# Directory holding the fixture data consumed by the end-to-end pipeline run.
e2e_path = os.path.join(settings.APPS_ROOT, 'pipeline', 'e2e-test-data')
class Command(BaseCommand):
    """Run the end-to-end pipeline test and report the outcome to Slack."""

    def handle(self, *args, **kwargs):
        # Use .get() so a missing DJANGO_SETTINGS_MODULE yields the clear
        # CommandError below instead of an opaque KeyError.
        if os.environ.get('DJANGO_SETTINGS_MODULE') != \
                'openprescribing.settings.e2etest':
            raise CommandError('Command must run with e2etest settings')

        try:
            run_end_to_end()
        except Exception:
            # Notify Slack about the failure, then re-raise so the process
            # still exits non-zero with a traceback.
            msg = 'End-to-end test failed (seed: %s)\n\n' % settings.BQ_NONCE
            msg += 'Check logs in /tmp/'
            notify_slack(msg)
            raise

        msg = 'Pipeline tests ran to completion (seed: %s)' % settings.BQ_NONCE
        notify_slack(msg)
def run_end_to_end():
    # Orchestrate a full pipeline run against freshly-reset local and
    # BigQuery state, then check row counts after each month's import.
    # NOTE: the steps are order-dependent (tables must exist before the
    # runner fires) -- do not reorder.
    print('BQ_NONCE: {}'.format(settings.BQ_NONCE))

    call_command('migrate')

    # One measure definition JSON file per measure.
    path = os.path.join(settings.APPS_ROOT, 'frontend', 'management',
                        'commands', 'measure_definitions')
    num_measures = len(os.listdir(path))

    # Reset local pipeline state: data directory and import log.
    shutil.rmtree(settings.PIPELINE_DATA_BASEDIR, ignore_errors=True)

    with open(settings.PIPELINE_IMPORT_LOG_PATH, 'w') as f:
        f.write('{}')

    # Empty the Cloud Storage bucket.
    for blob in StorageClient().bucket().list_blobs():
        blob.delete()

    # Recreate every BigQuery dataset, then the core 'hscic' tables.
    for dataset_key in DATASETS:
        BQClient(dataset_key).create_dataset()

    client = BQClient('hscic')
    client.create_table('bnf', schemas.BNF_SCHEMA)
    client.create_table('ccgs', schemas.CCG_SCHEMA)
    client.create_table('ppu_savings', schemas.PPU_SAVING_SCHEMA)
    client.create_table(
        'practice_statistics',
        schemas.PRACTICE_STATISTICS_SCHEMA
    )
    client.create_table(
        'practice_statistics_all_years',
        schemas.PRACTICE_STATISTICS_SCHEMA
    )
    client.create_table('practices', schemas.PRACTICE_SCHEMA)
    client.create_table('prescribing', schemas.PRESCRIBING_SCHEMA)
    client.create_table('presentation', schemas.PRESENTATION_SCHEMA)
    client.create_table('tariff', schemas.TARIFF_SCHEMA)
    client.create_table('bdz_adq', schemas.BDZ_ADQ_SCHEMA)

    client = BQClient('measures')
    # This is enough of a schema to allow the practice_data_all_low_priority
    # table to be created, since it references these fields. Once populated by
    # import_measures, the tables in the measures dataset will have several
    # more fields. But we don't need to specify exactly what they are, as BQ
    # will work it out when the data is inserted with insert_rows_from_query.
    measures_schema = build_schema(
        ('month', 'DATE'),
        ('practice_id', 'STRING'),
        ('numerator', 'INTEGER'),
        ('denominator', 'INTEGER'),
    )
    # Reuse of `path` as both the glob pattern and the loop variable below is
    # intentional (the pattern is not needed again afterwards).
    path = os.path.join(settings.APPS_ROOT, 'frontend', 'management',
                        'commands', 'measure_definitions', '*.json')
    for path in glob.glob(path):
        measure_id = os.path.splitext(os.path.basename(path))[0]
        client.create_table('practice_data_' + measure_id, measures_schema)
        client.create_table('ccg_data_' + measure_id, measures_schema)
        client.create_table('global_data_' + measure_id, measures_schema)

    # Although there are no model instances, we call upload_model to create the
    # tables in BQ that might be required by certain measure views.
    client = BQClient('dmd')
    client.upload_model(NCSOConcession)
    client.upload_model(DMDProduct)
    client.upload_model(TariffPrice)
    client.upload_model(DMDVmpp)

    call_command('generate_presentation_replacements')

    # Replace materialised views with plain tables for the test run.
    path = os.path.join(settings.APPS_ROOT, 'frontend', 'management',
                        'commands', 'replace_matviews.sql')
    with open(path) as f:
        with connection.cursor() as c:
            c.execute(f.read())

    # --- Month 1 (2017-09) -------------------------------------------------
    copy_tree(
        os.path.join(e2e_path, 'data-1'),
        os.path.join(e2e_path, 'data'),
    )

    runner.run_all(2017, 9, under_test=True)

    # We expect one MeasureGlobal per measure per month.
    assert_count_equal(num_measures, MeasureGlobal)

    # We expect one MeasureValue for each organisation per measure per month
    # (There are 4 practices, 2 CCGs, 2 STPs, and 2 regional teams).
    assert_count_equal(10 * num_measures, MeasureValue)

    # We expect one statistic per CCG per month
    assert_raw_count_equal(2, 'vw__ccgstatistics')

    # We expect one chemical summary per CCG per month
    assert_raw_count_equal(2, 'vw__chemical_summary_by_ccg',
                           "chemical_id = '1001030C0'")

    # We expect one chemical summary per practice per month
    assert_raw_count_equal(4, 'vw__chemical_summary_by_practice',
                           "chemical_id = '1001030C0'")

    # We expect one summary per practice per month
    assert_raw_count_equal(4, 'vw__practice_summary')

    # We expect one presentation summary per month
    assert_raw_count_equal(1, 'vw__presentation_summary',
                           "presentation_code = '1001030C0AAAAAA'")

    # We expect one presentation summary per CCG per month
    assert_raw_count_equal(2, 'vw__presentation_summary_by_ccg',
                           "presentation_code = '1001030C0AAAAAA'")

    # --- Month 2 (2017-10): counts double as the totals are cumulative -----
    copy_tree(
        os.path.join(e2e_path, 'data-2'),
        os.path.join(e2e_path, 'data'),
    )

    runner.run_all(2017, 10, under_test=True)

    # We expect one MeasureGlobal per measure per month
    assert_count_equal(2 * num_measures, MeasureGlobal)

    # We expect one MeasureValue for each organisation per measure per month
    assert_count_equal(20 * num_measures, MeasureValue)

    # We expect one statistic per CCG per month
    assert_raw_count_equal(4, 'vw__ccgstatistics')

    # We expect one chemical summary per CCG per month
    assert_raw_count_equal(4, 'vw__chemical_summary_by_ccg',
                           "chemical_id = '1001030C0'")

    # We expect one chemical summary per practice per month
    assert_raw_count_equal(8, 'vw__chemical_summary_by_practice',
                           "chemical_id = '1001030C0'")

    # We expect one summary per practice per month
    assert_raw_count_equal(8, 'vw__practice_summary')

    # We expect one presentation summary per month
    assert_raw_count_equal(2, 'vw__presentation_summary',
                           "presentation_code = '1001030C0AAAAAA'")

    # We expect one presentation summary per CCG per month
    assert_raw_count_equal(4, 'vw__presentation_summary_by_ccg',
                           "presentation_code = '1001030C0AAAAAA'")
def assert_count_equal(expected, model):
    """Raise CommandError unless *model* has exactly *expected* rows."""
    found = model.objects.count()
    if found == expected:
        return
    raise CommandError(
        'Expected {} {} objects, found {}'.format(expected, model, found))
def assert_raw_count_equal(expected, table_name, where_condition=None):
    """Raise CommandError unless a raw SQL COUNT(*) matches *expected*.

    An optional *where_condition* (raw SQL fragment) narrows the count.
    """
    query = 'SELECT COUNT(*) FROM {}'.format(table_name)
    if where_condition is not None:
        query += ' WHERE {}'.format(where_condition)

    with connection.cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()

    count = rows[0][0]
    if count != expected:
        raise CommandError(
            'Expected {} to return {}, got {}'.format(query, expected, count))
| |
import numpy as np
import itertools
import random
from qitensor import qudit, direct_sum, NotKetSpaceError, \
HilbertSpace, HilbertArray, HilbertError, HilbertShapeError, MismatchedSpaceError
from qitensor.space import create_space2
# Numerical tolerance used throughout this module for float comparisons.
toler = 1e-12

# FIXME - some methods don't have docs
# FIXME - use CP_Map in the map-state duality example
# FIXME - method to relabel input/output/env space

# Possible examples:
# Space not seen by environment:
#   ha = qudit('a', 5); hb = qudit('b', 8); hc = qudit('c', 3)
#   E = CP_Map((hb*hc*ha.H).random_isometry(), hc)
#   ... or
#   E = CP_Map.random(ha, hb, hc)
#   E.ket().O.trace(hc).span(ha.O)

# Public API of this module.
__all__ = ['Superoperator', 'CP_Map']
def _unreduce_supop_v1(in_space, out_space, m):
    """
    This is the function that handles restoring a pickle (v1 format).

    The argument tuple is produced by ``Superoperator.__reduce__``.
    """
    return Superoperator(in_space, out_space, m)
class Superoperator(object):
    """
    A linear map taking operators on ``in_space`` to operators on
    ``out_space``, stored internally as a dense transfer matrix of shape
    (out_space.O.dim(), in_space.O.dim()).

    FIXME: need to write documentation.
    """

    def __init__(self, in_space, out_space, m):
        """
        >>> ha = qudit('a', 3)
        >>> hb = qudit('b', 4)
        >>> E = Superoperator.random(ha, hb)
        >>> X = ha.O.random_array()
        >>> Y = hb.O.random_array()
        >>> # Test the adjoint channel.
        >>> abs( (E(X).H * Y).trace() - (X.H * E.H(Y)).trace() ) < 1e-14
        True
        """
        self._in_space = self._to_ket_space(in_space)
        self._out_space = self._to_ket_space(out_space)
        self._m = np.matrix(m)
        # BUGFIX: validate the coerced matrix rather than the raw argument;
        # `m` may be e.g. a nested list, which has no `.shape` attribute.
        if self._m.shape != (self.out_space.O.dim(), self.in_space.O.dim()):
            raise HilbertShapeError(self._m.shape,
                                    (self.out_space.O.dim(), self.in_space.O.dim()))
        # Lazily-built cache for the adjoint channel (see the H property).
        self._H_S = None

    def __reduce__(self):
        """
        Tells pickle how to store this object.

        >>> import pickle
        >>> from qitensor import qubit, qudit, Superoperator
        >>> ha = qudit('a', 3)
        >>> hb = qubit('b')
        >>> rho = (ha*hb).random_density()
        >>> E = Superoperator.from_function(ha, lambda x: x.T)
        >>> E
        Superoperator( |a><a| to |a><a| )
        >>> F = pickle.loads(pickle.dumps(E))
        >>> F
        Superoperator( |a><a| to |a><a| )
        >>> (E(rho) - F(rho)).norm() < 1e-14
        True
        """
        return _unreduce_supop_v1, (self.in_space, self.out_space, self._m)

    @property
    def in_space(self):
        """The ket space whose operators this map consumes."""
        return self._in_space

    @property
    def out_space(self):
        """The ket space whose operators this map produces."""
        return self._out_space

    @classmethod
    def _make_environ_spc(cls, espc_def, field, dim):
        """Create (or validate) an environment space of at least ``dim`` dims.

        ``espc_def`` may be None (a randomly named qudit is created), an
        existing HilbertSpace (validated for size), or a label for qudit().
        """
        if espc_def is None:
            # Random suffix to avoid label collisions between auto-created
            # environment spaces.
            chartab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
            rndstr = ''.join(random.sample(chartab, 6))
            espc_def = 'env_'+rndstr

        if isinstance(espc_def, HilbertSpace):
            if espc_def.dim() < dim:
                raise HilbertError('environment space not big enough: %d vs %d'
                    % (espc_def.dim(), dim))
            return espc_def

        return qudit(espc_def, dim, dtype=field)

    @classmethod
    def _to_ket_space(cls, spc):
        """Normalize ``spc`` to a ket space (|a> from |a>, <a| or |a><a|)."""
        if not spc.bra_set:
            return spc
        if not spc.ket_set:
            return spc.H
        if spc == spc.O:
            # NOTE(review): `ket_space` is read as an attribute here but
            # called as a method in from_function -- confirm which is right.
            return spc.ket_space
        raise NotKetSpaceError('need a bra, ket, or self-adjoint space, not '+str(spc))

    def __str__(self):
        return 'Superoperator( '+str(self.in_space.O)+' to '+str(self.out_space.O)+' )'

    def __repr__(self):
        return str(self)

    def as_matrix(self):
        """Return the dense transfer-matrix representation of this map."""
        return self._m

    def __call__(self, rho):
        """Apply the map to ``rho``, whose space must contain in_space.O;
        any extra factors of rho's space are passed through untouched."""
        if not rho.space.bra_ket_set >= self.in_space.O.bra_ket_set:
            raise MismatchedSpaceError("argument space "+repr(rho.space)+
                " does not contain superop domain "+repr(self.in_space.O))

        (row_space, col_space) = rho._get_row_col_spaces(col_space=self.in_space.O)
        ret_vec = self._m * rho.as_np_matrix(col_space=self.in_space.O)

        if len(row_space):
            out_space = self.out_space.O * np.prod(row_space)
        else:
            out_space = self.out_space.O

        return out_space.array(ret_vec, reshape=True, input_axes=self.out_space.O.axes+row_space)

    def __mul__(self, other):
        """
        >>> from qitensor import qudit, Superoperator
        >>> ha = qudit('a', 2)
        >>> hb = qudit('b', 3)
        >>> hc = qudit('c', 4)
        >>> hd = qudit('d', 2)
        >>> he = qudit('e', 3)
        >>> rho = (ha*hd).O.random_array()

        >>> E = Superoperator.random(ha, ha)
        >>> E
        Superoperator( |a><a| to |a><a| )
        >>> 2*E
        Superoperator( |a><a| to |a><a| )
        >>> ((2*E)(rho) - 2*E(rho)).norm() < 1e-14
        True
        >>> (-2)*E
        Superoperator( |a><a| to |a><a| )
        >>> (((-2)*E)(rho) - (-2)*E(rho)).norm() < 1e-14
        True

        >>> E1 = Superoperator.random(ha, hb*hc)
        >>> E1
        Superoperator( |a><a| to |b,c><b,c| )
        >>> E2 = Superoperator.random(hc*hd, he)
        >>> E2
        Superoperator( |c,d><c,d| to |e><e| )
        >>> E3 = E2*E1
        >>> E3
        Superoperator( |a,d><a,d| to |b,e><b,e| )
        >>> (E2(E1(rho)) - E3(rho)).norm() < 1e-12 # FIXME - why not 1e-14 precision?
        True
        """
        if isinstance(other, Superoperator):
            # Composition: spaces common to our input and the other map's
            # output are consumed; everything else is passed through.
            common_spc = self.in_space.ket_set & other.out_space.ket_set
            in_spc = (self.in_space.ket_set - common_spc) | other.in_space.ket_set
            in_spc = create_space2(in_spc, frozenset())
            return Superoperator.from_function(in_spc, lambda x: self(other(x)))

        if isinstance(other, HilbertArray):
            return NotImplemented

        # hopefully `other` is a scalar; scale the transfer matrix
        return Superoperator(self.in_space, self.out_space, self._m*other)

    def __rmul__(self, other):
        # hopefully `other` is a scalar; scalar multiplication commutes
        return self * other

    def __add__(self, other):
        """
        >>> from qitensor import qudit, Superoperator
        >>> ha = qudit('a', 4)
        >>> hb = qudit('b', 3)
        >>> E1 = Superoperator.random(ha, hb)
        >>> E2 = Superoperator.random(ha, hb)
        >>> rho = ha.random_density()
        >>> chi = (E1*0.2 + E2*0.8)(rho)
        >>> xi = E1(rho)*0.2 + E2(rho)*0.8
        >>> (chi - xi).norm() < 1e-14
        True
        """
        if not isinstance(other, Superoperator):
            return NotImplemented

        if self.in_space != other.in_space or self.out_space != other.out_space:
            raise MismatchedSpaceError("spaces do not match: "+
                repr(self.in_space)+" -> "+repr(self.out_space)+" vs. "+
                repr(other.in_space)+" -> "+repr(other.out_space))

        return Superoperator(self.in_space, self.out_space, self._m + other._m)

    def __neg__(self):
        """
        >>> from qitensor import qudit, Superoperator
        >>> ha = qudit('a', 4)
        >>> hb = qudit('b', 3)
        >>> E = Superoperator.random(ha, hb)
        >>> rho = ha.random_density()
        >>> ((-E)(rho) + E(rho)).norm() < 1e-14
        True
        """
        return (-1)*self

    def __sub__(self, other):
        """
        >>> from qitensor import qudit, Superoperator
        >>> ha = qudit('a', 4)
        >>> hb = qudit('b', 3)
        >>> E1 = Superoperator.random(ha, hb)
        >>> E2 = Superoperator.random(ha, hb)
        >>> rho = ha.random_density()
        >>> chi = (E1 - E2)(rho)
        >>> xi = E1(rho) - E2(rho)
        >>> (chi - xi).norm() < 1e-14
        True
        """
        return self + (-other)

    @property
    def H(self):
        """The adjoint channel."""
        if self._H_S is None:
            da = self.in_space.dim()
            db = self.out_space.dim()
            # The adjoint's transfer matrix is our conjugate with the
            # input/output index pairs swapped.
            MH = self.as_matrix().A.conj().reshape(db,db,da,da).transpose(2,3,0,1). \
                reshape(da*da, db*db)
            self._H_S = Superoperator(self.out_space, self.in_space, MH)
        return self._H_S

    @classmethod
    def from_function(cls, in_space, f):
        """
        >>> from qitensor import qudit, Superoperator
        >>> ha = qudit('a', 3)
        >>> hb = qudit('b', 4)
        >>> rho = (ha*hb).random_density()

        >>> ET = Superoperator.from_function(ha, lambda x: x.T)
        >>> ET
        Superoperator( |a><a| to |a><a| )
        >>> (ET(rho) - rho.transpose(ha)).norm() < 1e-14
        True

        >>> hc = qudit('c', 5)
        >>> L = (hc*ha.H).random_array()
        >>> R = (ha*hc.H).random_array()
        >>> N = Superoperator.from_function(ha, lambda x: L*x*R)
        >>> N
        Superoperator( |a><a| to |c><c| )
        >>> (N(rho) - L*rho*R).norm() < 1e-14
        True

        >>> Superoperator.from_function(ha, lambda x: x.H)
        Traceback (most recent call last):
            ...
        ValueError: function was not linear
        """
        in_space = cls._to_ket_space(in_space)
        # Probe f once to learn the output space; it must be self-adjoint.
        out_space = f(in_space.eye()).space
        if out_space != out_space.H:
            raise MismatchedSpaceError("out space was not symmetric: "+repr(out_space))
        out_space = out_space.ket_space()

        # Build the transfer matrix column-by-column from basis operators.
        m = np.zeros((out_space.dim()**2, in_space.dim()**2), in_space.base_field.dtype)
        for (i, x) in enumerate(in_space.O.index_iter()):
            m[:, i] = f(in_space.O.basis_vec(x)).nparray.flatten()

        E = Superoperator(in_space, out_space, m)

        # Spot-check linearity of f on a random state.
        rho = in_space.random_density()
        if (E(rho) - f(rho)).norm() > toler:
            raise ValueError('function was not linear')

        return E

    @classmethod
    def random(cls, spc_in, spc_out):
        """Return a superoperator with a random transfer matrix."""
        in_space = cls._to_ket_space(spc_in)
        out_space = cls._to_ket_space(spc_out)
        m = spc_in.base_field.random_array((out_space.O.dim(), in_space.O.dim()))
        return Superoperator(in_space, out_space, m)

    @classmethod
    def transposer(cls, spc):
        """
        >>> from qitensor import qubit, qudit, Superoperator
        >>> ha = qudit('a', 3)
        >>> hb = qubit('b')
        >>> rho = (ha*hb).random_density()
        >>> T = Superoperator.transposer(ha)
        >>> T
        Superoperator( |a><a| to |a><a| )
        >>> (T(rho) - rho.transpose(ha)).norm() < 1e-14
        True
        """
        return cls.from_function(spc, lambda x: x.T)

    def upgrade_to_cp_map(self, espc_def=None):
        """Reinterpret this map as a CP_Map (raises if it is not completely
        positive; see CP_Map.from_matrix)."""
        return CP_Map.from_matrix(self._m, self.in_space, self.out_space, espc_def=espc_def)

    def upgrade_to_cptp_map(self, espc_def=None):
        """Like upgrade_to_cp_map, but also require trace preservation."""
        # BUGFIX: previously espc_def was accepted but silently dropped.
        ret = self.upgrade_to_cp_map(espc_def)
        ret.assert_cptp()
        return ret
def _unreduce_cpmap_v1(in_space, out_space, env_space, J):
    """
    This is the function that handles restoring a pickle (v1 format).

    ``in_space`` and ``out_space`` are part of the stored tuple but are not
    used here: CP_Map.__init__ re-derives both from ``J``.
    """
    return CP_Map(J, env_space)
class CP_Map(Superoperator):
    """
    A completely positive map, represented by an isometry ``J`` together
    with the environment (Stinespring) space that gets traced out.

    FIXME: need to write documentation.
    """

    def __init__(self, J, env_space):
        """
        >>> ha = qudit('a', 3)
        >>> hb = qudit('b', 4)
        >>> hd = qudit('d', 3)
        >>> rho = (ha*hd).random_density()
        >>> E = CP_Map.random(ha, hb)
        >>> # Test the channel via its isometry.
        >>> ((E.J * rho * E.J.H).trace(E.env_space) - E(rho)).norm() < 1e-14
        True
        >>> # Test complementary channel.
        >>> ((E.J * rho * E.J.H).trace(hb) - E.C(rho)).norm() < 1e-14
        True
        >>> X = ha.O.random_array()
        >>> Y = hb.O.random_array()
        >>> # Test the adjoint channel.
        >>> abs( (E(X).H * Y).trace() - (X.H * E.H(Y)).trace() ) < 1e-14
        True
        """
        env_space = self._to_ket_space(env_space)
        # The input space is everything J consumes; the output is whatever
        # J produces that is not part of the environment.
        in_space = J.space.bra_space().H
        if not J.space.ket_set >= env_space.ket_set:
            raise MismatchedSpaceError("J output does not contain env_space: "+repr(J.ket_space)+
                " vs. "+repr(env_space))
        out_space = J.space.ket_set - env_space.ket_set
        out_space = create_space2(out_space, frozenset())
        assert J.space == out_space * env_space * in_space.H

        # Build the transfer matrix by summing the outer products of the
        # Kraus operators (slices of J along the environment basis).
        da = in_space.dim()
        db = out_space.dim()
        t = np.zeros((db, da, db, da), dtype=in_space.base_field.dtype)
        for j in env_space.index_iter():
            op = J[{ env_space: j }].as_np_matrix(row_space=in_space.H)
            t += np.tensordot(op, op.conj(), axes=([],[]))
        # Reorder indices into (out, out', in, in') pairs and flatten.
        t = t.transpose([0,2,1,3])
        t = t.reshape(db**2, da**2)
        t = np.matrix(t)
        super(CP_Map, self).__init__(in_space, out_space, t)

        self._J = J
        self._env_space = env_space
        # Lazily-built caches for the complementary and adjoint channels.
        self._C = None
        self._H_CP = None
    def __reduce__(self):
        """
        Tells pickle how to store this object.

        Note: in_space and out_space are included in the stored tuple for
        format stability, but _unreduce_cpmap_v1 rebuilds from J/env_space.

        >>> import pickle
        >>> from qitensor import qudit, CP_Map
        >>> ha = qudit('a', 2)
        >>> rho = ha.O.random_array()
        >>> E = CP_Map.random(ha, ha)
        >>> F = pickle.loads(pickle.dumps(E))
        >>> F
        CP_Map( |a><a| to |a><a| )
        >>> (E(rho) - F(rho)).norm() < 1e-14
        True
        """
        return _unreduce_cpmap_v1, (self.in_space, self.out_space, self.env_space, self.J)
    @property
    def env_space(self):
        """The environment (Stinespring) ket space traced out by the channel."""
        return self._env_space
    @property
    def J(self):
        """The channel isometry (a true isometry only if is_cptp())."""
        return self._J
    @property
    def C(self):
        """The complementary channel."""
        if self._C is None:
            # Same isometry, but now the old output space is traced out.
            self._C = CP_Map(self.J, self.out_space)
        return self._C
    @property
    def H(self):
        """The adjoint channel."""
        if self._H_CP is None:
            # Relabel the bra environment of J.H back to a ket so it can
            # serve as the environment of the adjoint map.
            JH = self.J.H.relabel({ self.env_space.H: self.env_space })
            self._H_CP = CP_Map(JH, self.env_space)
        return self._H_CP
    def ket(self):
        """
        Returns the channel ket (the isometry J with its input bra turned
        into a ket); only possible when the input space is disjoint from the
        output and environment spaces.

        >>> from qitensor import qudit, CP_Map
        >>> ha = qudit('a', 2)
        >>> hb = qudit('b', 2)

        >>> E = CP_Map.random(ha, hb, 'c')
        >>> E.J.space
        |b,c><a|
        >>> E.ket().space
        |a,b,c>

        >>> F = CP_Map.random(ha, ha, 'c')
        >>> F.ket()
        Traceback (most recent call last):
            ...
        HilbertError: 'channel ket can only be made if input space is different from output and environment spaces'
        """
        if not self.in_space.ket_set.isdisjoint(self.J.space.ket_set):
            raise HilbertError('channel ket can only be made if input space is different '+
                'from output and environment spaces')
        return self.J.transpose(self.in_space)
    def is_cptp(self):
        """Return True if the map is trace preserving, i.e. J is an isometry
        (J.H * J == I up to `toler`)."""
        return (self.J.H*self.J - self.in_space.eye()).norm() < toler
    def assert_cptp(self):
        """Raise ValueError unless this map is trace preserving."""
        if not self.is_cptp():
            raise ValueError('channel is not trace preserving')
    def krauses(self):
        """
        Returns the Kraus operators of the channel, obtained by slicing the
        isometry J along each environment basis vector.

        >>> import numpy
        >>> from qitensor import qudit, CP_Map
        >>> ha = qudit('a', 2)
        >>> hb = qudit('b', 2)
        >>> E = CP_Map.random(ha, hb)
        >>> len(E.krauses())
        4
        >>> E.krauses()[0].space
        |b><a|
        >>> # Test closure condition.
        >>> ( numpy.sum([ x.H * x for x in E.krauses() ]) - ha.eye() ).norm() < 1e-14
        True
        """
        return [ self.J[x] for x in self.env_space.index_iter_dict() ]
    def __str__(self):
        # Same format as Superoperator.__str__, with the CP_Map class name.
        return 'CP_Map( '+str(self.in_space.O)+' to '+str(self.out_space.O)+' )'
    def __repr__(self):
        # repr matches str, as for Superoperator.
        return str(self)
    def __mul__(self, other):
        """
        >>> from qitensor import qudit, CP_Map
        >>> ha = qudit('a', 2)
        >>> hb = qudit('b', 3)
        >>> hc = qudit('c', 2)
        >>> hd = qudit('d', 2)
        >>> he = qudit('e', 3)
        >>> rho = (ha*hd).O.random_array()

        >>> E = CP_Map.random(ha, ha)
        >>> E
        CP_Map( |a><a| to |a><a| )
        >>> 2*E
        CP_Map( |a><a| to |a><a| )
        >>> ((2*E)(rho) - 2*E(rho)).norm() < 1e-14
        True
        >>> (-2)*E
        Superoperator( |a><a| to |a><a| )
        >>> (((-2)*E)(rho) - (-2)*E(rho)).norm() < 1e-14
        True

        >>> E*E
        CP_Map( |a><a| to |a><a| )
        >>> ((E*E)(rho) - E(E(rho))).norm() < 1e-14
        True

        >>> E1 = CP_Map.random(ha, hb*hc, 'env1')
        >>> E1
        CP_Map( |a><a| to |b,c><b,c| )
        >>> E2 = CP_Map.random(hc*hd, he, 'env2')
        >>> E2
        CP_Map( |c,d><c,d| to |e><e| )
        >>> E3 = E2*E1
        >>> E3
        CP_Map( |a,d><a,d| to |b,e><b,e| )
        >>> E3.env_space
        |env1,env2>
        >>> (E2(E1(rho)) - E3(rho)).norm() < 1e-14
        True
        """
        if isinstance(other, CP_Map):
            # Composition self∘other on the overlapping ket spaces.
            common_spc = self.in_space.ket_set & other.out_space.ket_set
            in_spc = (self.in_space.ket_set - common_spc) | other.in_space.ket_set
            out_spc = self.out_space.ket_set | (other.out_space.ket_set - common_spc)
            # NOTE(review): in_spc/out_spc are computed but never used below;
            # candidates for removal.
            in_spc = create_space2(in_spc , frozenset())
            out_spc = create_space2(out_spc, frozenset())
            # If the multiplicands have disjoint environments, then the product will use the
            # product environment. Otherwise, a new environment is created.
            if self.env_space.ket_set.isdisjoint(other.env_space.ket_set):
                env = self.env_space * other.env_space
                return CP_Map(self.J*other.J, env)
            else:
                return super(CP_Map, self).__mul__(other).upgrade_to_cp_map()

        if isinstance(other, Superoperator):
            return super(CP_Map, self).__mul__(other)

        if isinstance(other, HilbertArray):
            return NotImplemented

        # hopefully `other` is a scalar
        if other < 0:
            # A negative multiple is not completely positive, so the result
            # degrades to a plain Superoperator (see doctest above).
            return super(CP_Map, self).__mul__(other)
        else:
            # Nonnegative scalars can be absorbed into the isometry.
            s = self.in_space.base_field.sqrt(other)
            return CP_Map(self.J*s, self.env_space)
    def __rmul__(self, other):
        # hopefully `other` is a scalar; scalar multiplication commutes
        return self * other
    def __add__(self, other):
        """Sum of maps; if both operands are CP maps the result is lifted
        back to a CP_Map (with a freshly created environment)."""
        ret = super(CP_Map, self).__add__(other)
        if isinstance(other, CP_Map):
            # FIXME - reuse env_space if possible
            return ret.upgrade_to_cp_map()
        else:
            return ret
    def add2(self, other):
        """
        Adds two CP maps. The returned map has the same action as E1+E2, but the environment
        is the direct sum of the component environments.

        >>> import numpy.linalg as linalg
        >>> from qitensor import qudit, CP_Map
        >>> ha = qudit('a', 2)
        >>> hb = qudit('b', 3)
        >>> E1 = CP_Map.random(ha, hb, 'hc1')
        >>> E2 = CP_Map.random(ha, hb, 'hc2')
        >>> X = E1 + E2
        >>> Y = E1.add2(E2)
        >>> linalg.norm(X.as_matrix() - Y.as_matrix()) < 1e-14
        True
        >>> (E1.env_space, E2.env_space, Y.env_space)
        (|hc1>, |hc2>, |hc1+hc2>)
        """
        if not isinstance(other, CP_Map):
            raise ValueError('other was not a CP_Map')
        if self.in_space != other.in_space or self.out_space != other.out_space:
            raise MismatchedSpaceError("spaces do not match: "+
                repr(self.in_space)+" -> "+repr(self.out_space)+" vs. "+
                repr(other.in_space)+" -> "+repr(other.out_space))

        # Embed each isometry into the direct-sum environment via the
        # canonical projectors P[0], P[1].
        ret_hc = direct_sum((self.env_space, other.env_space))
        ret_J = ret_hc.P[0]*self.J + ret_hc.P[1]*other.J
        return CP_Map(ret_J, ret_hc)
    def coherent_information(self, rho):
        """
        Compute S(B)-S(C) after passing the given state through the channel.

        ``rho`` must be a density operator (self-adjoint space, trace 1).
        """
        if rho.space != rho.H.space:
            raise HilbertError("rho did not have equal bra and ket spaces: "+str(rho.space))
        if np.abs(rho.trace() - 1) > toler:
            raise ValueError("rho didn't have trace=1")

        # S(B): entropy of the channel output; S(C): entropy of what leaks
        # to the environment (the complementary channel's output).
        return self(rho).tracekeep(self.out_space).entropy() - self.C(rho).tracekeep(self.env_space).entropy()
    def private_information(self, ensemble):
        """
        Compute I(X;B) - I(X;C) where X is a classical ancillary system that records which
        state of the ensemble was passed through the channel.
        """
        ensemble = list(ensemble)
        dx = len(ensemble)
        # Classical register space, one basis state per ensemble member.
        hx = self._make_environ_spc(None, self.in_space.base_field, dx)
        # Classical-quantum state sum_i |i><i|_X (x) rho_i.
        # NOTE(review): np.sum over a list of HilbertArrays relies on their
        # __add__; confirm this is supported for empty ensembles too.
        rho = np.sum([ hx.ket(i).O * rho_i for (i, rho_i) in enumerate(ensemble) ])
        if rho.space != rho.H.space:
            raise HilbertError("ensemble was not on a Hermitian space: "+rho.space)
        if np.abs(rho.trace() - 1) > toler:
            raise ValueError("your ensemble didn't have trace=1")

        return self(rho).mutual_info(hx, self.out_space) - self.C(rho).mutual_info(hx, self.env_space)
@classmethod
def from_function(cls, in_space, f, espc_def=None):
    """
    Create a CP map from a function that acts on operators.

    >>> from qitensor import qubit, qudit, CP_Map
    >>> ha = qudit('a', 3)
    >>> hb = qubit('b')
    >>> rho = (ha*hb).random_density()
    >>> CP_Map.from_function(ha, lambda x: x.T)
    Traceback (most recent call last):
        ...
    ValueError: matrix didn't correspond to a completely positive superoperator (min eig=-1.0)
    >>> U = ha.random_unitary()
    >>> EU = CP_Map.from_function(ha, lambda x: U*x*U.H)
    >>> EU
    CP_Map( |a><a| to |a><a| )
    >>> (EU(rho) - U*rho*U.H).norm() < 1e-14
    True
    """
    # Build the generic superoperator first, then promote it; the upgrade
    # step raises ValueError if f is not completely positive.
    return Superoperator.from_function(in_space, f).upgrade_to_cp_map(espc_def)
@classmethod
def from_matrix(cls, m, spc_in, spc_out, espc_def=None, compact_environ_tol=1e-12):
    """
    Create a CP map from its superoperator matrix representation.

    :param m: the superoperator matrix, of shape (dim_out**2, dim_in**2).
    :param spc_in: the input space (bra parts are stripped).
    :param spc_out: the output space (bra parts are stripped).
    :param espc_def: optional label or space for the environment.
    :param compact_environ_tol: eigenvalues of the cross operator at or below
        this threshold are dropped, shrinking the environment; pass a false
        value to keep the full da*db dimensional environment.
    :raises HilbertShapeError: if the matrix shape does not match the spaces.
    :raises ValueError: if the matrix is not a completely positive superoperator.

    >>> from qitensor import qudit, CP_Map
    >>> ha = qudit('a', 2)
    >>> hb = qudit('b', 3)
    >>> hx = qudit('x', 5)
    >>> E1 = CP_Map.random(ha*hb, hx)
    >>> E2 = CP_Map.random(hx, ha*hb)
    >>> m = E2.as_matrix() * E1.as_matrix()
    >>> E3 = CP_Map.from_matrix(m, ha*hb, ha*hb)
    >>> rho = (ha*hb).random_density()
    >>> (E2(E1(rho)) - E3(rho)).norm() < 1e-14
    True
    """
    in_space = cls._to_ket_space(spc_in)
    out_space = cls._to_ket_space(spc_out)
    da = in_space.dim()
    db = out_space.dim()
    t = np.array(m)
    if t.shape != (db*db, da*da):
        raise HilbertShapeError("matrix wrong size for given spaces: "+
            repr(t.shape)+" vs. "+repr((db*db, da*da)))
    # Reinterpret the superoperator matrix as the "cross operator": reshape
    # to (out, out', in, in') and interleave axes so it becomes a
    # (out*in) x (out*in) matrix whose positivity encodes complete positivity.
    t = t.reshape(db, db, da, da)
    t = t.transpose([0, 2, 1, 3])
    t = t.reshape(db*da, db*da)
    field = in_space.base_field
    # The cross operator must be self-adjoint ...
    if field.mat_norm(np.transpose(np.conj(t)) - t, 2) > toler:
        raise ValueError("matrix didn't correspond to a completely positive "+
            "superoperator (cross operator not self-adjoint)")
    (ew, ev) = field.mat_eig(t, hermit=True)
    # ... and positive semidefinite (up to numerical tolerance).
    if np.min(ew) < -toler:
        raise ValueError("matrix didn't correspond to a completely positive "+
            "superoperator (min eig="+str(np.min(ew))+")")
    # Clamp tiny negative eigenvalues (numerical noise) to zero.
    ew = np.where(ew < 0, 0, ew)
    if compact_environ_tol:
        # Keep only eigenvectors with non-negligible weight; the environment
        # dimension shrinks accordingly.
        nonzero = np.nonzero(ew > compact_environ_tol)[0]
        dc = len(nonzero)
    else:
        dc = da*db
        nonzero = list(range(dc))
    env_space = cls._make_environ_spc(espc_def, in_space.base_field, dc)
    # Each retained eigenvector, scaled by sqrt(eigenvalue), becomes one
    # Kraus operator; stack them along the environment index to form J.
    J = (out_space * env_space * in_space.H).array()
    for (i, j) in enumerate(nonzero):
        J[{ env_space: i }] = (out_space * in_space.H).array(ev[:,j] * field.sqrt(ew[j]), reshape=True)
    return CP_Map(J, env_space)
@classmethod
def from_kraus(cls, ops, espc_def=None):
    """
    Create a CP map from a collection of Kraus operators.

    :param ops: iterable of operators; all are assumed to act on the same
        space (the space of the first operator is used).
    :param espc_def: optional label or space for the environment, whose
        dimension equals the number of Kraus operators.
    :raises ValueError: if ``ops`` is empty.
    """
    ops = list(ops)
    # Robustness: an empty list previously crashed with an opaque IndexError.
    if not ops:
        raise ValueError("at least one Kraus operator is required")
    op_spc = ops[0].space
    dc = len(ops)
    env_space = cls._make_environ_spc(espc_def, op_spc.base_field, dc)
    # Stack the Kraus operators along the environment index to build the
    # Stinespring isometry J.
    J = (op_spc * env_space).array()
    for (i, op) in enumerate(ops):
        J[{ env_space: i }] = op
    return CP_Map(J, env_space)
@classmethod
def random(cls, spc_in, spc_out, espc_def=None):
    """
    Return a random CPTP map. The channel's isometry is distributed uniformly over the
    Haar measure.

    :param espc_def: a HilbertSpace for the environment, or a label for that space if a
        string is provided, or the dimension of the environment if an integer is provided.
        If not specified, the environment will have full dimension.
    """
    ket_in = cls._to_ket_space(spc_in)
    ket_out = cls._to_ket_space(spc_out)
    # Determine the environment dimension from espc_def (space, int, or default).
    if isinstance(espc_def, HilbertSpace):
        dc = espc_def.dim()
    elif isinstance(espc_def, int):
        (dc, espc_def) = (espc_def, None)
    else:
        dc = ket_in.dim() * ket_out.dim()
    env = cls._make_environ_spc(espc_def, ket_in.base_field, dc)
    iso = (ket_out*env*ket_in.H).random_isometry()
    return CP_Map(iso, env)
@classmethod
def unitary(cls, U, espc_def=None):
    """
    Construct the channel corresponding to conjugation by the unitary U.

    >>> from qitensor import qubit, CP_Map
    >>> ha = qubit('a')
    >>> hb = qubit('b')
    >>> U = ha.random_unitary()
    >>> rho = (ha*hb).random_density()
    >>> E = CP_Map.unitary(U)
    >>> (E(rho) - U*rho*U.H).norm() < 1e-14
    True
    """
    # A unitary channel needs only a trivial, one-dimensional environment.
    env = cls._make_environ_spc(espc_def, U.space.base_field, 1)
    return CP_Map(U * env.ket(0), env)
@classmethod
def identity(cls, spc, espc_def=None):
    """
    Construct the identity channel on the given space.

    >>> from qitensor import qubit, CP_Map
    >>> ha = qubit('a')
    >>> hb = qubit('b')
    >>> rho = (ha*hb).random_density()
    >>> E = CP_Map.identity(ha)
    >>> (E(rho) - rho).norm() < 1e-14
    True
    """
    # The identity channel is just the unitary channel for the identity operator.
    return cls.unitary(spc.eye(), espc_def)
@classmethod
def totally_noisy(cls, spc, espc_def=None):
    """
    Return the completely depolarizing channel, which maps every input state
    to the fully mixed state on ``spc``.

    :param spc: the input/output space.
    :param espc_def: optional label or space for the environment (dimension d**2).

    >>> from qitensor import qudit, CP_Map
    >>> ha = qudit('a', 5)
    >>> rho = ha.random_density()
    >>> E = CP_Map.totally_noisy(ha)
    >>> (E(rho) - ha.fully_mixed()).norm() < 1e-14
    True
    """
    in_space = cls._to_ket_space(spc)
    d = in_space.dim()
    d2 = d*d
    # One Kraus operator |k><j| per ordered pair (j, k) of basis states,
    # hence an environment of dimension d**2.
    env_space = cls._make_environ_spc(espc_def, in_space.base_field, d2)
    J = (in_space.O*env_space).array()
    for (i, (j, k)) in enumerate(itertools.product(in_space.index_iter(), repeat=2)):
        J[{ in_space.H: j, in_space: k, env_space: i }] = 1
    # Normalization making the map trace preserving.
    J /= in_space.base_field.sqrt(d)
    return CP_Map(J, env_space)
@classmethod
def noisy(cls, spc, p, espc_def=None):
    """
    Return the depolarizing channel that replaces the input with the fully
    mixed state with probability p, and transmits it unchanged otherwise.

    >>> from qitensor import qudit, CP_Map
    >>> ha = qudit('a', 5)
    >>> rho = ha.random_density()
    >>> E = CP_Map.noisy(ha, 0.2)
    >>> (E(rho) - 0.8*rho - 0.2*ha.fully_mixed()).norm() < 1e-14
    True
    """
    if not (0 <= p <= 1):
        raise HilbertError("p must be in [0,1], but it was "+repr(p))
    # Convex combination of the completely depolarizing channel and the
    # identity channel.
    depolarize = cls.totally_noisy(spc)
    transmit = cls.identity(spc)
    return p*depolarize + (1-p)*transmit
@classmethod
def decohere(cls, spc, espc_def=None):
    """
    Return the channel that zeroes all off-diagonal elements of the input
    (full decoherence in the computational basis).

    >>> from qitensor import qudit, CP_Map
    >>> ha = qudit('a', 5)
    >>> rho = ha.random_density()
    >>> E = CP_Map.decohere(ha)
    >>> (E(rho) - ha.diag(rho.diag(as_np=True))).norm() < 1e-14
    True
    """
    ket_spc = cls._to_ket_space(spc)
    dim = ket_spc.dim()
    # One Kraus operator |a><a| per basis state: a copy of the basis label
    # leaks to the environment, killing off-diagonal terms.
    env = cls._make_environ_spc(espc_def, ket_spc.base_field, dim)
    J = (ket_spc.O*env).array()
    for (idx, label) in enumerate(ket_spc.index_iter()):
        J[{ ket_spc.H: label, ket_spc: label, env: idx }] = 1
    return CP_Map(J, env)
@classmethod
def erasure(cls, spc, p, bspc_def=None, espc_def=None):
    """
    Create a channel that has probability p of erasing the input, and 1-p of perfectly
    transmitting the input. The output space has dimension one greater than the input
    space, and the receiver is notified of erasure via the extra computational basis state.
    If p=0.5 then the channel is symmetric.

    :param spc: the input space.
    :param p: erasure probability, in [0, 1].
    :param bspc_def: optional label or space for the output (dimension d+1).
    :param espc_def: optional label or space for the environment (dimension d+1).
    :raises ValueError: if p is outside [0, 1].
    """
    if p < 0 or p > 1:
        raise ValueError("p must be in [0, 1]")
    in_space = cls._to_ket_space(spc)
    d = in_space.dim()
    # Output and environment each get one extra dimension (index d), used as
    # the "erased" flag state.
    out_space = cls._make_environ_spc(bspc_def, in_space.base_field, d+1)
    env_space = cls._make_environ_spc(espc_def, in_space.base_field, d+1)
    J = (out_space * env_space * in_space.H).array()
    # With amplitude sqrt(p): output is the erasure flag |d> and the
    # environment receives the input (np.eye(d+1, d) embeds the d-dim input
    # into the first d levels).  With amplitude sqrt(1-p): the roles of
    # output and environment are swapped, so the input is transmitted.
    # The branches are orthogonal, so J is an isometry.
    J += np.sqrt( p) * out_space.ket(d) * (env_space * in_space.H).array(np.eye(d+1, d), reshape=True)
    J += np.sqrt(1-p) * env_space.ket(d) * (out_space * in_space.H).array(np.eye(d+1, d), reshape=True)
    return CP_Map(J, env_space)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import shutil
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_cached
class CheckstyleIntegrationTest(PantsRunIntegrationTest):
  """Integration tests covering when checkstyle results are cached and when
  changes to configs, properties, tools, or the buildroot invalidate targets."""

  def _create_config_file(self, filepath, rules_xml=''):
    """Write a minimal checkstyle XML configuration containing ``rules_xml``."""
    with open(filepath, 'w') as f:
      f.write(dedent(
        """<?xml version="1.0"?>
        <!DOCTYPE module PUBLIC
          "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
          "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
        <module name="Checker">
          {rules_xml}
        </module>""".format(rules_xml=rules_xml)))

  @ensure_cached(expected_num_artifacts=2)
  def test_config_invalidates_targets(self, cache_args):
    """Different config contents under the same file name must invalidate targets."""
    with self.temporary_workdir() as workdir:
      with temporary_dir(root_dir=get_buildroot()) as tmp:
        configs = [
          dedent("""
            <module name="TreeWalker">
              <property name="tabWidth" value="2"/>
            </module>"""),
          dedent("""
            <module name="TreeWalker">
              <module name="LineLength">
                <property name="max" value="100"/>
              </module>
            </module>""")
        ]
        for config in configs:
          # Ensure that even though the config files have the same name, their
          # contents will invalidate the targets.
          config_file = os.path.join(tmp, 'config.xml')
          self._create_config_file(config_file, config)
          args = [
            'clean-all',
            'lint.checkstyle',
            cache_args,
            'examples/src/java/org/pantsbuild/example/hello/simple',
            '--lint-checkstyle-configuration={}'.format(config_file)
          ]
          pants_run = self.run_pants_with_workdir(args, workdir)
          self.assert_success(pants_run)

  @ensure_cached(expected_num_artifacts=2)
  def test_config_name_invalidates_targets(self, cache_args):
    """Different config file names with identical contents must invalidate targets."""
    with self.temporary_workdir() as workdir:
      with temporary_dir(root_dir=get_buildroot()) as tmp:
        config_names = ['one.xml', 'two.xml']
        config = dedent("""
          <module name="TreeWalker">
            <property name="tabWidth" value="2"/>
          </module>""")
        for config_name in config_names:
          # Ensure that even though the config files have the same contents,
          # their differing names will invalidate the targets.
          config_file = os.path.join(tmp, config_name)
          self._create_config_file(config_file, config)
          args = [
            'lint.checkstyle',
            cache_args,
            'examples/src/java/org/pantsbuild/example/hello/simple',
            '--lint-checkstyle-configuration={}'.format(config_file)
          ]
          pants_run = self.run_pants_with_workdir(args, workdir)
          self.assert_success(pants_run)

  @contextmanager
  def _temporary_buildroot(self, files_to_copy, current_root=None):
    """Copy selected files into a temporary directory and chdir into it, so that
    a test can run pants from a buildroot at a different filesystem path."""
    if current_root is None:
      current_root = get_buildroot()
    files_to_copy = set(files_to_copy)
    files_to_copy.update(f for f in os.listdir(current_root)
                         if f.endswith('.ini') or f.startswith('BUILD'))
    files_to_copy.update((
      'pants',
      '3rdparty',
      'build-support',
      'contrib',
      'pants-plugins',
      'src',
    ))
    with temporary_dir() as temp_root:
      temp_root = os.path.normpath(temp_root)
      for path in files_to_copy:
        src = os.path.join(current_root, path)
        dst = os.path.join(temp_root, path)
        # Bugfix: test the absolute source path, not the bare relative `path`,
        # which was only correct when the cwd happened to be current_root.
        if os.path.isdir(src):
          shutil.copytree(src, dst)
        else:
          shutil.copyfile(src, dst)
      current = os.getcwd()
      try:
        os.chdir(temp_root)
        temp_root = os.getcwd()
        yield temp_root
      finally:
        os.chdir(current)

  def _temporary_buildroots(self, files_to_copy=None, current_root=None, iterations=2):
    """Yield ``iterations`` fresh temporary buildroots, one at a time."""
    while iterations:
      with self._temporary_buildroot(files_to_copy, current_root) as root:
        yield root
      iterations -= 1

  @ensure_cached(expected_num_artifacts=1)
  def test_config_buildroot_does_not_invalidate_targets(self, cache_args):
    """Moving the buildroot (config path changes, contents do not) must NOT
    invalidate targets: only one cache artifact is expected."""
    previous_names = set()
    for buildroot in self._temporary_buildroots(['examples']):
      with self.temporary_workdir() as workdir:
        tmp = os.path.join(buildroot, 'tmp')
        os.mkdir(tmp)
        config = dedent("""
          <module name="TreeWalker">
            <property name="tabWidth" value="2"/>
          </module>""")
        # Ensure that even though the config files have the same name, their
        # contents will invalidate the targets.
        config_file = os.path.join(tmp, 'one.xml')
        self.assertNotIn(config_file, previous_names)
        previous_names.add(config_file)
        self._create_config_file(config_file, config)
        args = [
          'lint.checkstyle',
          cache_args,
          'examples/src/java/org/pantsbuild/example/hello/simple',
          '--lint-checkstyle-configuration={}'.format(config_file),
        ]
        pants_run = self.run_pants_with_workdir(args, workdir)
        self.assert_success(pants_run)

  @ensure_cached(expected_num_artifacts=1)
  def test_properties_file_names_does_not_invalidates_targets(self, cache_args):
    """Different file names for identical property file contents must NOT
    invalidate targets."""
    with self.temporary_workdir() as workdir:
      with temporary_dir(root_dir=get_buildroot()) as tmp:
        suppression_names = ['one-supress.xml', 'two-supress.xml']
        # Raw string: the XML content contains regex backslashes (\.java).
        suppression_data = dedent(r"""
          <?xml version="1.0"?>
          <!DOCTYPE suppressions PUBLIC
            "-//Puppy Crawl//DTD Suppressions 1.1//EN"
            "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
          <suppressions>
            <suppress files=".*/bad-files/.*\.java" checks=".*"/>
          </suppressions>
          """).strip()
        for suppression_name in suppression_names:
          suppression_file = os.path.join(tmp, suppression_name)
          self._create_config_file(suppression_file, suppression_data)
          properties = {
            'checkstyle.suppression.files': suppression_file,
          }
          args = [
            'lint.checkstyle',
            cache_args,
            'examples/src/java/org/pantsbuild/example/hello/simple',
            "--lint-checkstyle-properties={}".format(json.dumps(properties)),
          ]
          pants_run = self.run_pants_with_workdir(args, workdir)
          self.assert_success(pants_run)

  @ensure_cached(expected_num_artifacts=2)
  def test_properties_file_contents_invalidates_targets(self, cache_args):
    """Different property file contents under the same name must invalidate targets."""
    with self.temporary_workdir() as workdir:
      with temporary_dir(root_dir=get_buildroot()) as tmp:
        # Raw strings: the XML content contains regex backslashes (\.java).
        suppression_files = [
          dedent(r"""
            <?xml version="1.0"?>
            <!DOCTYPE suppressions PUBLIC
              "-//Puppy Crawl//DTD Suppressions 1.1//EN"
              "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
            <suppressions>
              <suppress files=".*/bad-files/.*\.java" checks=".*"/>
            </suppressions>
            """).strip(),
          dedent(r"""
            <?xml version="1.0"?>
            <!DOCTYPE suppressions PUBLIC
              "-//Puppy Crawl//DTD Suppressions 1.1//EN"
              "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
            <suppressions>
              <suppress files=".*/bad-files/.*\.java" checks=".*"/>
              <suppress files=".*/really-bad-files/.*\.java" checks=".*"/>
            </suppressions>
            """).strip(),
        ]
        for suppressions in suppression_files:
          suppression_file = os.path.join(tmp, 'suppressions.xml')
          self._create_config_file(suppression_file, suppressions)
          properties = {
            'checkstyle.suppression.files': suppression_file,
          }
          args = [
            'lint.checkstyle',
            cache_args,
            'examples/src/java/org/pantsbuild/example/hello/simple',
            "--lint-checkstyle-properties={}".format(json.dumps(properties)),
          ]
          pants_run = self.run_pants_with_workdir(args, workdir)
          self.assert_success(pants_run)

  @ensure_cached(expected_num_artifacts=2)
  def test_properties_nonfile_values_invalidates_targets(self, cache_args):
    """Different non-file property values must invalidate targets."""
    with self.temporary_workdir() as workdir:
      with temporary_dir(root_dir=get_buildroot()):
        values = ['this-is-not-a-file', '37']
        for value in values:
          properties = {
            'my.value': value,
          }
          args = [
            'lint.checkstyle',
            cache_args,
            'examples/src/java/org/pantsbuild/example/hello/simple',
            "--lint-checkstyle-properties={}".format(json.dumps(properties)),
          ]
          pants_run = self.run_pants_with_workdir(args, workdir)
          self.assert_success(pants_run)

  @ensure_cached(expected_num_artifacts=2)
  def test_jvm_tool_changes_invalidate_targets(self, cache_args):
    """Changing the checkstyle tool jar must invalidate targets."""
    with self.temporary_workdir() as workdir:
      # Ensure that only the second use of the default checkstyle will not invalidate anything.
      for checkstyle_jar in (None, 'testprojects/3rdparty/checkstyle', None):
        args = [
          'lint.checkstyle',
          cache_args,
          '--checkstyle={}'.format(checkstyle_jar) if checkstyle_jar else '',
          'examples/src/java/org/pantsbuild/example/hello/simple'
        ]
        pants_run = self.run_pants_with_workdir(args, workdir)
        print(pants_run.stdout_data)
        self.assert_success(pants_run)
| |
# -*- coding: utf-8 -*-
import random
import uuid
from unittest import TestCase
from faker import Faker
from rwslib.builders.admindata import Location
from rwslib.builders.constants import QueryStatusType
from rwslib.builders.clinicaldata import ClinicalData, FormData, ItemData, ItemGroupData, MdsolQuery, StudyEventData, \
SubjectData
from rwslib.tests.common import obj_to_doc
import datetime
import unittest
# Shared test fixtures: a Faker instance for generating realistic dates, and
# the Yes/No string pair Rave uses for boolean-valued attributes.
fake = Faker()
YesNoRave = ('Yes', 'No')
class TestMdsolQuery(unittest.TestCase):
    """Test extension MdsolQuery"""

    def get_tested(self):
        """Build the MdsolQuery instance shared by the tests below."""
        return MdsolQuery(status=QueryStatusType.Open, value="Data missing", query_repeat_key=123,
                          recipient="Site from System", requires_response=True)

    def test_basic(self):
        """Constructor arguments are exposed as attributes."""
        query = self.get_tested()
        self.assertEqual(QueryStatusType.Open, query.status)
        self.assertEqual("Data missing", query.value)
        self.assertEqual(123, query.query_repeat_key)
        self.assertEqual("Site from System", query.recipient)
        self.assertEqual(True, query.requires_response)

    def test_builder(self):
        """Attributes serialize to the expected XML attributes."""
        query = self.get_tested()
        query.response = "Done"
        doc = obj_to_doc(query)
        self.assertEqual("mdsol:Query", doc.tag)
        expected = {
            'RequiresResponse': "Yes",
            'Recipient': "Site from System",
            'QueryRepeatKey': "123",
            'Value': "Data missing",
            'Response': "Done",
        }
        for (attr, value) in expected.items():
            self.assertEqual(value, doc.attrib[attr])

    def test_invalid_status_value(self):
        """Status must come from QueryStatusType"""
        with self.assertRaises(AttributeError):
            MdsolQuery(status='A test')
class TestMODMClinicalData(TestCase):
    """MODM extension attributes on ClinicalData elements."""

    def _fresh(self):
        """Build the ClinicalData element used by each test."""
        return ClinicalData("Mediflex", "Prod", metadata_version_oid="1012")

    def test_add_last_update_time(self):
        """We add a LastUpdateTime"""
        clindata = self._fresh()
        stamp = datetime.datetime.utcnow()
        clindata.last_update_time = stamp
        doc = obj_to_doc(clindata)
        self.assertEqual(stamp.isoformat(), doc.get('mdsol:LastUpdateTime'))

    def test_add_audit_subcategory(self):
        """We add an AuditSubCategoryName"""
        clindata = self._fresh()
        clindata.add_attribute('AuditSubCategoryName', "EnteredWithChangeCode")
        doc = obj_to_doc(clindata)
        self.assertEqual("EnteredWithChangeCode", doc.get('mdsol:AuditSubCategoryName'))

    def test_last_update_time_naiive(self):
        """We don't see a LastUpdateTime for naive elements"""
        doc = obj_to_doc(self._fresh())
        self.assertIsNone(doc.get('mdsol:LastUpdateTime'))

    def test_modm_attributes(self):
        """Each modm attribute is settable"""
        attributes = ["ExternalStudyID", "StudyUUID", "AuditSubCategoryName",
                      "StudyName", "ClientDivisionUUID", "ClientDivisionSchemeUUID",
                      "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate",
                      "IsSDVRequired", "IsSDVComplete"]
        for attribute in attributes:
            data = self._fresh()
            # Pick a value of the appropriate type for each attribute kind.
            if "UUID" in attribute:
                data.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                       after_now=False,
                                                                       tzinfo=None))
            elif attribute.startswith('Is'):
                data.add_attribute(attribute, random.choice(YesNoRave))
            else:
                data.add_attribute(attribute, "Blargle")
            doc = obj_to_doc(data)
            self.assertIsNotNone(doc.get("mdsol:{}".format(attribute)))
class TestMODMSubjectData(TestCase):
    """MODM extension attributes and milestones on SubjectData elements."""

    def test_add_last_update_time(self):
        """We add a LastUpdateTime"""
        obj = SubjectData("Subject 1", "Site 1")
        now = datetime.datetime.utcnow()
        obj.last_update_time = now
        tested = obj_to_doc(obj)
        self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))

    def test_last_update_time_naiive(self):
        """We don't see a LastUpdateTime for naiive elements"""
        obj = SubjectData("Subject 1", "Site 1")
        tested = obj_to_doc(obj)
        self.assertIsNone(tested.get('mdsol:LastUpdateTime'))

    def test_add_milestone(self):
        """We add a Milestone"""
        obj = SubjectData("Subject 1", "Site 1")
        obj.add_milestone("Randomised")
        tested = obj_to_doc(obj)
        # The milestone serializes as an Annotation child at index 1
        # (presumably after a SiteRef child -- NOTE(review): ordering inferred
        # from this assertion; confirm against the rwslib serializer).
        self.assertEqual('Annotation', list(tested)[1].tag)
        self.assertEqual('Randomised', list(list(list(tested)[1])[0])[0].text)

    def test_modm_attributes(self):
        """Each modm attribute is settable"""
        for attribute in ["SubjectName", "Status",
                          "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate",
                          "IsSDVRequired", "IsSDVComplete", "SubjectUUID"]:
            data = SubjectData("Subject 1", "Site 1")
            # Pick a value of the appropriate type for each attribute kind.
            if "UUID" in attribute:
                data.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                       after_now=False,
                                                                       tzinfo=None))
            elif attribute.startswith('Is'):
                data.add_attribute(attribute, random.choice(YesNoRave))
            else:
                data.add_attribute(attribute, "Blargle")
            tested = obj_to_doc(data)
            self.assertIsNotNone(tested.get("mdsol:{}".format(attribute)))

    def test_invalid_modm_attributes(self):
        """Each invalid modm attribute raises an exception"""
        for attribute in ["StudyUUID"]:
            obj = SubjectData("Subject 1", "Site 1")
            with self.assertRaises(ValueError) as exc:
                if "UUID" in attribute:
                    obj.add_attribute(attribute, uuid.uuid4())
                elif "Date" in attribute:
                    obj.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                          after_now=False,
                                                                          tzinfo=None))
                else:
                    obj.add_attribute(attribute, "Blargle")
            self.assertEqual("Can't add {} to SubjectData".format(attribute), str(exc.exception))
class TestMODMStudyEventData(TestCase):
    """MODM extension attributes and milestones on StudyEventData elements."""

    def test_add_last_update_time(self):
        """We add a LastUpdateTime"""
        obj = StudyEventData("VISIT1")
        now = datetime.datetime.utcnow()
        obj.last_update_time = now
        tested = obj_to_doc(obj)
        self.assertEqual("StudyEventData", tested.tag)
        self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))

    def test_last_update_time_naiive(self):
        """We don't see a LastUpdateTime for naiive elements"""
        obj = StudyEventData("VISIT1")
        tested = obj_to_doc(obj)
        self.assertEqual("StudyEventData", tested.tag)
        self.assertIsNone(tested.get('mdsol:LastUpdateTime'))

    def test_add_milestone(self):
        """We add a single milestone"""
        obj = StudyEventData("VISIT1")
        obj.add_milestone("Informed Consent")
        tested = obj_to_doc(obj)
        # The milestone serializes as the first Annotation child.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)

    def test_add_milestones(self):
        """We add multiple milestones"""
        obj = StudyEventData("VISIT1")
        obj.add_milestone("Informed Consent")
        obj.add_milestone("Study Start")
        tested = obj_to_doc(obj)
        # Both milestones share a single Annotation element, one child each.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)
        self.assertEqual('Study Start', list(list(annotation)[1])[0].text)

    def test_modm_attributes(self):
        """Each modm attribute is settable"""
        for attribute in ["StartWindowDate", "EndWindowDate", "StudyEventUUID",
                          "InstanceName", "VisitTargetDate", "InstanceId",
                          "InstanceOverDue", "InstanceStartWindow", "InstanceEndWindow",
                          "InstanceClose", "InstanceAccess", "StudyEventDate",
                          "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate",
                          "VisitFirstDataEntryDate", "IsSDVComplete", "IsSDVRequired"]:
            data = StudyEventData("VISIT1")
            # Pick a value of the appropriate type for each attribute kind.
            if "UUID" in attribute:
                data.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                       after_now=False,
                                                                       tzinfo=None))
            elif attribute.startswith('Is'):
                data.add_attribute(attribute, random.choice(YesNoRave))
            else:
                data.add_attribute(attribute, "Blargle")
            tested = obj_to_doc(data)
            self.assertIsNotNone(tested.get("mdsol:{}".format(attribute)))

    def test_invalid_modm_attributes(self):
        """Each invalid modm attribute raises an exception"""
        for attribute in ["StudyUUID"]:
            obj = StudyEventData("VISIT1")
            with self.assertRaises(ValueError) as exc:
                if "UUID" in attribute:
                    obj.add_attribute(attribute, uuid.uuid4())
                elif "Date" in attribute:
                    obj.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                          after_now=False,
                                                                          tzinfo=None))
                else:
                    obj.add_attribute(attribute, "Blargle")
            self.assertEqual("Can't add {} to StudyEventData".format(attribute), str(exc.exception))
class TestMODMFormData(TestCase):
    """MODM extension attributes and milestones on FormData elements."""

    def test_add_last_update_time(self):
        """We add a LastUpdateTime"""
        obj = FormData(formoid="DM")
        now = datetime.datetime.utcnow()
        obj.last_update_time = now
        tested = obj_to_doc(obj)
        self.assertEqual("FormData", tested.tag)
        self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))

    def test_last_update_time_naiive(self):
        """We don't see a LastUpdateTime for naiive elements"""
        obj = FormData(formoid="DM")
        tested = obj_to_doc(obj)
        self.assertEqual("FormData", tested.tag)
        self.assertIsNone(tested.get('mdsol:LastUpdateTime'))

    def test_add_milestone(self):
        """We add a single milestone"""
        obj = FormData(formoid="DM")
        obj.add_milestone("Informed Consent")
        tested = obj_to_doc(obj)
        # The milestone serializes as the first Annotation child.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)

    def test_add_milestones(self):
        """We add multiple milestones"""
        obj = FormData(formoid="DM")
        obj.add_milestone("Informed Consent")
        obj.add_milestone("Study Start")
        tested = obj_to_doc(obj)
        # Both milestones share a single Annotation element, one child each.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)
        self.assertEqual('Study Start', list(list(annotation)[1])[0].text)

    def test_modm_attributes(self):
        """Each modm attribute is settable"""
        for attribute in ["FormUUID", "DataPageName", "DataPageID",
                          "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired",
                          "IsSDVComplete"]:
            data = FormData(formoid="DM")
            # Pick a value of the appropriate type for each attribute kind.
            if "UUID" in attribute:
                data.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                       after_now=False,
                                                                       tzinfo=None))
            elif attribute.startswith('Is'):
                data.add_attribute(attribute, random.choice(YesNoRave))
            else:
                data.add_attribute(attribute, "Blargle")
            tested = obj_to_doc(data)
            self.assertIsNotNone(tested.get("mdsol:{}".format(attribute)))

    def test_invalid_modm_attributes(self):
        """Each invalid modm attribute raises an exception"""
        for attribute in ["StudyUUID"]:
            obj = FormData(formoid="DM")
            with self.assertRaises(ValueError) as exc:
                if "UUID" in attribute:
                    obj.add_attribute(attribute, uuid.uuid4())
                elif "Date" in attribute:
                    obj.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                          after_now=False,
                                                                          tzinfo=None))
                else:
                    obj.add_attribute(attribute, "Blargle")
            self.assertEqual("Can't add {} to FormData".format(attribute), str(exc.exception))
class TestMODMItemGroupData(TestCase):
    """MODM extension attributes and milestones on ItemGroupData elements."""

    def test_add_last_update_time(self):
        """We add a LastUpdateTime"""
        obj = ItemGroupData(itemgroupoid="DM")
        now = datetime.datetime.utcnow()
        obj.last_update_time = now
        tested = obj_to_doc(obj)
        self.assertEqual("ItemGroupData", tested.tag)
        self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))

    def test_last_update_time_naiive(self):
        """We don't see a LastUpdateTime for naiive elements"""
        obj = ItemGroupData(itemgroupoid="DM")
        tested = obj_to_doc(obj)
        self.assertEqual("ItemGroupData", tested.tag)
        self.assertIsNone(tested.get('mdsol:LastUpdateTime'))

    def test_add_milestone(self):
        """We add a single milestone"""
        obj = ItemGroupData(itemgroupoid="DM")
        obj.add_milestone("Informed Consent")
        tested = obj_to_doc(obj)
        # The milestone serializes as the first Annotation child.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)

    def test_add_milestones(self):
        """We add multiple milestones"""
        obj = ItemGroupData(itemgroupoid="DM")
        obj.add_milestone("Informed Consent")
        obj.add_milestone("Study Start")
        tested = obj_to_doc(obj)
        # Both milestones share a single Annotation element, one child each.
        self.assertEqual('Annotation', list(tested)[0].tag)
        annotation = list(tested)[0]
        self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)
        self.assertEqual('Study Start', list(list(annotation)[1])[0].text)

    def test_modm_attributes(self):
        """Each modm attribute is settable"""
        for attribute in ["ItemGroupUUID", "RecordID",
                          "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate",
                          "IsSDVRequired", "IsSDVComplete"]:
            data = ItemGroupData(itemgroupoid="DM")
            # Pick a value of the appropriate type for each attribute kind.
            if "UUID" in attribute:
                data.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                       after_now=False,
                                                                       tzinfo=None))
            elif attribute.startswith('Is'):
                data.add_attribute(attribute, random.choice(YesNoRave))
            else:
                data.add_attribute(attribute, "Blargle")
            tested = obj_to_doc(data)
            self.assertIsNotNone(tested.get("mdsol:{}".format(attribute)))

    def test_invalid_modm_attributes(self):
        """Each invalid modm attribute raises an exception"""
        for attribute in ["StudyUUID"]:
            obj = ItemGroupData(itemgroupoid="DM")
            with self.assertRaises(ValueError) as exc:
                if "UUID" in attribute:
                    obj.add_attribute(attribute, uuid.uuid4())
                elif "Date" in attribute:
                    obj.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                          after_now=False,
                                                                          tzinfo=None))
                else:
                    obj.add_attribute(attribute, "Blargle")
            self.assertEqual("Can't add {} to ItemGroupData".format(attribute), str(exc.exception))
class TestMODMItemData(TestCase):
def test_add_last_update_time(self):
"""We add a LastUpdateTime"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
now = datetime.datetime.utcnow()
obj.last_update_time = now
tested = obj_to_doc(obj)
self.assertEqual("ItemData", tested.tag)
self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))
def test_add_last_update_time_with_invalid_time(self):
"""We add a LastUpdateTime with a nonsense value"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
now = "2017-04-21"
with self.assertRaises(ValueError) as exc:
obj.last_update_time = now
def test_last_update_time_naiive(self):
"""We don't see a LastUpdateTime for naiive elements"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
tested = obj_to_doc(obj)
self.assertEqual("ItemData", tested.tag)
self.assertIsNone(tested.get('mdsol:LastUpdateTime'))
def test_last_update_time_set(self):
"""We don't see a LastUpdateTime for naiive elements"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
obj.set_update_time()
tested = obj_to_doc(obj)
self.assertEqual("ItemData", tested.tag)
self.assertIsNotNone(tested.get('mdsol:LastUpdateTime'))
def test_add_milestone(self):
"""We add a single milestone"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
obj.add_milestone("Informed Consent")
tested = obj_to_doc(obj)
self.assertEqual('Annotation', list(tested)[0].tag)
annotation = list(tested)[0]
self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)
def test_add_milestones(self):
"""We add multiple milestones"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
obj.add_milestone("Informed Consent")
obj.add_milestone("Study Start")
tested = obj_to_doc(obj)
self.assertEqual('Annotation', list(tested)[0].tag)
annotation = list(tested)[0]
self.assertEqual('Informed Consent', list(list(annotation)[0])[0].text)
self.assertEqual('Study Start', list(list(annotation)[1])[0].text)
def test_add_item_uuid(self):
"""We add a mdsol:ItemUUID"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
now = datetime.datetime.utcnow()
obj.last_update_time = now
obj.add_attribute("ItemUUID", "85D4F9F0-9F49-42F3-A8E7-413DE85CC55E")
tested = obj_to_doc(obj)
self.assertEqual("ItemData", tested.tag)
self.assertEqual(now.isoformat(), tested.get('mdsol:LastUpdateTime'))
self.assertEqual("85D4F9F0-9F49-42F3-A8E7-413DE85CC55E", tested.get('mdsol:ItemUUID'))
def test_gate_modm_attributes(self):
"""We add a mdsol:Nonsense"""
obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
now = datetime.datetime.utcnow()
obj.last_update_time = now
with self.assertRaises(ValueError) as exc:
obj.add_attribute("Nonsense", "85D4F9F0-9F49-42F3-A8E7-413DE85CC55E")
self.assertEqual("Can't add Nonsense to ItemData", str(exc.exception))
def test_gate_modm_milestones_global(self):
"""We add a mdsol:Nonsense"""
igp = ItemGroupData("LOG_LINE")
brth = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
brth.add_milestone("Birth Date")
igp << brth
ifc = ItemData(itemoid="DSSTDAT_IFC", value="12 DEC 1975")
ifc.add_milestone("Informed Consent")
igp << ifc
tested = obj_to_doc(igp)
self.assertEqual('ItemGroupData', tested.tag)
self.assertEqual('ItemData', list(tested)[0].tag)
idata_zero = list(tested)[0]
self.assertEqual('Annotation', list(idata_zero)[0].tag)
anno = list(idata_zero)[0]
self.assertEqual(1, len(list(anno)))
def test_modm_attributes(self):
"""Each modm attribute is settable"""
for attribute in ["ItemUUID",
"SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate",
"IsSDVRequired", "IsSDVComplete"]:
data = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
if "UUID" in attribute:
data.add_attribute(attribute, uuid.uuid4())
elif "Date" in attribute:
data.add_attribute(attribute, fake.date_time_this_year(before_now=True,
after_now=False,
tzinfo=None))
elif attribute.startswith('Is'):
data.add_attribute(attribute, random.choice(YesNoRave))
else:
data.add_attribute(attribute, "Blargle")
tested = obj_to_doc(data)
self.assertIsNotNone(tested.get("mdsol:{}".format(attribute)))
def test_modm_bool_attribute(self):
"""A boolean gets mapped to Yes or No"""
data = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
data.add_attribute("IsSDVRequired", True)
data.add_attribute("IsSDVComplete", False)
tested = obj_to_doc(data)
self.assertEqual(tested.get("mdsol:IsSDVRequired"), "Yes")
self.assertEqual(tested.get("mdsol:IsSDVComplete"), "No")
def test_invalid_modm_attributes(self):
    """Each invalid modm attribute raises an exception"""
    for attribute in ["StudyUUID"]:
        obj = ItemData(itemoid="BRTHDAT", value="12 DEC 1975")
        with self.assertRaises(ValueError) as exc:
            # Pick a value of a plausible type; the add must fail anyway
            # because the attribute name is not allowed on ItemData.
            if "UUID" in attribute:
                obj.add_attribute(attribute, uuid.uuid4())
            elif "Date" in attribute:
                obj.add_attribute(attribute, fake.date_time_this_year(before_now=True,
                                                                      after_now=False,
                                                                      tzinfo=None))
            else:
                obj.add_attribute(attribute, "Blargle")
        self.assertEqual("Can't add {} to ItemData".format(attribute), str(exc.exception))
class TestMODMLocation(unittest.TestCase):
    """MODM date attributes on a Location element."""

    def test_add_a_date(self):
        """We add a date to the open and close"""
        obj = Location("site1", "Site 1")
        obj.add_attribute("SiteStartDate", datetime.date(2015, 12, 27))
        obj.add_attribute("SiteCloseDate", datetime.date(2016, 2, 27))
        tested = obj_to_doc(obj)
        self.assertEqual('Location', tested.tag)
        # Dates are serialised in ISO-8601 (YYYY-MM-DD) form.
        self.assertEqual("2015-12-27", tested.get('mdsol:SiteStartDate'))
        self.assertEqual("2016-02-27", tested.get('mdsol:SiteCloseDate'))
| |
"""
:class:`.YahooPlaceFinder` geocoder.
"""
from functools import partial
try:
from requests import get, Request
from requests_oauthlib import OAuth1
requests_missing = False
except ImportError:
requests_missing = True
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT
from geopy.exc import GeocoderParseError
from geopy.location import Location
from geopy.compat import string_compare, py3k
__all__ = ("YahooPlaceFinder", )
class YahooPlaceFinder(Geocoder):  # pylint: disable=W0223
    """
    Geocoder that utilizes the Yahoo! BOSS PlaceFinder API. Documentation at:
        https://developer.yahoo.com/boss/geo/docs/
    """

    def __init__(
            self,
            consumer_key,
            consumer_secret,
            timeout=DEFAULT_TIMEOUT,
            proxies=None,
            user_agent=None,
    ):  # pylint: disable=R0913
        """
        :param string consumer_key: Key provided by Yahoo.

        :param string consumer_secret: Secret corresponding to the key
            provided by Yahoo.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.

        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.

            .. versionadded:: 0.96

        :raises ImportError: when requests / requests_oauthlib could not be
            imported at module load time.
        """
        if requests_missing:
            raise ImportError(
                'requests-oauthlib is needed for YahooPlaceFinder.'
                ' Install with `pip install geopy -e ".[placefinder]"`.'
            )
        super(YahooPlaceFinder, self).__init__(
            timeout=timeout, proxies=proxies, user_agent=user_agent
        )
        # Coerce credentials to text (unicode on py2, str on py3) so the
        # OAuth1 signer always receives strings.
        self.consumer_key = (
            unicode(consumer_key)
            if not py3k
            else str(consumer_key)
        )
        self.consumer_secret = (
            unicode(consumer_secret)
            if not py3k
            else str(consumer_secret)
        )
        # Header-based OAuth1 signing, reused for every request.
        self.auth = OAuth1(
            client_key=self.consumer_key,
            client_secret=self.consumer_secret,
            signature_method="HMAC-SHA1",
            signature_type="AUTH_HEADER",
        )
        self.api = "https://yboss.yahooapis.com/geo/placefinder"

    @staticmethod
    def _filtered_results(results, min_quality, valid_country_codes):
        """
        Returns only the results that meet the minimum quality threshold
        and are located in expected countries.

        Quality must be strictly greater than ``min_quality`` to be kept.
        """
        if min_quality:
            results = [
                loc
                for loc in results
                if int(loc.raw["quality"]) > min_quality
            ]

        if valid_country_codes:
            results = [
                loc
                for loc in results
                if loc.raw["countrycode"] in valid_country_codes
            ]

        return results

    def _parse_response(self, content):
        """
        Returns the parsed result of a PlaceFinder API call as a list of
        :class:`geopy.location.Location`, or None when the payload holds no
        results.

        :raises GeocoderParseError: when expected keys are missing or the
            coordinates are not numeric.
        """
        try:
            placefinder = (
                content["bossresponse"]["placefinder"]
            )
            if not len(placefinder) or not len(placefinder.get("results", [])):
                return None
            results = [
                Location(
                    self.humanize(place),
                    (float(place["latitude"]), float(place["longitude"])),
                    raw=place
                )
                for place in placefinder["results"]
            ]
        except (KeyError, ValueError):
            raise GeocoderParseError("Error parsing PlaceFinder result")

        return results

    @staticmethod
    def humanize(location):
        """
        Returns a human readable representation of a raw PlaceFinder location:
        the non-empty address lines joined with commas.
        """
        return ", ".join([
            location[line]
            for line in ["line1", "line2", "line3", "line4"]
            if location[line]
        ])

    def geocode(
            self,
            query,
            exactly_one=True,
            timeout=None,
            min_quality=0,
            reverse=False,
            valid_country_codes=None,
            with_timezone=False,
            locale=None,
    ):  # pylint: disable=W0221,R0913
        """
        Geocode a location query.

        :param string query: The address or query you wish to geocode.

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int min_quality: Drop results whose PlaceFinder quality score
            is not strictly greater than this value.

        :param bool reverse: When True, request a reverse geocode
            (sets the ``gflags=R`` query parameter).

        :param valid_country_codes: Keep only results in these country codes.
        :type valid_country_codes: list or tuple

        :param bool with_timezone: Include the timezone in the response's
            `raw` dictionary (as `timezone`).

        :param string locale: Locale passed through to the service, if set.
        """
        params = {
            "location": query,
            "flags": "J",  # JSON
        }

        if reverse is True:
            params["gflags"] = "R"
        if exactly_one is True:
            params["count"] = "1"
        if with_timezone is True:
            params['flags'] += 'T'  # request timezone info in the response
        if locale:
            params["locale"] = locale

        response = self._call_geocoder(
            self.api,
            timeout=timeout,
            requester=get,
            params=params,
            auth=self.auth,
        )
        results = self._parse_response(response)
        if results is None:
            return None

        results = self._filtered_results(
            results,
            min_quality,
            valid_country_codes,
        )

        if exactly_one:
            # NOTE(review): raises IndexError when filtering removed every
            # result — confirm callers expect that rather than None.
            return results[0]
        else:
            return results

    def reverse(self, query, exactly_one=True, timeout=None, min_quality=0, locale=None):
        """
        Returns a reverse geocoded location using Yahoo's PlaceFinder API.

        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.
        :type query: :class:`geopy.point.Point`, list or tuple of (latitude,
            longitude), or string as "%(latitude)s, %(longitude)s"

        :param bool exactly_one: Return one result or a list of results, if
            available.
        """
        query = self._coerce_point_to_string(query)
        if isinstance(query, string_compare):
            query = query.replace(" ", "")  # oauth signature failure; todo
        return self.geocode(
            query,
            exactly_one=exactly_one,
            timeout=timeout,
            reverse=True,
            min_quality=min_quality,
            locale=locale
        )
| |
"""
Event parser and human readable log generator.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/logbook/
"""
import asyncio
import logging
from datetime import timedelta
from itertools import groupby
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, sun
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP, EVENT_STATE_CHANGED,
STATE_NOT_HOME, STATE_OFF, STATE_ON,
ATTR_HIDDEN, HTTP_BAD_REQUEST)
from homeassistant.core import State, split_entity_id, DOMAIN as HA_DOMAIN
from homeassistant.util.async import run_callback_threadsafe
DOMAIN = "logbook"
DEPENDENCIES = ['recorder', 'frontend']
_LOGGER = logging.getLogger(__name__)

# Configuration keys for include/exclude filtering of logbook entries.
CONF_EXCLUDE = 'exclude'
CONF_INCLUDE = 'include'
CONF_ENTITIES = 'entities'
CONF_DOMAINS = 'domains'

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        # Entities/domains whose events are dropped from the logbook.
        CONF_EXCLUDE: vol.Schema({
            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
            vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
                                                            [cv.string])
        }),
        # Entities/domains kept even when their domain is excluded.
        CONF_INCLUDE: vol.Schema({
            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
            vol.Optional(CONF_DOMAINS, default=[]): vol.All(cv.ensure_list,
                                                            [cv.string])
        })
    }),
}, extra=vol.ALLOW_EXTRA)

EVENT_LOGBOOK_ENTRY = 'logbook_entry'

# Size of the time buckets humanify() uses when merging related events.
GROUP_BY_MINUTES = 15

ATTR_NAME = 'name'
ATTR_MESSAGE = 'message'
ATTR_DOMAIN = 'domain'
ATTR_ENTITY_ID = 'entity_id'

# Payload schema for the logbook.log service.
LOG_MESSAGE_SCHEMA = vol.Schema({
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(ATTR_MESSAGE): cv.template,
    vol.Optional(ATTR_DOMAIN): cv.slug,
    vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
})
def log_entry(hass, name, message, domain=None, entity_id=None):
    """Add an entry to the logbook (thread-safe wrapper around
    async_log_entry; blocks until the callback has run in the loop)."""
    future = run_callback_threadsafe(
        hass.loop, async_log_entry, hass, name, message, domain, entity_id
    )
    future.result()
def async_log_entry(hass, name, message, domain=None, entity_id=None):
    """Add an entry to the logbook by firing EVENT_LOGBOOK_ENTRY.

    Must be called from within the event loop.
    """
    payload = {
        ATTR_NAME: name,
        ATTR_MESSAGE: message,
    }
    # Only attach the optional fields when they were supplied.
    if domain is not None:
        payload[ATTR_DOMAIN] = domain
    if entity_id is not None:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.bus.async_fire(EVENT_LOGBOOK_ENTRY, payload)
def setup(hass, config):
    """Set up the logbook: HTTP view, frontend panel and `log` service."""
    @callback
    def log_message(service):
        """Handle the logbook.log service call: render the message
        template, then add the entry to the logbook."""
        message = service.data[ATTR_MESSAGE]
        name = service.data[ATTR_NAME]
        domain = service.data.get(ATTR_DOMAIN)
        entity_id = service.data.get(ATTR_ENTITY_ID)

        # Attach hass so the template can resolve states, then render it.
        message.hass = hass
        message = message.async_render()
        async_log_entry(hass, name, message, domain, entity_id)

    hass.http.register_view(LogbookView(config))

    register_built_in_panel(hass, 'logbook', 'Logbook',
                            'mdi:format-list-bulleted-type')

    hass.services.register(DOMAIN, 'log', log_message,
                           schema=LOG_MESSAGE_SCHEMA)

    return True
class LogbookView(HomeAssistantView):
    """Handle logbook view requests."""

    url = '/api/logbook'
    name = 'api:logbook'
    extra_urls = ['/api/logbook/{datetime}']

    def __init__(self, config):
        """Initialize the logbook view."""
        self.config = config

    @asyncio.coroutine
    def get(self, request, datetime=None):
        """Retrieve logbook entries for one local day.

        `datetime` is an ISO date/time string selecting the day;
        defaults to the start of the current local day.
        """
        if datetime:
            datetime = dt_util.parse_datetime(datetime)

            if datetime is None:
                return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
        else:
            datetime = dt_util.start_of_local_day()

        start_day = dt_util.as_utc(datetime)
        end_day = start_day + timedelta(days=1)

        def get_results():
            """Query DB for events fired within [start_day, end_day]."""
            events = recorder.get_model('Events')
            query = recorder.query('Events').order_by(
                events.time_fired).filter(
                    (events.time_fired > start_day) &
                    (events.time_fired < end_day))
            events = recorder.execute(query)
            return _exclude_events(events, self.config)

        # Run the blocking DB query in the executor to keep the loop free.
        events = yield from request.app['hass'].loop.run_in_executor(
            None, get_results)

        return self.json(humanify(events))
class Entry(object):
    """A single human readable logbook entry."""

    # Attribute names in the order they appear in the JSON payload.
    _FIELDS = ('when', 'name', 'message', 'domain', 'entity_id')

    def __init__(self, when=None, name=None, message=None, domain=None,
                 entity_id=None):
        """Record the entry fields."""
        self.when = when
        self.name = name
        self.message = message
        self.domain = domain
        self.entity_id = entity_id

    def as_dict(self):
        """Return the entry as a JSON friendly dict."""
        return {field: getattr(self, field) for field in self._FIELDS}
def humanify(events):
    """Generator that converts a list of events into Entry objects.

    Will try to group events if possible:
    - if 2+ sensor updates in GROUP_BY_MINUTES, show last
    - if home assistant stop and start happen in same minute call it restarted
    """
    # Group events in batches of GROUP_BY_MINUTES
    for _, g_events in groupby(
            events,
            lambda event: event.time_fired.minute // GROUP_BY_MINUTES):

        events_batch = list(g_events)

        # Keep track of last sensor states
        last_sensor_event = {}

        # Group HA start/stop events
        # Maps minute of event to 1: stop, 2: stop + start
        start_stop_events = {}

        # First pass: collect per-batch bookkeeping used by the second pass.
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:
                entity_id = event.data.get('entity_id')

                if entity_id is None:
                    continue

                if entity_id.startswith('sensor.'):
                    last_sensor_event[entity_id] = event

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if event.time_fired.minute in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 1

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                # Only counts as a restart if a stop was seen this minute.
                if event.time_fired.minute not in start_stop_events:
                    continue

                start_stop_events[event.time_fired.minute] = 2

        # Second pass: yield the human readable entries.
        for event in events_batch:
            if event.event_type == EVENT_STATE_CHANGED:

                to_state = State.from_dict(event.data.get('new_state'))

                # If last_changed != last_updated only attributes have changed
                # we do not report on that yet. Also filter auto groups.
                if not to_state or \
                   to_state.last_changed != to_state.last_updated or \
                   to_state.domain == 'group' and \
                   to_state.attributes.get('auto', False):
                    continue

                domain = to_state.domain

                # Skip all but the last sensor state
                if domain == 'sensor' and \
                   event != last_sensor_event[to_state.entity_id]:
                    continue

                # Don't show continuous sensor value changes in the logbook
                if domain == 'sensor' and \
                   to_state.attributes.get('unit_of_measurement'):
                    continue

                yield Entry(
                    event.time_fired,
                    name=to_state.name,
                    message=_entry_message_from_state(domain, to_state),
                    domain=domain,
                    entity_id=to_state.entity_id)

            elif event.event_type == EVENT_HOMEASSISTANT_START:
                # Suppressed when part of a same-minute stop+start pair;
                # the STOP branch reports it as "restarted".
                if start_stop_events.get(event.time_fired.minute) == 2:
                    continue

                yield Entry(
                    event.time_fired, "Home Assistant", "started",
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_HOMEASSISTANT_STOP:
                if start_stop_events.get(event.time_fired.minute) == 2:
                    action = "restarted"
                else:
                    action = "stopped"

                yield Entry(
                    event.time_fired, "Home Assistant", action,
                    domain=HA_DOMAIN)

            elif event.event_type == EVENT_LOGBOOK_ENTRY:
                domain = event.data.get(ATTR_DOMAIN)
                entity_id = event.data.get(ATTR_ENTITY_ID)
                # Derive the domain from the entity_id when not supplied.
                if domain is None and entity_id is not None:
                    try:
                        domain = split_entity_id(str(entity_id))[0]
                    except IndexError:
                        pass

                yield Entry(
                    event.time_fired, event.data.get(ATTR_NAME),
                    event.data.get(ATTR_MESSAGE), domain,
                    entity_id)
def _exclude_events(events, config):
    """Filter events against the configured include/exclude
    entity and domain lists; returns the events to keep."""
    excluded_entities = []
    excluded_domains = []
    included_entities = []
    included_domains = []
    exclude = config[DOMAIN].get(CONF_EXCLUDE)
    if exclude:
        excluded_entities = exclude[CONF_ENTITIES]
        excluded_domains = exclude[CONF_DOMAINS]
    include = config[DOMAIN].get(CONF_INCLUDE)
    if include:
        included_entities = include[CONF_ENTITIES]
        included_domains = include[CONF_DOMAINS]

    filtered_events = []
    for event in events:
        domain, entity_id = None, None

        if event.event_type == EVENT_STATE_CHANGED:
            to_state = State.from_dict(event.data.get('new_state'))
            # Do not report on new entities
            if not to_state:
                continue

            # exclude entities which are customized hidden
            hidden = to_state.attributes.get(ATTR_HIDDEN, False)
            if hidden:
                continue

            domain = to_state.domain
            entity_id = to_state.entity_id

        elif event.event_type == EVENT_LOGBOOK_ENTRY:
            domain = event.data.get(ATTR_DOMAIN)
            entity_id = event.data.get(ATTR_ENTITY_ID)

        # Events with neither domain nor entity_id bypass all filtering.
        if domain or entity_id:
            # filter if only excluded is configured for this domain
            if excluded_domains and domain in excluded_domains and \
                    not included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if only included is configured for this domain
            elif not excluded_domains and included_domains and \
                    domain not in included_domains:
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities:
                    continue
            # filter if included and excluded is configured for this domain
            elif excluded_domains and included_domains and \
                    (domain not in included_domains or
                     domain in excluded_domains):
                if (included_entities and entity_id not in included_entities) \
                        or not included_entities or domain in excluded_domains:
                    continue
            # filter if only included is configured for this entity
            elif not excluded_domains and not included_domains and \
                    included_entities and entity_id not in included_entities:
                continue
            # check if logbook entry is excluded for this entity
            if entity_id in excluded_entities:
                continue
        filtered_events.append(event)
    return filtered_events
# pylint: disable=too-many-return-statements
def _entry_message_from_state(domain, state):
    """Convert a state to a message for the logbook."""
    # The caller passes domain in so entity_id need not be split again.
    if domain == 'device_tracker':
        if state.state == STATE_NOT_HOME:
            return 'is away'
        return 'is at {}'.format(state.state)

    if domain == 'sun':
        if state.state == sun.STATE_ABOVE_HORIZON:
            return 'has risen'
        return 'has set'

    # Generic on/off handling (covers groups as well).
    if state.state == STATE_ON:
        return "turned on"
    if state.state == STATE_OFF:
        return "turned off"

    return "changed to {}".format(state.state)
| |
from __future__ import absolute_import
from sentry.api.bases.project import ProjectPermission
from sentry.models import ApiKey
from sentry.testutils import TestCase
class ProjectPermissionBase(TestCase):
    """Fixture base: one organization/team/project plus a helper that
    evaluates ProjectPermission for an arbitrary request."""

    def setUp(self):
        self.org = self.create_organization()
        self.team = self.create_team(organization=self.org)
        self.project = self.create_project(organization=self.org)
        super(ProjectPermissionBase, self).setUp()

    def has_object_perm(self, method, obj, auth=None, user=None, is_superuser=None):
        # Both the view-level and the object-level permission check must pass.
        perm = ProjectPermission()
        request = self.make_request(user=user, auth=auth, method=method)
        if is_superuser:
            request.superuser.set_logged_in(request.user)
        return perm.has_permission(request, None) and perm.has_object_permission(request, None, obj)
class ProjectPermissionTest(ProjectPermissionBase):
    """Permission matrix with the default `allow_joinleave` flag enabled."""

    def test_regular_user(self):
        user = self.create_user(is_superuser=False)
        assert not self.has_object_perm("GET", self.project, user=user)

    def test_superuser(self):
        user = self.create_user(is_superuser=True)
        assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)

    def test_member_for_project_read(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert self.has_object_perm("GET", self.project, user=user)

    def test_member_for_project_write(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert not self.has_object_perm("POST", self.project, user=user)

    def test_member_for_project_delete(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert not self.has_object_perm("DELETE", self.project, user=user)

    def test_member_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert self.has_object_perm("GET", self.project, user=user)

    def test_api_key_with_org_access(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
        assert self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_without_org_access(self):
        # A key scoped to a *different* organization must not grant access.
        key = ApiKey.objects.create(
            organization=self.create_organization(), scope_list=["project:read"]
        )
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_without_access(self):
        key = ApiKey.objects.create(organization=self.org)
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_with_wrong_access(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["team:read"])
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_with_wrong_access_for_method(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
        assert not self.has_object_perm("PUT", self.project, auth=key)

    def test_admin_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="admin", teams=[team])
        # if `allow_joinleave` is True, admins can act on teams
        # they don't have access to
        assert self.has_object_perm("POST", self.project, user=user)

    def test_admin_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="admin", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)

    def test_manager_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="manager", teams=[team])
        # managers should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", self.project, user=user)

    def test_manager_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="manager", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)

    def test_owner_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="owner", teams=[team])
        # owners should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", self.project, user=user)

    def test_owner_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="owner", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)
class ProjectPermissionNoJoinLeaveTest(ProjectPermissionBase):
    """Same permission matrix as ProjectPermissionTest, but with the
    organization's `allow_joinleave` flag disabled."""

    def setUp(self):
        super(ProjectPermissionNoJoinLeaveTest, self).setUp()
        # Replace the fixtures from the base class with an org that has
        # allow_joinleave turned off.
        self.org = self.create_organization()
        self.org.flags.allow_joinleave = False
        self.org.save()
        self.team = self.create_team(organization=self.org)
        self.project = self.create_project(organization=self.org)

    def test_regular_user(self):
        user = self.create_user(is_superuser=False)
        assert not self.has_object_perm("GET", self.project, user=user)

    def test_superuser(self):
        user = self.create_user(is_superuser=True)
        assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)

    def test_member_for_project_read(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert self.has_object_perm("GET", self.project, user=user)

    def test_member_for_project_write(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert not self.has_object_perm("POST", self.project, user=user)

    def test_member_for_project_delete(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert not self.has_object_perm("DELETE", self.project, user=user)

    def test_member_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
        assert self.has_object_perm("GET", self.project, user=user)

    def test_api_key_with_org_access(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
        assert self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_without_org_access(self):
        # A key scoped to a *different* organization must not grant access.
        key = ApiKey.objects.create(
            organization=self.create_organization(), scope_list=["project:read"]
        )
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_without_access(self):
        key = ApiKey.objects.create(organization=self.org)
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_with_wrong_access(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["team:read"])
        assert not self.has_object_perm("GET", self.project, auth=key)

    def test_api_key_with_wrong_access_for_method(self):
        key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
        assert not self.has_object_perm("PUT", self.project, auth=key)

    def test_admin_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="admin", teams=[team])
        # if `allow_joinleave` is False, admins can't act on teams
        # they don't have access to
        assert not self.has_object_perm("POST", self.project, user=user)

    def test_admin_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="admin", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)

    def test_manager_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="manager", teams=[team])
        # managers should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", self.project, user=user)

    def test_manager_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="manager", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)

    def test_owner_without_team_access(self):
        team = self.create_team(organization=self.org)
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="owner", teams=[team])
        # owners should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", self.project, user=user)

    def test_owner_with_team_access(self):
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="owner", teams=[self.team])
        assert self.has_object_perm("POST", self.project, user=user)

    def test_manager_when_project_has_no_teams(self):
        project = self.create_project(organization=self.org, teams=[])
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="manager")
        # managers should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", project, user=user)

    def test_owner_when_project_has_no_teams(self):
        project = self.create_project(organization=self.org, teams=[])
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=self.org, role="owner")
        # owners should be able to act on teams/projects they
        # don't have access to
        assert self.has_object_perm("POST", project, user=user)
| |
import sys
from pysamimport import pysam
import re
import inspect
class BadRead(RuntimeError):
    """Base class for reads rejected by a filter.

    Each concrete subclass defines `header`, a short label that becomes
    the exception message.
    """

    def __init__(self):
        super(BadRead, self).__init__(self.header)
# One BadRead subclass per rejection reason; `header` is the short label
# reported for the failure (and collected into BadRead.allheaders below).
class IsBadRead(BadRead):
    header = "BadRead"


class IsDuplicate(BadRead):
    header = "Alignment:IsDuplicate"


class IsQCFail(BadRead):
    header = "Alignment:IsQCFail"


class IsSecondary(BadRead):
    header = "Alignment:IsSecondary"


class IsUnmapped(BadRead):
    header = "Alignment:IsUnmapped"


class TooShort(BadRead):
    header = "TooShort"


class TooManyHits(BadRead):
    header = "MultipleAlignments"


class BadCigar(BadRead):
    header = "BadCIGAROperation"


class IndelAtSNV(BadRead):
    header = "QueryIndelAtSNVLocus"


class GapAtSNV(BadRead):
    header = "GapInQueryAtSNVLocus"


class SNVPadding(BadRead):
    header = "SNVLocusAtEndOfRead"


class SNVEditPadding(BadRead):
    header = "SubstitutionNearSNVLocus"


class TooManyEdits(BadRead):
    header = "TooManyEdits"


class TooManyEditsOtherThanSNV(BadRead):
    header = "TooManyEditsOtherThanSNV"


class TooManyQueryGaps(BadRead):
    header = "TooManyQueryGaps"


class MappingQualityTooLow(BadRead):
    header = "MappingQualityTooLow"
# Collect the `header` labels of every BadRead subclass defined in this
# module (BadRead itself excluded).  A list comprehension is used instead
# of map() so the result is a real list even under Python 3, where map()
# returns a one-shot lazy iterator.
BadRead.allheaders = [member[1].header for member in inspect.getmembers(sys.modules[
    __name__], lambda member: inspect.isclass(member) and issubclass(member, BadRead) and member != BadRead)]

# CIGAR operation codes (SAM specification): 0 = alignment match (M),
# 3 = skipped region from the reference (N).
BAM_CMATCH = 0
BAM_CREF_SKIP = 3
class ReadFilter(object):
    """
    Alignment-level read filter.  test() raises the BadRead subclass naming
    the first failed check, otherwise returns the list of gapless aligned
    (BAM_CMATCH) segment lengths for the read.
    """

    # Warning messages printed at most once when an optional BAM tag is
    # absent from the alignments being screened.
    NONH = "Warning: Tag NH missing from alignments"
    NONM = "Warning: Tag NM missing from alignments"
    NOMD = "Warning: Tag MD missing from alignments"

    def __init__(self, maxsegments=1, minlength=45,
                 maxhits=1, maxedits=1, mapq=4,
                 warnings=set([NONM, NOMD])):
        """
        :param maxsegments: maximum number of gapless aligned segments
        :param minlength: minimum aligned query length (al.qlen)
        :param maxhits: maximum alignment loci (NH tag)
        :param maxedits: maximum edits (NM tag)
        :param mapq: minimum mapping quality
        :param warnings: set of warning messages still pending emission
        """
        # NOTE(review): the default `warnings` set is one shared object and
        # test() removes entries from it after printing, so each warning is
        # emitted at most once per *process* rather than per instance —
        # confirm this sharing is intentional before changing the default.
        self.minlength = minlength
        self.maxsegments = maxsegments
        self.maxhits = maxhits
        self.maxedits = maxedits
        self.warnings = warnings
        self.mapq = mapq
        if self.warnings is None:  # fix: identity test, was `== None`
            self.warnings = set()

    def test(self, al):
        """
        Vet pysam alignment `al`; raise a BadRead subclass on the first
        failing check, else return the BAM_CMATCH segment lengths.
        """
        if al.is_duplicate:
            raise IsDuplicate()
        if al.is_qcfail:
            raise IsQCFail()
        if al.is_secondary:
            raise IsSecondary()
        if al.is_unmapped:
            raise IsUnmapped()
        if al.qlen < self.minlength:
            raise TooShort()
        if al.mapq < self.mapq:
            raise MappingQualityTooLow()
        try:
            if int(al.opt('NH')) > self.maxhits:
                raise TooManyHits()
        except KeyError:
            # NH tag absent: warn once, then accept the read.
            if self.NONH in self.warnings:
                print >>sys.stderr, self.NONH + \
                    ".\n Cannot filter out reads that align to multiple loci."
                self.warnings.remove(self.NONH)
        # Only match (M) and reference-skip (N) CIGAR operations are allowed.
        if any(map(lambda t: t[0] not in (BAM_CMATCH, BAM_CREF_SKIP), al.cigar)):
            raise BadCigar()
        segments = [t[1] for t in al.cigar if t[0] == BAM_CMATCH]
        if len(segments) > self.maxsegments:
            raise TooManyQueryGaps()
        try:
            if int(al.opt('NM')) > self.maxedits:
                raise TooManyEdits()
        except KeyError:
            # NM tag absent: warn once, then accept the read.
            if self.NONM in self.warnings:
                print >>sys.stderr, self.NONM + \
                    ".\n Cannot filter out reads with too many substitutions."
                self.warnings.remove(self.NONM)
        return segments
class SNVPileupReadFilter(ReadFilter):
    """
    Pileup-read filter for SNV loci: extends ReadFilter with checks that the
    locus is not in an indel/gap, not too close to a read/segment end, and
    not too close to other substitutions.
    """

    def __init__(self, minpad=3, minsubstdist=3, maxedits=1, **kw):
        """
        :param minpad: minimum distance of the SNV locus from segment ends
        :param minsubstdist: minimum distance of other substitutions
        :param maxedits: maximum edits other than the SNV base itself
        """
        # The base filter is allowed one extra edit, because the SNV base
        # itself may be counted by the NM tag.
        kw['maxedits'] = (maxedits + 1)
        self.minpad = minpad
        self.minsubstdist = minsubstdist
        super(SNVPileupReadFilter, self).__init__(**kw)
        # Fix: this must be assigned *after* super().__init__(), which sets
        # self.maxedits to the relaxed maxedits + 1 value.  Previously the
        # assignment ran first and was silently overwritten, so the stricter
        # SNV-specific NM check in test() never used the caller's maxedits.
        self.maxedits = maxedits

    def findseg(self, pos, segments):
        """Map query position `pos` onto (segment index, offset in segment)."""
        i = 0
        while True:
            if (pos <= segments[i]):
                return i, pos
            pos -= segments[i]
            i += 1
        # Unreachable: the while-loop either returns or raises IndexError.
        return None

    def test(self, pileupread):
        """
        Vet `pileupread`; raise a BadRead subclass on the first failing
        check, else return (alignment, query position, read base, segments).
        """
        if pileupread.indel != 0:
            raise IndelAtSNV()
        if pileupread.is_del:
            raise GapAtSNV()
        al = pileupread.alignment
        # Base-class checks (flags, length, mapq, NH/NM, CIGAR shape).
        segments = super(SNVPileupReadFilter, self).test(al)
        qpos = pileupread.query_position
        seg, qpos = self.findseg(qpos, segments)
        if qpos < self.minpad or (segments[seg] - qpos) < self.minpad:
            raise SNVPadding()
        try:
            # Parse the MD tag into alternating run-lengths and edited bases.
            edits = re.split(r'(\d+)', al.opt('MD'))[1:-1]
            substs = dict()
            reference = None
            for i in range(0, len(edits) - 1, 2):
                pos = int(edits[i])
                substs[pos] = (edits[i + 1], al.seq[pileupread.query_position])
                if pos == pileupread.query_position:
                    reference = edits[i + 1]
                elif abs(pos - pileupread.query_position) < self.minsubstdist:
                    raise SNVEditPadding()
            try:
                # Reference reads get one extra edit allowance.
                if int(al.opt('NM')) > (self.maxedits + (0 if (reference) else 1)):
                    raise TooManyEditsOtherThanSNV()
            except KeyError:
                if self.NONM in self.warnings:
                    print >>sys.stderr, self.NONM + \
                        ".\n Cannot filter out reference reads with one too many\n substitutions."
                    self.warnings.remove(self.NONM)
        except KeyError:
            if self.NOMD in self.warnings:
                print >>sys.stderr, self.NOMD + \
                    ".\n Cannot filter out reads with edits too close to the SNV locus\n or reference reads with one too many substitutions."
                self.warnings.remove(self.NOMD)
        readbase = al.seq[pileupread.query_position]
        return al, pileupread.query_position, readbase, segments
class NoFilter:
    """Filter that accepts every pileup read unconditionally."""

    def __init__(self):
        pass

    def test(self, pileupread):
        """Return (alignment, query position, read base, 1); never raises."""
        alignment = pileupread.alignment
        position = pileupread.query_position
        return alignment, position, alignment.seq[position], 1
class BasicFilter:
    """Pileup-read filter applying only the cheap structural checks:
    no indel/gap at the locus and no bad alignment flags."""

    def __init__(self):
        pass

    def test(self, pileupread):
        """Return (alignment, query position, read base, 1), raising the
        matching BadRead subclass when a check fails."""
        if pileupread.indel != 0:
            raise IndelAtSNV()
        if pileupread.is_del:
            raise GapAtSNV()
        alignment = pileupread.alignment
        if alignment.is_duplicate:
            raise IsDuplicate()
        if alignment.is_qcfail:
            raise IsQCFail()
        if alignment.is_secondary:
            raise IsSecondary()
        if alignment.is_unmapped:
            raise IsUnmapped()
        position = pileupread.query_position
        return alignment, position, alignment.seq[position], 1
class AllFilter:
    """Filter that rejects every pileup read."""

    def __init__(self):
        pass

    def test(self, pileupread):
        """Always raise IsBadRead, rejecting the read."""
        raise IsBadRead()
| |
# -*- coding: utf-8 -*-
from opensextant.TaxCat import Taxon, get_taxnode, TaxCatalogBuilder, get_starting_id
import json
catalog = "WFB"  # WFB = World FactBook
# Minimum phrase lengths for a taggable taxon phrase (see evaluate_text).
min_len = 4
min_len_acronym = 3
def evaluate_text(txn, stop):
    """
    Consolidate evaluations of text if it is valid to tag or not.

    Sets txn.is_valid: False for stopwords, otherwise True when the phrase
    meets the minimum length for its kind (acronym vs. plain phrase).
    :param txn: Taxon
    :param stop: True when the phrase is a stopword/generic term
    :return:
    """
    if stop:
        txn.is_valid = False
        return
    threshold = min_len_acronym if txn.is_acronym else min_len
    txn.is_valid = len(txn.phrase) >= threshold
# Phrases too generic to tag, regardless of the caller's stopword list.
GENERIC_PHRASES = {"commonwealth", "north"}
class FactBookTaxon(Taxon):
    """
    Taxon derived from a World FactBook record (person or org).

    The primary phrase plus every distinct alias are collected as Taxon
    variants in ``self.variants`` for later export.
    """

    def __init__(self, rowdata, row_id=None, stopwords=()):
        """
        :param rowdata: dict with at least 'entity_type' and 'name'; may also
            carry 'country', 'official_title', 'personal_title', 'aliases'.
        :param row_id: optional taxon row ID
        :param stopwords: stopword collection (membership-tested with ``in``);
            default changed from a mutable ``[]`` to an immutable tuple.
        """
        Taxon.__init__(self)
        self._entity_type = rowdata['entity_type']
        self.id = row_id
        self.name = rowdata['name']
        self.is_valid = False
        # WFB custom metadata.
        self.variants = []
        self.variant_count = 1
        self.distinct = set()
        if not self.name or not self._entity_type:
            return
        self.phrase = self.name.lower().strip()
        if not self.phrase:
            return
        self.is_acronym = self.name.isupper()
        # If the entire value is a stop word we'll mark it as invalid.
        stop = self.phrase in stopwords or self.phrase in GENERIC_PHRASES
        self.distinct.add(self.phrase)
        # Taxon.name is now reassigned to a normalized taxon key, name = TYPE.NAME
        self.name = get_taxnode(self._entity_type, self.name.title())
        evaluate_text(self, stop)
        if not self.is_valid:
            print("Ignore generic term ", self.phrase)
        # Other metadata for Factbook: Country and Orgs associated.
        # Normalize country to country code?
        attrs = {
            "country": "co",
            "official_title": "official_title",
            "personal_title": "personal_title"
        }
        self.tags = []
        for src, dst in attrs.items():
            if src in rowdata:
                val = rowdata.get(src)
                fld = '{}+{}'.format(dst, val.lower())
                self.tags.append(fld)
        # The primary phrase itself is the first variant.
        self.variants.append(self)
        self.add_alias(rowdata.get("aliases"), stopwords=stopwords)

    def add_alias(self, aliases, stopwords=()):
        """
        Parse and add names from "Name1; Name2; ..."
        associating each alias with this Taxon.

        :param aliases: iterable of alias strings for a taxon
            (presumably a list — TODO confirm against caller data).
        :param stopwords: stopword set
        :return: None
        """
        if not aliases:
            return
        for A in aliases:
            _phrase = A.lower()
            if _phrase in self.distinct:
                continue
            # FIX: record the alias so later duplicates in the same list are
            # skipped — previously only the primary phrase was ever in distinct.
            self.distinct.add(_phrase)
            alias = Taxon()
            # Taxon ID is computed on export to Solr.
            # Taxon name:
            alias.name = self.name
            # This variant on the taxon:
            alias.phrase = _phrase
            alias.is_acronym = A.isupper()
            alias.tags = self.tags
            _is_stopwd = _phrase in stopwords or _phrase in GENERIC_PHRASES
            evaluate_text(alias, _is_stopwd)
            if not alias.is_valid:
                print("Ignore stop word variant '{}' on {}".format(alias.phrase, self.name))
            self.variants.append(alias)
            self.variant_count += 1
def ingest_wfb_leaders(dct, filepath, stopwords=()):
    """
    Add taxonomy in file to builder.
    TODO: merge multiple files into dct.

    :param dct: country keyed dictionary of entries; updated in place.
    :param filepath: path to a WFB leaders JSON file keyed by country code.
    :param stopwords: stopword set (default changed from mutable ``[]``
        to an immutable tuple).
    :return: None
    """
    with open(filepath, "r", encoding="UTF-8") as fh:
        countries = json.load(fh)
    for cc in countries:
        leaders = countries.get(cc)
        print("{} count {}".format(cc, len(leaders)))
        leaders_nodes = []
        for leader in leaders:
            # Annotate each raw record before turning it into a taxon.
            leader['entity_type'] = "person"
            leader['country'] = cc
            leader['name'] = leader['name'].title()
            node = FactBookTaxon(leader, stopwords=stopwords)
            # A taxon carries its aliases as additional variants.
            leaders_nodes.extend(node.variants)
        dct[cc] = leaders_nodes
    return
def ingest_wfb_orgs(dct, fpath, stopwords=()):
    """
    organize taxons as
       { taxon_id : [ taxon_variant, taxon_variant,... ] }

    TODO: resolve multiple files that may have duplicative taxons/updates.

    :param dct: dictionary updated in place with taxon-name keyed variant lists.
    :param fpath: path to a WFB orgs JSON file with top-level key
        'orgs_and_groups'.
    :param stopwords: stopword set (default changed from mutable ``[]``
        to an immutable tuple).
    :return: None
    """
    with open(fpath, "r", encoding="UTF-8") as fh:
        parent = json.load(fh)
    org_nodes = {}
    orgs = parent['orgs_and_groups']
    print("Orgs count {}".format(len(orgs)))
    for org in orgs:
        org['entity_type'] = "org"
        node = FactBookTaxon(org, stopwords=stopwords)
        # Key by the normalized taxon name; later files overwrite earlier ones.
        org_nodes[node.name] = node.variants
    dct.update(org_nodes)
    return
def main_loop(url):
    """
    Build and index the WFB taxonomy (leaders + orgs) into a TaxCat catalog.

    :param url: Solr server URL; None runs the builder in test mode
        (no indexing).
    :return: None
    """
    import glob
    from opensextant.utility import ConfigUtility

    # Plain string: the original used an f-string with no placeholders.
    print("""
    TaxCat Builder for Taxonomy: WorldFactBook
    """)
    util = ConfigUtility()
    all_stopwords = util.loadListFromFile("etc/taxcat/stopwords.txt")

    start_id = get_starting_id(catalog)
    builder = TaxCatalogBuilder(server=url, test=url is None)
    # FIX: use the ``url`` parameter instead of the script-level ``args``
    # global — same value when invoked from __main__, and it matches the
    # ``test=url is None`` condition above.
    if url is None:
        print("Running in Test mode only -- No indexing to Solr")

    # Major TODO: if we have multiple cached versions of WFB leaders, they need to be folded into a superset,
    # deconflicted by name/taxon and then indexed.
    files = glob.glob("etc/taxcat/data/wfb-leaders*json")
    master = {}
    for fpath in files:
        ingest_wfb_leaders(master, fpath, stopwords=all_stopwords)

    # Assign sequential taxon IDs starting after the catalog's last used ID.
    tid = start_id
    for cc in master:
        for taxon in master[cc]:
            taxon.id = tid
            builder.add(catalog, taxon)
            tid += 1

    files = glob.glob("etc/taxcat/data/wfb-orgs*json")
    master = {}
    #
    # Load Taxons as a flat dictionary of 'taxon1' = [ taxon1_a, taxon1_b,... ]
    # so each taxon1_* represents a variation on taxon1
    for fpath in files:
        ingest_wfb_orgs(master, fpath, stopwords=all_stopwords)
    for taxon_name in master:
        for taxon in master[taxon_name]:
            taxon.id = tid
            builder.add(catalog, taxon)
            tid += 1

    builder.save(flush=True)
    builder.optimize()
if __name__ == '__main__':
    import argparse

    # NOTE: ``args`` is intentionally left as a module-level name.
    parser = argparse.ArgumentParser()
    parser.add_argument('--solr')
    args = parser.parse_args()
    main_loop(args.solr)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.visualization_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import PIL.Image as Image
import six
from six.moves import range
import tensorflow as tf
from object_detection.core import standard_fields as fields
from object_detection.utils import visualization_utils
_TESTDATA_PATH = 'object_detection/test_images'
class VisualizationUtilsTest(tf.test.TestCase):
  """Unit tests for object_detection.utils.visualization_utils."""

  def test_get_prime_multiplier_for_color_randomness(self):
    """Checks _get_multiplier_for_color_randomness for several palette sizes."""
    # Show that default multipler is not 1 and does not divide the total number
    # of standard colors.
    multiplier = visualization_utils._get_multiplier_for_color_randomness()
    self.assertNotEqual(
        0, multiplier % len(visualization_utils.STANDARD_COLORS))
    self.assertNotEqual(1, multiplier)

    # Show that with 34 colors, the closest prime number to 34/10 that
    # satisfies the constraints is 5.
    visualization_utils.STANDARD_COLORS = [
        'color_{}'.format(str(i)) for i in range(34)
    ]
    multiplier = visualization_utils._get_multiplier_for_color_randomness()
    self.assertEqual(5, multiplier)

    # Show that with 110 colors, the closest prime number to 110/10 that
    # satisfies the constraints is 13 (since 11 equally divides 110).
    visualization_utils.STANDARD_COLORS = [
        'color_{}'.format(str(i)) for i in range(110)
    ]
    multiplier = visualization_utils._get_multiplier_for_color_randomness()
    self.assertEqual(13, multiplier)

  def create_colorful_test_image(self):
    """This function creates an image that can be used to test vis functions.

    It makes an image composed of four colored rectangles.

    Returns:
      colorful test numpy array image.
    """
    # Four 100x200 single-channel planes combined into colored tiles,
    # then tiled 2x2 into one 200x400x3 uint8 image.
    ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
    ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
    ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
    imr = np.concatenate((ch255, ch128, ch128), axis=2)
    img = np.concatenate((ch255, ch255, ch0), axis=2)
    imb = np.concatenate((ch255, ch0, ch255), axis=2)
    imw = np.concatenate((ch128, ch128, ch128), axis=2)
    imu = np.concatenate((imr, img), axis=1)
    imd = np.concatenate((imb, imw), axis=1)
    image = np.concatenate((imu, imd), axis=0)
    return image

  def create_test_image_with_five_channels(self):
    """Returns a constant 100x200 uint8 image with five channels."""
    return np.full([100, 200, 5], 255, dtype=np.uint8)

  def create_test_grayscale_image(self):
    """Returns a constant 100x200 uint8 image with a single channel."""
    return np.full([100, 200, 1], 255, dtype=np.uint8)

  def test_draw_bounding_box_on_image(self):
    """Checks drawing a box on a PIL image leaves its size unchanged."""
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    ymin = 0.25
    ymax = 0.75
    xmin = 0.4
    xmax = 0.6

    visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
                                                   xmax)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_bounding_box_on_image_array(self):
    """Checks the array variant preserves the image array's shape."""
    test_image = self.create_colorful_test_image()
    # NOTE(review): shape[0] is rows (height) and shape[1] is cols (width);
    # the names look swapped, but only shape preservation is asserted here.
    width_original = test_image.shape[0]
    height_original = test_image.shape[1]
    ymin = 0.25
    ymax = 0.75
    xmin = 0.4
    xmax = 0.6

    visualization_utils.draw_bounding_box_on_image_array(
        test_image, ymin, xmin, ymax, xmax)
    width_final = test_image.shape[0]
    height_final = test_image.shape[1]

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_bounding_boxes_on_image(self):
    """Checks drawing multiple boxes on a PIL image keeps the size."""
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    boxes = np.array([[0.25, 0.75, 0.4, 0.6],
                      [0.1, 0.1, 0.9, 0.9]])

    visualization_utils.draw_bounding_boxes_on_image(test_image, boxes)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_bounding_boxes_on_image_array(self):
    """Checks drawing multiple boxes on an array keeps its shape."""
    test_image = self.create_colorful_test_image()
    # NOTE(review): same rows/cols naming quirk as above; only shape
    # preservation is asserted.
    width_original = test_image.shape[0]
    height_original = test_image.shape[1]
    boxes = np.array([[0.25, 0.75, 0.4, 0.6],
                      [0.1, 0.1, 0.9, 0.9]])

    visualization_utils.draw_bounding_boxes_on_image_array(test_image, boxes)
    width_final = test_image.shape[0]
    height_final = test_image.shape[1]

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_bounding_boxes_on_image_tensors(self):
    """Tests that bounding box utility produces reasonable results."""
    category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}

    # Requires object_detection/test_images/image1.jpg on disk.
    fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
    image_np = np.array(Image.open(fname))
    images_np = np.stack((image_np, image_np), axis=0)
    original_image_shape = [[636, 512], [636, 512]]

    with tf.Graph().as_default():
      images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
      image_shape = tf.constant(original_image_shape, dtype=tf.int32)
      boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75], [0.5, 0.3, 0.6, 0.9]],
                           [[0.25, 0.25, 0.75, 0.75], [0.1, 0.3, 0.6, 1.0]]])
      classes = tf.constant([[1, 1], [1, 2]], dtype=tf.int64)
      scores = tf.constant([[0.8, 0.1], [0.6, 0.5]])
      images_with_boxes = (
          visualization_utils.draw_bounding_boxes_on_image_tensors(
              images_tensor,
              boxes,
              classes,
              scores,
              category_index,
              original_image_spatial_shape=image_shape,
              true_image_shape=image_shape,
              min_score_thresh=0.2))

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        # Write output images for visualization.
        images_with_boxes_np = sess.run(images_with_boxes)
        self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
        self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
        self.assertEqual(
            tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
        for i in range(images_with_boxes_np.shape[0]):
          img_name = 'image_' + str(i) + '.png'
          output_file = os.path.join(self.get_temp_dir(), img_name)
          logging.info('Writing output image %d to %s', i, output_file)
          image_pil = Image.fromarray(images_with_boxes_np[i, ...])
          image_pil.save(output_file)

  def test_draw_bounding_boxes_on_image_tensors_with_track_ids(self):
    """Tests that bounding box utility produces reasonable results."""
    category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}

    # Requires object_detection/test_images/image1.jpg on disk.
    fname = os.path.join(_TESTDATA_PATH, 'image1.jpg')
    image_np = np.array(Image.open(fname))
    images_np = np.stack((image_np, image_np), axis=0)
    original_image_shape = [[636, 512], [636, 512]]

    with tf.Graph().as_default():
      images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
      image_shape = tf.constant(original_image_shape, dtype=tf.int32)
      boxes = tf.constant([[[0.4, 0.25, 0.75, 0.75],
                            [0.5, 0.3, 0.7, 0.9],
                            [0.7, 0.5, 0.8, 0.9]],
                           [[0.41, 0.25, 0.75, 0.75],
                            [0.51, 0.3, 0.7, 0.9],
                            [0.75, 0.5, 0.8, 0.9]]])
      classes = tf.constant([[1, 1, 2], [1, 1, 2]], dtype=tf.int64)
      scores = tf.constant([[0.8, 0.5, 0.7], [0.6, 0.5, 0.8]])
      track_ids = tf.constant([[3, 9, 7], [3, 9, 144]], dtype=tf.int32)
      images_with_boxes = (
          visualization_utils.draw_bounding_boxes_on_image_tensors(
              images_tensor,
              boxes,
              classes,
              scores,
              category_index,
              original_image_spatial_shape=image_shape,
              true_image_shape=image_shape,
              track_ids=track_ids,
              min_score_thresh=0.2))

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        # Write output images for visualization.
        images_with_boxes_np = sess.run(images_with_boxes)
        self.assertEqual(images_np.shape[0], images_with_boxes_np.shape[0])
        self.assertEqual(images_np.shape[3], images_with_boxes_np.shape[3])
        self.assertEqual(
            tuple(original_image_shape[0]), images_with_boxes_np.shape[1:3])
        for i in range(images_with_boxes_np.shape[0]):
          img_name = 'image_with_track_ids_' + str(i) + '.png'
          output_file = os.path.join(self.get_temp_dir(), img_name)
          logging.info('Writing output image %d to %s', i, output_file)
          image_pil = Image.fromarray(images_with_boxes_np[i, ...])
          image_pil.save(output_file)

  def test_draw_bounding_boxes_on_image_tensors_with_additional_channels(self):
    """Tests the case where input image tensor has more than 3 channels."""
    category_index = {1: {'id': 1, 'name': 'dog'}}
    image_np = self.create_test_image_with_five_channels()
    images_np = np.stack((image_np, image_np), axis=0)

    with tf.Graph().as_default():
      images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
      # Zero boxes/classes/scores: only channel handling is under test.
      boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
      classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
      scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
      images_with_boxes = (
          visualization_utils.draw_bounding_boxes_on_image_tensors(
              images_tensor,
              boxes,
              classes,
              scores,
              category_index,
              min_score_thresh=0.2))

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        final_images_np = sess.run(images_with_boxes)
        # Output is reduced to 3 channels regardless of the 5-channel input.
        self.assertEqual((2, 100, 200, 3), final_images_np.shape)

  def test_draw_bounding_boxes_on_image_tensors_grayscale(self):
    """Tests the case where input image tensor has one channel."""
    category_index = {1: {'id': 1, 'name': 'dog'}}
    image_np = self.create_test_grayscale_image()
    images_np = np.stack((image_np, image_np), axis=0)

    with tf.Graph().as_default():
      images_tensor = tf.constant(value=images_np, dtype=tf.uint8)
      image_shape = tf.constant([[100, 200], [100, 200]], dtype=tf.int32)
      # Zero boxes/classes/scores: only channel handling is under test.
      boxes = tf.constant(0, dtype=tf.float32, shape=[2, 0, 4])
      classes = tf.constant(0, dtype=tf.int64, shape=[2, 0])
      scores = tf.constant(0, dtype=tf.float32, shape=[2, 0])
      images_with_boxes = (
          visualization_utils.draw_bounding_boxes_on_image_tensors(
              images_tensor,
              boxes,
              classes,
              scores,
              category_index,
              original_image_spatial_shape=image_shape,
              true_image_shape=image_shape,
              min_score_thresh=0.2))

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())

        final_images_np = sess.run(images_with_boxes)
        # Output is expanded to 3 channels from the single-channel input.
        self.assertEqual((2, 100, 200, 3), final_images_np.shape)

  def test_draw_keypoints_on_image(self):
    """Checks drawing keypoints on a PIL image keeps the size."""
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]

    visualization_utils.draw_keypoints_on_image(test_image, keypoints)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_keypoints_on_image_array(self):
    """Checks drawing keypoints on an array keeps its shape."""
    test_image = self.create_colorful_test_image()
    # NOTE(review): same rows/cols naming quirk as above; only shape
    # preservation is asserted.
    width_original = test_image.shape[0]
    height_original = test_image.shape[1]
    keypoints = [[0.25, 0.75], [0.4, 0.6], [0.1, 0.1], [0.9, 0.9]]

    visualization_utils.draw_keypoints_on_image_array(test_image, keypoints)
    width_final = test_image.shape[0]
    height_final = test_image.shape[1]

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)

  def test_draw_mask_on_image_array(self):
    """Checks the exact pixel result of alpha-blending a mask in blue."""
    test_image = np.asarray([[[0, 0, 0], [0, 0, 0]],
                             [[0, 0, 0], [0, 0, 0]]], dtype=np.uint8)
    mask = np.asarray([[0, 1],
                       [1, 1]], dtype=np.uint8)
    # Masked pixels get 50% blue (255 * 0.5 -> 127) blended in place.
    expected_result = np.asarray([[[0, 0, 0], [0, 0, 127]],
                                  [[0, 0, 127], [0, 0, 127]]], dtype=np.uint8)
    visualization_utils.draw_mask_on_image_array(test_image, mask,
                                                 color='Blue', alpha=.5)
    self.assertAllEqual(test_image, expected_result)

  def test_add_cdf_image_summary(self):
    """Checks that the CDF image summary op builds and evaluates."""
    values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
    visualization_utils.add_cdf_image_summary(values, 'PositiveAnchorLoss')
    # The helper registers its summary in the default graph collection.
    cdf_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
    with self.test_session():
      cdf_image_summary.eval()

  def test_add_hist_image_summary(self):
    """Checks that the histogram image summary op builds and evaluates."""
    values = [0.1, 0.2, 0.3, 0.4, 0.42, 0.44, 0.46, 0.48, 0.50]
    bins = [0.01 * i for i in range(101)]
    visualization_utils.add_hist_image_summary(values, bins,
                                               'ScoresDistribution')
    # The helper registers its summary in the default graph collection.
    hist_image_summary = tf.get_collection(key=tf.GraphKeys.SUMMARIES)[0]
    with self.test_session():
      hist_image_summary.eval()

  def test_eval_metric_ops(self):
    """Exercises VisualizeSingleFrameDetections' estimator eval-metric ops."""
    category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
    max_examples_to_draw = 4
    metric_op_base = 'Detections_Left_Groundtruth_Right'
    eval_metric_ops = visualization_utils.VisualizeSingleFrameDetections(
        category_index,
        max_examples_to_draw=max_examples_to_draw,
        summary_name_prefix=metric_op_base)
    original_image = tf.placeholder(tf.uint8, [4, None, None, 3])
    original_image_spatial_shape = tf.placeholder(tf.int32, [4, 2])
    true_image_shape = tf.placeholder(tf.int32, [4, 3])
    # Random detections/groundtruth; only summary emission is under test.
    detection_boxes = tf.random_uniform([4, 20, 4],
                                        minval=0.0,
                                        maxval=1.0,
                                        dtype=tf.float32)
    detection_classes = tf.random_uniform([4, 20],
                                          minval=1,
                                          maxval=3,
                                          dtype=tf.int64)
    detection_scores = tf.random_uniform([4, 20],
                                         minval=0.,
                                         maxval=1.,
                                         dtype=tf.float32)
    groundtruth_boxes = tf.random_uniform([4, 8, 4],
                                          minval=0.0,
                                          maxval=1.0,
                                          dtype=tf.float32)
    groundtruth_classes = tf.random_uniform([4, 8],
                                            minval=1,
                                            maxval=3,
                                            dtype=tf.int64)
    eval_dict = {
        fields.DetectionResultFields.detection_boxes:
            detection_boxes,
        fields.DetectionResultFields.detection_classes:
            detection_classes,
        fields.DetectionResultFields.detection_scores:
            detection_scores,
        fields.InputDataFields.original_image:
            original_image,
        fields.InputDataFields.original_image_spatial_shape: (
            original_image_spatial_shape),
        fields.InputDataFields.true_image_shape: (true_image_shape),
        fields.InputDataFields.groundtruth_boxes:
            groundtruth_boxes,
        fields.InputDataFields.groundtruth_classes:
            groundtruth_classes
    }
    metric_ops = eval_metric_ops.get_estimator_eval_metric_ops(eval_dict)
    _, update_op = metric_ops[next(six.iterkeys(metric_ops))]

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      value_ops = {}
      for key, (value_op, _) in six.iteritems(metric_ops):
        value_ops[key] = value_op

      # First run enough update steps to surpass `max_examples_to_draw`.
      for i in range(max_examples_to_draw):
        # Use a unique image shape on each eval image.
        sess.run(
            update_op,
            feed_dict={
                original_image:
                    np.random.randint(
                        low=0,
                        high=256,
                        size=(4, 6 + i, 7 + i, 3),
                        dtype=np.uint8),
                original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
                                               [6 + i, 7 + i], [6 + i, 7 + i]],
                true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
                                   [6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
            })
      value_ops_out = sess.run(value_ops)
      for key, value_op in six.iteritems(value_ops_out):
        self.assertNotEqual('', value_op)

      # Now run fewer update steps than `max_examples_to_draw`. A single value
      # op will be the empty string, since not enough image summaries can be
      # produced.
      for i in range(max_examples_to_draw - 1):
        # Use a unique image shape on each eval image.
        sess.run(
            update_op,
            feed_dict={
                original_image:
                    np.random.randint(
                        low=0,
                        high=256,
                        size=(4, 6 + i, 7 + i, 3),
                        dtype=np.uint8),
                original_image_spatial_shape: [[6 + i, 7 + i], [6 + i, 7 + i],
                                               [6 + i, 7 + i], [6 + i, 7 + i]],
                true_image_shape: [[6 + i, 7 + i, 3], [6 + i, 7 + i, 3],
                                   [6 + i, 7 + i, 3], [6 + i, 7 + i, 3]]
            })
      value_ops_out = sess.run(value_ops)
      self.assertEqual(
          six.b(''),
          value_ops_out[metric_op_base + '/' + str(max_examples_to_draw - 1)])
# Run the test suite when invoked directly.
if __name__ == '__main__':
  tf.test.main()
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Assets Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Explicit public API of this module.
__all__ = ("S3AssetModel",
           "S3AssetHRModel",
           "S3AssetTeamModel",
           "S3AssetTelephoneModel",
           #"asset_rheader",
           "asset_types",
           "asset_log_status",
           "asset_controller",
           "asset_AssetRepresent",
           )
# JSON: prefer the stdlib (Python >= 2.6), then the external simplejson,
# then the pure-Python copy bundled with web2py/gluon.
try:
    import json # try stdlib (Python 2.6)
except ImportError:
    # FIX: catch ImportError specifically instead of a bare except, which
    # would also swallow SystemExit/KeyboardInterrupt.
    try:
        import simplejson as json # try external module
    except ImportError:
        import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# Asset type codes; each type may expose extra tabs in the UI.
ASSET_TYPE_VEHICLE = 1    # => Extra Tab(s) for Registration Documents, Fuel Efficiency
#ASSET_TYPE_RADIO = 2     # => Extra Tab(s) for Radio Channels/Frequencies
ASSET_TYPE_TELEPHONE = 3  # => Extra Tab(s) for Contact Details & Airtime Billing
ASSET_TYPE_OTHER = 4      # => No extra Tabs

# To pass to global scope
asset_types = {
    "VEHICLE": ASSET_TYPE_VEHICLE,
    #"RADIO": ASSET_TYPE_RADIO,
    "TELEPHONE": ASSET_TYPE_TELEPHONE,
    "OTHER": ASSET_TYPE_OTHER,
}
# Asset log status codes; values >= 32 represent terminal dispositions.
ASSET_LOG_SET_BASE = 1
ASSET_LOG_ASSIGN = 2
ASSET_LOG_RETURN = 3
ASSET_LOG_CHECK = 4
ASSET_LOG_REPAIR = 5
ASSET_LOG_DONATED = 32
ASSET_LOG_LOST = 33
ASSET_LOG_STOLEN = 34
ASSET_LOG_DESTROY = 35

# To pass to global scope
asset_log_status = {
    "SET_BASE": ASSET_LOG_SET_BASE,
    "ASSIGN": ASSET_LOG_ASSIGN,
    "RETURN": ASSET_LOG_RETURN,
    "CHECK": ASSET_LOG_CHECK,
    "REPAIR": ASSET_LOG_REPAIR,
    "DONATED": ASSET_LOG_DONATED,
    "LOST": ASSET_LOG_LOST,
    "STOLEN": ASSET_LOG_STOLEN,
    "DESTROY": ASSET_LOG_DESTROY,
}
# =============================================================================
class S3AssetModel(S3Model):
"""
Asset Management
"""
names = ("asset_asset",
"asset_item",
"asset_log",
"asset_asset_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
s3 = current.response.s3
item_id = self.supply_item_id
item_entity_id = self.supply_item_entity_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
settings = current.deployment_settings
org_site_label = settings.get_org_site_label()
#radios = settings.get_asset_radios()
telephones = settings.get_asset_telephones()
vehicles = settings.has_module("vehicle")
types = telephones or vehicles
# Shortcuts
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
#--------------------------------------------------------------------------
# Assets
#
asset_type_opts = {ASSET_TYPE_OTHER : T("Other"),
}
#if radios:
# asset_type_opts[ASSET_TYPE_RADIO] = T("Radio")
if telephones:
asset_type_opts[ASSET_TYPE_TELEPHONE] = T("Telephone")
if vehicles:
asset_type_opts[ASSET_TYPE_VEHICLE] = T("Vehicle")
asset_condition_opts = {1: T("Good Condition"),
2: T("Minor Damage"),
3: T("Major Damage"),
4: T("Un-Repairable"),
5: T("Needs Maintenance"),
}
ctable = self.supply_item_category
itable = self.supply_item
supply_item_represent = self.supply_item_represent
asset_items_set = db((ctable.can_be_asset == True) & \
(itable.item_category_id == ctable.id))
tablename = "asset_asset"
define_table(tablename,
# Instances
super_link("track_id", "sit_trackable"),
super_link("doc_id", "doc_entity"),
item_entity_id,
Field("number",
label = T("Asset Number"),
),
Field("type", "integer",
# @ToDo: We could set this automatically based on Item Category
default = ASSET_TYPE_OTHER,
label = T("Type"),
represent = lambda opt: \
asset_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_type_opts),
readable = types,
writable = types,
),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("kit", "boolean",
default = False,
label = T("Kit?"),
represent = lambda opt: \
(opt and [T("Yes")] or [NONE])[0],
# @ToDo: deployment_setting
readable = False,
writable = False,
),
organisation_id(requires=self.org_organisation_requires(
updateable=True,
#required=True
),
required = True,
script = \
'''$.filterOptionsS3({
'trigger':'organisation_id',
'target':'site_id',
'lookupResource':'site',
'lookupPrefix':'org',
'lookupField':'site_id',
'lookupURL':S3.Ap.concat('/org/sites_for_org/')
})''',
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
label = org_site_label,
ondelete = "RESTRICT",
readable = True,
writable = True,
represent = self.org_site_represent,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Warehouse"),
# messages.AUTOCOMPLETE_HELP)),
),
Field("sn",
label = T("Serial Number"),
),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
Field("purchase_price", "double",
#default = 0.00,
label = T("Purchase Price"),
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable = False,
writable = False,
),
# Populated onaccept of the log to make a component tab
person_id("assigned_to_id",
readable = False,
writable = False,
comment = self.pr_person_comment(child="assigned_to_id"),
),
# Populated onaccept of the log for reporting/filtering
Field("cond", "integer",
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
#readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Asset"),
title_display = T("Asset Details"),
title_list = T("Assets"),
title_update = T("Edit Asset"),
title_upload = T("Import Assets"),
label_list_button = T("List Assets"),
label_delete_button = T("Delete Asset"),
msg_record_created = T("Asset added"),
msg_record_modified = T("Asset updated"),
msg_record_deleted = T("Asset deleted"),
msg_list_empty = T("No Assets currently registered"))
asset_represent = asset_AssetRepresent(show_link=True)
# Reusable Field
asset_id = S3ReusableField("asset_id", "reference %s" % tablename,
label = T("Asset"),
ondelete = "CASCADE",
represent = asset_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "asset_asset.id",
asset_represent,
sort=True)),
sortby = "number",
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
list_fields = ["id",
"item_id$item_category_id",
"item_id",
"number",
#"type",
#"purchase_date",
(T("Assigned To"), "assigned_to_id"),
"organisation_id",
"site_id",
]
report_fields = ["number",
(T("Category"), "item_id$item_category_id"),
(T("Item"), "item_id"),
"organisation_id",
"site_id",
"cond",
]
text_fields = ["number",
"item_id$name",
#"item_id$category_id$name",
"comments",
]
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields.append(lfield)
list_fields.extend(("cond",
"comments"))
if settings.get_org_branches():
org_filter = S3HierarchyFilter("organisation_id",
# Can be unhidden in customise_xx_resource if there is a need to use a default_filter
hidden = True,
leafonly = False,
)
else:
org_filter = S3OptionsFilter("organisation_id",
filter = True,
header = "",
# Can be unhidden in customise_xx_resource if there is a need to use a default_filter
hidden = True,
)
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
comment = T("You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets."),
#_class = "filter-search",
),
S3OptionsFilter("item_id$item_category_id",
),
org_filter,
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter("cond",
hidden = True,
),
]
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = [(T("Number of items"), "count(number)")],
defaults=Storage(cols = "location_id$%s" % levels[0], # Highest-level of hierarchy
fact = "count(number)",
rows = "item_id$item_category_id",
totals = True,
)
)
# Default summary
summary = [{"name": "addform",
"common": True,
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
]
# Resource Configuration
configure(tablename,
# Open Tabs after creation
create_next = URL(c="asset", f="asset",
args=["[id]"]),
deduplicate = self.asset_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
mark_required = ("organisation_id",),
onaccept = self.asset_onaccept,
realm_components = ("log", "presence"),
report_options = report_options,
summary = summary,
super_entity = ("supply_item_entity", "sit_trackable"),
update_realm = True,
)
# Components
add_components(tablename,
asset_group = "asset_id",
asset_item = "asset_id",
asset_log = "asset_id",
asset_human_resource = "asset_id",
asset_telephone = "asset_id",
asset_telephone_usage = "asset_id",
hrm_human_resource = {"link": "asset_human_resource",
"joinby": "asset_id",
"key": "human_resource_id",
"actuate": "hide",
},
vehicle_gps = "asset_id",
vehicle_vehicle = {"joinby": "asset_id",
"multiple": False,
},
)
# =====================================================================
# Asset Items
# - to allow building ad-hoc Kits
#
tablename = "asset_item"
define_table(tablename,
item_entity_id,
asset_id(ondelete="CASCADE"),
item_id(represent = supply_item_represent,
requires = IS_ONE_OF(asset_items_set,
"supply_item.id",
supply_item_represent,
sort = True,
),
script = None, # No Item Pack Filter
widget = None,
),
Field("quantity", "integer", notnull=True,
default = 1,
label = T("Quantity"),
requires = IS_INT_IN_RANGE(1, 1000),
),
Field("sn",
label = T("Serial Number"),
),
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
Field("purchase_price", "double",
#default=0.00,
represent=lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
s3_currency("purchase_currency"),
# Base Location, which should always be a Site & set via Log
location_id(readable = False,
writable = False),
s3_comments(comment = None),
*s3_meta_fields())
# =====================================================================
# Asset Log
#
asset_log_status_opts = {ASSET_LOG_SET_BASE : T("Base %(facility)s Set") % dict(facility = org_site_label),
ASSET_LOG_ASSIGN : T("Assigned"),
ASSET_LOG_RETURN : T("Returned"),
ASSET_LOG_CHECK : T("Checked"),
ASSET_LOG_REPAIR : T("Repaired"),
ASSET_LOG_DONATED : T("Donated"),
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
}
if auth.permission.format == "html":
# T isn't JSON serializable
site_types = auth.org_site_types
for key in site_types.keys():
site_types[key] = str(site_types[key])
site_types = json.dumps(site_types)
script = '''
$.filterOptionsS3({
'trigger':'organisation_id',
'target':'site_id',
'lookupPrefix':'org',
'lookupResource':'site',
'lookupField':'site_id',
'fncRepresent': function(record,PrepResult){
var InstanceTypeNice=%(instance_type_nice)s
return record.name+" ("+InstanceTypeNice[record.instance_type]+")"
}})''' % dict(instance_type_nice = site_types)
else:
script = None
tablename = "asset_log"
define_table(tablename,
asset_id(),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
asset_log_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_log_status_opts),
),
s3_datetime("datetime",
default = "now",
empty = False,
represent = "date",
),
s3_datetime("datetime_until",
label = T("Date Until"),
represent = "date",
),
person_id(label = T("Assigned To")),
Field("check_in_to_person", "boolean",
#label = T("Mobile"), # Relabel?
label = T("Track with this Person?"),
comment = DIV(_class="tooltip",
#_title="%s|%s" % (T("Mobile"),
_title="%s|%s" % (T("Track with this Person?"),
T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."))),
readable = False,
writable = False,
),
# The Organisation to whom the loan is made
organisation_id(readable = False,
widget = None,
writable = False,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
label = org_site_label,
#filterby = "site_id",
#filter_opts = auth.permitted_facilities(redirect_on_error=False),
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
#default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
empty = False,
represent = self.org_site_represent,
#widget = S3SiteAutocompleteWidget(),
script = script,
),
self.org_room_id(),
#location_id(),
Field("cancel", "boolean",
default = False,
label = T("Cancel Log Entry"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cancel Log Entry"),
T("'Cancel' will indicate an asset log entry did not occur")))
),
Field("cond", "integer", # condition is a MySQL reserved word
label = T("Condition"),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(asset_condition_opts,
zero = "%s..." % T("Please select")),
),
person_id("by_person_id",
default = auth.s3_logged_in_person(), # This can either be the Asset controller if signed-out from the store
label = T("Assigned By"), # or the previous owner if passed on directly (e.g. to successor in their post)
comment = self.pr_person_comment(child="by_person_id"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ASSIGN = T("New Entry in Asset Log")
crud_strings[tablename] = Storage(
label_create = ADD_ASSIGN,
title_display = T("Asset Log Details"),
title_list = T("Asset Log"),
title_update = T("Edit Asset Log Entry"),
label_list_button = T("Asset Log"),
label_delete_button = T("Delete Asset Log Entry"),
msg_record_created = T("Entry added to Asset Log"),
msg_record_modified = T("Asset Log Entry updated"),
msg_record_deleted = T("Asset Log Entry deleted"),
msg_list_empty = T("Asset Log Empty"))
# Resource configuration
configure(tablename,
listadd = False,
list_fields = ["id",
"datetime",
"status",
"datetime_until",
"organisation_id",
"site_id",
"room_id",
"person_id",
#"location_id",
"cancel",
"cond",
"comments",
],
onaccept = self.asset_log_onaccept,
orderby = "asset_log.datetime desc",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(asset_asset_id = asset_id,
asset_represent = asset_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
    """
    Return safe defaults for names exported to s3.* in case the
    model is disabled.

    NOTE(review): model() also exports asset_represent — confirm
    whether a safe default is needed for it here as well.
    """

    # A no-op reusable field standing in for the real asset_id
    placeholder = S3ReusableField("dummy_id", "integer",
                                  readable = False,
                                  writable = False)

    return {"asset_asset_id": lambda **attr: placeholder("asset_id"),
            }
# -------------------------------------------------------------------------
@staticmethod
def asset_duplicate(item):
    """
    Deduplication of Assets

    Matches on the asset number, narrowed by organisation_id and
    site_id when those are present in the import item.

    @param item: the import item to check for duplicates
    """

    table = item.table
    data = item.data

    query = (table.number == data.get("number", None))

    organisation_id = data.get("organisation_id", None)
    if organisation_id:
        query &= (table.organisation_id == organisation_id)

    site_id = data.get("site_id", None)
    if site_id:
        query &= (table.site_id == site_id)

    original = current.db(query).select(table.id,
                                        limitby=(0, 1)).first()
    if original:
        # Update the existing record instead of creating a new one
        item.id = original.id
        item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def asset_onaccept(form):
    """
    After DB I/O for Asset records:
        - set the asset's base location from its site, cascading
          to component items for kits
        - write an ASSET_LOG_SET_BASE entry to the asset log
        - for kits: blank purchase fields (these belong to items)
        - for non-kits: delete any component items

    @param form: the FORM (form.vars holds the asset record data)
    """

    if current.response.s3.bulk:
        # Import or Sync
        return

    db = current.db
    atable = db.asset_asset
    form_vars = form.vars
    kit = form_vars.get("kit", None)

    site_id = form_vars.get("site_id", None)
    if site_id:
        stable = db.org_site
        asset_id = form_vars.id
        # Set the Base Location
        location_id = db(stable.site_id == site_id).select(stable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id
        tracker = S3Tracker()
        asset_tracker = tracker(atable, asset_id)
        asset_tracker.set_base_location(location_id)
        if kit:
            # Also populate location_id field in component items
            aitable = db.asset_item
            db(aitable.asset_id == asset_id).update(location_id = location_id)

        # Add a log entry for this
        ltable = db.asset_log
        ltable.insert(asset_id = asset_id,
                      status = ASSET_LOG_SET_BASE,
                      organisation_id = form_vars.get("organisation_id", None),
                      site_id = site_id,
                      cond = 1,
                      )

    # NOTE(review): asset_id is only bound inside the site_id branch
    # above — confirm that records always post a site_id before
    # relying on the branches below
    if kit:
        # Empty any inappropriate fields
        db(atable.id == asset_id).update(supplier_org_id = None,
                                         purchase_date = None,
                                         purchase_price = None,
                                         purchase_currency = None,
                                         )
    else:
        # Delete any component items
        aitable = db.asset_item
        ids = db(aitable.asset_id == asset_id).select(aitable.id).as_list()
        if ids:
            resource = current.s3db.resource("asset_item", id=ids)
            resource.delete()

    return
# -------------------------------------------------------------------------
@staticmethod
def asset_log_onaccept(form):
    """
    After DB I/O for Asset Log entries:
        - if the entry is the most recent one for the asset, update
          the asset's (base) location, assignee and condition to
          match, cascading the location to any component items

    @param form: the FORM (form.vars holds the asset_log record)
    """

    request = current.request
    get_vars = request.get_vars
    status = get_vars.get("status", None)

    if not status:
        if not current.response.s3.asset_import:
            # e.g. Record merger or Sync
            return

        # Import
        db = current.db
        form_vars = form.vars
        asset_id = form_vars.asset_id
        status = int(form_vars.status)
        if status == ASSET_LOG_ASSIGN:
            # Only type supported right now
            # @ToDo: Support more types
            # (fixed: was "type == 'person'" — a no-op comparison,
            # which left the assignment type unset for imports)
            assign_type = "person"
            new = True
        else:
            # Other statuses are not processed on import
            # (previously "new" was unbound here, raising NameError below)
            new = False
    else:
        # Interactive
        form_vars = form.vars
        status = int(form_vars.status or status)

        db = current.db
        ltable = db.asset_log
        row = db(ltable.id == form_vars.id).select(ltable.asset_id,
                                                   limitby=(0, 1)
                                                   ).first()
        try:
            asset_id = row.asset_id
        except AttributeError:
            # Log record not found => nothing to update
            return
        current_log = asset_get_current_log(asset_id)

        assign_type = get_vars.get("type", None)
        log_time = current_log.datetime
        current_time = form_vars.get("datetime", None).replace(tzinfo=None)
        # Only the latest log entry determines the asset's current state
        new = log_time <= current_time

    if new:
        # This is a current assignment
        atable = db.asset_asset
        aitable = db.asset_item
        tracker = S3Tracker()
        asset_tracker = tracker(atable, asset_id)

        if status == ASSET_LOG_SET_BASE:
            # Set Base Location
            site_id = form_vars.get("site_id", None)
            stable = db.org_site
            location_id = db(stable.site_id == site_id).select(stable.location_id,
                                                               limitby=(0, 1)
                                                               ).first().location_id
            asset_tracker.set_base_location(location_id)
            # Also do component items
            db(aitable.asset_id == asset_id).update(location_id = location_id)

        elif status == ASSET_LOG_ASSIGN:
            if assign_type == "person":
                if form_vars.check_in_to_person:
                    asset_tracker.check_in(db.pr_person, form_vars.person_id,
                                           timestmp = request.utcnow)
                    # Also do component items
                    # @ToDo: Have these move when the person moves
                    locations = asset_tracker.get_location(_fields=[db.gis_location.id])
                    try:
                        db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                    except Exception:
                        # No resolvable location => leave items unchanged
                        pass
                else:
                    location_id = asset_tracker.set_location(form_vars.person_id,
                                                             timestmp = request.utcnow)
                    # Also do component items
                    db(aitable.asset_id == asset_id).update(location_id = location_id)
                # Update main record for component
                db(atable.id == asset_id).update(assigned_to_id=form_vars.person_id)

            elif assign_type == "site":
                asset_tracker.check_in(db.org_site, form_vars.site_id,
                                       timestmp = request.utcnow)
                # Also do component items
                locations = asset_tracker.get_location(_fields=[db.gis_location.id])
                try:
                    db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                except Exception:
                    pass

            elif assign_type == "organisation":
                site_id = form_vars.get("site_id", None)
                if site_id:
                    asset_tracker.check_in(db.org_site, site_id,
                                           timestmp = request.utcnow)
                    # Also do component items
                    locations = asset_tracker.get_location(_fields=[db.gis_location.id])
                    try:
                        db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                    except Exception:
                        pass
                else:
                    # We can no longer track location
                    asset_tracker.check_out()

        elif status == ASSET_LOG_RETURN:
            # Set location to base location
            # NOTE(review): set_location is passed the tracker itself as
            # the source here — confirm this is intentional
            location_id = asset_tracker.set_location(asset_tracker,
                                                     timestmp = request.utcnow)
            # Also do component items
            db(aitable.asset_id == asset_id).update(location_id = location_id)
            # Update condition in main record
            db(atable.id == asset_id).update(cond=form_vars.cond)

    return
# =============================================================================
class S3AssetHRModel(S3Model):
    """
    Optionally link Assets to Human Resources
    - useful for staffing a vehicle
    """

    # Tables defined by this model
    names = ("asset_human_resource",)

    def model(self):
        """ Define the asset <> human resource link table """

        #T = current.T

        #--------------------------------------------------------------------------
        # Assets <> Human Resources
        #
        tablename = "asset_human_resource"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          self.hrm_human_resource_id(empty = False,
                                                     ondelete = "CASCADE",
                                                     ),
                          #s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()
# =============================================================================
class S3AssetTeamModel(S3Model):
    """
    Optionally link Assets to Teams
    """

    # Tables defined by this model
    names = ("asset_group",)

    def model(self):
        """ Define the asset <> group link table """

        #T = current.T

        #--------------------------------------------------------------------------
        # Assets <> Groups
        #
        tablename = "asset_group"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          self.pr_group_id(comment = None,
                                           empty = False,
                                           ),
                          #s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()
# =============================================================================
class S3AssetTelephoneModel(S3Model):
    """
    Extend the Asset Module for Telephones:
        Usage Costs
    """

    # Tables defined by this model
    names = ("asset_telephone",
             "asset_telephone_usage",
             )

    def model(self):
        """ Define telephone details & usage tables """

        T = current.T

        #--------------------------------------------------------------------------
        # Asset Telephones
        #
        tablename = "asset_telephone"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          # @ToDo: Filter to Suppliers
                          self.org_organisation_id(label = T("Airtime Provider")),
                          # We'll need something more complex here as there may be a per-month cost with bundled units
                          #Field("unit_cost", "double",
                          #      label = T("Unit Cost"),
                          #      ),
                          s3_comments(),
                          *s3_meta_fields())

        #--------------------------------------------------------------------------
        # Telephone Usage Costs
        #
        # @ToDo: Virtual Fields for Month/Year for Reporting
        #
        tablename = "asset_telephone_usage"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          s3_date(label = T("Start Date")),
                          # @ToDo: Validation to ensure not before Start Date
                          s3_date("end_date",
                                  label = T("End Date"),
                                  start_field = "asset_telephone_usage_date",
                                  default_interval = 1,
                                  ),
                          Field("units_used", "double", # 'usage' is a reserved word in MySQL
                                label = T("Usage"),
                                ),
                          # mins, Mb (for BGANs)
                          #Field("unit",
                          #      label = T("Usage"),
                          #      ),
                          # @ToDo: Calculate this from asset_telephone fields
                          #Field("cost", "double",
                          #      label = T("Cost"),
                          #      ),
                          #s3_currency(),
                          s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()
# =============================================================================
def asset_get_current_log(asset_id):
    """
    Look up the most recent, non-cancelled log entry for an asset.

    @param asset_id: the asset_asset record ID
    @return: a Storage with the entry's datetime, person_id, cond,
             status, organisation_id and site_id — empty if there
             is no log entry for this asset
    """

    ltable = current.s3db.asset_log
    query = (ltable.asset_id == asset_id) & \
            (ltable.cancel == False) & \
            (ltable.deleted == False)

    # Newest entry first, so first() is the current one
    entry = current.db(query).select(ltable.id,
                                     ltable.status,
                                     ltable.datetime,
                                     ltable.cond,
                                     ltable.person_id,
                                     ltable.organisation_id,
                                     ltable.site_id,
                                     #ltable.location_id,
                                     orderby = ~ltable.datetime,
                                     limitby = (0, 1)
                                     ).first()
    if not entry:
        return Storage()

    return Storage(datetime = entry.datetime,
                   person_id = entry.person_id,
                   cond = int(entry.cond or 0),
                   status = int(entry.status or 0),
                   organisation_id = entry.organisation_id,
                   site_id = entry.site_id,
                   #location_id = entry.location_id
                   )
# =============================================================================
def asset_log_prep(r):
    """
    Called by Controller

    Configures the asset_log form for the requested operation:
    depending on the ?status= and ?type= request vars, pre-sets the
    status field and toggles the readable/writable flags and
    validators of the relevant fields.

    @param r: the S3Request for the asset log component
    """

    T = current.T
    db = current.db
    request = current.request

    table = db.asset_log

    if r.record:
        asset = Storage(r.record)
    else:
        # This is a new record
        asset = Storage()
        table.cancel.readable = False
        table.cancel.writable = False

    # This causes an error with the dataTables paginate
    # if used only in r.interactive & not also r.representation=="aadata"
    if r.method != "read" and r.method != "update":
        table.cancel.readable = False
        table.cancel.writable = False
    current_log = asset_get_current_log(asset.id)
    if request.vars.status:
        status = int(request.vars.status)
    else:
        status = 0

    # NOTE(review): status is an int here, so the != "None" guard can
    # never be False — confirm whether string statuses were intended
    if status and status != "None":
        field = table.status
        field.default = status
        field.readable = False
        field.writable = False
    elif current_log:
        table.status.default = current_log.status

        if current_log.organisation_id:
            # Restrict site selection to the current organisation's sites
            table.organisation_id.default = current_log.organisation_id
            table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
                                               table.site_id.represent,
                                               filterby = "organisation_id",
                                               filter_opts = (current_log.organisation_id,))

    crud_strings = current.response.s3.crud_strings.asset_log
    if status == ASSET_LOG_SET_BASE:
        crud_strings.msg_record_created = T("Base Facility/Site Set")
        table.by_person_id.label = T("Set By")
        table.site_id.writable = True
        table.datetime_until.readable = False
        table.datetime_until.writable = False
        table.person_id.readable = False
        table.person_id.writable = False
        table.organisation_id.readable = True
        table.organisation_id.writable = True
        table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
                                           table.site_id.represent)

    elif status == ASSET_LOG_RETURN:
        crud_strings.msg_record_created = T("Returned")
        table.person_id.label = T("Returned From")
        table.person_id.default = current_log.person_id
        table.site_id.readable = False
        table.site_id.writable = False

    elif status == ASSET_LOG_ASSIGN:
        # Assignment target kind is passed in the ?type= request var
        type = request.vars.type
        # table["%s_id" % type].required = True
        if type == "person":
            crud_strings.msg_record_created = T("Assigned to Person")
            table["person_id"].requires = IS_ONE_OF(db, "pr_person.id",
                                                    table.person_id.represent,
                                                    orderby="pr_person.first_name",
                                                    sort=True,
                                                    error_message="Person must be specified!")
            table.check_in_to_person.readable = True
            table.check_in_to_person.writable = True
            table.site_id.requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db, "org_site.site_id",
                                                  table.site_id.represent))
        elif type == "site":
            crud_strings.msg_record_created = T("Assigned to Facility/Site")
        elif type == "organisation":
            crud_strings.msg_record_created = T("Assigned to Organization")
            table.organisation_id.readable = True
            table.organisation_id.writable = True
            table.organisation_id.requires = IS_ONE_OF(db, "org_organisation.id",
                                                       table.organisation_id.represent,
                                                       orderby="org_organisation.name",
                                                       sort=True)
            table.site_id.requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db, "org_site.site_id",
                                                  table.site_id.represent))

    elif "status" in request.get_vars:
        # Plain status update (check/repair/donate/lose/steal/destroy)
        crud_strings.msg_record_created = T("Status Updated")
        table.person_id.label = T("Updated By")
        field = table.status
        field.readable = True
        field.writable = True
        field.requires = IS_IN_SET({ASSET_LOG_CHECK : T("Check"),
                                    ASSET_LOG_REPAIR : T("Repair"),
                                    ASSET_LOG_DONATED : T("Donated"),
                                    ASSET_LOG_LOST : T("Lost"),
                                    ASSET_LOG_STOLEN : T("Stolen"),
                                    ASSET_LOG_DESTROY : T("Destroyed"),
                                    })
# =============================================================================
def asset_rheader(r):
    """
    Resource Header for Assets

    Builds the tab bar (varying by asset type), a summary table of
    the asset and its current log state, and the action buttons in
    the footer (set base, assign, update status).

    @param r: the S3Request
    @return: the rheader DIV, or None for non-HTML representations
    """

    if r.representation == "html":
        record = r.record
        if record:
            T = current.T
            s3db = current.s3db
            s3 = current.response.s3

            NONE = current.messages["NONE"]

            # Type-specific component tabs
            if record.type == ASSET_TYPE_TELEPHONE:
                tabs = [(T("Asset Details"), None, {"native": True}),
                        (T("Telephone Details"), "telephone"),
                        (T("Usage"), "telephone_usage"),
                        ]
            #elif record.type == s3.asset.ASSET_TYPE_RADIO:
            #    tabs.append((T("Radio Details"), "radio"))
            elif record.type == ASSET_TYPE_VEHICLE:
                STAFF = current.deployment_settings.get_hrm_staff_label()
                tabs = [(T("Asset Details"), None, {"native": True}),
                        (T("Vehicle Details"), "vehicle"),
                        (STAFF, "human_resource"),
                        (T("Assign %(staff)s") % dict(staff=STAFF), "assign"),
                        (T("Check-In"), "check-in"),
                        (T("Check-Out"), "check-out"),
                        (T("GPS Data"), "presence"),
                        ]
            else:
                tabs = [(T("Edit Details"), None)]
            tabs.append((T("Log"), "log"))
            tabs.append((T("Documents"), "document"))

            rheader_tabs = s3_rheader_tabs(r, tabs)

            # Links must target the controller we were called from
            if current.request.controller == "vehicle":
                func = "vehicle"
            else:
                func = "asset"

            # @ToDo: Check permissions before displaying buttons
            asset_action_btns = [
                A(T("Set Base Facility/Site"),
                  _href = URL(f=func,
                              args = [record.id, "log", "create"],
                              vars = dict(status = ASSET_LOG_SET_BASE)
                              ),
                  _class = "action-btn",
                  )
                ]

            current_log = asset_get_current_log(record.id)
            status = current_log.status

            #if record.location_id:
            # A Base Site has been set
            # Return functionality removed  - as it doesn't set site_id & organisation_id in the logs
            #if status == ASSET_LOG_ASSIGN:
            #    asset_action_btns += [ A( T("Return"),
            #                              _href = URL(f=func,
            #                                          args = [record.id, "log", "create"],
            #                                          vars = dict(status = ASSET_LOG_RETURN)
            #                                          ),
            #                              _class = "action-btn"
            #                              )
            #                           ]
            if status < ASSET_LOG_DONATED:
                # @ToDo: deployment setting to prevent assigning assets before returning them
                # The Asset is available for assignment (not disposed)
                asset_action_btns += [
                    A(T("Assign to Person"),
                      _href = URL(f=func,
                                  args = [record.id, "log", "create"],
                                  vars = dict(status = ASSET_LOG_ASSIGN,
                                              type = "person")
                                  ),
                      _class = "action-btn",
                      ),
                    A(T("Assign to Facility/Site"),
                      _href = URL(f=func,
                                  args = [record.id, "log", "create"],
                                  vars = dict(status = ASSET_LOG_ASSIGN,
                                              type = "site")
                                  ),
                      _class = "action-btn",
                      ),
                    A(T("Assign to Organization"),
                      _href = URL(f=func,
                                  args = [record.id, "log", "create"],
                                  vars = dict(status = ASSET_LOG_ASSIGN,
                                              type = "organisation")
                                  ),
                      _class = "action-btn",
                      ),
                    ]
            asset_action_btns += [
                A(T("Update Status"),
                  _href = URL(f=func,
                              args = [record.id, "log", "create"],
                              vars = None
                              ),
                  _class = "action-btn",
                  ),
                ]

            table = r.table
            ltable = s3db.asset_log
            rheader = DIV(TABLE(TR(TH("%s: " % table.number.label),
                                   record.number,
                                   TH("%s: " % table.item_id.label),
                                   table.item_id.represent(record.item_id)
                                   ),
                                TR(TH("%s: " % ltable.cond.label),
                                   ltable.cond.represent(current_log.cond),
                                   TH("%s: " % ltable.status.label),
                                   ltable.status.represent(status),
                                   ),
                                TR(TH("%s: " % ltable.person_id.label),
                                   ltable.person_id.represent(current_log.person_id),
                                   TH("%s: " % ltable.site_id.label),
                                   ltable.site_id.represent(current_log.site_id),
                                   ),
                                ),
                          rheader_tabs)
            # Action buttons go into the footer
            s3.rfooter = TAG[""](*asset_action_btns)
            return rheader
    return None
# =============================================================================
def asset_controller():
    """
    RESTful CRUD controller for Assets

    Wires up the pre-process (location filter + log form prep),
    the import pre-process (sets the asset_import flag) and the
    post-process (action buttons & client script), then delegates
    to the generic REST controller.
    """

    s3db = current.s3db
    s3 = current.response.s3

    # Pre-process
    def prep(r):
        # Location Filter
        current.s3db.gis_location_filter(r)

        if r.component_name == "log":
            asset_log_prep(r)

        return True
    s3.prep = prep

    # Import pre-process
    def import_prep(data):
        """
        Flag that this is an Import (to distinguish from Sync)
        @ToDo: Find Person records from their email addresses
        """
        current.response.s3.asset_import = True
        return
        # NOTE: everything below the early return is intentionally
        # disabled (see @ToDo above)
        # @ToDo: get this working
        ctable = s3db.pr_contact
        ptable = s3db.pr_person

        resource, tree = data
        elements = tree.getroot().xpath("/s3xml//resource[@name='pr_person']/data[@field='first_name']")
        persons = {}
        for element in elements:
            email = element.text
            if email in persons:
                # Replace email with uuid
                element.text = persons[email]["uuid"]
                # Don't check again
                continue

            query = (ctable.value == email) & \
                    (ctable.pe_id == ptable.pe_id)
            person = db(query).select(ptable.uuid,
                                      limitby=(0, 1)
                                      ).first()
            if person:
                # Replace email with uuid
                uuid = person.uuid
            else:
                # Blank it
                uuid = ""
            element.text = uuid
            # Store in case we get called again with same value
            persons[email] = dict(uuid=uuid)

    s3.import_prep = import_prep

    # Post-processor
    def postp(r, output):
        if r.interactive and r.method != "import":
            script = "/%s/static/scripts/S3/s3.asset.js" % r.application
            s3.scripts.append(script)
            S3CRUD.action_buttons(r, deletable=False)
            #if not r.component:
                #s3.actions.append({"url" : URL(c="asset", f="asset",
                #                               args = ["[id]", "log", "create"],
                #                               vars = {"status" : eden.asset.asset_log_status["ASSIGN"],
                #                                       "type" : "person"}),
                #                   "_class" : "action-btn",
                #                   "label" : str(T("Assign"))})
        return output
    s3.postp = postp

    output = current.rest_controller("asset", "asset",
                                    rheader = asset_rheader,
                                    )
    return output
# =============================================================================
class asset_AssetRepresent(S3Represent):
    """
    Representation of Assets as "number (item[, brand])",
    linking to the vehicle controller for vehicle-type assets.
    """

    def __init__(self,
                 fields = ("number",), # unused
                 show_link = False,
                 translate = False,
                 multiple = False,
                 ):
        """
        Constructor

        @param fields: unused (kept for API compatibility)
        @param show_link: render representations as links
        @param translate: translate representations
        @param multiple: render multiple values per row
        """

        # Need a custom lookup
        self.lookup_rows = self.custom_lookup_rows

        super(asset_AssetRepresent,
              self).__init__(lookup="asset_asset",
                             fields=fields,
                             show_link=show_link,
                             translate=translate,
                             multiple=multiple)

    # -------------------------------------------------------------------------
    def custom_lookup_rows(self, key, values, fields=None):
        """
        Custom lookup method for asset rows: joins the supply item
        and left-joins the brand. Parameters key and fields are not
        used, but are kept for API compatibility reasons.
        (fields previously defaulted to a mutable [] — fixed to None)

        @param values: the asset IDs
        """

        db = current.db
        s3db = current.s3db

        table = s3db.asset_asset
        itable = db.supply_item
        btable = db.supply_brand

        qty = len(values)
        if qty == 1:
            query = (table.id == values[0])
            limitby = (0, 1)
        else:
            query = (table.id.belongs(values))
            limitby = (0, qty)
        query &= (itable.id == table.item_id)

        rows = db(query).select(table.id,
                                table.number,
                                table.type,
                                itable.name,
                                btable.name,
                                left=btable.on(itable.brand_id == btable.id),
                                limitby=limitby)
        self.queries += 1
        return rows

    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
        Represent a single Row

        @param row: the asset_asset Row (with the item & brand
                    left-joined by custom_lookup_rows)
        @return: "number (item, brand)" or "number (item)"
        """

        # Custom Row (with the item & brand left-joined)
        number = row["asset_asset.number"]
        item = row["supply_item.name"]
        brand = row.get("supply_brand.name", None)
        if not number:
            return self.default
        represent = "%s (%s" % (number, item)
        if brand:
            represent = "%s, %s)" % (represent, brand)
        else:
            represent = "%s)" % represent
        return s3_unicode(represent)

    # -------------------------------------------------------------------------
    def link(self, k, v, row=None):
        """
        Represent a (key, value) as hypertext link.

        @param k: the key (asset ID)
        @param v: the representation of the key
        @param row: the row with this key
        """

        if row:
            # Vehicle-type assets link into the vehicle controller
            # (renamed local: previously shadowed the builtin "type")
            asset_type = row.get("asset_asset.type", None)
            if asset_type == 1:
                return A(v, _href=URL(c="vehicle", f="vehicle", args=[k],
                                      # remove the .aaData extension in paginated views
                                      extension=""
                                      ))
        k = s3_unicode(k)
        return A(v, _href=self.linkto.replace("[id]", k) \
                                     .replace("%5Bid%5D", k))
# END =========================================================================
# ---- file-concatenation artifact: an unrelated module follows (django.db.migrations.state) ----
from __future__ import unicode_literals
from collections import OrderedDict
import copy
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.fields.related import do_pending_lookups
from django.db.models.fields.proxy import OrderWrt
from django.conf import settings
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version
class InvalidBasesError(ValueError):
    """Raised when a model's bases cannot be resolved during rendering."""
    pass
class ProjectState(object):
    """
    Represents the entire project's overall state.
    This is the item that is passed around - we do it here rather than at the
    app level so that cross-app FKs/etc. resolve properly.
    """

    def __init__(self, models=None, real_apps=None):
        # {(app_label, model_name_lower): ModelState}
        self.models = models or {}
        # Apps to include from main registry, usually unmigrated ones
        self.real_apps = real_apps or []

    def add_model(self, model_state):
        """Add a ModelState; re-render it if 'apps' was already built."""
        app_label, model_name = model_state.app_label, model_state.name_lower
        self.models[(app_label, model_name)] = model_state
        if 'apps' in self.__dict__:  # hasattr would cache the property
            self.reload_model(app_label, model_name)

    def remove_model(self, app_label, model_name):
        """Remove a ModelState and unregister its rendered model, if any."""
        del self.models[app_label, model_name]
        if 'apps' in self.__dict__:  # hasattr would cache the property
            self.apps.unregister_model(app_label, model_name)

    def reload_model(self, app_label, model_name):
        """Re-render one model and every model related to it."""
        if 'apps' in self.__dict__:  # hasattr would cache the property
            # Get relations before reloading the models, as _meta.apps may change
            try:
                related_old = {
                    f.related_model for f in
                    self.apps.get_model(app_label, model_name)._meta.related_objects
                }
            except LookupError:
                related_old = set()
            self._reload_one_model(app_label, model_name)

            # Reload models if there are relations
            model = self.apps.get_model(app_label, model_name)
            related_m2m = {f.related_model for f in model._meta.many_to_many}
            for rel_model in related_old.union(related_m2m):
                self._reload_one_model(rel_model._meta.app_label, rel_model._meta.model_name)
            if related_m2m:
                # Re-render this model after related models have been reloaded
                self._reload_one_model(app_label, model_name)

    def _reload_one_model(self, app_label, model_name):
        # Drop the stale rendered model and render afresh from its state
        self.apps.unregister_model(app_label, model_name)
        self.models[app_label, model_name].render(self.apps)

    def clone(self):
        "Returns an exact copy of this ProjectState"
        new_state = ProjectState(
            models={k: v.clone() for k, v in self.models.items()},
            real_apps=self.real_apps,
        )
        if 'apps' in self.__dict__:
            # Carry the rendered registry over so it need not be rebuilt
            new_state.apps = self.apps.clone()
        return new_state

    @cached_property
    def apps(self):
        # Lazily-built registry of rendered models for this state
        return StateApps(self.real_apps, self.models)

    @property
    def concrete_apps(self):
        # Rebuild the registry ignoring failing swappable-model lookups
        self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
        return self.apps

    @classmethod
    def from_apps(cls, apps):
        "Takes in an Apps and returns a ProjectState matching it"
        app_models = {}
        for model in apps.get_models(include_swapped=True):
            model_state = ModelState.from_model(model)
            app_models[(model_state.app_label, model_state.name_lower)] = model_state
        return cls(app_models)

    def __eq__(self, other):
        # Equal iff same model keys, same real apps, pairwise-equal states
        if set(self.models.keys()) != set(other.models.keys()):
            return False
        if set(self.real_apps) != set(other.real_apps):
            return False
        return all(model == other.models[key] for key, model in self.models.items())

    def __ne__(self, other):
        return not (self == other)
class AppConfigStub(AppConfig):
    """
    Stubs a Django AppConfig. Only provides a label, and a dict of models.
    """
    # Not used, but required by AppConfig.__init__
    path = ''

    def __init__(self, label):
        self.label = label
        # App-label and app-name are not the same thing, so technically passing
        # in the label here is wrong. In practice, migrations don't care about
        # the app name, but we need something unique, and the label works fine.
        super(AppConfigStub, self).__init__(label, None)

    def import_models(self, all_models):
        # Models are supplied directly instead of imported from a module
        self.models = all_models
class StateApps(Apps):
    """
    Subclass of the global Apps registry class to better handle dynamic model
    additions and removals.
    """
    def __init__(self, real_apps, models, ignore_swappable=False):
        # Any apps in self.real_apps should have all their models included
        # in the render. We don't use the original model instances as there
        # are some variables that refer to the Apps object.
        # FKs/M2Ms from real apps are also not included as they just
        # mess things up with partial states (due to lack of dependencies)
        real_models = []
        for app_label in real_apps:
            app = global_apps.get_app_config(app_label)
            for model in app.get_models():
                real_models.append(ModelState.from_model(model, exclude_rels=True))
        # Populate the app registry with a stub for each application.
        app_labels = {model_state.app_label for model_state in models.values()}
        app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
        super(StateApps, self).__init__(app_configs)

        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the size of the unrendered models doesn't
        # decrease by at least one, meaning there's a base dependency loop/
        # missing base.
        unrendered_models = list(models.values()) + real_models
        while unrendered_models:
            new_unrendered_models = []
            for model in unrendered_models:
                try:
                    model.render(self)
                except InvalidBasesError:
                    # Base not rendered yet - retry in the next pass
                    new_unrendered_models.append(model)
            if len(new_unrendered_models) == len(unrendered_models):
                # No progress this pass => unresolvable bases
                raise InvalidBasesError(
                    "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                    "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
                    "for more" % (new_unrendered_models, get_docs_version())
                )
            unrendered_models = new_unrendered_models

        # If there are some lookups left, see if we can first resolve them
        # ourselves - sometimes fields are added after class_prepared is sent
        for lookup_model, operations in self._pending_lookups.items():
            try:
                model = self.get_model(lookup_model[0], lookup_model[1])
            except LookupError:
                app_label = "%s.%s" % (lookup_model[0], lookup_model[1])
                if app_label == settings.AUTH_USER_MODEL and ignore_swappable:
                    # Swapped-out user model - deliberately skipped
                    continue
                # Raise an error with a best-effort helpful message
                # (only for the first issue). Error message should look like:
                # "ValueError: Lookup failed for model referenced by
                # field migrations.Book.author: migrations.Author"
                msg = "Lookup failed for model referenced by field {field}: {model[0]}.{model[1]}"
                raise ValueError(msg.format(field=operations[0][1], model=lookup_model))
            else:
                do_pending_lookups(model)

    def clone(self):
        """
        Return a clone of this registry, mainly used by the migration framework.
        """
        clone = StateApps([], {})
        # Deep-copy so mutations of the clone don't leak back
        clone.all_models = copy.deepcopy(self.all_models)
        clone.app_configs = copy.deepcopy(self.app_configs)
        return clone

    def register_model(self, app_label, model):
        """Register a rendered model, creating an app stub if needed."""
        self.all_models[app_label][model._meta.model_name] = model
        if app_label not in self.app_configs:
            self.app_configs[app_label] = AppConfigStub(app_label)
            self.app_configs[app_label].models = OrderedDict()
        self.app_configs[app_label].models[model._meta.model_name] = model
        self.clear_cache()

    def unregister_model(self, app_label, model_name):
        """Remove a rendered model; a missing model is not an error."""
        try:
            del self.all_models[app_label][model_name]
            del self.app_configs[app_label].models[model_name]
        except KeyError:
            pass
        self.clear_cache()
class ModelState(object):
    """
    Represents a Django Model. We don't use the actual Model class
    as it's not designed to have its options changed - instead, we
    mutate this one and then render it into a Model as required.

    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """

    def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
        # app_label + name identify the model; fields is an ordered list of
        # (field_name, unbound_field_instance) 2-tuples.
        self.app_label = app_label
        self.name = force_text(name)
        self.fields = fields
        self.options = options or {}
        # Default to inheriting straight from models.Model.
        self.bases = bases or (models.Model, )
        self.managers = managers or []
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
        # Sanity-check that fields are NOT already bound to a model.
        # (A bound field has had contribute_to_class run; Field.model is set
        # in that process - TODO confirm for this Django version.)
        for name, field in fields:
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )

    @cached_property
    def name_lower(self):
        # Lower-cased model name, cached because it is used repeatedly for
        # case-insensitive lookups.
        return self.name.lower()

    @classmethod
    def from_model(cls, model, exclude_rels=False):
        """
        Feed me a model, get a ModelState representing it out.

        When exclude_rels is True, relational fields and the field-listing
        options (unique_together etc.) are omitted, yielding a stub state.
        """
        # Deconstruct the fields: rebuild each one as a fresh, unbound
        # instance via its deconstruct() 4-tuple.
        fields = []
        for field in model._meta.local_fields:
            # Skip relational fields when building a stub state.
            if getattr(field, "rel", None) and exclude_rels:
                continue
            # The implicit _order field is represented by the
            # order_with_respect_to option instead.
            if isinstance(field, OrderWrt):
                continue
            name, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            try:
                fields.append((name, field_class(*args, **kwargs)))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
                    name,
                    model._meta.app_label,
                    model._meta.object_name,
                    e,
                ))
        if not exclude_rels:
            for field in model._meta.local_many_to_many:
                name, path, args, kwargs = field.deconstruct()
                field_class = import_string(path)
                try:
                    fields.append((name, field_class(*args, **kwargs)))
                except TypeError as e:
                    raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                        name,
                        model._meta.object_name,
                        e,
                    ))
        # Extract the options
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                # unique_together / index_together are normalized to a set of
                # tuples so that ordering differences don't register as
                # changes.
                if name == "unique_together":
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_together(ut))
                elif name == "index_together":
                    it = model._meta.original_attrs["index_together"]
                    options[name] = set(normalize_together(it))
                else:
                    options[name] = model._meta.original_attrs[name]
        # Force-convert all options to text_type (#23226)
        options = cls.force_text_recursive(options)
        # If we're ignoring relationships, remove all field-listing model
        # options (that option basically just means "make a stub model")
        if exclude_rels:
            for key in ["unique_together", "index_together", "order_with_respect_to"]:
                if key in options:
                    del options[key]

        def flatten_bases(model):
            # Replace each abstract base with its own bases, recursively,
            # keeping concrete bases as-is.
            bases = []
            for base in model.__bases__:
                if hasattr(base, "_meta") and base._meta.abstract:
                    bases.extend(flatten_bases(base))
                else:
                    bases.append(base)
            return bases

        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However by recursing on
        # __bases__ we may end up with duplicates and ordering issues, we
        # therefore discard any duplicates and reorder the bases according
        # to their index in the MRO.
        flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
        # Make our record: concrete model bases become "app_label.model_name"
        # strings (resolved lazily at render time); plain classes stay as-is.
        bases = tuple(
            (
                "%s.%s" % (base._meta.app_label, base._meta.model_name)
                if hasattr(base, "_meta") else
                base
            )
            for base in flattened_bases
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model,)
        # Constructs all managers on the model
        managers = {}

        def reconstruct_manager(mgr):
            # Rebuild a manager instance from its deconstructed form, either
            # via QuerySet.as_manager() or the manager class itself.
            as_manager, manager_path, qs_path, args, kwargs = mgr.deconstruct()
            if as_manager:
                qs_class = import_string(qs_path)
                instance = qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                instance = manager_class(*args, **kwargs)
            # We rely on the ordering of the creation_counter of the original
            # instance
            managers[mgr.name] = (mgr.creation_counter, instance)

        default_manager_name = model._default_manager.name
        # Make sure the default manager is always the first
        if model._default_manager.use_in_migrations:
            reconstruct_manager(model._default_manager)
        else:
            # Force this manager to be the first and thus default
            managers[default_manager_name] = (0, models.Manager())
        # Sort all managers by their creation counter
        # NOTE(review): _meta.managers appears to yield
        # (creation_counter, manager, abstract) triples in this Django
        # version - confirm against Options.managers.
        for _, manager, _ in sorted(model._meta.managers):
            if manager.name == '_base_manager' or not manager.use_in_migrations:
                continue
            reconstruct_manager(manager)
        # Sort all managers by their creation counter but take only name and
        # instance for further processing
        managers = [
            (name, instance) for name, (cc, instance) in
            sorted(managers.items(), key=lambda v: v[1])
        ]
        # If the only manager is the auto-created default, omit managers
        # entirely (relies on Manager comparison semantics - TODO confirm).
        if managers == [(default_manager_name, models.Manager())]:
            managers = []
        # Construct the new ModelState
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
            managers,
        )

    @classmethod
    def force_text_recursive(cls, value):
        # Recursively convert every string found in (nested) lists, tuples,
        # sets and dicts to text_type, preserving container types.
        if isinstance(value, six.string_types):
            return smart_text(value)
        elif isinstance(value, list):
            return [cls.force_text_recursive(x) for x in value]
        elif isinstance(value, tuple):
            return tuple(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, set):
            return set(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, dict):
            return {
                cls.force_text_recursive(k): cls.force_text_recursive(v)
                for k, v in value.items()
            }
        return value

    def construct_fields(self):
        "Deep-clone the fields using deconstruction"
        # Yields (name, fresh_field_instance) pairs so that rendered models
        # never share Field objects with this state.
        for name, field in self.fields:
            _, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            yield name, field_class(*args, **kwargs)

    def construct_managers(self):
        "Deep-clone the managers using deconstruction"
        # Sort all managers by their creation counter
        sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
        for mgr_name, manager in sorted_managers:
            as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
            if as_manager:
                qs_class = import_string(qs_path)
                yield mgr_name, qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                yield mgr_name, manager_class(*args, **kwargs)

    def clone(self):
        "Returns an exact copy of this ModelState"
        # Fields and managers are re-deconstructed so the clone shares no
        # mutable Field/Manager instances with self (see class docstring).
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=list(self.construct_fields()),
            options=dict(self.options),
            bases=self.bases,
            managers=list(self.construct_managers()),
        )

    def render(self, apps):
        "Creates a Model object from our current state into the given apps"
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, "apps": apps}
        meta_contents.update(self.options)
        meta = type(str("Meta"), tuple(), meta_contents)
        # Then, work out our bases
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, six.string_types) else base)
                for base in self.bases
            )
        except LookupError:
            # A string base references a model not (yet) registered in
            # `apps`; callers catch InvalidBasesError and retry later.
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Turn fields into a dict for the body, add other bits
        body = dict(self.construct_fields())
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Restore managers
        body.update(self.construct_managers())
        # Then, make a Model object (apps.register_model is called in __new__)
        return type(
            str(self.name),
            bases,
            body,
        )

    def get_field_by_name(self, name):
        # Linear scan of the (name, field) list; raises if absent.
        for fname, field in self.fields:
            if fname == name:
                return field
        raise ValueError("No field called %s on model %s" % (name, self.name))

    def __repr__(self):
        return "<ModelState: '%s.%s'>" % (self.app_label, self.name)

    def __eq__(self, other):
        # Fields are compared by their deconstructed form (minus the name,
        # hence [1:]) so two independently constructed but equivalent
        # states compare equal.
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
                for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
            (self.options == other.options) and
            (self.bases == other.bases) and
            (self.managers == other.managers)
        )

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it here.
        return not (self == other)
| |
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""The Viewfinder schema definition.
The schema contains a set of tables. Each table is described by name,
key, a set of columns, and a list of versions.
The table name is the name used to access the database. The table key
is used to segment index terms by table. It, combined with the column
key, forms the prefix of each index term generated when a column value
is indexed.
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
'andy@emailscrubbed.com (Andy Kimball)']
from schema import Schema, Table, IndexedTable, IndexTable, Column, HashKeyColumn, RangeKeyColumn, SetColumn, JSONColumn, LatLngColumn, PlacemarkColumn, CryptColumn
from indexers import Indexer, SecondaryIndexer, FullTextIndexer, EmailIndexer, LocationIndexer
# Logical table names referenced by the SCHEMA definition below. Each maps
# to a DynamoDB table (the on-disk name may differ, e.g. Episode is stored
# as "Event" - see the table definitions).
ACCOUNTING = 'Accounting'
ACTIVITY = 'Activity'
ADMIN_PERMISSIONS = 'AdminPermissions'
ANALYTICS = "Analytics"
COMMENT = 'Comment'
CONTACT = 'Contact'
DEVICE = 'Device'
EPISODE = 'Episode'
FOLLOWED = 'Followed'
FOLLOWER = 'Follower'
FRIEND = 'Friend'
GUESS = 'Guess'
HEALTH_REPORT = 'HealthReport'
ID_ALLOCATOR = 'IdAllocator'
IDENTITY = 'Identity'
LOCK = 'Lock'
METRIC = 'Metric'
NOTIFICATION = 'Notification'
OPERATION = 'Operation'
PHOTO = 'Photo'
POST = 'Post'
SETTINGS = 'Settings'
SHORT_URL = 'ShortURL'
SUBSCRIPTION = 'Subscription'
USER = 'User'
USER_PHOTO = 'UserPhoto'
USER_POST = 'UserPost'
VIEWPOINT = 'Viewpoint'
# Supporting tables: the search index and a table used to exercise renames.
INDEX = 'Index'
TEST_RENAME = 'TestRename'
SCHEMA = Schema([
# The accounting table stores aggregated usage stats.
# The hash and sort keys are strings consisting of 'prefix:<optional id>'
#
# Accounting categories:
# - Per viewpoint: hash_key='vs:<vp_id>'
# Aggregate sizes/counts per viewpoint, keyed by the viewpoint
# id. Sort keys fall into three categories:
# - owned by: 'ow:<user_id>' only found in default viewpoint.
# - shared by: 'sb:<user_id>' in shared viewpoint, sum of all photos
# in episodes owned by 'user_id'
# - visible to: 'vt' in shared viewpoint, sum of all photos. not keyed
# by user. a given user's "shared with" stats will be 'vt - sb:<user_id>',
# but we do not want to keep per-user shared-by stats.
# - Per user: hash_key='us:<user_id>'
# Aggregate sizes/counts per user, keyed by user id. Sort keys are:
# - owned by: 'ow' sum of all photos in default viewpoint
# - shared by: 'sb' sum of all photos in shared viewpoints and episodes owned by this user
# - visible to: 'vt' sum of all photos in shared viewpoint (includes 'sb'). to get the
# real count of photos shared with this user but not shared by him, compute 'vt - sb'
#
# 'op_ids' holds a list of previously-applied operation IDs. This is an attempt to
# make increments idempotent with replays. The list is a comma-separated string of
# operation ids (sometimes suffixed with a viewpoint ID), in the order in which they were
# applied. We keep a maximum of Accounting._MAX_APPLIED_OP_IDS.
#
# Currently, all columns are used by each accounting category.
Table(ACCOUNTING, 'at', read_units=100, write_units=10,
columns=[HashKeyColumn('hash_key', 'hk', 'S'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('num_photos', 'np', 'N'),
Column('tn_size', 'ts', 'N'),
Column('med_size', 'ms', 'N'),
Column('full_size', 'fs', 'N'),
Column('orig_size', 'os', 'N'),
Column('op_ids', 'oi', 'S')]),
# Activities are associated with a viewpoint and contain a record of
# all high-level operations which have modified the structure of the
# viewpoint in some way. For more details, see activity.py. The
# activity_id attribute is a composite of information gleaned from
# current operation: (reverse timestamp, user_id, op_id). The content
# of the activity is a JSON-encoded ACTIVITY structure, as defined in
# json_schema.py. 'update_seq' is set to the value of the viewpoint's
# 'update_seq' attribute after it was incremented during creation of
# the activity.
Table(ACTIVITY, 'ac', read_units=100, write_units=10,
columns=[HashKeyColumn('viewpoint_id', 'vi', 'S'),
RangeKeyColumn('activity_id', 'ai', 'S'),
Column('user_id', 'ui', 'N', read_only=True),
Column('timestamp', 'ti', 'N', read_only=True),
Column('update_seq', 'us', 'N'),
Column('name', 'na', 'S', read_only=True),
Column('json', 'js', 'S', read_only=True)]),
# Admin table. This table lists all users with access to admin and support functions.
# Entries are created by the otp script, with 'rights' being a set of roles (eg: 'root' or 'support').
# Admin users are not currently linked to viewfinder users.
Table(ADMIN_PERMISSIONS, 'ad', read_units=10, write_units=10,
columns=[HashKeyColumn('username', 'un', 'S'),
Column('rights', 'ri', 'SS')]),
# Timestamped information for various entities. The entity hash key should be of the form: <type>:<id>.
# eg: us:112 (for user with ID 112).
# sort_key: base64 hex encoded timestamp + type
# Type is a string representing the type of analytics entry. See db/analytics.py for details.
# Payload is an optional payload attached to the entry. Its format depends on the type of entry.
Table(ANALYTICS, 'an', read_units=10, write_units=10,
columns=[HashKeyColumn('entity', 'et', 'S'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('timestamp', 'ti', 'N'),
Column('type', 'tp', 'S'),
Column('payload', 'pl', 'S')]),
# Key is composite of (viewpoint_id, comment_id), which sorts all
# comments by ascending timestamp within each viewpoint. 'user_id'
# is the user that created the comment. At this time, 'asset_id'
# can be:
# 1. Absent: The comment is not linked to any other asset.
# 2. Comment id: The comment is a response to another comment.
# 3. Photo id: The comment is a comment on a photo.
#
# 'timestamp' records the time that the comment was originally
# created. 'message' is the actual comment text.
IndexedTable(COMMENT, 'cm', read_units=200, write_units=20,
columns=[HashKeyColumn('viewpoint_id', 'vi', 'S'),
RangeKeyColumn('comment_id', 'ci', 'S'),
Column('user_id', 'ui', 'N', read_only=True),
Column('asset_id', 'ai', 'S', read_only=True),
Column('timestamp', 'ti', 'N'),
Column('message', 'me', 'S')]),
# Key is composite of (user_id, sort_key)
# sort_key: base64 hex encoded timestamp + contact_id
# contact_id: contact_source + ':' + hash (base64 encoded) of CONTACT data: name, given_name, family_name,
# rank, and identities_properties columns.
# contact_source: 'fb', 'gm', 'ip', or 'm' (for, respectively, Facebook, GMail, iPhone, and Manual sources)
# timestamp column should always match the timestamp encoded prefix of the sort_key.
# identities: set of canonicalized identity strings: Email:<email-address>, Phone:<phone>, Facebook:<fb-graph-id>
# These reference identities in the IDENTITY table. This column exists so that contacts can be queried by
# identity. Note: duplicates info that's contained in the identities_properties column.
# identities_properties: json formatted list of identities each with an optional label such as 'mobile', 'work',
  # etc... This list preserves the order in which the identities were uploaded by (or fetched from) a
# contact source. These identities may not be in canonicalized form, but it must be possible to canonicalize
# them.
# labels: 'removed' indicates that the contact is in a removed state. This surfaces the removed state of
# contacts to clients through invalidation notifications. These contacts will be filtered out for down-level
# client queries.
IndexedTable(CONTACT, 'co', read_units=50, write_units=120,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('timestamp', 'ti', 'N'),
Column('contact_id', 'ci', 'S', SecondaryIndexer(), read_only=True),
Column('contact_source', 'cs', 'S', read_only=True),
SetColumn('labels', 'lb', 'SS'),
SetColumn('identities', 'ids', 'SS', SecondaryIndexer(), read_only=True),
Column('name', 'na', 'S', read_only=True),
Column('given_name', 'gn', 'S', read_only=True),
Column('family_name', 'fn', 'S', read_only=True),
Column('rank', 'ra', 'N', read_only=True),
JSONColumn('identities_properties', 'ip', read_only=True)]),
# Device information. Key is a composite of user id and a 32-bit
# integer device id (allocated via the id-allocation table). Each
# device is a source of photos. The device id comprises the first
# 32 bits of the photo id. The last 32 bits are sequentially
# allocated by the device (in the case of mobile), or via an
# atomic increment of 'id_seq' (in the case of the web).
#
# 'last_access' and 'push_token' are set on device registration
# and each time the application is launched (in the case of the
# mobile app). 'push_token' is indexed to allow device lookups in
# response to feedback from provider push-notification services.
# 'alert_user_id' is a sparse column index, used to quickly find
# all devices for a user that need to be alerted.
#
# Device ID of 0 is reserved to mean local to an individual device.
#
# Example Apple push token: "apns:oYJrenW5JsH42r1eevgq3HhC6bhXL3OP0SqHkOeo/58="
IndexedTable(DEVICE, 'de', read_units=25, write_units=5,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('device_id', 'di', 'N', SecondaryIndexer()),
Column('timestamp', 'ti', 'N'),
Column('name', 'na', 'S'),
Column('version', 've', 'S'),
Column('platform', 'pl', 'S'),
Column('os', 'os', 'S'),
Column('last_access', 'la', 'N'),
Column('alert_user_id', 'aui', 'N', SecondaryIndexer()),
Column('push_token', 'pt', 'S', SecondaryIndexer()),
Column('language', 'lg', 'S'),
Column('country', 'co', 'S')]),
# Key is episode-id. Episodes are indexed for full-text search on
# episode title and description, and lookup of all episodes for a user.
# Due to a rename, the Episode table is called Event in the database.
IndexedTable(EPISODE, 'ev', read_units=200, write_units=10, name_in_db="Event",
columns=[HashKeyColumn('episode_id', 'ei', 'S'),
Column('parent_ep_id', 'pa', 'S', SecondaryIndexer(), read_only=True),
Column('user_id', 'ui', 'N', SecondaryIndexer(), read_only=True),
Column('viewpoint_id', 'vi', 'S', SecondaryIndexer(), read_only=True),
Column('publish_timestamp', 'pu', 'N'),
Column('timestamp', 'cr', 'N'),
Column('title', 'ti', 'S'),
Column('description', 'de', 'S'),
LatLngColumn('location', 'lo'),
PlacemarkColumn('placemark', 'pl')]),
# Sorts all viewpoints followed by a user in order of the date of
# on which the last activity was added. Viewpoints updated on the
# same day are in undefined order. Sort is in descending order, with
# viewpoints most recently updated coming first. The query_followed
# method returns results in this ordering. Note that paging may result
# in missed followed records, as updates to a viewpoint may cause the
# corresponding record to "jump ahead" in time past the current paging
# bookmark. 'date_updated' is a timestamp truncated to a day boundary.
# 'sort_key' is a concatenation of the 'date_updated' field and the
# viewpoint id.
IndexedTable(FOLLOWED, 'fd', read_units=200, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('date_updated', 'du', 'N'),
Column('viewpoint_id', 'vi', 'S', read_only=True)]),
# Key is a composite of (user-id, viewpoint-id). The 'labels' set
# specifies the features of the relation between the user and
# viewpoint: ('admin', 'contribute'). 'adding_user_id' contains the id
# of the user who added this follower, and 'timestamp' the time at which
# the follower was added. 'viewed_seq' is the sequence number of the last
# viewpoint update that has been 'read' by this follower. The last
# viewpoint update is tracked by the 'update_seq' attribute on Viewpoint.
IndexedTable(FOLLOWER, 'fo', read_units=400, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('viewpoint_id', 'vi', 'S', SecondaryIndexer()),
Column('timestamp', 'ti', 'N'),
Column('adding_user_id', 'aui', 'N'),
SetColumn('labels', 'la', 'SS'),
Column('viewed_seq', 'vs', 'N')]),
# Key is composite of user-id / friend-id. "colocated_shares" and
# "total_shares" are decaying stats that track the number of photo
# opportunities where sharing occurred. 'last_colocated' and
# 'last_share' are timestamps for computing decay. Friend status is
# one of {friend,blocked,muted}.
Table(FRIEND, 'fr', read_units=50, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('friend_id', 'fi', 'N'),
Column('name', 'na', 'S'),
Column('nickname', 'nn', 'S'),
Column('colocated_shares', 'cs', 'N'),
Column('last_colocated', 'lc', 'N'),
Column('total_shares', 'ts', 'N'),
Column('last_share', 'ls', 'N'),
Column('status', 'st', 'S')]),
# Tracks the number of incorrect attempts that have been made to guess some
# secret, such as a password or an access code. 'guess_id' is of the form
# <type>:<id>, where <type> is one of these:
#
# url:<group-id> - Limits number of attempts that can be made to guess a
# valid ShortURL within any particular 24-hour period.
#
# pw:<user-id> - Limits number of attempts that can be made to guess a
# particular user's password within any particular 24-hour
# period.
#
# em:<user-id> - Limits number of attempts that can be made to guess
# access tokens e-mailed to a particular user within any
# particular 24-hour period.
#
# ph:<user-id> - Limits number of attempts that can be made to guess
# access tokens sent in SMS messages to a user within any
# particular 24-hour period.
#
# The 'guesses' field tracks the number of incorrect guesses that have been
# made so far. The 'expires' field stores the time at which the guesses count
# can be reset to 0.
Table(GUESS, 'gu', read_units=50, write_units=10,
columns=[HashKeyColumn('guess_id', 'gi', 'S'),
Column('expires', 'ex', 'N'),
Column('guesses', 'gu', 'N')]),
# Key is a composite of (group_key, timestamp), where group_key is the
# same key used to collect machine metrics in the metrics table. The
# intention is that for each metrics group_key, a single health report
# will be generated summarizing problems across all machines in that group.
#
# Alerts and Warnings are string sets which describe any problems detected
# from the metrics information. If no problems are detected, this record
# will be sparse.
Table(HEALTH_REPORT, 'hr', read_units=10, write_units=5,
columns=[HashKeyColumn('group_key', 'gk', 'S'),
RangeKeyColumn('timestamp', 'ts', 'N'),
SetColumn('alerts', 'as', 'SS'),
SetColumn('warnings', 'ws', 'SS')]),
# Key is ID type (e.g. op-id, photo-id, user-id, episode-id).
Table(ID_ALLOCATOR, 'ia', read_units=10, write_units=10,
columns=[HashKeyColumn('id_type', 'it', 'S'),
Column('next_id', 'ni', 'N')]),
# Key is identity. User-id is indexed to provide quick queries for the
# list of identities associated with a viewfinder account. The token
# allows access to external resources associated with the identity.
# 'last_fetch' specifies the last time that the contacts were
# fetched for this identity. 'authority' is one of ('Facebook', 'Google'
# 'Viewfinder', etc.) and identifies the trusted authentication authority.
#
# The complete set of attributes (if any) returned when an
# identity was authenticated is stored as a json-encoded dict in
# 'json_attrs'. Some of these may be taken to populate the
# demographic and informational attributes of the User table.
#
# The 'access_token' and 'refresh_token' fields store any tokens used to
# access the authority, with 'expires' tracking the lifetime of the
# token.
#
# The 'auth_throttle' field limits the number of auth email/sms messages
# that can be sent within a certain period of time.
IndexedTable(IDENTITY, 'id', read_units=50, write_units=10,
columns=[HashKeyColumn('key', 'ke', 'S'),
Column('user_id', 'ui', 'N', SecondaryIndexer()),
JSONColumn('json_attrs', 'ja'),
Column('last_fetch', 'lf', 'N'),
Column('authority', 'au', 'S'),
Column('access_token', 'at', 'S'),
Column('refresh_token', 'rt', 'S'),
Column('expires', 'ex', 'N'),
JSONColumn('auth_throttle', 'th'),
# TODO(Andy): Remove these attributes, as they are now deprecated.
Column('access_code', 'ac', 'S', SecondaryIndexer()),
Column('expire_code', 'xc', 'N'),
Column('token_guesses', 'tg', 'N'),
Column('token_guesses_time', 'gt', 'N')]),
# A lock is acquired in order to control concurrent access to
# a resource. The 'lock_id' is a composite of the type of the
# resource and its unique id. The 'owner_id' is a string that
# uniquely identifies the holder of the lock. 'resource_data'
# is resource-specific information that is provided by the
# owner and stored with the lock. The 'expiration' is the time
# (UTC) at which the lock is assumed to have been abandoned by
# the owner and can be taken over by another owner.
#
# 'acquire_failures' tracks the number of times other agents
# tried to acquire the lock while it was held.
Table(LOCK, 'lo', read_units=50, write_units=10,
columns=[HashKeyColumn('lock_id', 'li', 'S'),
Column('owner_id', 'oi', 'S'),
Column('expiration', 'ex', 'N'),
Column('acquire_failures', 'af', 'N'),
Column('resource_data', 'rd', 'S')]),
# Metrics represent a timestamped payload of performance metrics
# from a single machine running viewfinder. The metrics key is a
# composite of (group_key, sort_key). The payload column is a serialized
# dictionary describing the performance metrics that were captured from
# the machine.
#
# The group_key for a metric is intended to organize metrics by the way
# they are queried. For instance, a group key might contain all
# metrics for all machines in an EC2 region, or a more specific division
# than that.
#
# The sort_key is a composite of the timestamp and machine id - the
# intention is that records will be queried by timestamp, while machine_id
# is simply included in the key to differentiate records with the same
# timestamp from different machines.
IndexedTable(METRIC, 'mt', read_units=50, write_units=10,
columns=[HashKeyColumn('group_key', 'gk', 'S'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('machine_id', 'mi', 'S', SecondaryIndexer()),
Column('timestamp', 'ts', 'N'),
Column('payload', 'p', 'S')]),
# Notifications are messages to deliver to devices hosting the
# viewfinder client, whether mobile, desktop, web application or
# otherwise. Key is a composite of (user-id and allocated
# notification id--taken from user's uu_id sequence). Other
# fields record the name, id, and timestamp of the operation that
# resulted in the notification, as well as the user and device
# that started it. The badge attribute records the value of the
# "push badge" on client devices at the time that notification
# was recorded. The invalidate attribute is a JSON-encoded
# INVALIDATE structure, as defined in json_schema.py.
Table(NOTIFICATION, 'no', read_units=50, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('notification_id', 'ni', 'N'),
Column('name', 'na', 'S'),
Column('timestamp', 'ti', 'N'),
Column('sender_id', 'si', 'N'),
Column('sender_device_id', 'sd', 'N'),
Column('badge', 'ba', 'N'),
Column('invalidate', 'in', 'S'),
Column('op_id', 'oi', 'S'),
Column('viewpoint_id', 'vi', 'S'),
Column('update_seq', 'us', 'N'),
Column('viewed_seq', 'vs', 'N'),
Column('activity_id', 'ai', 'S')]),
# Operations are write-ahead logs of mutating server
# requests. These requests need to be persisted so that they can
# be retried on server failure. They often involve multiple
# queries / updates to different database tables and/or rows, so a
# partially completed operation could leave the database in an
# inconsistent state. Each operation must be idempotent, as
# failing servers may cause retries. The actual operation is
# stored JSON-encoded in 'json'. This is often the original HTTP
# request, though in some cases, the JSON from the HTTP request
# is augmented with additional information, such as pre-allocated
# photo, user or device IDs.
#
# 'quarantine' indicates that if the operation fails, it
# should not prevent further operations for the same user from
# processing.
#
# 'checkpoint' stores progress information with the operation. If the
# operation is restarted, it can use this information to skip over
# steps it's already completed. The progress information is operation-
# specific and is not used in any way by the operation framework itself.
#
# 'triggered_failpoints' is used for testing operation idempotency. It
# contains the set of failpoints which have already been triggered for
# this operation and need not be triggered again.
Table(OPERATION, 'op', read_units=50, write_units=50,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('operation_id', 'oi', 'S'),
Column('device_id', 'di', 'N'),
Column('method', 'me', 'S'),
Column('json', 'js', 'S'),
Column('timestamp', 'ti', 'N'),
Column('attempts', 'at', 'N'),
Column('backoff', 'bo', 'N'),
Column('first_failure', 'ff', 'S'),
Column('last_failure', 'lf', 'S'),
Column('quarantine', 'sf', 'N'),
JSONColumn('checkpoint', 'cp'),
JSONColumn('triggered_failpoints', 'fa')]),
# Key is photo-id. Photo id is composed of 32 bits of time in the
# high 32 bits, then 32 bits of device id, then 32 bits of
# monotonic photo id, unique to the device. The full 96 bits are
# base-64 hex encoded into 128 bits. Photos can have a parent
# photo-id, which refers back to an original photo if this is a
# copy. Copies are made when filters are applied to photos. The
# client_data string is a JSON-encoded dict of opaque
# client-supplied key-value pairs.
#
# The 'share_seq_no' attribute is incremented every time the shares
# for a photo are modified. It provides for efficient queries from
# clients meant to determine the list of friends with viewing
# privileges
#
  # Sizes for tn, med, full, orig are file sizes in bytes for thumbnail,
# medium, full and original images respectively.
#
# The 'new_assets' attribute is temporary and there to support rename
# of image asset files from underscore to period suffixes. It contains
# the value 'copied' if the asset files have been duplicated and
# 'deleted' if the original asset files have been verified as copied
# and removed.
# TODO(spencer): remove this once we have completely migrated the photo
# data.
#
# 'client_data' is deprecated; use USER_PHOTO instead.
IndexedTable(PHOTO, 'ph', read_units=400, write_units=25,
columns=[HashKeyColumn('photo_id', 'pi', 'S'),
Column('parent_id', 'pa', 'S', SecondaryIndexer(), read_only=True),
Column('episode_id', 'ei', 'S', read_only=True),
Column('user_id', 'ui', 'N', read_only=True),
Column('aspect_ratio', 'ar', 'N'),
Column('content_type', 'ct', 'S', read_only=True),
Column('timestamp', 'ti', 'N'),
Column('tn_md5', 'tm', 'S'),
Column('med_md5', 'mm', 'S'),
Column('orig_md5', 'om', 'S', SecondaryIndexer()),
Column('full_md5', 'fm', 'S', SecondaryIndexer()),
Column('tn_size', 'ts', 'N'),
Column('med_size', 'ms', 'N'),
Column('full_size', 'fs', 'N'),
Column('orig_size', 'os', 'N'),
LatLngColumn('location', 'lo'),
PlacemarkColumn('placemark', 'pl'),
Column('caption', 'ca', 'S',
FullTextIndexer(metaphone=Indexer.Option.YES)),
Column('link', 'li', 'S'),
Column('thumbnail_data', 'da', 'S'),
Column('share_seq', 'ss', 'N'),
JSONColumn('client_data', 'cd'), # deprecated
Column('new_assets', 'na', 'S')]),
# Key is composite of (episode-id, photo_id). When photos are
# posted/reposted to episodes, a post relation is created. This
# allows the same photo to be included in many episodes. The
# 'labels' attribute associates a set of properties with the
# post.
IndexedTable(POST, 'po', read_units=200, write_units=25,
columns=[HashKeyColumn('episode_id', 'ei', 'S'),
RangeKeyColumn('photo_id', 'sk', 'S'),
SetColumn('labels', 'la', 'SS')]),
# Key is composite of (settings_id, group_name). 'settings_id' is the
# id of the entity to which the settings apply. For example, user account
# settings have ids like 'us:<user_id>'. 'group_name' can be used if
# a particular entity has large numbers of settings that need to be
# sub-grouped.
#
# All other columns are a union of all columns defined by all the groups
# stored in the table. The Settings class has support for only exposing
# columns that apply to a particular group, in order to avoid accidental
# use of a column belonging to another settings group.
Table(SETTINGS, 'se', read_units=100, write_units=10,
columns=[HashKeyColumn('settings_id', 'si', 'S'),
RangeKeyColumn('group_name', 'gn', 'S'),
# User account group settings.
Column('user_id', 'ui', 'N'),
Column('email_alerts', 'ea', 'S'),
Column('sms_alerts', 'sa', 'S'),
Column('push_alerts', 'pa', 'S'),
Column('marketing', 'mk', 'S'),
Column('sms_count', 'sc', 'N'),
SetColumn('storage_options', 'so', 'SS')]),
# Key is composite of (group_id, random_key). 'group_id' partitions the URL
# space into groups, so that URL's generated for one group have no overlap
# with those for another group. The group id will be appended as the URL
# path, so it may contain '/' characters, and should be URL safe. The
# 'timestamp' column tracks the time at which the ShortURL was created.
#
# The 'json' column contains arbitrary named arguments that are associated
# with the short URL and are passed to the request handler when the short
# URL is used. The 'expires' field bounds the time during which the URL
# can be used.
Table(SHORT_URL, 'su', read_units=25, write_units=5,
columns=[HashKeyColumn('group_id', 'gi', 'S'),
RangeKeyColumn('random_key', 'rk', 'S'),
Column('timestamp', 'ti', 'N'),
Column('expires', 'ex', 'N'),
JSONColumn('json', 'js')]),
# The subscription table contains a user's current
# subscription(s). A subscription is any time-limited
# modification to a user's privileges, such as increased storage
# quota.
#
# This table contains a log of all transactions that have affected
# a user's subscriptions. In most cases only the most recent
# transaction for a given subscription_id is relevant - it is the
# most recent renewal.
#
# "product_type" is the type of subscription, such as "storage".
# Quantity is interpreted based on the product_type; for the
# "storage" product it is a number of GB. "payment_type"
# indicates how the subscription was paid for (e.g. "itunes" or
# "referral_bonus"). The contents of "extra_info" and
# "renewal_data" depend on the payment type. "extra_info" is a
# dict of additional information related to the transaction, and
# "renewal_data" is an opaque blob that is used to renew a subscription
# when it expires.
Table(SUBSCRIPTION, 'su', read_units=10, write_units=5,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('transaction_id', 'tr', 'S'),
Column('subscription_id', 'su', 'S', read_only=True),
# timestamps should be read-only too, once we fix
# problems with read-only floats.
Column('timestamp', 'ti', 'N'),
Column('expiration_ts', 'ex', 'N'),
Column('product_type', 'pr', 'S', read_only=True),
Column('quantity', 'qu', 'N'),
Column('payment_type', 'pt', 'S', read_only=True),
JSONColumn('extra_info', 'ei'),
Column('renewal_data', 'pd', 'S', read_only=True)]),
# Key is user id. 'webapp_dev_id' is assigned on creation, and
# serves as a unique ID with which to formulate asset IDs in
# conjunction with the 'asset_id_seq' attribute. This provides a
# monotonically increasing sequence of episode/viewpoint/photo ids
# for uploads via the web application. The 'uu_id_seq' provides a
# similar increasing sequence of user update sequence numbers for
# a user.
#
# Facebook email is kept separately in an effort to maximize
# deliverability of Viewfinder invitations to Facebook contacts.
# The from: header of those emails must be from the email address
# registered for the Facebook user if incoming to
# <username>@facebook.com.
#
# 'last_notification' is the most recent notification id which has
# been queried by any of the user's devices. This is the watermark
# used to supply the badge for push notifications. 'badge' is set
# appropriately in response to notifications generated by account
# activity.
#
# The 'merged_with' column specifies the sink user account with
# which this user was merged. If 'merged_with' is set, this user
# account is invalid and should not be used. If at all possible,
# the request intended for this user should be re-routed to the
# 'merged_with' user.
#
# The 'signing_key' column is a Keyczar signing keyset used when
# it is desirable to sign a payload with a key that is specific to
# one particular user. The contents of the column are encrypted
# with the service-wide db crypt keyset.
#
# The 'pwd_hash' and 'salt' columns are used to securely generate
# and store an iterative SHA1 hash of the user's password + salt.
#
# For user index, range key column is a string-version of user ID.
IndexedTable(USER, 'us', read_units=50, write_units=50,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
Column('private_vp_id', 'pvi', 'S'),
Column('webapp_dev_id', 'wdi', 'N'),
Column('asset_id_seq', 'ais', 'N'),
Column('uu_id_seq', 'uis', 'N'),
Column('given_name', 'fi', 'S', FullTextIndexer()),
Column('family_name', 'la', 'S', FullTextIndexer()),
Column('name', 'na', 'S', FullTextIndexer()),
Column('email', 'em', 'S', EmailIndexer()),
Column('facebook_email', 'fe', 'S'),
LatLngColumn('location', 'lo', LocationIndexer()),
Column('gender', 'ge', 'S'),
Column('locale', 'lc', 'S'),
Column('link', 'li', 'S'),
Column('phone', 'ph', 'S'),
Column('picture', 'pi', 'S'),
Column('timezone', 'ti', 'N'),
Column('last_notification', 'ln', 'N'),
Column('badge', 'ba', 'N'),
Column('merged_with', 'mw', 'N'),
SetColumn('labels', 'lb', 'SS'),
CryptColumn('signing_key', 'sk'),
CryptColumn('pwd_hash', 'pwd'),
CryptColumn('salt', 'slt'),
# Deprecated (to be removed).
Column('beta_status', 'bs', 'S')]),
# The USER_PHOTO is associated with a PHOTO object, and
# represents user-specific information about the photo.
# Specifically, this includes mappings between the photo and a
# device's native asset library. Normally only the user/device
# who originated the photo will have a USER_PHOTO entry for it,
# but it is possible for other users to create USER_PHOTOS if
# they export a photo to their camera roll.
IndexedTable(USER_PHOTO, 'up', read_units=400, write_units=10,
columns=[HashKeyColumn('user_id', 'di', 'N'),
RangeKeyColumn('photo_id', 'pi', 'S'),
SetColumn('asset_keys', 'ak', 'SS')]),
# The USER_POST is associated with a POST object, and represents
# user-specific override of information in the POST. 'timestamp'
# records the creation time of the record, and 'labels' contains
# a set of values which describes the customizations. For example,
# the 'removed' label indicates that the post should not be shown
# in the user's personal collection.
#
# Rows in the USER_POST table are only created if the user wants
# to customize the viewpoint in some way. In the absence of a
# row, default values are assumed.
IndexedTable(USER_POST, 'uo', read_units=400, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('post_id', 'pi', 'S'),
Column('timestamp', 'ti', 'N'),
SetColumn('labels', 'la', 'SS')]),
# Key is viewpoint-id. Viewpoints are a collection of episodes.
# Viewpoint title and description are indexed for full-text
# search. The viewpoint name, sort of like a twitter
# handle, is also indexed. 'type' is one of:
# ('default', 'event', 'thematic')
#
# The 'update_seq' is incremented each time a viewpoint asset
# is added, removed, or updated. Using this with the 'viewed_seq'
# attribute on Follower, clients can easily determine if there
# is any "unread" content in the viewpoint. Note that updates to
# user-specific content on Follower do not trigger the increment
# of this value. 'last_updated' is set to the creation timestamp of
# the latest activity that was added to this viewpoint.
#
# The 'cover_photo' column is a JSON-encoded dict of photo_id and
# episode_id which indicates which photo should be used as the cover
# photo for the viewpoint. An absent column or None value for this indicates
# that it's explicitly not available (no visible photos in the viewpoint).
# Default viewpoints will not have this column set.
IndexedTable(VIEWPOINT, 'vp', read_units=400, write_units=10,
columns=[HashKeyColumn('viewpoint_id', 'vi', 'S'),
Column('user_id', 'ui', 'N', SecondaryIndexer(), read_only=True),
Column('timestamp', 'ts', 'N', read_only=True),
Column('title', 'ti', 'S',
FullTextIndexer(metaphone=Indexer.Option.YES)),
Column('description', 'de', 'S',
FullTextIndexer(metaphone=Indexer.Option.YES)),
Column('last_updated', 'lu', 'N'),
Column('name', 'na', 'S', SecondaryIndexer()),
Column('type', 'ty', 'S', read_only=True),
Column('update_seq', 'us', 'N'),
JSONColumn('cover_photo', 'cp')]),
# The index table for all indexed terms. Maps from a string to a
# string for greatest flexibility. This requires that the various
# database objects convert from a string value if the doc-id
# actually does represent a number, such as the user-id in some of
# the indexed tables in this schema.
IndexTable(INDEX, 'S', 'S', read_units=200, write_units=50),
# For the dynamodb_client_test.
Table(TEST_RENAME, 'test', read_units=10, write_units=5, name_in_db="Test",
columns=[HashKeyColumn('test_hk', 'thk', 'S'),
RangeKeyColumn('test_rk', 'trk', 'N'),
Column('attr0', 'a0', 'N'),
Column('attr1', 'a1', 'N'),
Column('attr2', 'a2', 'S'),
Column('attr3', 'a3', 'NS'),
Column('attr4', 'a4', 'SS')]),
])
| |
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import argparse
import logging
import os
import platform
import shutil
import stat
import subprocess
import sys
import time
import threading
import re
import json
import tempfile
import six
from six.moves import range
_V8_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'third_party',
'v8'))
_JS_PARSER_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'third_party',
'parse5', 'parse5.js'))
_BOOTSTRAP_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'd8_bootstrap.js'))
_BASE64_COMPAT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'base64_compat.js'))
_PATH_UTILS_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'path_utils.js'))
_HTML_IMPORTS_LOADER_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'html_imports_loader.js'))
_HTML_TO_JS_GENERATOR_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'html_to_js_generator.js'))
_BOOTSTRAP_JS_CONTENT = None
_NUM_TRIALS = 3
def _ValidateSourcePaths(source_paths):
if source_paths is None:
return
for x in source_paths:
assert os.path.exists(x)
assert os.path.isdir(x)
assert os.path.isabs(x)
def _EscapeJsString(s):
assert isinstance(s, str)
return json.dumps(s)
def _RenderTemplateStringForJsSource(source, template, replacement_string):
  """Replace every occurrence of |template| in |source| with the JS-escaped
  form of |replacement_string|, and return the resulting string."""
  quoted = _EscapeJsString(replacement_string)
  return source.replace(template, quoted)
def _GetBootStrapJsContent(source_paths):
  """Build the d8 bootstrap JavaScript with all template slots filled in.

  Args:
    source_paths: list of absolute directories the JS import loader should
        search, in priority order.

  Returns:
    The bootstrap JavaScript source as a string.
  """
  assert isinstance(source_paths, list)
  global _BOOTSTRAP_JS_CONTENT
  if not _BOOTSTRAP_JS_CONTENT:
    # Cache the template on first use; the file never changes within a run.
    with open(_BOOTSTRAP_JS_DIR, 'r') as f:
      _BOOTSTRAP_JS_CONTENT = f.read()
  bsc = _BOOTSTRAP_JS_CONTENT

  # Ensure that source paths are unique, preserving the caller's ordering.
  # (The previous list(set(...)) shuffled the paths nondeterministically,
  # which changed module-resolution priority from run to run.)
  seen = set()
  unique_paths = []
  for path in source_paths:
    if path not in seen:
      seen.add(path)
      unique_paths.append(path)

  source_paths_string = '[%s]' % (
      ','.join(_EscapeJsString(s) for s in unique_paths))
  bsc = bsc.replace('<%source_paths%>', source_paths_string)
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%current_working_directory%>', os.getcwd())
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%path_utils_js_path%>', _PATH_UTILS_JS_DIR)
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%html_imports_loader_js_path%>', _HTML_IMPORTS_LOADER_JS_DIR)
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%html_to_js_generator_js_path%>', _HTML_TO_JS_GENERATOR_JS_DIR)
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%js_parser_path%>', _JS_PARSER_DIR)
  bsc = _RenderTemplateStringForJsSource(
      bsc, '<%base64_compat_path%>', _BASE64_COMPAT_DIR)
  # Tag the generated script so d8 stack traces point at the bootstrap file.
  bsc += '\n//@ sourceURL=%s\n' % _BOOTSTRAP_JS_DIR
  return bsc
def _IsValidJsOrHTMLFile(parser, js_file_arg):
if not os.path.exists(js_file_arg):
parser.error('The file %s does not exist' % js_file_arg)
_, extension = os.path.splitext(js_file_arg)
if extension not in ('.js', '.html'):
parser.error('Input must be a JavaScript or HTML file')
return js_file_arg
def _GetD8BinaryPathForPlatform():
  """Locate the checked-in d8 binary matching the host OS/architecture,
  ensuring it carries the executable bit before returning its path."""
  def _D8Path(*paths):
    """Join paths and make it executable."""
    assert isinstance(paths, tuple)
    exe = os.path.join(_V8_DIR, *paths)
    st = os.stat(exe)
    if not st.st_mode & stat.S_IEXEC:
      os.chmod(exe, st.st_mode | stat.S_IEXEC)
    return exe

  # (system, machine) -> path components under _V8_DIR.
  known_binaries = {
      ('Linux', 'x86_64'): ('linux', 'x86_64', 'd8'),
      ('Linux', 'aarch64'): ('linux', 'arm', 'd8'),
      ('Linux', 'armv7l'): ('linux', 'arm', 'd8'),
      ('Linux', 'mips'): ('linux', 'mips', 'd8'),
      ('Linux', 'mips64'): ('linux', 'mips64', 'd8'),
      ('Darwin', 'x86_64'): ('mac', 'x86_64', 'd8'),
      ('Darwin', 'arm64'): ('mac', 'arm', 'd8'),
      ('Windows', 'AMD64'): ('win', 'AMD64', 'd8.exe'),
  }
  host = (platform.system(), platform.machine())
  if host not in known_binaries:
    raise NotImplementedError(
        'd8 binary for this platform (%s) and architecture (%s) is not yet'
        ' supported' % host)
  return _D8Path(*known_binaries[host])
# Speculative change to workaround a failure on Windows: speculation is that the
# script attempts to remove a file before the process using the file has
# completely terminated. So the function here attempts to retry a few times with
# a second timeout between retries. More details at https://crbug.com/946012
# TODO(sadrul): delete this speculative change since it didn't work.
def _RemoveTreeWithRetry(tree, retry=3):
for count in range(retry):
try:
shutil.rmtree(tree)
return
except:
if count == retry - 1:
raise
logging.warning('Removing %s failed. Retrying in 1 second ...' % tree)
time.sleep(1)
class RunResult(object):
  """Outcome of running a JS program under d8.

  Carries the (adjusted) process exit status in `returncode` (0 on success)
  and the captured standard output in `stdout` (None when stdout was not
  piped).
  """
  def __init__(self, returncode, stdout):
    self.returncode, self.stdout = returncode, stdout
def ExecuteFile(file_path, source_paths=None, js_args=None, v8_args=None,
                stdout=subprocess.PIPE, stdin=subprocess.PIPE):
  """Execute JavaScript program in |file_path| and return its output.

  Convenience wrapper around RunFile that runs without a timeout and
  discards the return code.

  Args:
    file_path: string file_path that contains path the .js or .html file to
      be executed.
    source_paths: the list of absolute paths containing code. All the imports
    js_args: a list of string arguments to sent to the JS program.

    Args stdout & stdin are the same as _RunFileWithD8.

  Returns:
    The string output from running the JS program.
  """
  return RunFile(file_path, source_paths, js_args, v8_args, None, stdout,
                 stdin).stdout
def RunFile(file_path, source_paths=None, js_args=None, v8_args=None,
            timeout=None, stdout=subprocess.PIPE, stdin=subprocess.PIPE):
  """Runs JavaScript program in |file_path|.

  The program is executed indirectly: a temporary bootstrap .js file is
  generated that loads |file_path| through the HTML-imports loader, and that
  bootstrap file is what d8 actually runs.

  Args are same as ExecuteFile.

  Returns:
    A RunResult containing the program's output.

  Raises:
    ValueError: if |file_path| is not a .js or .html file.
  """
  assert os.path.isfile(file_path)
  _ValidateSourcePaths(source_paths)

  _, extension = os.path.splitext(file_path)
  if not extension in ('.html', '.js'):
    raise ValueError('Can only execute .js or .html file. File %s has '
                     'unsupported file type: %s' % (file_path, extension))
  if source_paths is None:
    # Default the module search path to the directory of the input file.
    source_paths = [os.path.dirname(file_path)]

  abs_file_path_str = _EscapeJsString(os.path.abspath(file_path))

  # Retry the whole run a few times because d8 occasionally fails to read
  # the freshly written bootstrap file (crbug.com/953365).
  for trial in range(_NUM_TRIALS):
    try:
      temp_dir = tempfile.mkdtemp()
      temp_bootstrap_file = os.path.join(temp_dir, '_tmp_bootstrap.js')
      with open(temp_bootstrap_file, 'w') as f:
        f.write(_GetBootStrapJsContent(source_paths))
        if extension == '.html':
          f.write('\nHTMLImportsLoader.loadHTMLFile(%s, %s);' %
                  (abs_file_path_str, abs_file_path_str))
        else:
          f.write('\nHTMLImportsLoader.loadFile(%s);' % abs_file_path_str)
      result = _RunFileWithD8(temp_bootstrap_file, js_args, v8_args, timeout,
                              stdout, stdin)
    except:
      # Save the exception.
      t, v, tb = sys.exc_info()
      try:
        _RemoveTreeWithRetry(temp_dir)
      except:
        logging.error('Failed to remove temp dir %s.', temp_dir)
      if 'Error reading' in str(v):  # Handle crbug.com/953365
        if trial == _NUM_TRIALS - 1:
          logging.error(
              'Failed to run file with D8 after %s tries.', _NUM_TRIALS)
          six.reraise(t, v, tb)
        logging.warn('Hit error %s. Retrying after sleeping.', v)
        time.sleep(10)
        continue
      # Re-raise original exception.
      six.reraise(t, v, tb)
    _RemoveTreeWithRetry(temp_dir)
    break
  return result
def ExecuteJsString(js_string, source_paths=None, js_args=None, v8_args=None,
                    original_file_name=None, stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE):
  """Run the JavaScript source in |js_string| and return its stdout.

  Thin wrapper around RunJsString that discards the return code.
  """
  run_result = RunJsString(js_string, source_paths, js_args, v8_args,
                           original_file_name, stdout, stdin)
  return run_result.stdout
def RunJsString(js_string, source_paths=None, js_args=None, v8_args=None,
                original_file_name=None, stdout=subprocess.PIPE,
                stdin=subprocess.PIPE):
  """Runs the JavaScript source in |js_string| under d8.

  The string is written to a temporary .js file (named after
  |original_file_name| when given, which keeps stack traces readable) and
  executed via RunFile.

  Returns:
    A RunResult containing the program's output.
  """
  _ValidateSourcePaths(source_paths)

  try:
    temp_dir = tempfile.mkdtemp()
    if original_file_name:
      name = os.path.basename(original_file_name)
      name, _ = os.path.splitext(name)
      temp_file = os.path.join(temp_dir, '%s.js' % name)
    else:
      temp_file = os.path.join(temp_dir, 'temp_program.js')
    with open(temp_file, 'w') as f:
      f.write(js_string)
    result = RunFile(temp_file, source_paths, js_args, v8_args, None, stdout,
                     stdin)
  except:
    # Save the exception.
    t, v, tb = sys.exc_info()
    try:
      _RemoveTreeWithRetry(temp_dir)
    except:
      logging.error('Failed to remove temp dir %s.', temp_dir)
    # Re-raise original exception.
    six.reraise(t, v, tb)
  _RemoveTreeWithRetry(temp_dir)
  return result
def _KillProcess(process, name, reason):
# kill() does not close the handle to the process. On Windows, a process
# will live until you delete all handles to that subprocess, so
# ps_util.ListAllSubprocesses will find this subprocess if
# we haven't garbage-collected the handle yet. poll() should close the
# handle once the process dies.
logging.warn('Killing process %s because %s.', name, reason)
process.kill()
time.sleep(.01)
for _ in range(100):
if process.poll() is None:
time.sleep(.1)
continue
break
else:
logging.warn('process %s is still running after we '
'attempted to kill it.', name)
def _RunFileWithD8(js_file_path, js_args, v8_args, timeout, stdout, stdin):
  """Execute the js file with the v8 engine and return the program's output.

  Args:
    js_file_path: the string path of the js file to be run.
    js_args: a list of arguments to passed to the |js_file_path| program.
    v8_args: extra arguments to pass into d8. (for the full list of these
      options, run d8 --help)
    timeout: how many seconds to wait for d8 to finish. If None or 0 then
      this will wait indefinitely.
    stdout: where to pipe the stdout of the executed program to. If
      subprocess.PIPE is used, stdout will be returned in RunResult.out.
      Otherwise RunResult.out is None
    stdin: specify the executed program's input.

  Returns:
    A RunResult with the adjusted return code and captured output.

  Raises:
    RuntimeError: if the JS program raised an uncaught exception.
  """
  if v8_args is None:
    v8_args = []
  assert isinstance(v8_args, list)
  args = [_GetD8BinaryPathForPlatform()] + v8_args
  args.append(os.path.abspath(js_file_path))
  # argv[0] of the JS program is the d8 binary path, then the user's args.
  full_js_args = [args[0]]
  if js_args:
    full_js_args += js_args
  args += ['--'] + full_js_args

  # Set stderr=None since d8 doesn't write into stderr anyway.
  sp = subprocess.Popen(args, stdout=stdout, stderr=None, stdin=stdin)
  if timeout:
    # NOTE: a previously computed `deadline` local was never used; the Timer
    # below is what actually enforces the timeout.
    timeout_thread = threading.Timer(timeout, _KillProcess, args=(
        sp, 'd8', 'it timed out'))
    timeout_thread.start()
  out, _ = sp.communicate()
  if timeout:
    timeout_thread.cancel()

  # On Windows, d8's print() method adds carriage return characters \r to
  # newlines, which makes the output differ from d8 on posix. Remove the
  # extra \r's to make the output consistent with posix platforms.
  if platform.system() == 'Windows' and out:
    out = re.sub(b'\r+\n', b'\n', six.ensure_binary(out))

  # d8 uses returncode 1 to indicate an uncaught exception, but
  # _RunFileWithD8 needs to distinguish between that and quit(1).
  #
  # To fix this, d8_bootstrap.js monkeypatches D8's quit function to add 1
  # to any intentional nonzero quit. So here we have to undo that offset in
  # order to raise/return the right thing.
  returncode = sp.returncode
  if returncode == 0:
    return RunResult(0, out)
  elif returncode == 1:
    if out:
      raise RuntimeError(
          'Exception raised when executing %s:\n%s' % (js_file_path, out))
    else:
      raise RuntimeError(
          'Exception raised when executing %s. '
          '(Error stack is dumped into stdout)' % js_file_path)
  else:
    return RunResult(returncode - 1, out)
def main():
  """Command-line entry point: run a JS or HTML file under d8.

  Returns:
    The (adjusted) d8 exit code from RunFile.
  """
  parser = argparse.ArgumentParser(
      description='Run JavaScript file with v8 engine')
  parser.add_argument('file_name', help='input file', metavar='FILE',
                      type=lambda f: _IsValidJsOrHTMLFile(parser, f))
  parser.add_argument('--js_args', help='arguments for the js program',
                      nargs='+')
  parser.add_argument('--source_paths', help='search path for the js program',
                      nargs='+', type=str)
  options = parser.parse_args()

  if not options.source_paths:
    # Fall back to the input file's own directory as the search path.
    options.source_paths = [
        os.path.abspath(os.path.dirname(options.file_name))]
    logging.warning(
        '--source_paths is not specified. Use %s for search path.' %
        options.source_paths)
  else:
    options.source_paths = [os.path.abspath(x) for x in options.source_paths]

  run_result = RunFile(options.file_name, source_paths=options.source_paths,
                       js_args=options.js_args, timeout=None,
                       stdout=sys.stdout, stdin=sys.stdin)
  return run_result.returncode
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.codes import FAILED
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (cleanup_resources,
is_snapshot_on_nfs,
validateList)
from marvin.lib.base import (VirtualMachine,
Account,
Template,
ServiceOffering,
Snapshot,
StoragePool,
Volume,
DiskOffering)
from marvin.lib.common import (get_domain,
get_test_template,
get_zone,
get_pod,
list_volumes,
list_snapshots,
list_storage_pools,
list_clusters)
from marvin.lib.decoratorGenerators import skipTestIf
from marvin.codes import PASS
class TestSnapshotRootDisk(cloudstackTestCase):
    """Smoke tests for snapshots taken from a virtual machine's ROOT disk.

    Shared fixtures (account, offerings, a small VM) are created once in
    setUpClass and torn down in tearDownClass; per-test resources go into
    self.cleanup.
    """

    @classmethod
    def setUpClass(cls):
        testClient = super(TestSnapshotRootDisk, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.pod = get_pod(cls.apiclient, cls.zone.id)
        cls.services['mode'] = cls.zone.networktype

        # Root-disk snapshots are not exercised on Hyper-V/LXC, nor on the
        # kvm-centos6 zone configuration; tests are skipped via the
        # @skipTestIf("hypervisorNotSupported") decorator.
        cls.hypervisorNotSupported = False
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ['hyperv', 'lxc'] or 'kvm-centos6' in cls.testClient.getZoneForTests():
            cls.hypervisorNotSupported = True
        cls._cleanup = []

        if not cls.hypervisorNotSupported:
            cls.template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)
            if cls.template == FAILED:
                assert False, "get_test_template() failed to return template"

            cls.services["domainid"] = cls.domain.id
            cls.services["small"]["zoneid"] = cls.zone.id
            cls.services["templates"]["ostypeid"] = cls.template.ostypeid
            cls.services["zoneid"] = cls.zone.id

            # Create VMs, NAT Rules etc
            cls.account = Account.create(
                cls.apiclient,
                cls.services["account"],
                domainid=cls.domain.id
            )

            cls.service_offering = ServiceOffering.create(
                cls.apiclient,
                cls.services["service_offerings"]["tiny"]
            )
            cls.disk_offering = DiskOffering.create(
                cls.apiclient,
                cls.services["disk_offering"]
            )
            cls.virtual_machine = cls.virtual_machine_with_disk = \
                VirtualMachine.create(
                    cls.apiclient,
                    cls.services["small"],
                    templateid=cls.template.id,
                    accountid=cls.account.name,
                    domainid=cls.account.domainid,
                    zoneid=cls.zone.id,
                    serviceofferingid=cls.service_offering.id,
                    mode=cls.services["mode"]
                )
            cls._cleanup.append(cls.service_offering)
            cls._cleanup.append(cls.account)
            cls._cleanup.append(cls.disk_offering)
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Fresh API/DB clients and an empty per-test cleanup list.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created instance, volumes and snapshots
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @skipTestIf("hypervisorNotSupported")
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_01_snapshot_root_disk(self):
        """Test Snapshot Root Disk
        """

        # Validate the following
        # 1. listSnapshots should list the snapshot that was created.
        # 2. verify that secondary storage NFS share contains
        #    the reqd volume under
        #    /secondary/snapshots//$account_id/$volumeid/$snapshot_uuid
        # 3. verify backup_snap_id was non null in the `snapshots` table
        # 4. Verify that zoneid is returned in listSnapshots API response

        volumes = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine_with_disk.id,
            type='ROOT',
            listall=True
        )
        snapshot = Snapshot.create(
            self.apiclient,
            volumes[0].id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.cleanup.append(snapshot)
        self.debug("Snapshot created: ID - %s" % snapshot.id)

        snapshots = list_snapshots(
            self.apiclient,
            id=snapshot.id
        )
        self.assertEqual(
            isinstance(snapshots, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            snapshots,
            None,
            "Check if result exists in list item call"
        )
        self.assertEqual(
            snapshots[0].id,
            snapshot.id,
            "Check resource id in list resources call"
        )
        self.assertIsNotNone(snapshots[0].zoneid,
                             "Zone id is not none in listSnapshots")
        self.assertEqual(
            snapshots[0].zoneid,
            self.zone.id,
            "Check zone id in the list snapshots"
        )

        # Cross-check against the management-server DB that the snapshot was
        # actually backed up (backup_snap_id populated).
        self.debug(
            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';"
            % str(snapshot.id)
        )
        qresultset = self.dbclient.execute(
            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';"
            % str(snapshot.id)
        )
        self.assertNotEqual(
            len(qresultset),
            0,
            "Check DB Query result set"
        )
        qresult = qresultset[0]
        snapshot_uuid = qresult[0]  # backup_snap_id = snapshot UUID
        self.assertNotEqual(
            str(snapshot_uuid),
            'NULL',
            "Check if backup_snap_id is not null"
        )
        self.assertTrue(is_snapshot_on_nfs(
            self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id))
        return

    @skipTestIf("hypervisorNotSupported")
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
    def test_02_list_snapshots_with_removed_data_store(self):
        """Test listing volume snapshots with removed data stores
        """

        # 1 - Create new volume -> V
        # 2 - Create new Primary Storage -> PS
        # 3 - Attach and detach volume V from vm
        # 4 - Migrate volume V to PS
        # 5 - Take volume V snapshot -> S
        # 6 - List snapshot and verify it gets properly listed although
        #     Primary Storage was removed

        # Create new volume
        vol = Volume.create(
            self.apiclient,
            self.services["volume"],
            diskofferingid=self.disk_offering.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.cleanup.append(vol)
        self.assertIsNotNone(vol, "Failed to create volume")
        vol_res = Volume.list(
            self.apiclient,
            id=vol.id
        )
        self.assertEqual(
            validateList(vol_res)[0],
            PASS,
            "Invalid response returned for list volumes")
        vol_uuid = vol_res[0].id

        # Create new Primary Storage
        clusters = list_clusters(
            self.apiclient,
            zoneid=self.zone.id
        )
        assert isinstance(clusters, list) and len(clusters) > 0

        storage = StoragePool.create(self.apiclient,
                                     self.services["nfs2"],
                                     clusterid=clusters[0].id,
                                     zoneid=self.zone.id,
                                     podid=self.pod.id
                                     )
        self.cleanup.append(self.virtual_machine_with_disk)
        self.cleanup.append(storage)

        self.assertEqual(
            storage.state,
            'Up',
            "Check primary storage state"
        )
        self.assertEqual(
            storage.type,
            'NetworkFilesystem',
            "Check storage pool type"
        )
        storage_pools_response = list_storage_pools(self.apiclient,
                                                    id=storage.id)
        self.assertEqual(
            isinstance(storage_pools_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(storage_pools_response),
            0,
            "Check list Hosts response"
        )
        storage_response = storage_pools_response[0]
        self.assertEqual(
            storage_response.id,
            storage.id,
            "Check storage pool ID"
        )
        self.assertEqual(
            storage.type,
            storage_response.type,
            "Check storage pool type "
        )

        # Attach created volume to vm, then detach it to be able to migrate it
        self.virtual_machine_with_disk.stop(self.apiclient)
        self.virtual_machine_with_disk.attach_volume(
            self.apiclient,
            vol
        )
        self.virtual_machine_with_disk.detach_volume(
            self.apiclient,
            vol
        )

        # Migrate volume to new Primary Storage
        Volume.migrate(self.apiclient,
                       storageid=storage.id,
                       volumeid=vol.id
                       )

        volume_response = list_volumes(
            self.apiclient,
            id=vol.id,
        )
        self.assertNotEqual(
            len(volume_response),
            0,
            "Check list Volumes response"
        )
        volume_migrated = volume_response[0]
        self.assertEqual(
            volume_migrated.storageid,
            storage.id,
            "Check volume storage id"
        )

        # Take snapshot of new volume
        snapshot = Snapshot.create(
            self.apiclient,
            volume_migrated.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.debug("Snapshot created: ID - %s" % snapshot.id)

        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)

        # List snapshot and verify it gets properly listed although Primary Storage was removed
        snapshot_response = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertNotEqual(
            len(snapshot_response),
            0,
            "Check list Snapshot response"
        )
        self.assertEqual(
            snapshot_response[0].id,
            snapshot.id,
            "Check snapshot id"
        )

        # Delete snapshot and verify it gets properly deleted (should not be listed)
        self.cleanup = [snapshot]
        cleanup_resources(self.apiclient, self.cleanup)
        self.cleanup = []

        snapshot_response_2 = Snapshot.list(
            self.apiclient,
            id=snapshot.id
        )
        self.assertEqual(
            snapshot_response_2,
            None,
            "Check list Snapshot response"
        )
        return
| |
"""
Module provides the api connection class for pulling DHCD DFD data
on projects pending funding and development
from https://octo.quickbase.com/db/<DB_ID>
Quickbase API
"""
import sys, os
import string
# Enable relative package imports when running this file as a script (i.e. for testing purposes).
python_filepath = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir))
sys.path.append(python_filepath)
from collections import OrderedDict
from xml.etree.ElementTree import Element, ElementTree
from xml.etree.ElementTree import fromstring as xml_fromstring
from xmljson import parker as xml_to_json
import json
from housinginsights.sources.base_project import ProjectBaseApiConn
from housinginsights.sources.models.dhcd import APP_ID, TABLE_ID_MAPPING, \
APP_METADATA_FIELDS, \
TABLE_METADATA_FIELDS, \
DhcdResult, \
PROJECT_FIELDS_MAP,\
SUBSIDY_FIELDS_MAP, \
PROJECT_ADDRE_FIELDS_MAP
# TODO include_all_fields properly switches which fields are requested to be a more extensive list
# However, for the DHCD "project" table (equivalent to Housing Insights 'subsidy' table), the
# query then results in a 'Report too large' error (errcode 75). IF we want to enable this field,
# we will need to enable chunking the request using the record ids.
# info: https://community.quickbase.com/quickbase/topics/suggestion-to-work-around-when-report-too-large-maximum-number-of-bytes-in-report-exceeded-error
# Per-table flag: True requests every column from Quick Base ('clist': 'a'),
# False requests only the table's default-report columns.
INCLUDE_ALL_FIELDS = {'dhcd_dfd_projects': False, 'dhcd_dfd_properties':True}

from housinginsights.tools.logger import HILogger
# Module-level logger shared by DhcdApiConn below.
logger = HILogger(name=__file__, logfile="sources.log")
class DhcdApiConn(ProjectBaseApiConn):
    """
    API Interface to the DHCD DFD data on projects
    pending funding and development.

    Inherits from BaseApiConn class.
    """
    BASEURL = 'https://octo.quickbase.com/db'

    # Quick Base API actions. The query '{\'1\'.XEX.\'0\'}' matches every
    # record (record-id field 1 "not equal" 0); 'clist': 'a' requests all
    # columns and 'slist' names the sort-field id.
    PARAMS_METADATA = {'a': 'API_GetSchema'}
    PARAMS_DATA_ALL_FIELDS = {'a': 'API_DoQuery', 'query': '{\'1\'.XEX.\'0\'}',
                              'clist': 'a', 'slist': '3'}
    PARAMS_DATA_DEFAULT_FIELDS = {'a': 'API_DoQuery',
                                  'query': '{\'1\'.XEX.\'0\'}'}

    def __init__(self, baseurl=None, proxies=None, database_choice=None,
                 debug=False):
        """Set up the Quick Base connection and pre-fetch table metadata.

        Args:
            baseurl: unused; DhcdApiConn.BASEURL is always used.
            proxies: optional proxies forwarded to the base connection.
            database_choice: forwarded to ProjectBaseApiConn.
            debug: forwarded to ProjectBaseApiConn.
        """
        super().__init__(baseurl=DhcdApiConn.BASEURL, proxies=proxies,
                         database_choice=database_choice, debug=debug)

        # unique_data_id format: 'dhcd_dfd_' + <lowercase_table_name>
        self._available_unique_data_ids = [
            'dhcd_dfd_projects',
            'dhcd_dfd_properties'  # ,
            # 'dhcd_dfd_units', 'dhcd_dfd_loans', 'dhcd_dfd_modifications',
            # 'dhcd_dfd_lihtc_allocations', 'dhcd_dfd_construction_activity',
            # 'dhcd_dfd_funding_sources', 'dhcd_dfd_8609s', 'dhcd_dfd_8610s',
            # 'dhcd_dfd_source_use', 'dhcd_dfd_ami_levels', 'dhcd_dfd_fiscal_years',
            # 'dhcd_dfd_organizations', 'dhcd_dfd_teams', 'dhcd_dfd_project_managers',
            # 'dhcd_dfd_funding_increases', 'dhcd_dfd_lihtc_fees',
            # 'dhcd_dfd_lihtc___bins', 'dhcd_dfd_council_packages',
            # 'dhcd_dfd_policies_and_procedures', 'dhcd_dfd_dhcd_documents',
            # 'dhcd_dfd_images_icons'
        ]
        self._app_dbid = APP_ID
        self._table_names = {
            'dhcd_dfd_projects': 'Projects',
            'dhcd_dfd_properties': 'Properties'  # ,
            # 'dhcd_dfd_units': 'Units',
            # 'dhcd_dfd_loans': 'Loans',
            # 'dhcd_dfd_modifications': 'Modifications',
            # 'dhcd_dfd_lihtc_allocations': 'LIHTC Allocations',
            # 'dhcd_dfd_construction_activity': 'Construction Activity',
            # 'dhcd_dfd_funding_sources': 'Funding Sources',
            # 'dhcd_dfd_8609s': '8609s',
            # 'dhcd_dfd_8610s': '8610s',
            # 'dhcd_dfd_source_use': 'Source/Use',
            # 'dhcd_dfd_ami_levels': 'AMI Levels',
            # 'dhcd_dfd_fiscal_years': 'Fiscal Years',
            # 'dhcd_dfd_organizations': 'Organizations',
            # 'dhcd_dfd_teams': 'Teams'
            # 'dhcd_dfd_project_managers': 'Project Managers',
            # 'dhcd_dfd_funding_increases': 'Funding Increases',
            # 'dhcd_dfd_lihtc_fees': 'LIHTC Fees',
            # 'dhcd_dfd_lihtc___bins': 'LIHTC - BINs',
            # 'dhcd_dfd_council_packages': 'Council Packages',
            # 'dhcd_dfd_policies_and_procedures': 'Policies and Procedures',
            # 'dhcd_dfd_dhcd_documents': 'DHCD Documents',
            # 'dhcd_dfd_images_icons': 'Images/Icons'
        }
        self._urls = {
            unique_data_id:
                '/' + TABLE_ID_MAPPING[self._table_names[unique_data_id]]
            for unique_data_id in self._available_unique_data_ids}
        # Use the module logger instead of bare print() for debug output,
        # consistent with the rest of this module.
        logger.info("self._urls: %s", self._urls)

        # Translation table mapping punctuation and whitespace to
        # underscores; used to derive snake_case identifiers from Quick
        # Base labels.
        identifier_unallowed_chars = string.punctuation + string.whitespace
        # '_' * n is already a string; the original wrapped it in a
        # redundant ''.join(...).
        replacement_underscores = '_' * len(identifier_unallowed_chars)
        self._identifier_translation_map = str.maketrans(
            identifier_unallowed_chars, replacement_underscores)

        self._fields = {}
        self._params = {}
        # Metadata is needed in every case; the original branched on
        # `if INCLUDE_ALL_FIELDS:` (a dict, hence always truthy) with two
        # identical arms.
        self._get_metadata()

    def _get_metadata(self):
        """
        Retrieves metadata about the DHCD DFD Quick Base app and its member
        tables (including field metadata and relationships) and saves this in
        two CSV files.

        Also, for each unique data id corresponding to a table, (1) builds a
        field reference list of all relevant fields, and (2) sets the query
        parameters (including the sort field parameter) used when saving table
        data in get_data(...).
        """
        output_path_dir = os.path.dirname(
            self.output_paths[self._available_unique_data_ids[0]])
        output_path_app_metadata = os.path.join(
            output_path_dir, '_dhcd_dfd_app_metadata.csv')
        output_path_table_metadata = os.path.join(
            output_path_dir, '_dhcd_dfd_table_metadata.csv')

        app_metadata_result = self.get('/' + self._app_dbid,
                                       params=DhcdApiConn.PARAMS_METADATA)
        app_tables_metadata_xml = xml_fromstring(
            app_metadata_result.text).findall('./table/chdbids/chdbid')

        app_metadata = OrderedDict()
        table_metadata = OrderedDict()
        field_count = 0
        for app_table_metadata in app_tables_metadata_xml:
            table_dbid = app_table_metadata.text
            table_metadata_result = self.get(
                '/' + table_dbid, params=DhcdApiConn.PARAMS_METADATA)
            # Strip out singly-occurring line break tags to prevent truncation
            # of multi-line formulas: preserve doubled breaks first, then drop
            # the rest. (The original re-read .text on the second line, which
            # silently discarded the first replacement.)
            table_metadata_full = table_metadata_result.text.replace(
                "<BR/>\n<BR/>", "<br />\n<br />")
            table_metadata_full = table_metadata_full.replace("<BR/>", "")
            table_metadata_xml_root = xml_fromstring(table_metadata_full)
            errcode = int(table_metadata_xml_root.find('./errcode').text)
            if errcode != 0:
                # Skip tables whose schema could not be retrieved.
                continue

            table_metadata_xml_orig = table_metadata_xml_root.find(
                './table/original')
            table_name = table_metadata_xml_root.find('./table/name').text
            table_name_snake_case = table_name.lower().translate(
                self._identifier_translation_map)
            unique_data_id = None
            if 'dhcd_dfd_' + table_name_snake_case in \
                    self._available_unique_data_ids:
                unique_data_id = 'dhcd_dfd_' + table_name_snake_case
                self._fields[unique_data_id] = []

            table_metadata_xml_fields = table_metadata_xml_root.findall(
                './table/fields/field')
            table_metadata[table_dbid] = OrderedDict()
            # 1-based line numbers (offset by the header row) of this table's
            # fields within the output CSV.
            field_line_start = field_count + 2
            for field_xml in table_metadata_xml_fields:
                fid = int(field_xml.get('id'))
                # Alias for this field's metadata row (same mutable dict).
                field_meta = table_metadata[table_dbid][fid] = OrderedDict()
                field_label = field_xml.find('label').text
                field_name = field_label.lower().translate(
                    self._identifier_translation_map)
                # For any fields that belong to composite fields (e.g. address
                # component fields), resolve the full field name by prepending
                # the parent field name.
                parent_fid = None
                if field_xml.find('parentFieldID') is not None:
                    parent_fid = int(field_xml.find('parentFieldID').text)
                    if parent_fid in table_metadata[table_dbid]:
                        parent_field_name = \
                            table_metadata[table_dbid][parent_fid]['field_name']
                    else:
                        parent_field_label = table_metadata_xml_root.find(
                            "./table/fields/field[@id='{}']/label".format(
                                parent_fid)).text
                        parent_field_name = parent_field_label.lower().translate(
                            self._identifier_translation_map)
                        if parent_field_name[0].isdigit():
                            parent_field_name = '_' + parent_field_name
                    field_name = '__'.join([parent_field_name, field_name])
                if field_name[0].isdigit():
                    # Identifiers may not start with a digit.
                    field_name = '_' + field_name

                # For any composite fields (e.g. address fields), get
                # child/component fields.
                child_fids = [child_field.get('id') for child_field in
                              field_xml.findall(
                                  './compositeFields/compositeField')]
                child_fids = '|'.join(child_fids) if child_fids else None

                field_meta['table_name'] = table_name
                field_meta['field_name'] = field_name
                field_meta['field_label'] = field_label
                field_meta['field_id'] = str(fid)
                field_meta['field_type'] = field_xml.get('field_type')
                field_meta['base_type'] = field_xml.get('base_type')
                field_meta['appears_by_default'] = field_xml.find(
                    'appears_by_default').text
                field_meta['composite_field_parent_fid'] = parent_fid
                field_meta['composite_field_child_fids'] = child_fids
                field_meta['mode'] = field_xml.get('mode')

                field_meta['formula'] = None
                if field_xml.find('formula') is not None:
                    field_meta['formula'] = field_xml.find('formula').text

                field_meta['choices'] = None
                if field_xml.find('choices') is not None:
                    field_meta['choices'] = ""
                    for choice in field_xml.findall('./choices/choice'):
                        field_meta['choices'] += "\n" + choice.text \
                            if len(field_meta['choices']) > 0 \
                            else choice.text

                field_meta['lookup_target_fid'] = None
                field_meta['lookup_source_fid'] = None
                if field_meta['mode'] == 'lookup':
                    if field_xml.find('lutfid') is not None:
                        field_meta['lookup_target_fid'] = \
                            field_xml.find('lutfid').text
                    if field_xml.find('lusfid') is not None:
                        field_meta['lookup_source_fid'] = \
                            field_xml.find('lusfid').text

                field_meta['dblink_target_dbid'] = None
                field_meta['dblink_target_fid'] = None
                field_meta['dblink_source_fid'] = None
                if field_meta['mode'] == 'virtual' and \
                        field_meta['field_type'] == 'dblink':
                    if field_xml.find('target_dbid') is not None:
                        field_meta['dblink_target_dbid'] = \
                            field_xml.find('target_dbid').text
                    if field_xml.find('target_fid') is not None:
                        field_meta['dblink_target_fid'] = \
                            field_xml.find('target_fid').text
                    if field_xml.find('source_fid') is not None:
                        field_meta['dblink_source_fid'] = \
                            field_xml.find('source_fid').text

                field_meta['fkey_table_app_dbid'] = None
                field_meta['fkey_table_alias'] = None
                if field_xml.find('mastag') is not None:
                    fkey_ref = field_xml.find('mastag').text.split('.')
                    if len(fkey_ref) == 2:
                        field_meta['fkey_table_app_dbid'] = fkey_ref[0]
                        field_meta['fkey_table_alias'] = fkey_ref[1].lower()
                    else:
                        field_meta['fkey_table_app_dbid'] = None
                        field_meta['fkey_table_alias'] = fkey_ref[0].lower()

                field_meta['field_help'] = field_xml.find('fieldhelp').text

                # For each unique data id corresponding to a table,
                # build a list of all relevant fields.
                if unique_data_id is not None and \
                        (INCLUDE_ALL_FIELDS[unique_data_id] or
                         field_meta['appears_by_default'] == '1'):
                    self._fields[unique_data_id].append(field_name)
                field_count += 1
            field_line_end = field_count + 1

            app_metadata[table_dbid] = OrderedDict([
                ('table_name', table_name),
                ('table_dbid', table_dbid),
                ('table_alias', app_table_metadata.get('name')),
                ('key_fid', table_metadata_xml_orig.find('key_fid').text),
                ('default_sort_fid',
                 table_metadata_xml_orig.find('def_sort_fid').text),
                ('default_sort_order',
                 table_metadata_xml_orig.find('def_sort_order').text),
                ('single_record_name',
                 table_metadata_xml_orig.find('single_record_name').text),
                ('plural_record_name',
                 table_metadata_xml_orig.find('plural_record_name').text),
                ('field_metadata_line_start', field_line_start),
                ('field_metadata_line_end', field_line_end)
            ])

            if unique_data_id is not None and unique_data_id in self._fields:
                # While not strictly a field, Quick Base always includes a
                # final 'update_id' column:
                self._fields[unique_data_id].append('update_id')
                # Set the query parameters (including the sort field). Copy
                # the class-level dicts: the original assigned them by
                # reference, so writing 'slist' below mutated the shared
                # class attributes and leaked the last table's sort field
                # into every other table and instance.
                if INCLUDE_ALL_FIELDS[unique_data_id]:
                    self._params[unique_data_id] = dict(
                        DhcdApiConn.PARAMS_DATA_ALL_FIELDS)
                else:
                    self._params[unique_data_id] = dict(
                        DhcdApiConn.PARAMS_DATA_DEFAULT_FIELDS)
                self._params[unique_data_id]['slist'] = \
                    app_metadata[table_dbid]['default_sort_fid']

        all_tables_field_metadata = [
            list(field_metadata_row.values())
            for all_field_metadata in table_metadata.values()
            for field_metadata_row in all_field_metadata.values()]
        self.result_to_csv(TABLE_METADATA_FIELDS, all_tables_field_metadata,
                           output_path_table_metadata)
        self.result_to_csv(APP_METADATA_FIELDS,
                           list(list(d.values())
                                for d in app_metadata.values()),
                           output_path_app_metadata)

    def get_data(self, unique_data_ids=None, sample=False, output_type='csv',
                 **kwargs):
        """Download data for each requested table and write it to CSV.

        Note: nothing is returned (the original docstring claimed a JSON
        return, and computed an unused xml_to_json conversion, both removed);
        results are written via result_to_csv, and the properties table is
        additionally converted into the project/subsidy CSVs.

        Args:
            unique_data_ids: list of data ids to fetch; None means all
                available ids.
            sample: unused; accepted for interface compatibility.
            output_type: unused; output is always CSV.
            **kwargs: 'db' (optional) is forwarded to
                create_project_subsidy_csv for the properties table.
        """
        db = kwargs.get('db', None)
        if unique_data_ids is None:
            unique_data_ids = self._available_unique_data_ids
        for u in unique_data_ids:
            if u not in self._available_unique_data_ids:
                logger.info(" The unique_data_id '{}' is not supported by "
                            "the DhcdApiConn".format(u))
                continue

            result = self.get(self._urls[u], params=self._params[u])
            if result.status_code != 200:
                # logger.error, not logger.exception: we are not inside an
                # except block, so there is no traceback to attach.
                logger.error("An error occurred during request: "
                             "status {0}".format(result.status_code))
                continue

            data_xml_root = xml_fromstring(result.text)
            data_xml_records = data_xml_root.findall('record')
            results = [DhcdResult(
                {e.tag: e.text for e in list(r)},
                self._fields[u]).data for r in data_xml_records]
            self.result_to_csv(self._fields[u], results, self.output_paths[u])

            # Convert to format expected by database
            if u == 'dhcd_dfd_properties':
                self.create_project_subsidy_csv(
                    'dhcd_dfd_properties', PROJECT_FIELDS_MAP,
                    SUBSIDY_FIELDS_MAP, PROJECT_ADDRE_FIELDS_MAP, db)
# For testing purposes (running this as a script):
if __name__ == '__main__':
    conn = DhcdApiConn()
    # To fetch a single table instead, use e.g. ['dhcd_dfd_projects'].
    data_ids = None
    sample_flag = False
    # Alternative: out_type = 'stdout'
    out_type = 'csv'
    database = None
    conn.get_data(data_ids, sample_flag, out_type, db=database)
| |
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
import commonware
from rest_framework import status
from rest_framework.mixins import (CreateModelMixin, DestroyModelMixin,
ListModelMixin, RetrieveModelMixin,
UpdateModelMixin)
from rest_framework.permissions import BasePermission, IsAuthenticated
from rest_framework.relations import HyperlinkedRelatedField
from rest_framework.response import Response
from rest_framework.serializers import (HyperlinkedModelSerializer,
Serializer,
ValidationError)
from rest_framework.viewsets import GenericViewSet
from slumber.exceptions import HttpClientError, HttpServerError
from tower import ugettext as _
import mkt
from lib.pay_server import get_client
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowAppOwner, GroupPermission
from mkt.api.base import MarketplaceView
from mkt.constants.payments import PAYMENT_STATUSES
from mkt.constants.payments import PROVIDER_BANGO
from mkt.developers.forms_payments import (BangoPaymentAccountForm,
PaymentCheckForm)
from mkt.developers.models import (AddonPaymentAccount, CantCancel,
PaymentAccount)
from mkt.developers.providers import get_provider
from mkt.webapps.models import AddonUpsell
log = commonware.log.getLogger('z.api.payments')
class PaymentAppViewSet(GenericViewSet):

    def initialize_request(self, request, *args, **kwargs):
        """
        Pass the value in the URL through to the form defined on the
        ViewSet, which will populate the app property with the app object.

        You must define a form which will take an app object.
        """
        request = super(PaymentAppViewSet, self).initialize_request(
            request, *args, **kwargs)
        self.app = None
        app_form = self.form({'app': kwargs.get('pk')})
        if app_form.is_valid():
            self.app = app_form.cleaned_data['app']
        return request
class PaymentAccountSerializer(Serializer):
    """
    Fake serializer that returns PaymentAccount details when
    serializing a PaymentAccount instance. Use only for read operations.
    """
    def to_native(self, obj):
        """Fetch the account details from its provider and attach a URI."""
        provider = obj.get_provider()
        data = provider.account_retrieve(obj)
        data['resource_uri'] = reverse('payment-account-detail',
                                       kwargs={'pk': obj.pk})
        return data
class PaymentAccountViewSet(ListModelMixin, RetrieveModelMixin,
                            MarketplaceView, GenericViewSet):
    queryset = PaymentAccount.objects.all()
    # PaymentAccountSerializer is not a real serializer, it just looks up
    # the details on the object. It's only used for GET requests, in every
    # other case we use BangoPaymentAccountForm directly.
    serializer_class = PaymentAccountSerializer
    authentication_classes = [RestOAuthAuthentication,
                              RestSharedSecretAuthentication]
    # Security checks are performed in get_queryset(), so we allow any
    # authenticated users by default.
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """
        Return the queryset specific to the user using the view. (This replaces
        permission checks, unauthorized users won't be able to see that an
        account they don't have access to exists, we'll return 404 for them.)
        """
        qs = super(PaymentAccountViewSet, self).get_queryset()
        return qs.filter(user=self.request.user, inactive=False)

    def create(self, request, *args, **kwargs):
        """Create a payment account with the configured provider.

        Payment-server failures are surfaced as 500s; invalid form data
        as a 400 with the form errors.
        """
        provider = get_provider()
        form = provider.forms['account'](request.DATA)
        if form.is_valid():
            try:
                # The original re-fetched the provider here; reuse the one
                # already obtained above.
                obj = provider.account_create(request.user, form.data)
            except HttpClientError as e:
                log.error('Client error creating Bango account; %s' % e)
                return Response(e.content,
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            except HttpServerError as e:
                log.error('Error creating Bango payment account; %s' % e)
                return Response(_(u'Could not connect to payment server.'),
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            serializer = self.get_serializer(obj)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)

    def partial_update(self, request, *args, **kwargs):
        """PATCH is not supported; accounts must be updated with PUT."""
        return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def update(self, request, *args, **kwargs):
        """Update the account via its provider after form validation."""
        self.object = self.get_object()
        form = BangoPaymentAccountForm(request.DATA, account=True)
        if form.is_valid():
            self.object.get_provider().account_update(self.object,
                                                      form.cleaned_data)
            return Response(status=status.HTTP_204_NO_CONTENT)
        return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Cancel the account; shared accounts cannot be deleted (409)."""
        account = self.get_object()
        try:
            account.cancel(disable_refs=True)
        except CantCancel:
            return Response(_('Cannot delete shared account'),
                            status=status.HTTP_409_CONFLICT)
        log.info('Account cancelled: %s' % account.pk)
        return Response(status=status.HTTP_204_NO_CONTENT)
class UpsellSerializer(HyperlinkedModelSerializer):
    # Give each attribute its own field instance. The original used
    # `free = premium = HyperlinkedRelatedField(...)`, making both names
    # point at one shared field object, which is fragile when the framework
    # binds per-field state (such as the field name) onto the instance.
    free = HyperlinkedRelatedField(view_name='app-detail')
    premium = HyperlinkedRelatedField(view_name='app-detail')

    class Meta:
        model = AddonUpsell
        fields = ('free', 'premium', 'created', 'modified', 'url')
        view_name = 'app-upsell-detail'

    def validate(self, attrs):
        """Ensure the upsell relates a free app to a premium app."""
        if attrs['free'].premium_type not in mkt.ADDON_FREES:
            raise ValidationError('Upsell must be from a free app.')

        if attrs['premium'].premium_type in mkt.ADDON_FREES:
            raise ValidationError('Upsell must be to a premium app.')

        return attrs
class UpsellPermission(BasePermission):
"""
Permissions on the upsell object, is determined by permissions on the
free and premium object.
"""
def check(self, request, free, premium):
allow = AllowAppOwner()
for app in free, premium:
if app and not allow.has_object_permission(request, '', app):
return False
return True
def has_object_permission(self, request, view, object):
return self.check(request, object.free, object.premium)
class UpsellViewSet(CreateModelMixin, DestroyModelMixin, RetrieveModelMixin,
                    UpdateModelMixin, MarketplaceView, GenericViewSet):
    permission_classes = (UpsellPermission,)
    queryset = AddonUpsell.objects.filter()
    serializer_class = UpsellSerializer

    def pre_save(self, obj):
        """Reject the save unless the user may touch both linked apps."""
        allowed = UpsellPermission().check(self.request, obj.free, obj.premium)
        if not allowed:
            raise PermissionDenied('Not allowed to alter that object')
class AddonPaymentAccountPermission(BasePermission):
    """
    Permissions on the app payment account object, is determined by permissions
    on the app the account is being used for.
    """

    def check(self, request, app, account):
        """Return True when request.user may use `account` for `app`.

        Requires ownership of the app, plus either a shared account or an
        account owned by the requesting user.
        """
        if AllowAppOwner().has_object_permission(request, '', app):
            if account.shared or account.user.pk == request.user.pk:
                return True
            else:
                # The original called str.format() on %-style placeholders,
                # which never interpolated (the literal '%(account)s' was
                # logged); use the % operator instead.
                log.info('AddonPaymentAccount access %(account)s denied '
                         'for %(user)s: wrong user, not shared.' %
                         {'account': account.pk, 'user': request.user.pk})
        else:
            log.info('AddonPaymentAccount access %(account)s denied '
                     'for %(user)s: no app permission.' %
                     {'account': account.pk, 'user': request.user.pk})
        return False

    def has_object_permission(self, request, view, object):
        return self.check(request, object.addon, object.payment_account)
class AddonPaymentAccountSerializer(HyperlinkedModelSerializer):
    addon = HyperlinkedRelatedField(view_name='app-detail')
    payment_account = HyperlinkedRelatedField(
        view_name='payment-account-detail')

    class Meta:
        model = AddonPaymentAccount
        fields = ('addon', 'payment_account', 'created', 'modified', 'url')
        view_name = 'app-payment-account-detail'

    def validate(self, attrs):
        """Only premium apps may be linked to a payment account."""
        premium_type = attrs['addon'].premium_type
        if premium_type in mkt.ADDON_FREES:
            raise ValidationError('App must be a premium app.')
        return attrs
class AddonPaymentAccountViewSet(CreateModelMixin, RetrieveModelMixin,
                                 UpdateModelMixin, MarketplaceView,
                                 GenericViewSet):
    permission_classes = (AddonPaymentAccountPermission,)
    queryset = AddonPaymentAccount.objects.filter()
    serializer_class = AddonPaymentAccountSerializer

    def pre_save(self, obj):
        """Check permissions and, on update, forbid changing the add-on."""
        if not AddonPaymentAccountPermission().check(
                self.request,
                obj.addon, obj.payment_account):
            raise PermissionDenied('Not allowed to alter that object.')

        if self.request.method != 'POST':
            addon = obj.__class__.objects.get(pk=obj.pk).addon
            # Idiomatic inequality (was `not obj.addon == addon`).
            if obj.addon != addon:
                # This should be a 400 error.
                raise PermissionDenied('Cannot change the add-on.')

    def post_save(self, obj, created=False):
        """Ensure that the setup_bango method is called after creation."""
        if created:
            provider = get_provider()
            uri = provider.product_create(obj.payment_account, obj.addon)
            obj.product_uri = uri
            obj.save()
class PaymentCheckViewSet(PaymentAppViewSet):
    permission_classes = (AllowAppOwner,)
    form = PaymentCheckForm

    def create(self, request, *args, **kwargs):
        """
        We aren't actually creating objects, but proxying them
        through to solitude.
        """
        if not self.app:
            return Response('', status=400)

        self.check_object_permissions(request, self.app)
        account_uri = self.app.payment_account(PROVIDER_BANGO).account_uri
        res = get_client().api.bango.status.post(
            data={'seller_product_bango': account_uri})
        filtered = {
            'bango': {'status': PAYMENT_STATUSES[res['status']],
                      'errors': ''},
        }
        return Response(filtered, status=200)
class PaymentDebugViewSet(PaymentAppViewSet):
    permission_classes = [GroupPermission('Transaction', 'Debug')]
    form = PaymentCheckForm

    def list(self, request, *args, **kwargs):
        """Proxy Bango debug information from solitude for this app."""
        if not self.app:
            return Response('', status=400)

        account_uri = self.app.payment_account(PROVIDER_BANGO).account_uri
        res = get_client().api.bango.debug.get(
            data={'seller_product_bango': account_uri})
        return Response({'bango': res['bango']}, status=200)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline, the top-level Dataflow object.
A pipeline holds a DAG of data transforms. Conceptually the nodes of the DAG
are transforms (PTransform objects) and the edges are values (mostly PCollection
objects). The transforms take as inputs one or more PValues and output one or
more PValues.
The pipeline offers functionality to traverse the graph. The actual operation
to be executed for each node visited is specified through a runner object.
Typical usage:
# Create a pipeline object using a local runner for execution.
p = beam.Pipeline('DirectRunner')
# Add to the pipeline a "Create" transform. When executed this
# transform will produce a PCollection object with the specified values.
pcoll = p | 'create' >> beam.Create([1, 2, 3])
# Another transform could be applied to pcoll, e.g., writing to a text file.
# For other transforms, refer to transforms/ directory.
pcoll | 'write' >> beam.io.WriteToText('./output')
# run() will execute the DAG stored in the pipeline. The execution of the
# nodes visited is done using the specified local runner.
p.run()
"""
from __future__ import absolute_import
import collections
import logging
import os
import shutil
import tempfile
from google.protobuf import wrappers_pb2
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.runners import create_runner
from apache_beam.runners import PipelineRunner
from apache_beam.transforms import ptransform
from apache_beam.typehints import TypeCheckError
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
from apache_beam.utils.pipeline_options import StandardOptions
from apache_beam.utils.pipeline_options import TypeOptions
from apache_beam.utils.pipeline_options_validator import PipelineOptionsValidator
class Pipeline(object):
"""A pipeline object that manages a DAG of PValues and their PTransforms.
Conceptually the PValues are the DAG's nodes and the PTransforms computing
the PValues are the edges.
All the transforms applied to the pipeline must have distinct full labels.
If same transform instance needs to be applied then a clone should be created
with a new label (e.g., transform.clone('new label')).
"""
def __init__(self, runner=None, options=None, argv=None):
"""Initialize a pipeline object.
Args:
runner: An object of type 'PipelineRunner' that will be used to execute
the pipeline. For registered runners, the runner name can be specified,
otherwise a runner object must be supplied.
options: A configured 'PipelineOptions' object containing arguments
that should be used for running the Dataflow job.
argv: a list of arguments (such as sys.argv) to be used for building a
'PipelineOptions' object. This will only be used if argument 'options'
is None.
Raises:
ValueError: if either the runner or options argument is not of the
expected type.
"""
if options is not None:
if isinstance(options, PipelineOptions):
self.options = options
else:
raise ValueError(
'Parameter options, if specified, must be of type PipelineOptions. '
'Received : %r', options)
elif argv is not None:
if isinstance(argv, list):
self.options = PipelineOptions(argv)
else:
raise ValueError(
'Parameter argv, if specified, must be a list. Received : %r', argv)
else:
self.options = PipelineOptions([])
if runner is None:
runner = self.options.view_as(StandardOptions).runner
if runner is None:
runner = StandardOptions.DEFAULT_RUNNER
logging.info(('Missing pipeline option (runner). Executing pipeline '
'using the default runner: %s.'), runner)
if isinstance(runner, str):
runner = create_runner(runner)
elif not isinstance(runner, PipelineRunner):
raise TypeError('Runner must be a PipelineRunner object or the '
'name of a registered runner.')
# Validate pipeline options
errors = PipelineOptionsValidator(self.options, runner).validate()
if errors:
raise ValueError(
'Pipeline has validations errors: \n' + '\n'.join(errors))
# Default runner to be used.
self.runner = runner
# Stack of transforms generated by nested apply() calls. The stack will
# contain a root node as an enclosing (parent) node for top transforms.
self.transforms_stack = [AppliedPTransform(None, None, '', None)]
# Set of transform labels (full labels) applied to the pipeline.
# If a transform is applied and the full label is already in the set
# then the transform will have to be cloned with a new label.
self.applied_labels = set()
def _current_transform(self):
"""Returns the transform currently on the top of the stack."""
return self.transforms_stack[-1]
def _root_transform(self):
"""Returns the root transform of the transform stack."""
return self.transforms_stack[0]
def run(self, test_runner_api=True):
"""Runs the pipeline. Returns whatever our runner returns after running."""
# When possible, invoke a round trip through the runner API.
if test_runner_api and self._verify_runner_api_compatible():
return Pipeline.from_runner_api(
self.to_runner_api(), self.runner, self.options).run(False)
if self.options.view_as(SetupOptions).save_main_session:
# If this option is chosen, verify we can pickle the main session early.
tmpdir = tempfile.mkdtemp()
try:
pickler.dump_session(os.path.join(tmpdir, 'main_session.pickle'))
finally:
shutil.rmtree(tmpdir)
return self.runner.run(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
self.run().wait_until_finish()
def visit(self, visitor):
"""Visits depth-first every node of a pipeline's DAG.
Args:
visitor: PipelineVisitor object whose callbacks will be called for each
node visited. See PipelineVisitor comments.
Raises:
TypeError: if node is specified and is not a PValue.
pipeline.PipelineError: if node is specified and does not belong to this
pipeline instance.
"""
visited = set()
self._root_transform().visit(visitor, self, visited)
def apply(self, transform, pvalueish=None, label=None):
"""Applies a custom transform using the pvalueish specified.
Args:
transform: the PTranform to apply.
pvalueish: the input for the PTransform (typically a PCollection).
label: label of the PTransform.
Raises:
TypeError: if the transform object extracted from the argument list is
not a PTransform.
RuntimeError: if the transform object was already applied to this pipeline
and needs to be cloned in order to apply again.
"""
if isinstance(transform, ptransform._NamedPTransform):
return self.apply(transform.transform, pvalueish,
label or transform.label)
if not isinstance(transform, ptransform.PTransform):
raise TypeError("Expected a PTransform object, got %s" % transform)
if label:
# Fix self.label as it is inspected by some PTransform operations
# (e.g. to produce error messages for type hint violations).
try:
old_label, transform.label = transform.label, label
return self.apply(transform, pvalueish)
finally:
transform.label = old_label
full_label = '/'.join([self._current_transform().full_label,
label or transform.label]).lstrip('/')
if full_label in self.applied_labels:
raise RuntimeError(
'Transform "%s" does not have a stable unique label. '
'This will prevent updating of pipelines. '
'To apply a transform with a specified label write '
'pvalue | "label" >> transform'
% full_label)
self.applied_labels.add(full_label)
pvalueish, inputs = transform._extract_input_pvalues(pvalueish)
try:
inputs = tuple(inputs)
for leaf_input in inputs:
if not isinstance(leaf_input, pvalue.PValue):
raise TypeError
except TypeError:
raise NotImplementedError(
'Unable to extract PValue inputs from %s; either %s does not accept '
'inputs of this format, or it does not properly override '
'_extract_input_pvalues' % (pvalueish, transform))
current = AppliedPTransform(
self._current_transform(), transform, full_label, inputs)
self._current_transform().add_part(current)
self.transforms_stack.append(current)
type_options = self.options.view_as(TypeOptions)
if type_options.pipeline_type_check:
transform.type_check_inputs(pvalueish)
pvalueish_result = self.runner.apply(transform, pvalueish)
if type_options is not None and type_options.pipeline_type_check:
transform.type_check_outputs(pvalueish_result)
for result in ptransform.GetPValues().visit(pvalueish_result):
assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))
# Make sure we set the producer only for a leaf node in the transform DAG.
# This way we preserve the last transform of a composite transform as
# being the real producer of the result.
if result.producer is None:
result.producer = current
# TODO(robertwb): Multi-input, multi-output inference.
# TODO(robertwb): Ideally we'd do intersection here.
if (type_options is not None and type_options.pipeline_type_check
and isinstance(result, pvalue.PCollection)
and not result.element_type):
input_element_type = (
inputs[0].element_type
if len(inputs) == 1
else typehints.Any)
type_hints = transform.get_type_hints()
declared_output_type = type_hints.simple_output_type(transform.label)
if declared_output_type:
input_types = type_hints.input_types
if input_types and input_types[0]:
declared_input_type = input_types[0][0]
result.element_type = typehints.bind_type_variables(
declared_output_type,
typehints.match_type_variables(declared_input_type,
input_element_type))
else:
result.element_type = declared_output_type
else:
result.element_type = transform.infer_output_type(input_element_type)
assert isinstance(result.producer.inputs, tuple)
current.add_output(result)
if (type_options is not None and
type_options.type_check_strictness == 'ALL_REQUIRED' and
transform.get_type_hints().output_types is None):
ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)
raise TypeCheckError('Pipeline type checking is enabled, however no '
'output type-hint was found for the '
'PTransform %s' % ptransform_name)
current.update_input_refcounts()
self.transforms_stack.pop()
return pvalueish_result
  def _verify_runner_api_compatible(self):
    """Returns True iff this pipeline can round-trip through the runner API.

    A pipeline is considered incompatible if any transform node has side
    inputs, if any transform fails to pickle, or if any visited value is a
    PDone.
    """
    class Visitor(PipelineVisitor): # pylint: disable=used-before-assignment
      ok = True  # Really a nonlocal.
      def visit_transform(self, transform_node):
        if transform_node.side_inputs:
          # No side inputs (yet).
          Visitor.ok = False
        try:
          # Transforms must be picklable.
          pickler.loads(pickler.dumps(transform_node.transform))
        except Exception:
          # Any pickling failure (not just PicklingError) disqualifies.
          Visitor.ok = False
      def visit_value(self, value, _):
        if isinstance(value, pvalue.PDone):
          Visitor.ok = False
    self.visit(Visitor())
    return Visitor.ok
  def to_runner_api(self):
    """Returns a beam_runner_api_pb2.Pipeline proto describing this pipeline."""
    from apache_beam.runners import pipeline_context
    from apache_beam.runners.api import beam_runner_api_pb2
    context = pipeline_context.PipelineContext()
    # Mutates context; placing inline would force dependence on
    # argument evaluation order.
    root_transform_id = context.transforms.get_id(self._root_transform())
    proto = beam_runner_api_pb2.Pipeline(
        root_transform_id=root_transform_id,
        components=context.to_runner_api())
    return proto
@staticmethod
def from_runner_api(proto, runner, options):
p = Pipeline(runner=runner, options=options)
from apache_beam.runners import pipeline_context
context = pipeline_context.PipelineContext(proto.components)
p.transforms_stack = [
context.transforms.get_by_id(proto.root_transform_id)]
# TODO(robertwb): These are only needed to continue construction. Omit?
p.applied_labels = set([
t.unique_name for t in proto.components.transforms.values()])
for id in proto.components.pcollections:
context.pcollections.get_by_id(id).pipeline = p
return p
class PipelineVisitor(object):
  """Visitor pattern class used to traverse a DAG of transforms.

  This is an internal class used for bookkeeping by a Pipeline.
  Subclasses override whichever of the no-op callbacks below they need.
  """
  def visit_value(self, value, producer_node):
    """Callback for visiting a PValue in the pipeline DAG.

    Args:
      value: PValue visited (typically a PCollection instance).
      producer_node: AppliedPTransform object whose transform produced the
        pvalue.
    """
    pass
  def visit_transform(self, transform_node):
    """Callback for visiting a transform node in the pipeline DAG."""
    pass
  def enter_composite_transform(self, transform_node):
    """Callback for entering traversal of a composite transform node."""
    pass
  def leave_composite_transform(self, transform_node):
    """Callback for leaving traversal of a composite transform node."""
    pass
class AppliedPTransform(object):
  """A transform node representing an instance of applying a PTransform.

  This is an internal class used for bookkeeping by a Pipeline.
  """
  def __init__(self, parent, transform, full_label, inputs):
    # parent: enclosing AppliedPTransform node (None for the root).
    # transform: the PTransform being applied (may be None for the root).
    # full_label: unique, fully qualified label for this application.
    # inputs: iterable of input PValues (may be None, normalized to ()).
    self.parent = parent
    self.transform = transform
    # Note that we want the PipelineVisitor classes to use the full_label,
    # inputs, side_inputs, and outputs fields from this instance instead of the
    # ones of the PTransform instance associated with it. Doing this permits
    # reusing PTransform instances in different contexts (apply() calls) without
    # any interference. This is particularly useful for composite transforms.
    self.full_label = full_label
    self.inputs = inputs or ()
    self.side_inputs = () if transform is None else tuple(transform.side_inputs)
    self.outputs = {}
    self.parts = []
    # Per tag refcount dictionary for PValues for which this node is a
    # root producer.
    self.refcounts = collections.defaultdict(int)
  def __repr__(self):
    return "%s(%s, %s)" % (self.__class__.__name__, self.full_label,
                           type(self.transform).__name__)
  def update_input_refcounts(self):
    """Increment refcounts for all transforms providing inputs."""
    # Refcounts are attributed to the last (deepest) part of a composite,
    # which is the node that really produces the value.
    def real_producer(pv):
      real = pv.producer
      while real.parts:
        real = real.parts[-1]
      return real
    # Only leaf (non-composite) nodes consume inputs directly.
    if not self.is_composite():
      for main_input in self.inputs:
        if not isinstance(main_input, pvalue.PBegin):
          real_producer(main_input).refcounts[main_input.tag] += 1
      for side_input in self.side_inputs:
        real_producer(side_input.pvalue).refcounts[side_input.pvalue.tag] += 1
  def add_output(self, output, tag=None):
    """Registers an output PValue under the given tag (None = main output)."""
    if isinstance(output, pvalue.DoOutputsTuple):
      # Register only the main output; tagged outputs are reached via it.
      self.add_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
      # TODO(BEAM-1833): Require tags when calling this method.
      if tag is None and None in self.outputs:
        # The main slot is taken; fall back to a synthetic integer tag.
        tag = len(self.outputs)
      assert tag not in self.outputs
      self.outputs[tag] = output
    else:
      raise TypeError("Unexpected output type: %s" % output)
  def add_part(self, part):
    """Appends a child AppliedPTransform to this composite node."""
    assert isinstance(part, AppliedPTransform)
    self.parts.append(part)
  def is_composite(self):
    """Returns whether this is a composite transform.

    A composite transform has parts (inner transforms) or isn't the
    producer for any of its outputs. (An example of a transform that
    is not a producer is one that returns its inputs instead.)
    """
    return bool(self.parts) or all(
        pval.producer is not self for pval in self.outputs.values())
  def visit(self, visitor, pipeline, visited):
    """Visits all nodes reachable from the current node."""
    for pval in self.inputs:
      if pval not in visited and not isinstance(pval, pvalue.PBegin):
        assert pval.producer is not None
        pval.producer.visit(visitor, pipeline, visited)
        # The value should be visited now since we visit outputs too.
        assert pval in visited, pval
    # Visit side inputs.
    for pval in self.side_inputs:
      if isinstance(pval, pvalue.AsSideInput) and pval.pvalue not in visited:
        pval = pval.pvalue  # Unpack marker-object-wrapped pvalue.
        assert pval.producer is not None
        pval.producer.visit(visitor, pipeline, visited)
        # The value should be visited now since we visit outputs too.
        assert pval in visited
        # TODO(silviuc): Is there a way to signal that we are visiting a side
        # value? The issue is that the same PValue can be reachable through
        # multiple paths and therefore it is not guaranteed that the value
        # will be visited as a side value.
    # Visit a composite or primitive transform.
    if self.is_composite():
      visitor.enter_composite_transform(self)
      for part in self.parts:
        part.visit(visitor, pipeline, visited)
      visitor.leave_composite_transform(self)
    else:
      visitor.visit_transform(self)
    # Visit the outputs (one or more). It is essential to mark as visited the
    # tagged PCollections of the DoOutputsTuple object. A tagged PCollection is
    # connected directly with its producer (a multi-output ParDo), but the
    # output of such a transform is the containing DoOutputsTuple, not the
    # PCollection inside it. Without the code below a tagged PCollection will
    # not be marked as visited while visiting its producer.
    for pval in self.outputs.values():
      if isinstance(pval, pvalue.DoOutputsTuple):
        pvals = (v for v in pval)
      else:
        pvals = (pval,)
      for v in pvals:
        if v not in visited:
          visited.add(v)
          visitor.visit_value(v, self)
  def named_inputs(self):
    """Returns {stringified index: PCollection} for the main inputs."""
    # TODO(BEAM-1833): Push names up into the sdk construction.
    return {str(ix): input for ix, input in enumerate(self.inputs)
            if isinstance(input, pvalue.PCollection)}
  def named_outputs(self):
    """Returns {stringified tag: PCollection} for the PCollection outputs."""
    return {str(tag): output for tag, output in self.outputs.items()
            if isinstance(output, pvalue.PCollection)}
  def to_runner_api(self, context):
    """Serializes this node as a beam_runner_api_pb2.PTransform proto."""
    from apache_beam.runners.api import beam_runner_api_pb2
    return beam_runner_api_pb2.PTransform(
        unique_name=self.full_label,
        spec=beam_runner_api_pb2.FunctionSpec(
            urn=urns.PICKLED_TRANSFORM,
            parameter=proto_utils.pack_Any(
                wrappers_pb2.BytesValue(value=pickler.dumps(self.transform)))),
        subtransforms=[context.transforms.get_id(part) for part in self.parts],
        # TODO(BEAM-115): Side inputs.
        inputs={tag: context.pcollections.get_id(pc)
                for tag, pc in self.named_inputs().items()},
        outputs={str(tag): context.pcollections.get_id(out)
                 for tag, out in self.named_outputs().items()},
        # TODO(BEAM-115): display_data
        display_data=None)
  @staticmethod
  def from_runner_api(proto, context):
    """Deserializes a beam_runner_api_pb2.PTransform into an AppliedPTransform."""
    result = AppliedPTransform(
        parent=None,
        transform=pickler.loads(
            proto_utils.unpack_Any(proto.spec.parameter,
                                   wrappers_pb2.BytesValue).value),
        full_label=proto.unique_name,
        inputs=[
            context.pcollections.get_by_id(id) for id in proto.inputs.values()])
    result.parts = [
        context.transforms.get_by_id(id) for id in proto.subtransforms]
    result.outputs = {
        None if tag == 'None' else tag: context.pcollections.get_by_id(id)
        for tag, id in proto.outputs.items()}
    # Only a leaf node is the real producer of its outputs (see is_composite).
    if not result.parts:
      for tag, pc in result.outputs.items():
        if pc not in result.inputs:
          pc.producer = result
          pc.tag = tag
    result.update_input_refcounts()
    return result
| |
from datetime import datetime
from functools import wraps
from werkzeug.local import LocalProxy
from flask import (request, Response, after_this_request, make_response, session, redirect,
jsonify, current_app)
from flask_login import login_user as _login_user, logout_user, current_user, login_required
from authomatic.adapters import WerkzeugAdapter
from . import _userflow, signals
# Proxy that always resolves to the currently configured userflow datastore.
_datastore = LocalProxy(lambda: _userflow.datastore)
def schema_errors_processor(errors):
    """Render schema validation *errors* as a 422 Unprocessable Entity JSON response."""
    resp = jsonify({'errors': errors})
    resp.status_code = 422
    return resp
def load_schema(schema_name):
    """Decorator factory: validate a view's payload with the named schema.

    The schema is looked up by name on every call (not at decoration time),
    so schema overrides registered later still take effect.  On validation
    failure the configured '_schema_errors_processor' view renders the
    errors; otherwise the wrapped view receives the deserialized data.
    """
    # using schema_name for lazy getting schema, in case of override
    def decorator(func):
        @wraps(func)
        def wrapper(payload, *args, **kwargs):
            schema = _userflow.schemas[schema_name]
            data, errors = schema.load(payload or {})
            if errors:
                return _userflow.views['_schema_errors_processor'](errors)
            else:
                return func(data, *args, **kwargs)
        # Marker consumed by add_api_routes: such views read JSON payloads.
        wrapper.load_schema_decorated = True
        return wrapper
    return decorator
def request_json(func):
    """Adapt a payload-taking view so it reads its payload from the request body."""
    @wraps(func)
    def inner():
        payload = request.json
        return func(payload)
    return inner
def response_json(func):
    """Ensure the wrapped view returns a flask Response, jsonify-ing plain values."""
    @wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        if isinstance(result, Response):
            return result
        return jsonify(result)
    return inner
def login_user(user, remember=False, provider=None):
    """Log *user* in, record tracking info, and return an auth token.

    Args:
        user: active user instance to authenticate (asserted active).
        remember: passed through to flask_login's remember-me handling.
        provider: OAuth provider name; NOTE(review): unused in this body —
            presumably kept for signal consumers or overrides; confirm.

    Returns:
        The user's auth token.
    """
    assert user.is_active
    logged_in = _login_user(user, remember)
    assert logged_in, 'Not logged in for unknown reason'
    # A successful interactive login supersedes any pending OAuth state.
    session.pop('auth_provider', None)
    remote_addr = _userflow.request_utils.get_remote_addr()
    ua_info = _userflow.request_utils.get_ua_info()
    if _userflow.request_utils.geoip:
        geoip_info = _userflow.request_utils.get_geoip_info()
    else:
        geoip_info = None
    # Persist a login-tracking record only when the datastore has a model for it.
    if _datastore.track_login_model:
        track_login = _datastore.create_track_login(
            time=datetime.utcnow(),
            remote_addr=remote_addr,
            geoip_info=geoip_info,
            ua_info=ua_info,
        )
        _datastore.put(track_login)
        # Commit after the response so the request isn't delayed by the write.
        after_this_request(lambda r: _datastore.commit())
    signals.logged_in.send(app=current_app._get_current_object(), user=user,
                           remote_addr=remote_addr, geoip_info=geoip_info, ua_info=ua_info)
    return user.get_auth_token()
@load_schema('login')
def login(data):
    """Log in from a validated login payload; return status plus auth token."""
    # The login schema resolves credentials to (user, payload).
    user, data = data
    auth_token = login_user(user, data['remember'])
    data = status()
    data['auth_token'] = auth_token
    return data
def logout():
    """Log the current user out and return the fresh session status."""
    logout_user()
    return status()
def status():
    """Return the session status payload.

    Includes the serialized current user (or None), locale/timezone, pending
    OAuth provider data (pruning stale entries from the session), and GeoIP
    info when a GeoIP backend is configured.
    """
    if not current_user.is_anonymous:
        user, errors = _userflow.schemas['user_schema'].dump(current_user)
        assert not errors
    else:
        user = None
    result = {
        'user': user,
        'locale': current_user.locale,
        'timezone': current_user.timezone,
    }
    if 'auth_provider' in session:
        auth_provider = {}
        # Iterate over a snapshot: stale entries are popped from the session
        # dict inside the loop, and mutating a dict while iterating its live
        # .items() view raises RuntimeError under Python 3.
        for provider, provider_user_id in list(session['auth_provider'].items()):
            provider_user = _datastore.find_provider_user(provider=provider,
                                                          provider_user_id=provider_user_id)
            if provider_user:
                schema = _userflow.schemas['ProviderUserSchema']
                # NOTE(review): other call sites unpack schema.dump() as
                # (data, errors); here the raw result is stored — confirm.
                auth_provider[provider] = schema.dump(provider_user)
            else:
                # Provider user no longer exists; drop the stale session entry.
                session['auth_provider'].pop(provider)
        if not session['auth_provider']:
            session.pop('auth_provider')
        if auth_provider:
            result['auth_provider'] = auth_provider
    if _userflow.request_utils.geoip:
        result['geoip'] = _userflow.request_utils.get_geoip_info()
    return result
@load_schema('set_i18n')
def set_i18n(data):
    """Update the current user's timezone and/or locale from validated payload.

    Changes are persisted only for authenticated users.
    """
    if 'timezone' in data:
        current_user.timezone = data['timezone']
    if 'locale' in data:
        current_user.locale = data['locale']
    if not current_user.is_anonymous:
        _datastore.commit()
def timezones():
    """Return the supported timezone choices."""
    return {'timezones': _userflow.get_timezone_choices()}
def provider_login(provider, goal):
    """Run an OAuth round-trip with *provider* for one of three goals.

    Args:
        provider: authomatic provider name.
        goal: 'LOGIN', 'REGISTER' or 'ASSOCIATE' — selects the redirect
            targets and what to do with the resolved provider account.

    Returns:
        A redirect response once the OAuth dance completes.
        NOTE(review): while the dance is still in progress (result is None)
        this returns None; authomatic examples return `response` here —
        confirm intended behavior.
    """
    if goal not in ('LOGIN', 'REGISTER', 'ASSOCIATE'):
        raise ValueError('Unknown goal: {}'.format(goal))
    response = make_response()
    result = _userflow.authomatic.login(WerkzeugAdapter(request, response), provider)
    if result:
        if result.error:
            # log result.to_json() if needed, but authomatic logs it anyway
            return redirect(_userflow.config['PROVIDER_{}_ERROR_URL'.format(goal)])
        # OAuth 2.0 and OAuth 1.0a provide only limited user data on login,
        # We need to update the user to get more info.
        result.user.update()
        provider_user = _datastore.find_provider_user(provider=provider,
                                                      provider_user_id=result.user.id)
        if provider_user:
            provider_user.set_provider_data(result.user)
        else:
            provider_user = _datastore.create_provider_user(provider=provider,
                                                            provider_user_id=result.user.id)
            provider_user.set_provider_data(result.user)
            _datastore.put(provider_user)
        after_this_request(lambda r: _datastore.commit())
        if goal == 'ASSOCIATE':
            # BUG FIX: this was '==' (a no-op comparison), so the provider
            # account was never linked.  Assign so the commit registered
            # above persists the association.
            provider_user.user_id = current_user.id
            return redirect(_userflow.config['PROVIDER_ASSOCIATE_SUCCEED_URL'])
        if provider_user.user_id:
            user = _datastore.find_user(id=provider_user.user_id)
            if not user.is_active:
                return redirect(_userflow.config['PROVIDER_LOGIN_INACTIVE_URL'])
            login_user(user, False, provider)
            return redirect(_userflow.config['PROVIDER_LOGIN_SUCCEED_URL'])
        if result.user.email:
            # Match by e-mail: link the provider account to an existing user.
            user = _datastore.find_user(email=result.user.email)
            if user:
                provider_user.user_id = user.id
                if not user.is_active:
                    return redirect(_userflow.config['PROVIDER_LOGIN_INACTIVE_URL'])
                login_user(user, False, provider)
                return redirect(_userflow.config['PROVIDER_LOGIN_SUCCEED_URL'])
        if goal == 'LOGIN':
            return redirect(_userflow.config['PROVIDER_LOGIN_NOT_EXIST_URL'])
        assert goal == 'REGISTER'
        # Remember the provider account so register_finish can associate it.
        session.setdefault('auth_provider', {})
        session['auth_provider'][provider] = result.user.id
        if result.user.email:
            token = _userflow.register_confirm_serializer.dumps(result.user.email)
            confirm_url = _userflow.config['REGISTER_CONFIRM_URL'].format(token)
            return redirect(confirm_url)
        else:
            return redirect(_userflow.config['REGISTER_START_URL'])
@load_schema('register_start')
def register_start(data):
    """E-mail a registration confirmation link for the validated address."""
    token = _userflow.register_confirm_serializer.dumps(data['email'])
    confirm_url = _userflow.config['REGISTER_CONFIRM_URL'].format(token)
    _userflow.emails.send('register_start', data['email'],
                          {'confirm_url': confirm_url, 'token': token})
@load_schema('register_confirm')
def register_confirm(data):
    """Echo the validated confirmation payload (token verification is done
    by the 'register_confirm' schema — NOTE(review): presumably; confirm)."""
    return data
@load_schema('register_finish')
def register_finish(data, login=True, login_remember=False):
    """Create the user account, optionally log them in, and associate any
    pending OAuth provider accounts stashed in the session.

    Returns the status payload augmented with 'auth_token' (None when
    login=False).
    """
    locale = data.get('locale', current_user.locale)
    timezone = data.get('timezone', current_user.timezone)
    user = _datastore.create_user(email=data['email'], is_active=True,
                                  locale=locale, timezone=timezone)
    # NOTE(review): user is created with is_active=True above, so this branch
    # looks reachable only if create_user returned an existing inactive user —
    # confirm.
    if not user.is_active:
        user.is_active = True
    user.set_password(data['password'])
    user.generate_auth_id()
    _datastore.put(user)
    _datastore.commit()  # TODO: to get user_id
    if login:
        auth_token = login_user(user, login_remember)
    else:
        auth_token = None
    # Link any provider accounts remembered by provider_login(REGISTER).
    provider_associated = False
    for provider, provider_user_id in session.get('auth_provider', {}).items():
        provider_user = _datastore.find_provider_user(provider=provider,
                                                      provider_user_id=provider_user_id)
        if provider_user:
            provider_associated = True
            provider_user.user_id = user.id
    session.pop('auth_provider', None)
    if provider_associated:
        _datastore.commit()
    signals.register_finish.send(app=current_app._get_current_object(), user=user)
    data = status()
    data['auth_token'] = auth_token
    return data
@load_schema('restore_start')
def restore_start(data):
    """E-mail a password-restore confirmation link for the validated address."""
    token = _userflow.restore_confirm_serializer.dumps(data['email'])
    confirm_url = _userflow.config['RESTORE_CONFIRM_URL'].format(token)
    _userflow.emails.send('restore_start', data['email'],
                          {'confirm_url': confirm_url, 'token': token})
@load_schema('restore_confirm')
def restore_confirm(data):
    """Echo the validated restore payload (token verification is done by the
    'restore_confirm' schema — NOTE(review): presumably; confirm)."""
    return data
@load_schema('restore_finish')
def restore_finish(data, login=True, login_remember=False):
    """Set the restored user's new password and optionally log them in.

    Returns the status payload augmented with 'auth_token' (None when
    login=False).
    """
    # Schema resolves the restore token to (user, payload).
    user, data = data
    user.set_password(data['password'])
    _datastore.commit()
    if login:
        auth_token = login_user(user, login_remember)
    else:
        auth_token = None
    data = status()
    data['auth_token'] = auth_token
    return data
@login_required
@load_schema('password_change')
def password_change(data):
    """Change the authenticated user's password and return session status."""
    current_user.set_password(data['password'])
    _datastore.commit()
    return status()
# Default name -> view mapping consumed by add_api_routes (and overridable
# through _userflow.views).  Keys starting with '_' are internal helpers and
# are never routed.
views_map = {
    '_schema_errors_processor': schema_errors_processor,
    'login': login,
    'logout': logout,
    'status': status,
    'set_i18n': set_i18n,
    'timezones': timezones,
    'register_start': register_start,
    'register_confirm': register_confirm,
    'register_finish': register_finish,
    'restore_start': restore_start,
    'restore_confirm': restore_confirm,
    'restore_finish': restore_finish,
    'password_change': password_change,
}
def add_api_routes(config, views_map, blueprint):
    """Register each public view from *views_map* on *blueprint*.

    URL and HTTP method come from *config* under the keys
    '<NAME>_API_URL' and '<NAME>_API_METHOD'; a falsy URL disables the view.
    """
    for name, view in views_map.items():
        # Leading-underscore names are internal helpers, never routed.
        if name.startswith('_'):
            continue
        upper_name = name.upper()
        url = config['{}_API_URL'.format(upper_name)]
        if not url:
            continue
        # Schema-decorated views expect the JSON payload as first argument.
        if hasattr(view, 'load_schema_decorated'):
            view = request_json(view)
        view = response_json(view)
        method = config['{}_API_METHOD'.format(upper_name)]
        blueprint.route(url, methods=[method], endpoint=name)(view)
| |
#!/usr/bin/env python
# Copyright (C) 2020 T. Zachary Laine
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import lzw
# C++ header template for the generated case_condition enum; {0} is the
# newline-joined list of enumerator lines.
constants_header_form = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#ifndef BOOST_TEXT_DETAIL_CASE_CONSTANTS_HPP
#define BOOST_TEXT_DETAIL_CASE_CONSTANTS_HPP
#include <array>
#include <cstdint>
namespace boost {{ namespace text {{ namespace detail {{
enum class case_condition : uint16_t {{
{0}
}};
}}}}}}
#endif
'''
# C++ implementation template for the generated case-mapping tables.  The
# positional args alternate: even indices are comma-joined table rows, odd
# indices the matching element counts (see the cpp_file.write call at the
# bottom of this script for the exact order).
case_impl_file_form = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/trie_map.hpp>
#include <boost/text/detail/case_mapping_data.hpp>
namespace boost {{ namespace text {{ namespace detail {{
std::array<uint32_t, {1}> make_case_cps()
{{
return {{{{
{0}
}}}};
}}
std::array<case_mapping_to, {3}> make_case_mapping_to()
{{
return {{{{
{2}
}}}};
}}
namespace {{
constexpr std::array<uint32_t, {5}> cased_cps()
{{
return {{{{
{4}
}}}};
}}
constexpr std::array<uint32_t, {7}> case_ignorable_cps()
{{
return {{{{
{6}
}}}};
}}
constexpr std::array<uint32_t, {9}> soft_dotted_cps()
{{
return {{{{
{8}
}}}};
}}
constexpr std::array<case_mapping, {11}> to_lower_mappings()
{{
return {{{{
{10}
}}}};
}}
constexpr std::array<case_mapping, {13}> to_title_mappings()
{{
return {{{{
{12}
}}}};
}}
constexpr std::array<case_mapping, {15}> to_upper_mappings()
{{
return {{{{
{14}
}}}};
}}
constexpr std::array<uint32_t, {17}> changes_when_uppered()
{{
return {{{{
{16}
}}}};
}}
constexpr std::array<uint32_t, {19}> changes_when_lowered()
{{
return {{{{
{18}
}}}};
}}
constexpr std::array<uint32_t, {21}> changes_when_titled()
{{
return {{{{
{20}
}}}};
}}
}}
case_map_t make_to_lower_map()
{{
case_map_t retval;
for (auto datum : to_lower_mappings()) {{
retval[datum.from_] =
case_elements{{datum.first_, datum.last_}};
}}
return retval;
}}
case_map_t make_to_title_map()
{{
case_map_t retval;
for (auto datum : to_title_mappings()) {{
retval[datum.from_] =
case_elements{{datum.first_, datum.last_}};
}}
return retval;
}}
case_map_t make_to_upper_map()
{{
case_map_t retval;
for (auto datum : to_upper_mappings()) {{
retval[datum.from_] =
case_elements{{datum.first_, datum.last_}};
}}
return retval;
}}
std::vector<uint32_t> make_soft_dotted_cps()
{{
auto const cps = soft_dotted_cps();
return std::vector<uint32_t>(cps.begin(), cps.end());
}}
std::unordered_set<uint32_t> make_cased_cps()
{{
auto const cps = cased_cps();
return std::unordered_set<uint32_t>(cps.begin(), cps.end());
}}
std::unordered_set<uint32_t> make_case_ignorable_cps()
{{
auto const cps = case_ignorable_cps();
return std::unordered_set<uint32_t>(cps.begin(), cps.end());
}}
std::unordered_set<uint32_t> make_changes_when_uppered_cps()
{{
auto const cps = changes_when_uppered();
return std::unordered_set<uint32_t>(cps.begin(), cps.end());
}}
std::unordered_set<uint32_t> make_changes_when_lowered_cps()
{{
auto const cps = changes_when_lowered();
return std::unordered_set<uint32_t>(cps.begin(), cps.end());
}}
std::unordered_set<uint32_t> make_changes_when_titled_cps()
{{
auto const cps = changes_when_titled();
return std::unordered_set<uint32_t>(cps.begin(), cps.end());
}}
}}}}}}
'''
def get_case_mappings(unicode_data, special_casing, prop_list, derived_core_props):
    """Parse four Unicode Character Database files into case-mapping tables.

    NOTE(review): this script relies on Python 2 semantics (map() results are
    indexed below, and the file uses print statements elsewhere) — confirm
    before running under Python 3.

    Returns a 12-tuple: the to_lower/to_title/to_upper mapping lists (each
    entry is (cp, (lo, hi)) indexing into all_mapped_tos), the flattened cps
    pool, the sorted condition names, soft-dotted cps, cased cps,
    case-ignorable cps, the all_mapped_tos table, and the three
    changes-when-*cased cp lists.
    """
    to_lower = {}
    to_title = {}
    to_upper = {}
    all_tuples = set()
    conditions = set()
    changes_when_u = []
    changes_when_l = []
    changes_when_t = []
    def init_dict_elem(k, m):
        # Ensure m[k] exists as a list before appending.
        if k not in m:
            m[k] = []
    # Pass 1: simple one-to-one mappings from UnicodeData.txt fields 12-14.
    lines = open(unicode_data, 'r').readlines()
    for line in lines:
        line = line[:-1]
        if not line.startswith('#') and len(line) != 0:
            comment_start = line.find('#')
            comment = ''
            if comment_start != -1:
                comment = line[comment_start + 1:].strip()
                line = line[:comment_start]
            fields = map(lambda x: x.strip(), line.split(';'))
            cp = fields[0]
            upper = fields[12]
            lower = fields[13]
            title = fields[14]
            if lower != '':
                init_dict_elem(cp, to_lower)
                to_lower[cp].append(([lower], [], 'from_unicode_data'))
                all_tuples.add((lower, None, None))
            if title != '':
                init_dict_elem(cp, to_title)
                to_title[cp].append(([title], [], 'from_unicode_data'))
                all_tuples.add((title, None, None))
            if upper != '':
                init_dict_elem(cp, to_upper)
                to_upper[cp].append(([upper], [], 'from_unicode_data'))
                all_tuples.add((upper, None, None))
    # Helpers to normalize variable-length mapping lists to fixed tuples.
    def to_tuple_2(l):
        if len(l) == 1:
            return (l[0], None)
        if len(l) == 2:
            return (l[0], l[1])
        return None
    def to_tuple_3(l):
        if len(l) == 1:
            return (l[0], None, None)
        if len(l) == 2:
            return (l[0], l[1], None)
        if len(l) == 3:
            return (l[0], l[1], l[2])
        return None
    def from_tuple(t):
        retval = []
        retval.append(t[0])
        if t[1] != None:
            retval.append(t[1])
        if 2 < len(t) and t[2] != None:
            retval.append(t[2])
        return retval
    # Pass 2: one-to-many and conditional mappings from SpecialCasing.txt.
    lines = open(special_casing, 'r').readlines()
    for line in lines:
        line = line[:-1]
        if not line.startswith('#') and len(line) != 0:
            fields = map(lambda x: x.strip(), line.split(';'))
            cp = fields[0].strip()
            lower = fields[1].strip().split(' ')
            if lower == ['']:
                lower = []
            title = fields[2].strip().split(' ')
            if title == ['']:
                title = []
            upper = fields[3].strip().split(' ')
            if upper == ['']:
                upper = []
            conditions_ = []
            if 3 < len(fields) and '#' not in fields[4]:
                conditions_ = fields[4].strip().split(' ')
                for c in conditions_:
                    conditions.add(c)
            if len(lower):
                init_dict_elem(cp, to_lower)
                to_lower[cp].append((lower, conditions_, None))
                all_tuples.add(to_tuple_3(lower))
            if len(title):
                init_dict_elem(cp, to_title)
                to_title[cp].append((title, conditions_, None))
                all_tuples.add(to_tuple_3(title))
            if len(upper):
                init_dict_elem(cp, to_upper)
                to_upper[cp].append((upper, conditions_, None))
                all_tuples.add(to_tuple_3(upper))
    all_tuples = sorted(map(from_tuple, all_tuples))
    conditions = sorted(conditions)
    def subsequence(seq, subseq):
        # Return the (lo, hi) span of subseq within seq, or an empty span
        # (lo == hi) when it is not a contiguous subsequence.
        i = 0
        while i < len(seq):
            if seq[i] == subseq[0]:
                break
            i += 1
        if i == len(seq):
            return (i, i)
        lo = i
        sub_i = 0
        while i < len(seq) and sub_i < len(subseq) and seq[i] == subseq[sub_i]:
            i += 1
            sub_i += 1
        if sub_i == len(subseq):
            return (lo, i)
        return (lo, lo)
    # Flatten all mapped-to tuples into one cps pool, sharing overlaps.
    cps = []
    tuple_offsets = []
    tuple_offset = 0
    for i in range(len(all_tuples)):
        subseq = subsequence(cps, all_tuples[i])
        if subseq[0] != subseq[1]:
            tuple_offsets.append(subseq)
            continue
        cps += all_tuples[i]
        lo = tuple_offset
        tuple_offset += len(all_tuples[i])
        hi = tuple_offset
        tuple_offsets.append((lo, hi))
    def cp_indices(t):
        return tuple_offsets[all_tuples.index(from_tuple(t))]
    def to_cond_bitset(conds):
        # Render a condition list as an OR'd C++ bitmask expression.
        retval = ' | '.join(map(lambda x: '(uint16_t)case_condition::' + x, conds))
        if retval == '':
            retval = '0'
        return retval
    all_mapped_tos = []
    def filter_dupes(l):
        retval = []
        for x in l:
            if x not in retval:
                retval.append(x)
        return retval
    def unconditioned_last(l):
        # Order mappings so the unconditioned one acts as the fallback.
        unconditioned = None
        retval = []
        for x in l:
            if x[1] == '0':
                unconditioned = x
            else:
                retval.append(x)
        if unconditioned != None:
            retval.append(unconditioned)
        return retval
    # Convert each per-cp mapping dict into (cp, (lo, hi)) spans over the
    # shared all_mapped_tos table.
    tmp = to_lower
    to_lower = []
    for k,v in tmp.items():
        lo = len(all_mapped_tos)
        mapped_tos = map(lambda x: (cp_indices(to_tuple_3(x[0])), to_cond_bitset(x[1])), v)
        mapped_tos = unconditioned_last(filter_dupes(mapped_tos))
        subseq = subsequence(all_mapped_tos, mapped_tos)
        if subseq[0] != subseq[1]:
            to_lower.append((k, subseq))
            continue
        all_mapped_tos += mapped_tos
        hi = len(all_mapped_tos)
        to_lower.append((k, (lo, hi)))
    tmp = to_title
    to_title = []
    for k,v in tmp.items():
        lo = len(all_mapped_tos)
        mapped_tos = map(lambda x: (cp_indices(to_tuple_3(x[0])), to_cond_bitset(x[1])), v)
        mapped_tos = unconditioned_last(filter_dupes(mapped_tos))
        subseq = subsequence(all_mapped_tos, mapped_tos)
        if subseq[0] != subseq[1]:
            to_title.append((k, subseq))
            continue
        all_mapped_tos += mapped_tos
        hi = len(all_mapped_tos)
        to_title.append((k, (lo, hi)))
    tmp = to_upper
    to_upper = []
    for k,v in tmp.items():
        lo = len(all_mapped_tos)
        mapped_tos = map(lambda x: (cp_indices(to_tuple_3(x[0])), to_cond_bitset(x[1])), v)
        mapped_tos = unconditioned_last(filter_dupes(mapped_tos))
        subseq = subsequence(all_mapped_tos, mapped_tos)
        if subseq[0] != subseq[1]:
            to_upper.append((k, subseq))
            continue
        all_mapped_tos += mapped_tos
        hi = len(all_mapped_tos)
        to_upper.append((k, (lo, hi)))
    # Soft_Dotted cps (single cps or 'lo..hi' ranges) from PropList.txt.
    soft_dotteds = []
    lines = open(prop_list, 'r').readlines()
    for line in lines:
        line = line[:-1]
        if not line.startswith('#') and len(line) != 0:
            fields = map(lambda x: x.strip(), line.split(';'))
            if fields[1].startswith('Soft_Dotted'):
                cps_ = fields[0].split('.')
                soft_dotteds.append(cps_[0])
                if 1 < len(cps_):
                    for i in range(int(cps_[0], 16) + 1, int(cps_[2], 16) + 1):
                        soft_dotteds.append(hex(i).upper()[2:])
    # Cased / Case_Ignorable / Changes_When_* cps from DerivedCoreProperties.
    cased_cps = []
    cased_ignorable_cps = []
    lines = open(derived_core_props, 'r').readlines()
    for line in lines:
        line = line[:-1]
        if not line.startswith('#') and len(line) != 0:
            fields = map(lambda x: x.strip(), line.split(';'))
            if fields[1].startswith('Cased') or \
               fields[1].startswith('Case_Ignorable') or \
               fields[1].startswith('Changes_When_Lowercased') or \
               fields[1].startswith('Changes_When_Uppercased') or \
               fields[1].startswith('Changes_When_Titlecased'):
                cps_ = fields[0].split('.')
                if 1 < len(cps_):
                    r = range(int(cps_[0], 16) + 1, int(cps_[2], 16) + 1)
                    cps_ = cps_[:1]
                    for i in r:
                        cps_.append(hex(i).upper()[2:])
                else:
                    cps_ = cps_[:1]
                if fields[1].startswith('Cased'):
                    cased_cps += cps_
                elif fields[1].startswith('Case_Ignorable'):
                    cased_ignorable_cps += cps_
                elif fields[1].startswith('Changes_When_Uppercased'):
                    changes_when_u += cps_
                elif fields[1].startswith('Changes_When_Lowercased'):
                    changes_when_l += cps_
                elif fields[1].startswith('Changes_When_Titlecased'):
                    changes_when_t += cps_
    return to_lower, to_title, to_upper, cps, conditions, soft_dotteds, \
           cased_cps, cased_ignorable_cps, all_mapped_tos, changes_when_u, \
           changes_when_l, changes_when_t
# Parse the four UCD files (expected in the working directory) and build all
# of the tables the generated C++ files need.
to_lower, to_title, to_upper, cps, conditions, soft_dotteds, \
    cased_cps, cased_ignorable_cps, all_mapped_tos, \
    changes_when_u, changes_when_l, changes_when_t = \
    get_case_mappings('UnicodeData.txt', 'SpecialCasing.txt', \
                      'PropList.txt', 'DerivedCoreProperties.txt')
#changes_when_l = sorted(map(lambda x: int(x, 16), changes_when_l))
#changes_when_l_ranges = []
#prev_cp = 0xffffffff
#curr_range = [0xffffffff, 0xffffffff]
#ranged_n = 0
#for cp in changes_when_l:
#    #cp = int(cp_, 16)
#    if cp != prev_cp + 1:
#        if curr_range[0] == 0xffffffff:
#            curr_range[0] = cp
#        else:
#            curr_range[1] = prev_cp + 1
#            if curr_range[1] != curr_range[0] + 1:
#                changes_when_l_ranges.append((hex(curr_range[0]), hex(curr_range[1])))
#                ranged_n += curr_range[1] - curr_range[0]
#            curr_range = [0xffffffff, 0xffffffff]
#    prev_cp = cp
#print changes_when_l_ranges
#print len(changes_when_l_ranges), ranged_n, len(changes_when_l)
# Emit the case_condition enum header; one enumerator line per condition,
# each assigned the next power-of-two bit value.
hpp_file = open('case_constants.hpp', 'w')
condition_enums = []
for i in range(len(conditions)):
    c = conditions[i]
    condition_enums.append(' {} = {},'.format(c, 1 << i))
hpp_file.write(constants_header_form.format('\n'.join(condition_enums)))
def make_case_mapping_to(t):
    """Render one ((first, last), conditions) entry as a C++ aggregate."""
    (first, last), conditions = t
    return '{{ {0}, {1}, {2} }}'.format(first, last, conditions)
def make_case_mapping(t):
    """Render one (cp_hex, (first, last)) entry as a C++ aggregate."""
    cp_hex, (first, last) = t
    return '{{ 0x{0}, {1}, {2} }}'.format(cp_hex, first, last)
def compressed_cp_lines(cps):
    """LZW-compress a code point list and report size savings (evaluation only)."""
    values_per_line = 12
    bytes_ = []
    for cp in cps:
        lzw.add_cp(bytes_, int(cp, 16))
    compressed_bytes = lzw.compress(bytes_)
    print 'rewrote {} * 32 = {} bits as {} * 8 = {} bits'.format(len(cps), len(cps)*32, len(bytes_), len(bytes_)*8)
    print 'compressed to {} * 16 = {} bits'.format(len(compressed_bytes), len(compressed_bytes) * 16)
    return lzw.compressed_bytes_to_lines(compressed_bytes, values_per_line)
# No real gain (<1%). Use uncompressed byte stream instead.
#compressed_cp_lines(cased_cps)
# No real gain (identical size!). Use uncompressed byte stream instead.
#compressed_cp_lines(cased_ignorable_cps)
# Bit values mirroring the generated case_condition enum; used only by the
# size-evaluation helpers below.
case_conditions = {
    '(uint16_t)case_condition::After_I': 1,
    '(uint16_t)case_condition::After_Soft_Dotted': 2,
    '(uint16_t)case_condition::Final_Sigma': 4,
    '(uint16_t)case_condition::More_Above': 8,
    '(uint16_t)case_condition::Not_Before_Dot': 16,
    '(uint16_t)case_condition::az': 32,
    '(uint16_t)case_condition::lt': 64,
    '(uint16_t)case_condition::tr': 128
}
def compressed_case_mapping_to_lines(mappings):
values_per_line = 12
bytes_ = []
for t in mappings:
lzw.add_short(bytes_, t[0][0])
lzw.add_short(bytes_, t[0][1])
try:
x = case_conditions[t[1]] # TODO: Totally wrong! Just here for size eval.
except:
x = 0
lzw.add_short(bytes_, x)
compressed_bytes = lzw.compress(bytes_)
print 'rewrote {} * 48 = {} bits as {} * 8 = {} bits'.format(len(mappings), len(mappings)*48, len(bytes_), len(bytes_)*8)
print 'compressed to {} * 16 = {} bits'.format(len(compressed_bytes), len(compressed_bytes) * 16)
return lzw.compressed_bytes_to_lines(compressed_bytes, values_per_line)
# Heavy pessimization.
#compressed_case_mapping_to_lines(all_mapped_tos)
def compressed_case_mapping_lines(mappings):
    """LZW-compress case_mapping entries and report size savings (evaluation only)."""
    values_per_line = 12
    bytes_ = []
    for t in mappings:
        lzw.add_cp(bytes_, int(t[0], 16))
        lzw.add_short(bytes_, t[1][0])
        lzw.add_short(bytes_, t[1][1])
    compressed_bytes = lzw.compress(bytes_)
    print 'rewrote {} * 64 = {} bits as {} * 8 = {} bits'.format(len(mappings), len(mappings)*64, len(bytes_), len(bytes_)*8)
    print 'compressed to {} * 16 = {} bits'.format(len(compressed_bytes), len(compressed_bytes) * 16)
    return lzw.compressed_bytes_to_lines(compressed_bytes, values_per_line)
# compressed_case_mapping_lines(to_lower)
# compressed_case_mapping_lines(to_title)
# compressed_case_mapping_lines(to_upper)
def cps_string(cps):
    """Join hex code point strings into one body of \\UXXXXXXXX escapes."""
    escapes = [r'\U' + cp.rjust(8, '0') for cp in cps]
    return ''.join(escapes)
def cus_lines(cus):
    """Format an encoded byte string as source lines, 12 values per line."""
    as_ints = map(lambda x: ord(x), cus)
    values_per_line = 12
    return lzw.compressed_bytes_to_lines(as_ints, values_per_line)[0]
def utf8_cps(cps):
    """UTF-8 encode a code point list and report the size savings.

    NOTE(review): exec() is used to turn the \\U escapes into a unicode
    literal — safe only because the input is generated hex above; never feed
    untrusted data through here.
    """
    s = cps_string(cps)
    exec("s = u'" + s + "'")
    s = s.encode('UTF-8', 'strict')
    retval = cus_lines(s)
    print 'rewrote {} * 32 = {} bits as {} * 8 = {} bits ({} bytes saved)'.format(len(cps), len(cps) * 32, len(s), len(s) * 8, (len(cps) * 32 - len(s) * 8) / 8.0)
    return retval
#utf8_cps(cps)
#utf8_cps(cased_cps)
#utf8_cps(cased_ignorable_cps)
#utf8_cps(soft_dotteds)
#utf8_cps(changes_when_u)
#utf8_cps(changes_when_l)
#utf8_cps(changes_when_t)
# Write case_mapping.cpp.  Positional args alternate: even indices are the
# comma-joined table contents, odd indices the matching element counts, in
# exactly the order case_impl_file_form expects.
cpp_file = open('case_mapping.cpp', 'w')
cpp_file.write(case_impl_file_form.format(
    ',\n '.join(map(lambda x: '0x' + x, cps)),
    len(cps),
    ',\n '.join(map(make_case_mapping_to, all_mapped_tos)),
    len(all_mapped_tos),
    ',\n '.join(map(lambda x: '0x' + x, cased_cps)),
    len(cased_cps),
    ',\n '.join(map(lambda x: '0x' + x, cased_ignorable_cps)),
    len(cased_ignorable_cps),
    ',\n '.join(map(lambda x: '0x' + x, soft_dotteds)),
    len(soft_dotteds),
    ',\n '.join(map(make_case_mapping, to_lower)),
    len(to_lower),
    ',\n '.join(map(make_case_mapping, to_title)),
    len(to_title),
    ',\n '.join(map(make_case_mapping, to_upper)),
    len(to_upper),
    ',\n '.join(map(lambda x: '0x' + x, changes_when_u)),
    len(changes_when_u),
    ',\n '.join(map(lambda x: '0x' + x, changes_when_l)),
    len(changes_when_l),
    ',\n '.join(map(lambda x: '0x' + x, changes_when_t)),
    len(changes_when_t)
))
| |
"""Benchmark to help choosing the best chunksize so as to optimize the access
time in random lookups."""
import subprocess
from pathlib import Path
from time import perf_counter as clock
import numpy as np
import tables as tb
# Constants
NOISE = 1e-15  # standard deviation of the noise compared with actual values
rdm_cod = ['lin', 'rnd']  # filename codes indexed by `userandom` (0/1)
def get_nrows(nrows_str):
    """Convert a size string like '1k', '2.5m' or '1g' into a row count.

    :param nrows_str: number followed by a 'k' (10**3), 'm' (10**6) or
        'g' (10**9) suffix, e.g. '2.5k' -> 2500.
    :raises ValueError: if the suffix is missing or unrecognized.
    """
    powers = {'k': 3, 'm': 6, 'g': 9}
    try:
        return int(float(nrows_str[:-1]) * 10 ** powers[nrows_str[-1]])
    # Also catch IndexError so an empty string reports the same usage error
    # instead of leaking a raw IndexError; chain the cause for debugging.
    except (KeyError, IndexError) as exc:
        raise ValueError(
            "value of nrows must end with either 'k', 'm' or 'g' suffixes."
        ) from exc
class DB:
    """A benchmark database: one HDF5 file holding a single chunked EArray.

    Encapsulates creating, filling and randomly querying the array so the
    effect of `chunksize` on lookup time can be measured.
    """

    def __init__(self, nrows, dtype, chunksize, userandom, datadir,
                 docompress=0, complib='zlib'):
        # `nrows` and `chunksize` are size strings ('1k', '32k', ...)
        # decoded by get_nrows(); `userandom` selects noisy vs linear data.
        self.dtype = dtype
        self.docompress = docompress
        self.complib = complib
        self.filename = '-'.join([rdm_cod[userandom],
                                  "n" + nrows, "s" + chunksize, dtype])
        # Complete the filename
        self.filename = "lookup-" + self.filename
        if docompress:
            self.filename += '-' + complib + str(docompress)
        self.filename = datadir + '/' + self.filename + '.h5'
        print("Processing database:", self.filename)
        self.userandom = userandom
        self.nrows = get_nrows(nrows)
        self.chunksize = get_nrows(chunksize)
        # Rows appended per fill step equal one chunk.
        self.step = self.chunksize
        self.scale = NOISE

    def get_db_size(self):
        """Return the on-disk size of the file (du units, typically KB)."""
        # `sync` first so pending writes are flushed before measuring.
        sout = subprocess.Popen("sync;du -s %s" % self.filename, shell=True,
                                stdout=subprocess.PIPE).stdout
        line = [l for l in sout][0]
        return int(line.split()[0])

    def print_mtime(self, t1, explain):
        """Print elapsed time since *t1* and the resulting Krows/s rate."""
        mtime = clock() - t1
        print(f"{explain}: {mtime:.6f}")
        print(f"Krows/s: {self.nrows / 1000 / mtime:.6f}")

    def print_db_sizes(self, init, filled):
        """Print the array growth between the *init* and *filled* sizes."""
        # NOTE(review): label says MB but this divides du units by 1024 once;
        # correct only if du reports KB — confirm on the target platform.
        array_size = (filled - init) / 1024
        print(f"Array size (MB): {array_size:.3f}")

    def open_db(self, remove=0):
        """Open (append mode) and return the HDF5 file, optionally fresh."""
        if remove and Path(self.filename).is_file():
            Path(self.filename).unlink()
        con = tb.open_file(self.filename, 'a')
        return con

    def create_db(self, verbose):
        """Create, fill and time the array, then report sizes and close."""
        self.con = self.open_db(remove=1)
        self.create_array()
        init_size = self.get_db_size()
        t1 = clock()
        self.fill_array()
        array_size = self.get_db_size()
        self.print_mtime(t1, 'Insert time')
        self.print_db_sizes(init_size, array_size)
        self.close_db()

    def create_array(self):
        """Create the empty EArray with the configured chunkshape/filters."""
        # The filters chosen
        filters = tb.Filters(complevel=self.docompress,
                             complib=self.complib)
        atom = tb.Atom.from_kind(self.dtype)
        self.con.create_earray(self.con.root, 'earray', atom, (0,),
                               filters=filters,
                               expectedrows=self.nrows,
                               chunkshape=(self.chunksize,))

    def fill_array(self):
        "Fills the array"
        earray = self.con.root.earray
        j = 0
        # The same chunk-sized buffer is appended repeatedly; per-step
        # regeneration was deliberately disabled (see commented line below).
        arr = self.get_array(0, self.step)
        for i in range(0, self.nrows, self.step):
            stop = (j + 1) * self.step
            if stop > self.nrows:
                stop = self.nrows
            ###arr = self.get_array(i, stop, dtype)
            earray.append(arr)
            j += 1
        earray.flush()

    def get_array(self, start, stop):
        """Return values [start, stop), optionally with Gaussian noise."""
        arr = np.arange(start, stop, dtype='float')
        if self.userandom:
            arr += np.random.normal(0, stop * self.scale, size=stop - start)
        arr = arr.astype(self.dtype)
        return arr

    def print_qtime(self, ltimes):
        """Print raw/histogrammed query times plus first and steady means."""
        ltimes = np.array(ltimes)
        print("Raw query times:\n", ltimes)
        print("Histogram times:\n", np.histogram(ltimes[1:]))
        ntimes = len(ltimes)
        qtime1 = ltimes[0]  # First measured time
        if ntimes > 5:
            # Wait until the 5th iteration (in order to
            # ensure that the index is effectively cached) to take times
            qtime2 = sum(ltimes[5:]) / (ntimes - 5)
        else:
            qtime2 = ltimes[-1]  # Last measured time
        print(f"1st query time: {qtime1:.3f}")
        print(f"Mean (skipping the first 5 meas.): {qtime2:.3f}")

    def query_db(self, niter, avoidfscache, verbose):
        """Time *niter* random single-row lookups and print the stats."""
        self.con = self.open_db()
        earray = self.con.root.earray
        if avoidfscache:
            # Random seed -> different lookup sequence each run.
            rseed = int(np.random.randint(self.nrows))
        else:
            rseed = 19
        np.random.seed(rseed)
        # Discard one draw so timed lookups start from the same stream state.
        np.random.randint(self.nrows)
        ltimes = []
        for i in range(niter):
            t1 = clock()
            self.do_query(earray, np.random.randint(self.nrows))
            ltimes.append(clock() - t1)
        self.print_qtime(ltimes)
        self.close_db()

    def do_query(self, earray, idx):
        """Fetch a single element; this is the operation being benchmarked."""
        return earray[idx]

    def close_db(self):
        """Close the underlying HDF5 file."""
        self.con.close()
if __name__ == "__main__":
    import sys
    import getopt

    usage = """usage: %s [-v] [-m] [-c] [-q] [-x] [-z complevel] [-l complib] [-N niter] [-n nrows] [-d datadir] [-t] type [-s] chunksize
            -v verbose
            -m use random values to fill the array
            -q do a (random) lookup
            -x choose a different seed for random numbers (i.e. avoid FS cache)
            -c create the file
            -z compress with zlib (no compression by default)
            -l use complib for compression (zlib used by default)
            -N number of iterations for reading
            -n sets the number of rows in the array
            -d directory to save data (default: data.nobackup)
            -t select the type for array ('int' or 'float'. def 'float')
            -s select the chunksize for array
            \n""" % sys.argv[0]

    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'vmcqxz:l:N:n:d:t:s:')
    # Fix: catch only getopt's own error -- the original bare `except:`
    # also swallowed SystemExit and KeyboardInterrupt.
    except getopt.GetoptError:
        sys.stderr.write(usage)
        sys.exit(0)

    # default options (unused `optlevel`/`createindex` flags removed)
    verbose = 0
    userandom = 0
    docreate = 0
    docompress = 0
    complib = "zlib"
    doquery = False
    avoidfscache = 0
    krows = '1k'
    chunksize = '32k'
    niter = 50
    datadir = "data.nobackup"
    dtype = "float"

    # Get the options
    for option in opts:
        if option[0] == '-v':
            verbose = 1
        elif option[0] == '-m':
            userandom = 1
        elif option[0] == '-c':
            docreate = 1
        elif option[0] == '-q':
            doquery = True
        elif option[0] == '-x':
            avoidfscache = 1
        elif option[0] == '-z':
            docompress = int(option[1])
        elif option[0] == '-l':
            complib = option[1]
        elif option[0] == '-N':
            niter = int(option[1])
        elif option[0] == '-n':
            krows = option[1]
        elif option[0] == '-d':
            datadir = option[1]
        elif option[0] == '-t':
            if option[1] in ('int', 'float'):
                dtype = option[1]
            else:
                print("type should be either 'int' or 'float'")
                sys.exit(0)
        elif option[0] == '-s':
            chunksize = option[1]

    if not avoidfscache:
        # in order to always generate the same random sequence
        np.random.seed(20)

    if verbose:
        if userandom:
            print("using random values")

    db = DB(krows, dtype, chunksize, userandom, datadir, docompress, complib)

    if docreate:
        if verbose:
            print("writing %s rows" % krows)
        db.create_db(verbose)

    if doquery:
        print("Calling query_db() %s times" % niter)
        db.query_db(niter, avoidfscache, verbose)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2012 Samuel G. D. Williams. <http://www.oriontransfer.co.nz>
# Copyright (c) 2012 Michal J Wallace. <http://www.michaljwallace.com/>
# Copyright (c) 2012, 2016 Charles Childers <http://forthworks.com/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
#import StringIO
import pl0_parser
from pl0_node_visitor import StackingNodeVisitor
# AST->parable translator for operators
# NOTE(review): `ops` appears unused by the Compiler below -- accept_term and
# accept_expression inline the same symbols; kept as reference documentation.
ops = {
    'DIVIDE' : '/ floor', # Parable has no integer division; floor the result
    'MODULO' : 'rem',
    'TIMES' : '*',
    'PLUS' : '+',
    'MINUS' : '-',
}
# Relational operators: PL/0 comparison tokens -> Parable predicate words.
rel_ops = {
    'LT' : 'lt?',
    'LTE' : 'lteq?',
    'GT' : 'gt?',
    'GTE' : 'gteq?',
    'E' : 'eq?',
    'NE' : '-eq?',
}
class Compiler(StackingNodeVisitor):
    """PL/0 AST -> Parable code generator.

    Walks the tuple-shaped AST produced by pl0_parser and prints Parable
    source to stdout.  Scope handling (push/pop/stack/find/visit_node/
    visit_expressions) is inherited from StackingNodeVisitor -- see
    pl0_node_visitor for their exact semantics.
    """

    def __init__(self):
        super(Compiler, self).__init__()
        # Monotonic counter used to mint unique labels/names.
        self.label_id = 0

    def intermediate_label(self, hint = ''):
        """Return a fresh unique label of the form t_<hint>_<n>."""
        self.label_id += 1
        return 't_' + hint + '_' + repr(self.label_id)

    def generate(self, node):
        """Compile *node*; returns [final scope, visit result]."""
        self.push()
        result = self.visit_node(node)
        return [self.pop(), result]

    def accept_variables(self, *node):
        # node[1:] are VARIABLE tuples; node[0] is the node type tag.
        for var in node[1:]:
            # Generate a unique name for the variable
            variable_name = self.intermediate_label('var_' + var[1])
            # Save the unique name for loading this variable in the future.
            self.stack[-1].update(var[1], variable_name)
            # Allocate static storage space for the variable
            print("'" + variable_name + "' var")
            # print " 0"

    def accept_constants(self, *node):
        # Constants need no storage: record name -> literal value only.
        for var in node[1:]:
            self.stack[-1].define(var[1], var[2])

    def accept_procedures(self, *node):
        for proc in node[1:]:
            # Generate a unique name for the procedure
            proc_name = self.intermediate_label('proc_' + proc[1])
            # Save the unique procedure name on the lexical stack
            self.stack[-1].declare(proc[1], proc_name)
            # Define a new lexical scope
            self.push()
            # Generate any static storage required by the procedure
            print("\"Procedure " + proc[1] + "\"")
            self.visit_expressions(proc[2][1:3])
            # Generate the code for the procedure
            print("[ ] '" + proc_name + "' :")
            print("[")
            self.visit_node(proc[2][4])
            print("] '" + proc_name + "' :")
            # Finished with lexical scope
            self.pop()

    def accept_program(self, *node):
        # print "JMP main"
        # Emit an Output queue variable, then the program block as 'main'.
        print("'Output' var\n&Output pop drop")
        block = node[1]
        self.visit_expressions(block[1:4])
        print("[")
        self.visit_node(block[4])
        print("] 'main' :")
        print('main Output')

    def accept_while(self, *node):
        # Parable's `while` re-evaluates the quoted body, so the condition
        # is emitted after the loop body (it supplies the continue flag).
        top_label = self.intermediate_label("while_start")
        bottom_label = self.intermediate_label("while_end")
        condition = node[1]
        loop = node[2]
        print("[")
        # print top_label + ":"
        # Result of condition is on top of stack
        # print "\tJE " + bottom_label
        self.visit_node(loop)
        # print "\tJMP " + top_label
        # print bottom_label + ":"
        self.visit_node(condition)
        print("] while")

    def accept_if(self, *node):
        # Condition first, then the quoted body guarded by `if-true`.
        false_label = self.intermediate_label("if_false")
        condition = node[1]
        body = node[2]
        self.visit_node(condition)
        print("[")
        # print "\tJE " + false_label
        self.visit_node(body)
        print("] if-true")
        # print false_label + ":"

    def accept_condition(self, *node):
        # Stack discipline: lhs rhs <predicate>.
        operator = node[2]
        lhs = node[1]
        rhs = node[3]
        self.visit_node(lhs)
        self.visit_node(rhs)
        # print "\tCMP" + operator
        print(" " + rel_ops[operator])

    def accept_set(self, *node):
        # NOTE(review): `name` duplicates `assign_to` and is unused.
        name = node[1][1]
        self.visit_node(node[2])
        assign_to = node[1][1]
        defined, value, level = self.find(assign_to)
        if defined != 'VARIABLE':
            raise NameError("Invalid assignment to non-variable " + assign_to + " of type " + defined)
        print(" !" + str(value))

    def accept_call(self, *node):
        defined, value, level = self.find(node[1])
        if defined != 'PROCEDURE':
            raise NameError("Expecting procedure but got: " + defined)
        print(" |" + value)

    def accept_term(self, *node):
        # NOTE(review): this tests 'DIVIDES' while the `ops` table above
        # spells the token 'DIVIDE' -- confirm against pl0_parser's tokens.
        self.visit_node(node[1])
        for term in node[2:]:
            self.visit_node(term[1])
            if term[0] == 'TIMES':
                print(" *")
            elif term[0] == 'DIVIDES':
                print(" / floor")

    def accept_expression(self, *node):
        # Result of this expression will be on the top of stack
        # node[1] carries the optional unary sign; trailing tuples are
        # (PLUS|MINUS, term) pairs.
        self.visit_node(node[2])
        for term in node[3:]:
            self.visit_node(term[1])
            if term[0] == 'PLUS':
                print(" +")
            elif term[0] == 'MINUS':
                print(" -")
        if node[1] == 'MINUS':
            print(" -1 *")

    def accept_print(self, *node):
        # PL/0 `!expr`: push the value onto the Output queue.
        self.visit_node(node[1])
        print(" &Output push")
        # print "\tPOP"

    def accept_number(self, *node):
        print(" #" + repr(node[1]))

    def accept_name(self, *node):
        # Variables load through their storage cell; constants inline.
        defined, value, level = self.find(node[1])
        if defined == 'VARIABLE':
            print(" @" + value)
        elif defined == 'CONSTANT':
            print(" " + str(value))
        else:
            raise NameError("Invalid value name " + node[1] + " of type " + defined)
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('PL/0 to Parable Transpiler')
        print('Usage:')
        print('  ./pl0-parable.py input >output')
    else:
        # Fix: read the PL/0 source through a context manager so the file
        # handle is closed promptly (the original open(...).read() leaked it).
        with open(sys.argv[1], 'r') as source:
            code = source.read()
        parser = pl0_parser.Parser()
        parser.input(code)
        program = parser.p_program()
        compiler = Compiler()
        compiler.generate(program)
| |
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Record the event logs during boot and output them to a file.
This script repeats the record of each event log during Android boot specified
times. By default, interval between measurements is adjusted in such a way that
CPUs are cooled down sufficiently to avoid boot time slowdown caused by CPU
thermal throttling. The result is output in a tab-separated value format.
Examples:
Repeat measurements 10 times. Interval between iterations is adjusted based on
CPU temperature of the device.
$ ./perfboot.py --iterations=10
Repeat measurements 20 times. 60 seconds interval is taken between each
iteration.
$ ./perfboot.py --iterations=20 --interval=60
Repeat measurements 20 times, show verbose output, output the result to
data.tsv, and read event tags from eventtags.txt.
$ ./perfboot.py --iterations=30 -v --output=data.tsv --tags=eventtags.txt
"""
import argparse
import atexit
import cStringIO
import glob
import inspect
import logging
import math
import os
import re
import subprocess
import sys
import threading
import time
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import adb
# The default event tags to record.  Order matters: main() treats the last
# surviving tag as the default boot-complete marker (see --end-tag).
_DEFAULT_EVENT_TAGS = [
    'boot_progress_start',
    'boot_progress_preload_start',
    'boot_progress_preload_end',
    'boot_progress_system_run',
    'boot_progress_pms_start',
    'boot_progress_pms_system_scan_start',
    'boot_progress_pms_data_scan_start',
    'boot_progress_pms_scan_end',
    'boot_progress_pms_ready',
    'boot_progress_ams_ready',
    'boot_progress_enable_screen',
    'sf_stop_bootanim',
    'wm_boot_animation_done',
]
class IntervalAdjuster(object):
    """A helper class to take sufficient interval between iterations."""
    # CPU temperature values per product used to decide interval.
    # NOTE(review): 40 vs 40000 suggests mixed units (degrees vs
    # millidegrees) across products' thermal_zone drivers -- confirm.
    _CPU_COOL_DOWN_THRESHOLDS = {
        'flo': 40,
        'flounder': 40000,
        'razor': 40,
        'volantis': 40000,
    }
    # The interval between CPU temperature checks (seconds)
    _CPU_COOL_DOWN_WAIT_INTERVAL = 10
    # The wait time used when the value of _CPU_COOL_DOWN_THRESHOLDS for
    # the product is not defined.
    _CPU_COOL_DOWN_WAIT_TIME_DEFAULT = 120

    def __init__(self, interval, device):
        # interval: fixed seconds between runs, or None for adaptive waits.
        self._interval = interval
        self._device = device
        # Glob is expanded by the device-side shell.
        self._temp_paths = device.shell(
            ['ls', '/sys/class/thermal/thermal_zone*/temp'])[0].splitlines()
        self._product = device.get_prop('ro.build.product')
        self._waited = False

    def wait(self):
        """Waits certain amount of time for CPUs cool-down."""
        if self._interval is None:
            self._wait_cpu_cool_down(self._product, self._temp_paths)
        else:
            # Skip the wait before the very first iteration.
            if self._waited:
                print 'Waiting for %d seconds' % self._interval
                time.sleep(self._interval)
        self._waited = True

    def _get_cpu_temp(self, threshold):
        # Return the first reading at/above threshold, else the maximum.
        max_temp = 0
        for temp_path in self._temp_paths:
            temp = int(self._device.shell(['cat', temp_path])[0].rstrip())
            max_temp = max(max_temp, temp)
            if temp >= threshold:
                return temp
        return max_temp

    def _wait_cpu_cool_down(self, product, temp_paths):
        # Poll until every thermal zone is below the product's threshold;
        # fall back to a fixed wait when no threshold is configured.
        threshold = IntervalAdjuster._CPU_COOL_DOWN_THRESHOLDS.get(
            self._product)
        if threshold is None:
            print 'No CPU temperature threshold is set for ' + self._product
            print ('Just wait %d seconds' %
                   IntervalAdjuster._CPU_COOL_DOWN_WAIT_TIME_DEFAULT)
            time.sleep(IntervalAdjuster._CPU_COOL_DOWN_WAIT_TIME_DEFAULT)
            return
        while True:
            temp = self._get_cpu_temp(threshold)
            if temp < threshold:
                logging.info('Current CPU temperature %s' % temp)
                return
            print 'Waiting until CPU temperature (%d) falls below %d' % (
                temp, threshold)
            time.sleep(IntervalAdjuster._CPU_COOL_DOWN_WAIT_INTERVAL)
class WatchdogTimer(object):
    """A timer that makes is_timedout() return true in |timeout| seconds."""

    def __init__(self, timeout):
        self._timedout = False
        # Daemon timer so a forgotten watchdog never blocks interpreter exit.
        self._timer = threading.Timer(timeout, self._mark_timedout)
        self._timer.daemon = True
        self._timer.start()

    def _mark_timedout(self):
        # Timer callback: flip the flag once the deadline passes.
        self._timedout = True

    def is_timedout(self):
        """Return True once the timeout has elapsed (unless cancelled)."""
        return self._timedout

    def cancel(self):
        """Stop the countdown; is_timedout() stays False afterwards."""
        self._timer.cancel()
def readlines_unbuffered(proc):
    """Read lines from |proc|'s standard out without buffering.

    Yields each line (without the trailing newline) as soon as it is
    complete; stops once the stream is exhausted and the process has exited.
    """
    while True:
        chars = []
        ch = proc.stdout.read(1)
        # EOF with the process gone means no more lines at all.
        if ch == '' and proc.poll() is not None:
            break
        while ch != '\n':
            if ch == '' and proc.poll() is not None:
                break
            chars.append(ch)
            ch = proc.stdout.read(1)
        yield ''.join(chars)
def disable_dropbox(device):
    """Removes the files created by Dropbox and avoids creating the files.

    Returns the previous dropbox_max_files setting so the caller can
    restore it later via restore_dropbox().
    """
    device.root()
    device.wait()
    device.shell(['rm', '-rf', '/system/data/dropbox'])
    # Remember the current setting before clobbering it with 0.
    reply = device.shell(['settings', 'get', 'global', 'dropbox_max_files'])
    previous = reply[0].rstrip()
    device.shell(['settings', 'put', 'global', 'dropbox_max_files', '0'])
    return previous
def restore_dropbox(device, original_dropbox_max_files):
    """Restores the dropbox_max_files setting."""
    device.root()
    device.wait()
    # 'null' means the setting did not exist before, so delete rather
    # than write the literal string back.
    if original_dropbox_max_files == 'null':
        command = ['settings', 'delete', 'global', 'dropbox_max_files']
    else:
        command = ['settings', 'put', 'global', 'dropbox_max_files',
                   original_dropbox_max_files]
    device.shell(command)
def init_perf(device, output, record_list, tags):
    """Prepare the device and register an exit hook that reports results."""
    device.wait()
    original_dropbox_max_files = None
    # Workaround for Dropbox issue (http://b/20890386).
    if device.get_prop('ro.debuggable') == '1':
        original_dropbox_max_files = disable_dropbox(device)

    def _report_and_restore():
        # Best effort at interpreter exit: dump whatever was measured and
        # put the Dropbox setting back; never raise from an atexit hook.
        try:
            if record_list:
                print_summary(record_list, tags[-1])
                output_results(output, record_list, tags)
            if original_dropbox_max_files is not None:
                restore_dropbox(device, original_dropbox_max_files)
        except (subprocess.CalledProcessError, RuntimeError):
            pass

    atexit.register(_report_and_restore)
def check_dm_verity_settings(device):
    """Warn when the dm-verity property is missing on /system or /vendor."""
    device.wait()
    for partition in ('system', 'vendor'):
        mode = device.get_prop('partition.%s.verified' % partition)
        if mode is not None:
            continue
        logging.warning('dm-verity is not enabled for /%s. Did you run '
                        'adb disable-verity? That may skew the result.',
                        partition)
def read_event_tags(tags_file):
    """Reads event tags from |tags_file|.

    Blank lines and '#' comments are ignored; falls back to
    _DEFAULT_EVENT_TAGS when no file is given.
    """
    if not tags_file:
        return _DEFAULT_EVENT_TAGS
    result = []
    with open(tags_file) as stream:
        for raw in stream:
            tag = raw.split('#', 1)[0].strip()
            if tag:
                result.append(tag)
    return result
def make_event_tags_re(tags):
    """Makes a regular expression object that matches event logs of |tags|."""
    # threadtime format: "<pid> <tid> I <tag>: <time>".
    alternation = '|'.join(tags)
    return re.compile(
        r'(?P<pid>[0-9]+) +[0-9]+ I (?P<tag>%s): (?P<time>\d+)' % alternation)
def filter_event_tags(tags, device):
    """Drop unknown tags not listed in device's event-log-tags file."""
    device.wait()
    # Build the set of tag names known to the device; each line of the
    # file starts with "<number> <tag-name> ...".
    known = set()
    listing = device.shell(['cat', '/system/etc/event-log-tags'])[0]
    for entry in listing.splitlines():
        fields = entry.split(' ')
        if len(fields) >= 2:
            known.add(fields[1])
    kept = []
    for tag in tags:
        if tag in known:
            kept.append(tag)
        else:
            logging.warning('Unknown tag \'%s\'. Ignoring...', tag)
    return kept
def get_values(record, tag):
    """Gets values that matches |tag| from |record|.

    Keys are (tag, pid) tuples; results come back in sorted key order.
    """
    matching_keys = sorted(key for key in record if key[0] == tag)
    return [record[key] for key in matching_keys]
def get_last_value(record, tag):
    """Gets the last value that matches |tag| from |record|, or 0 if none."""
    values = get_values(record, tag)
    return values[-1] if values else 0
def output_results(filename, record_list, tags):
    """Outputs |record_list| into |filename| in a TSV format."""
    # First, count the number of the values of each tag.
    # This is for dealing with events that occur multiple times.
    # For instance, boot_progress_preload_start and boot_progress_preload_end
    # are recorded twice on 64-bit system. One is for 64-bit zygote process
    # and the other is for 32-bit zygote process.
    values_counter = {}
    for record in record_list:
        for tag in tags:
            # Some record might lack values for some tags due to unanticipated
            # problems (e.g. timeout), so take the maximum count among all the
            # record.
            values_counter[tag] = max(values_counter.get(tag, 1),
                                      len(get_values(record, tag)))
    # Then creates labels for the data. If there are multiple values for one
    # tag, labels for these values are numbered except the first one as
    # follows:
    #
    # event_tag event_tag2 event_tag3
    #
    # The corresponding values are sorted in an ascending order of PID.
    labels = []
    for tag in tags:
        for i in range(1, values_counter[tag] + 1):
            labels.append('%s%s' % (tag, '' if i == 1 else str(i)))
    # Finally write the data into the file: a header row, then one row
    # per boot iteration.
    with open(filename, 'w') as f:
        f.write('\t'.join(labels) + '\n')
        for record in record_list:
            # Build each row in memory so a malformed one can be logged whole.
            line = cStringIO.StringIO()
            invalid_line = False
            for i, tag in enumerate(tags):
                if i != 0:
                    line.write('\t')
                values = get_values(record, tag)
                if len(values) < values_counter[tag]:
                    invalid_line = True
                    # Fill invalid record with 0
                    values += [0] * (values_counter[tag] - len(values))
                line.write('\t'.join(str(t) for t in values))
            if invalid_line:
                logging.error('Invalid record found: ' + line.getvalue())
            line.write('\n')
            f.write(line.getvalue())
    print 'Wrote: ' + filename
def median(data):
    """Calculates the median value from |data|.

    Works on any non-empty sequence of numbers; the even-length case
    averages the two middle elements.
    """
    data = sorted(data)
    n = len(data)
    if n % 2 == 1:
        # Use floor division: plain `/` produces a float index on Python 3
        # and would raise TypeError; `//` is identical on Python 2 ints.
        return data[n // 2]
    else:
        n2 = n // 2
        return (data[n2 - 1] + data[n2]) / 2.0
def mean(data):
    """Calculates the mean value from |data| (always a float)."""
    return sum(data) / float(len(data))
def stddev(data):
    """Calculates the population standard deviation of |data|."""
    # Inline the mean so this helper is self-contained.
    avg = float(sum(data)) / len(data)
    variance = sum((x - avg) ** 2 for x in data) / len(data)
    return math.sqrt(variance)
def print_summary(record_list, end_tag):
    """Prints the summary of |record_list|."""
    # Filter out invalid data: a 0 end time means the iteration never
    # reached |end_tag| (e.g. it timed out).
    end_times = [get_last_value(record, end_tag) for record in record_list
                 if get_last_value(record, end_tag) != 0]
    print 'mean: ', mean(end_times)
    print 'median:', median(end_times)
    print 'standard deviation:', stddev(end_times)
def do_iteration(device, interval_adjuster, event_tags_re, end_tag):
    """Measures the boot time once.

    Reboots the device and scrapes `adb logcat -b events` for the tags in
    |event_tags_re| until |end_tag| fires or 120 seconds elapse.  Returns a
    dict mapping (tag, pid) -> event time in ms (possibly partial on
    timeout).
    """
    device.wait()
    interval_adjuster.wait()
    device.reboot()
    print 'Rebooted the device'
    record = {}
    booted = False
    while not booted:
        # Re-wait each pass: adb can drop while the device restarts.
        device.wait()
        # Stop the iteration if it does not finish within 120 seconds.
        timeout = 120
        t = WatchdogTimer(timeout)
        p = subprocess.Popen(
            ['adb', 'logcat', '-b', 'events', '-v', 'threadtime'],
            stdout=subprocess.PIPE)
        for line in readlines_unbuffered(p):
            if t.is_timedout():
                print '*** Timed out ***'
                # Return whatever was captured so the caller can log it.
                return record
            m = event_tags_re.search(line)
            if not m:
                continue
            tag = m.group('tag')
            event_time = int(m.group('time'))
            pid = m.group('pid')
            # Key on (tag, pid): the same tag can fire from several
            # processes (e.g. 32-bit and 64-bit zygotes).
            record[(tag, pid)] = event_time
            print 'Event log recorded: %s (%s) - %d ms' % (
                tag, pid, event_time)
            if tag == end_tag:
                booted = True
                t.cancel()
                break
    return record
def parse_args():
    """Parses the command line arguments."""
    # Reuse the module docstring as the --help prologue, rendered verbatim.
    cli = argparse.ArgumentParser(
        description=inspect.getdoc(sys.modules[__name__]),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Measurement controls.
    cli.add_argument('--iterations', type=int, default=5,
                     help='Number of times to repeat boot measurements.')
    cli.add_argument('--interval', type=int,
                     help=('Duration between iterations. If this is not '
                           'set explicitly, durations are determined '
                           'adaptively based on CPUs temperature.'))
    # Output / device selection.
    cli.add_argument('-o', '--output', help='File name of output data.')
    cli.add_argument('-v', '--verbose', action='store_true',
                     help='Show verbose output.')
    cli.add_argument('-s', '--serial', default=os.getenv('ANDROID_SERIAL'),
                     help='Adb device serial number.')
    # Event-tag configuration.
    cli.add_argument('-t', '--tags', help='Specify the filename from which '
                     'event tags are read. Every line contains one event '
                     'tag and the last event tag is used to detect that '
                     'the device has finished booting unless --end-tag is '
                     'specified.')
    cli.add_argument('--end-tag', help='An event tag on which the script '
                     'stops measuring the boot time.')
    cli.add_argument('--apk-dir', help='Specify the directory which contains '
                     'APK files to be installed before measuring boot time.')
    return cli.parse_args()
def install_apks(device, apk_dir):
    # Install (or reinstall) every *.apk found directly in |apk_dir|.
    for apk in glob.glob(os.path.join(apk_dir, '*.apk')):
        print 'Installing: ' + apk
        device.install(apk, replace=True)
def main():
    """Entry point: set up the device, then run the measurement loop."""
    args = parse_args()
    if args.verbose:
        logging.getLogger().setLevel(logging.INFO)
    device = adb.get_device(args.serial)
    # Default output name encodes the build flavor and version.
    if not args.output:
        device.wait()
        args.output = 'perf-%s-%s.tsv' % (
            device.get_prop('ro.build.flavor'),
            device.get_prop('ro.build.version.incremental'))
    check_dm_verity_settings(device)
    if args.apk_dir:
        install_apks(device, args.apk_dir)
    record_list = []
    event_tags = filter_event_tags(read_event_tags(args.tags), device)
    # The last tag doubles as the boot-complete marker unless overridden.
    end_tag = args.end_tag or event_tags[-1]
    if end_tag not in event_tags:
        sys.exit('%s is not a valid tag.' % end_tag)
    # Ignore tags after the end marker; they will never be reached.
    event_tags = event_tags[0 : event_tags.index(end_tag) + 1]
    # Registers an atexit hook, so results are emitted even on early exit.
    init_perf(device, args.output, record_list, event_tags)
    interval_adjuster = IntervalAdjuster(args.interval, device)
    event_tags_re = make_event_tags_re(event_tags)
    for i in range(args.iterations):
        print 'Run #%d ' % i
        record = do_iteration(
            device, interval_adjuster, event_tags_re, end_tag)
        record_list.append(record)

if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
#
# This file is part of GetTor.
#
# :authors: Israel Leiva <ilv@torproject.org>
# Based on BridgeDB Twitter distributor (PoC) by wfn
# - https://github.com/wfn/twidibot
#
# :copyright: (c) 2008-2015, The Tor Project, Inc.
# (c) 2015, Israel Leiva
#
# :license: This is Free Software. See LICENSE for license information.
import os
import re
import tweepy
import logging
import gettext
import ConfigParser
import core
import utils
import blacklist
"""Twitter channel for distributing links to download Tor Browser."""
# Human-readable operating system names, keyed by the internal OS codes
# used in parsed requests (see TwitterBot.parse_text / parse_request).
OS = {
    'osx': 'Mac OS X',
    'linux': 'Linux',
    'windows': 'Windows'
}
class ConfigError(Exception):
    """Raised when a configuration file is missing or cannot be parsed."""
class InternalError(Exception):
    """Raised when a lower layer (blacklist, core) fails internally."""
class GetTorStreamListener(tweepy.StreamListener):
    """ Basic listener for Twitter's streaming API."""
    def __init__(self, bot):
        # Keep a handle to the owning TwitterBot; its authenticated tweepy
        # API object is what the listener base class needs.
        self.bot = bot
        super(GetTorStreamListener, self).__init__(self.bot.api)
    def on_direct_message(self, status):
        """ Right now we only care about direct messages. """
        # Skip DMs sent by the bot account itself to avoid reply loops.
        if status.direct_message['sender']['id_str'] != self.bot.bot_info.id_str:
            self.bot.parse_request(status.direct_message)
class TwitterBot(object):
    """ Receive and reply requests via Twitter.

    Reads credentials and paths from an INI-style config file, listens for
    direct messages, and replies with Tor Browser download links, mirror
    lists, or help text.  Note: self.api / self.auth / self.bot_info are
    only created in start() -- parse_request() must not run before it.
    """
    def __init__(self, cfg=None):
        """ Create new object by reading a configuration file.
        :param: cfg (string) the path of the configuration file; falls back
                to 'twitter.cfg' when missing or None.
        :raises: ConfigError on unreadable/invalid configuration,
                 InternalError when the blacklist or core layer fails.
        """
        default_cfg = 'twitter.cfg'
        config = ConfigParser.ConfigParser()
        if cfg is None or not os.path.isfile(cfg):
            cfg = default_cfg
        try:
            with open(cfg) as f:
                config.readfp(f)
        except IOError:
            raise ConfigError("File %s not found!" % cfg)
        try:
            # Twitter API credentials.
            self.api_key = config.get('access_config', 'api_key')
            self.api_secret = config.get('access_config', 'api_secret')
            self.access_token = config.get('access_config', 'access_token')
            self.token_secret = config.get('access_config', 'token_secret')
            # Path to the plain-text mirrors list served on request.
            self.mirrors = config.get('general', 'mirrors')
            self.i18ndir = config.get('i18n', 'dir')
            logdir = config.get('log', 'dir')
            logfile = os.path.join(logdir, 'twitter.log')
            loglevel = config.get('log', 'level')
            # Rate limiting: at most bl_max_request requests per user per
            # bl_wait_time window (enforced by the blacklist module).
            blacklist_cfg = config.get('blacklist', 'cfg')
            self.bl = blacklist.Blacklist(blacklist_cfg)
            self.bl_max_request = config.get('blacklist', 'max_requests')
            self.bl_max_request = int(self.bl_max_request)
            self.bl_wait_time = config.get('blacklist', 'wait_time')
            self.bl_wait_time = int(self.bl_wait_time)
            core_cfg = config.get('general', 'core_cfg')
            self.core = core.Core(core_cfg)
        except ConfigParser.Error as e:
            raise ConfigError("Configuration error: %s" % str(e))
        except blacklist.ConfigError as e:
            raise InternalError("Blacklist error: %s" % str(e))
        except core.ConfigError as e:
            raise InternalError("Core error: %s" % str(e))
        # logging: send this module's records to a dedicated file.
        log = logging.getLogger(__name__)
        logging_format = utils.get_logging_format()
        date_format = utils.get_date_format()
        formatter = logging.Formatter(logging_format, date_format)
        log.info('Redirecting Twitter logging to %s' % logfile)
        logfileh = logging.FileHandler(logfile, mode='a+')
        logfileh.setFormatter(formatter)
        logfileh.setLevel(logging.getLevelName(loglevel))
        log.addHandler(logfileh)
        self.log = log
    def _is_blacklisted(self, username):
        """Check if a user is blacklisted.
        :param: username (string) the username (hashed before lookup).
        :return: True if the username is blacklisted, False otherwise.
        """
        # Only a hash of the username is stored, never the name itself.
        hashed_username = utils.get_sha256(username)
        try:
            self.bl.is_blacklisted(
                hashed_username,
                'Twitter',
                self.bl_max_request,
                self.bl_wait_time
            )
            return False
        except blacklist.BlacklistError as e:
            return True
    def _get_msg(self, msgid, lc):
        """Get message identified by msgid in a specific locale.
        Params: msgid: the identifier of a string.
                lc: the locale.
        Return: a string containing the given message.
        :raises: ConfigError when the translation catalog cannot be read.
        """
        try:
            t = gettext.translation(lc, self.i18ndir, languages=[lc])
            _ = t.ugettext
            msgstr = _(msgid)
            return msgstr
        except IOError as e:
            raise ConfigError("%s" % str(e))
    def parse_text(self, msg):
        """ Parse the text part of a message.
        Split the message in words and look for patterns for locale,
        operating system and mirrors requests.
        :param: msg (string) the message received.
        :return: (dict) with 'lc', 'os' and 'type' keys describing the
                 request ('type' is one of 'links', 'mirrors', 'help').
        """
        # core knows what OS are supported
        supported_os = self.core.get_supported_os()
        supported_lc = self.core.get_supported_lc()
        # default values
        req = {}
        req['lc'] = 'en'
        req['os'] = None
        req['type'] = 'help'
        found_lc = False
        found_os = False
        found_mirrors = False
        # analyze every word
        words = re.split('\s+', msg.strip())
        for word in words:
            # look for lc and os
            if not found_lc:
                for lc in supported_lc:
                    if re.match(lc, word, re.IGNORECASE):
                        found_lc = True
                        req['lc'] = lc
            if not found_os:
                # NOTE(review): the loop variable shadows the `os` module
                # inside this method; harmless here but worth renaming.
                for os in supported_os:
                    if re.match(os, word, re.IGNORECASE):
                        found_os = True
                        req['os'] = os
                        req['type'] = 'links'
            # mirrors
            if not found_mirrors:
                if re.match("mirrors?", word, re.IGNORECASE):
                    found_mirrors = True
                    req['type'] = 'mirrors'
            if (found_lc and found_os) or (found_lc and found_mirrors):
                break
        return req
    def parse_request(self, dm):
        """ Process the request received.
        Check if the user is not blacklisted and then check the body of
        the message to find out what is asking.
        :param: dm (status.direct_message) the direct message object received
                via Twitter API.
        """
        sender_id = dm['sender']['id_str']
        msg = dm['text'].strip().lower()
        bogus_req = False
        req = None
        # NOTE(review): `status` is assigned but never used.
        status = ''
        try:
            if self._is_blacklisted(str(sender_id)):
                self.log.info('blacklist; none; none')
                bogus_req = True
            if not bogus_req:
                self.log.debug("Request seems legit, let's parse it")
                # let's try to guess what the user is asking
                req = self.parse_text(str(msg))
                # possible options: links, mirrors, help
                if req['type'] == 'links':
                    self.log.info('links; %s; %s' % (req['os'], req['lc']))
                    links = self.core.get_links(
                        'twitter', req['os'], req['lc']
                    )
                    reply = self._get_msg('links', 'en')
                    reply = reply % (OS[req['os']], links)
                elif req['type'] == 'mirrors':
                    self.log.info('mirrors; none; %s' % req['lc'])
                    reply = self._get_msg('mirrors', 'en')
                    try:
                        with open(self.mirrors, "r") as list_mirrors:
                            mirrors = list_mirrors.read()
                        reply = reply % mirrors
                    except IOError as e:
                        # Mirrors file unreadable: apologize instead.
                        reply = self._get_msg('mirrors_unavailable', 'en')
                else:
                    self.log.info('help; none; %s' % req['lc'])
                    reply = self._get_msg('help', 'en')
                self.api.send_direct_message(
                    user_id=sender_id,
                    text=reply
                )
        except (core.ConfigError, core.InternalError) as e:
            # if core failes, send the user an error message, but keep going
            self.log.error("Something went wrong internally: %s" % str(e))
            # NOTE(review): this error reply is built but never sent -- the
            # send_direct_message call above is inside the try block, so the
            # user gets no message at all on this path.
            reply = self._get_msg('internal_error', 'en')
    def start(self):
        """ Start the bot for handling requests.
        Authenticates against Twitter, then blocks on the user stream,
        dispatching events to GetTorStreamListener.
        """
        self.auth = tweepy.OAuthHandler(
            self.api_key,
            self.api_secret
        )
        self.auth.set_access_token(
            self.access_token,
            self.token_secret
        )
        self.api = tweepy.API(self.auth)
        self.bot_info = self.api.me()
        stream = tweepy.Stream(
            auth=self.api.auth,
            listener=GetTorStreamListener(self)
        )
        stream.userstream()
| |
# Calculates and optionally plots the entropy of input files.
import os
import math
import zlib
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
class Entropy(Module):
    """Calculates and optionally plots the entropy of input files."""

    # Axis labels/units for the plot.
    XLABEL = 'Offset'
    YLABEL = 'Entropy'
    XUNITS = 'B'
    YUNITS = 'E'

    # Width (pixels) and image format used when saving plots to disk.
    FILE_WIDTH = 1024
    FILE_FORMAT = 'png'

    # Marker colors cycled through when annotating other modules' results.
    COLORS = ['r', 'g', 'c', 'b', 'm']

    DEFAULT_BLOCK_SIZE = 1024
    DEFAULT_DATA_POINTS = 2048

    DEFAULT_TRIGGER_HIGH = .95
    DEFAULT_TRIGGER_LOW = .85

    TITLE = "Entropy Analysis"
    ORDER = 8

    # TODO: Add --dpoints option to set the number of data points?
    CLI = [
        Option(short='E',
               long='entropy',
               kwargs={'enabled': True},
               description='Calculate file entropy'),
        Option(short='F',
               long='fast',
               kwargs={'use_zlib': True},
               description='Use faster, but less detailed, entropy analysis'),
        Option(short='J',
               long='save',
               kwargs={'save_plot': True},
               description='Save plot as a PNG'),
        Option(short='Q',
               long='nlegend',
               kwargs={'show_legend': False},
               description='Omit the legend from the entropy plot graph'),
        Option(short='N',
               long='nplot',
               kwargs={'do_plot': False},
               description='Do not generate an entropy plot graph'),
        Option(short='H',
               long='high',
               type=float,
               kwargs={'trigger_high': DEFAULT_TRIGGER_HIGH},
               description='Set the rising edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_HIGH),
        Option(short='L',
               long='low',
               type=float,
               kwargs={'trigger_low': DEFAULT_TRIGGER_LOW},
               description='Set the falling edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_LOW),
    ]

    KWARGS = [
        Kwarg(name='enabled', default=False),
        Kwarg(name='save_plot', default=False),
        Kwarg(name='trigger_high', default=DEFAULT_TRIGGER_HIGH),
        Kwarg(name='trigger_low', default=DEFAULT_TRIGGER_LOW),
        Kwarg(name='use_zlib', default=False),
        Kwarg(name='display_results', default=True),
        Kwarg(name='do_plot', default=True),
        Kwarg(name='show_legend', default=True),
        Kwarg(name='block_size', default=0),
    ]

    # Run this module last so that it can process all other module's results
    # and overlay them on the entropy graph
    PRIORITY = 0

    def init(self):
        """Prepare for analysis: pick the entropy algorithm, collect other
        modules' results as plot markers, and resolve the block size."""
        self.HEADER[-1] = "ENTROPY"
        self.max_description_length = 0
        self.file_markers = {}

        # --fast trades accuracy (Shannon) for speed (zlib compression ratio).
        if self.use_zlib:
            self.algorithm = self.gzip
        else:
            self.algorithm = self.shannon

        # Get a list of all other module's results to mark on the entropy graph
        for (module, obj) in iterator(self.modules):
            for result in obj.results:
                if result.plot and result.file and result.description:
                    description = result.description.split(',')[0]

                    if not has_key(self.file_markers, result.file.name):
                        self.file_markers[result.file.name] = []

                    # Track the longest description for sizing the legend.
                    if len(description) > self.max_description_length:
                        self.max_description_length = len(description)

                    self.file_markers[result.file.name].append(
                        (result.offset, description))

        # If other modules have been run and they produced results, don't spam
        # the terminal with entropy results
        if self.file_markers:
            self.display_results = False

        # block_size == 0 means "not set on the command line"; fall back to
        # the global block option, else defer sizing to per-file logic (None).
        if not self.block_size:
            if self.config.block:
                self.block_size = self.config.block
            else:
                self.block_size = None

    def _entropy_sigterm_handler(self, *args):
        # Fixed: replaced an unprofessional debug message.
        print("Entropy analysis terminated.")

    def run(self):
        # If generating a graphical plot, this function will never return, as it invokes
        # pg.exit. Calling pg.exit is pretty much required, but pg.exit calls os._exit in
        # order to work around QT cleanup issues.
        self._run()

    def _run(self):
        """Analyze every pending file, then display or save the plot."""
        # Sanity check and warning if pyqtgraph isn't found
        if self.do_plot:
            try:
                import pyqtgraph as pg
            except ImportError:
                binwalk.core.common.warning(
                    "Failed to import pyqtgraph module, visual entropy graphing will be disabled")
                self.do_plot = False

        for fp in iter(self.next_file, None):
            if self.display_results:
                self.header()

            self.calculate_file_entropy(fp)

            if self.display_results:
                self.footer()

        if self.do_plot:
            # Interactive display only when not saving to disk; either way,
            # pg.exit() (which calls os._exit) terminates the process.
            if not self.save_plot:
                from pyqtgraph.Qt import QtGui
                QtGui.QApplication.instance().exec_()
            pg.exit()

    def calculate_file_entropy(self, fp):
        """Walk fp block by block, record each block's entropy as a result,
        and (in non-verbose mode) report only rising/falling entropy edges."""
        # Tracks the last displayed rising/falling edge (0 for falling, 1 for
        # rising, None if nothing has been printed yet)
        last_edge = None
        # Auto-reset the trigger; if True, an entropy above/below
        # self.trigger_high/self.trigger_low will be printed
        trigger_reset = True

        # Clear results from any previously analyzed files
        self.clear(results=True)

        # If -K was not specified, calculate the block size to create
        # DEFAULT_DATA_POINTS data points
        if self.block_size is None:
            block_size = fp.size / self.DEFAULT_DATA_POINTS
            # Round up to the nearest DEFAULT_BLOCK_SIZE (1024)
            block_size = int(
                block_size + ((self.DEFAULT_BLOCK_SIZE - block_size) % self.DEFAULT_BLOCK_SIZE))
        else:
            block_size = self.block_size

        # Make sure block size is greater than 0
        if block_size <= 0:
            block_size = self.DEFAULT_BLOCK_SIZE

        binwalk.core.common.debug("Entropy block size (%d data points): %d" %
                                  (self.DEFAULT_DATA_POINTS, block_size))

        while True:
            file_offset = fp.tell()

            (data, dlen) = fp.read_block()
            if not data:
                break

            i = 0
            while i < dlen:
                entropy = self.algorithm(data[i:i + block_size])
                display = self.display_results
                description = "%f" % entropy

                if not self.config.verbose:
                    # Hysteresis: re-arm the trigger once entropy moves back
                    # across the opposite threshold from the last edge shown.
                    if last_edge in [None, 0] and entropy > self.trigger_low:
                        trigger_reset = True
                    elif last_edge in [None, 1] and entropy < self.trigger_high:
                        trigger_reset = True

                    if trigger_reset and entropy >= self.trigger_high:
                        description = "Rising entropy edge (%f)" % entropy
                        display = self.display_results
                        last_edge = 1
                        trigger_reset = False
                    elif trigger_reset and entropy <= self.trigger_low:
                        description = "Falling entropy edge (%f)" % entropy
                        display = self.display_results
                        last_edge = 0
                        trigger_reset = False
                    else:
                        display = False
                        description = "%f" % entropy

                # Record the data point (unused return value dropped).
                self.result(offset=(file_offset + i),
                            file=fp,
                            entropy=entropy,
                            description=description,
                            display=display)

                i += block_size

        if self.do_plot:
            self.plot_entropy(fp.name)

    def shannon(self, data):
        '''
        Performs a Shannon entropy analysis on a given block of data.

        Returns the entropy normalized to [0, 1] (bits per byte / 8).
        '''
        entropy = 0

        if data:
            length = len(data)

            # NOTE(review): assumes data is a str whose elements are
            # single-character strings (binwalk block data, Python 2
            # semantics) -- confirm under Python 3, where iterating bytes
            # yields ints and chr() keys would not match.
            seen = dict(((chr(x), 0) for x in range(0, 256)))
            for byte in data:
                seen[byte] += 1

            for x in range(0, 256):
                p_x = float(seen[chr(x)]) / length
                if p_x > 0:
                    entropy -= p_x * math.log(p_x, 2)

        return (entropy / 8)

    def gzip(self, data, truncate=True):
        '''
        Performs an entropy analysis based on zlib compression ratio.

        This is faster than the shannon entropy analysis, but not as accurate.
        '''
        # Guard against ZeroDivisionError on an empty block.
        if not data:
            return 0.0

        # Entropy is a simple ratio of: <zlib compressed size> / <original
        # size>
        e = float(len(zlib.compress(str2bytes(data), 9))) / float(len(data))

        # Incompressible data can "compress" to slightly larger than the
        # original; clamp to the [0, 1] entropy range.
        if truncate and e > 1.0:
            e = 1.0

        return e

    def plot_entropy(self, fname):
        """Plot the computed entropy curve for fname, overlay markers from
        other modules' results, and either display or save the graph."""
        try:
            import numpy as np
            import pyqtgraph as pg
            import pyqtgraph.exporters as exporters
        except ImportError:
            return

        i = 0
        x = []
        y = []
        plotted_colors = {}

        for r in self.results:
            x.append(r.offset)
            y.append(r.entropy)

        plt = pg.plot(title=fname, clear=True)

        # Disable auto-ranging of the Y (entropy) axis, as it
        # can cause some very un-intuitive graphs, particularly
        # for files with only high-entropy data.
        plt.setYRange(0, 1)

        if self.show_legend and has_key(self.file_markers, fname):
            plt.addLegend(size=(self.max_description_length * 10, 0))

            for (offset, description) in self.file_markers[fname]:
                # If this description has already been plotted at a different offset, we need to
                # use the same color for the marker, but set the description to None to prevent
                # duplicate entries in the graph legend.
                #
                # Else, get the next color and use it to mark descriptions of
                # this type.
                if has_key(plotted_colors, description):
                    color = plotted_colors[description]
                    description = None
                else:
                    color = self.COLORS[i]
                    plotted_colors[description] = color

                    i += 1
                    if i >= len(self.COLORS):
                        i = 0

                plt.plot(x=[offset, offset], y=[0, 1.1],
                         name=description, pen=pg.mkPen(color, width=2.5))

        # Plot data points
        plt.plot(x, y, pen='y')

        # TODO: legend is not displayed properly when saving plots to disk
        if self.save_plot:
            # Save graph to CWD
            out_file = os.path.join(os.getcwd(), os.path.basename(fname))

            # exporters.ImageExporter is different in different versions of
            # pyqtgraph
            try:
                exporter = exporters.ImageExporter(plt.plotItem)
            except TypeError:
                exporter = exporters.ImageExporter.ImageExporter(plt.plotItem)
            exporter.parameters()['width'] = self.FILE_WIDTH
            exporter.export(
                binwalk.core.common.unique_file_name(out_file, self.FILE_FORMAT))
        else:
            plt.setLabel('left', self.YLABEL, units=self.YUNITS)
            plt.setLabel('bottom', self.XLABEL, units=self.XUNITS)
| |
import unittest
import visgraph.graphcore as v_graphcore
# Expected root-to-leaf node-id paths through sample graph 1 (see
# GraphCoreTest.getSampleGraph1: 'a' is the hierarchy root).
s1paths = [
    ('a','c','f'),
    ('a','b','d','f'),
    ('a','b','e','f'),
]

# Expected hierarchical paths for sample graph 2 (see getSampleGraph2).
s2paths = [
    ('a','b'),
    ('a','b','c'),
]
class GraphCoreTest(unittest.TestCase):
    """Unit tests for visgraph.graphcore Graph/HierGraph primitives:
    hierarchical path enumeration, node/edge properties, sub-clusters,
    and formNode."""

    def getSampleGraph1(self):
        # simple branching/merging graph
        g = v_graphcore.HierGraph()
        g.addHierRootNode('a')
        for c in ('b','c','d','e','f'):
            g.addNode(c)
        g.addEdgeByNids('a','b')
        g.addEdgeByNids('a','c')
        g.addEdgeByNids('c','f')
        g.addEdgeByNids('b','d')
        g.addEdgeByNids('b','e')
        g.addEdgeByNids('d','f')
        g.addEdgeByNids('e','f')
        return g

    def getSampleGraph2(self):
        # primitive loop graph (note the 'b'->'b' self edge)
        g = v_graphcore.HierGraph()
        g.addHierRootNode('a')
        for c in ('b','c'):
            g.addNode(c)
        g.addEdgeByNids('a','b')
        g.addEdgeByNids('b','b')
        g.addEdgeByNids('b','c')
        return g

    def getSampleGraph3(self):
        # flat loop graph ('b' <-> 'c' cycle)
        g = v_graphcore.HierGraph()
        g.addHierRootNode('a')
        for c in ('b','c','d'):
            g.addNode(c)
        g.addEdgeByNids('a','b')
        g.addEdgeByNids('b','c')
        g.addEdgeByNids('c','b')
        g.addEdgeByNids('c','d')
        return g

    def test_visgraph_pathscount(self):
        # Loops must not inflate the hierarchical path count.
        g = self.getSampleGraph1()
        self.assertEqual(g.getHierPathCount(), 3)
        g = self.getSampleGraph2()
        self.assertEqual(g.getHierPathCount(), 1)
        g = self.getSampleGraph3()
        self.assertEqual(g.getHierPathCount(), 1)

    def assertPathsFrom(self, g, paths):
        # Helper: getHierPathsFrom(root) must yield exactly `paths` (as
        # node-id tuples), each exactly once.
        allpaths = set(paths)
        root = g.getNode('a')
        for path in g.getHierPathsFrom(root):
            # path elements are (node, edge) pairs; node[0] is the node id.
            nids = tuple([ n[0] for (n,e) in path])
            self.assertIn(nids,allpaths)
            allpaths.remove(nids)
        self.assertFalse(allpaths)

    def test_visgraph_pathsfrom(self):
        self.assertPathsFrom( self.getSampleGraph1(), s1paths)
        self.assertPathsFrom( self.getSampleGraph2(), s2paths)

    def assertPathsTo(self, g, nid, paths):
        # Helper: getHierPathsTo(node) must yield exactly `paths`, each once.
        allpaths = set(paths)
        node = g.getNode(nid)
        for path in g.getHierPathsTo(node):
            nids = tuple([ n[0] for (n,e) in path])
            self.assertIn(nids,allpaths)
            allpaths.remove(nids)
        self.assertFalse(allpaths)

    def test_visgraph_pathsto(self):
        '''
        Paths that end at a given node.
        '''
        self.assertPathsTo( self.getSampleGraph1(), 'f', s1paths)
        self.assertPathsTo( self.getSampleGraph2(), 'c', [ ('a','b','c'), ])

    def assertPathsThru(self, g, nid, paths):
        # Helper: getHierPathsThru(node) must yield exactly `paths`, each once.
        allpaths = set(paths)
        node = g.getNode(nid)
        for path in g.getHierPathsThru(node):
            nids = tuple([ n[0] for (n,e) in path])
            self.assertIn(nids,allpaths)
            allpaths.remove(nids)
        self.assertFalse(allpaths)

    def test_visgraph_paththru(self):
        self.assertPathsThru( self.getSampleGraph1(),'b',[('a','b','d','f'),('a','b','e','f')])
        self.assertPathsThru( self.getSampleGraph2(),'b',[('a','b'),('a','b','c'),])

    def test_visgraph_nodeprops(self):
        # Node tuples are (nid, propdict); set/del must update lookups.
        g = v_graphcore.Graph()
        a = g.addNode('a')
        g.setNodeProp(a,'foo','bar')
        self.assertEqual(a[1].get('foo'), 'bar')
        self.assertTrue( a in g.getNodesByProp('foo') )
        self.assertTrue( a in g.getNodesByProp('foo','bar') )
        self.assertFalse( a in g.getNodesByProp('foo','blah') )
        g.delNodeProp(a,'foo')
        self.assertFalse( a in g.getNodesByProp('foo') )
        self.assertFalse( a in g.getNodesByProp('foo','bar') )
        self.assertIsNone(a[1].get('foo'))

    def test_visgraph_edgeprops(self):
        # Edge tuples carry their propdict at index 3.
        g = v_graphcore.Graph()
        a = g.addNode('a')
        b = g.addNode('b')
        e = g.addEdge(a,b)
        g.setEdgeProp(e,'foo','bar')
        self.assertEqual(e[3].get('foo'),'bar')
        self.assertTrue( e in g.getEdgesByProp('foo') )
        self.assertTrue( e in g.getEdgesByProp('foo','bar') )
        self.assertFalse( e in g.getEdgesByProp('foo','blah') )
        g.delEdgeProp(e,'foo')
        self.assertFalse( e in g.getEdgesByProp('foo') )
        self.assertFalse( e in g.getEdgesByProp('foo','bar') )
        self.assertIsNone(e[3].get('foo'))

    def test_visgraph_subcluster(self):
        # Two disconnected components must split into two cluster graphs.
        g = v_graphcore.Graph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        e = g.addNode('e')
        r = g.addNode('f')
        g.addEdgeByNids('a','b')
        g.addEdgeByNids('a','c')
        g.addEdgeByNids('d','e')
        g.addEdgeByNids('d','f')
        subs = g.getClusterGraphs()
        self.assertEqual(len(subs),2)
        subtests = [ set(['a','b','c']), set(['d','e','f']) ]
        for sub in subs:
            if sub.getNode('a'):
                self.assertIsNone(sub.getNode('d'))
                self.assertIsNone(sub.getNode('e'))
                self.assertIsNone(sub.getNode('f'))
                # edge tuples: index 2 is the destination node id
                akids = [ edge[2] for edge in sub.getRefsFromByNid('a') ]
                self.assertTrue('b' in akids )
                self.assertTrue('c' in akids )
            elif sub.getNode('d'):
                self.assertIsNone(sub.getNode('a'))
                self.assertIsNone(sub.getNode('b'))
                self.assertIsNone(sub.getNode('c'))
                dkids = [ edge[2] for edge in sub.getRefsFromByNid('d') ]
                self.assertTrue('e' in dkids )
                self.assertTrue('f' in dkids )
            else:
                raise Exception('Invalid SubCluster!')

    def test_visgraph_formnode(self):
        # formNode: ctor runs only on first creation; later calls return the
        # existing node unchanged.
        g = v_graphcore.Graph()
        def wootctor(n):
            g.setNodeProp(n,'lul',1)
        n1 = g.formNode('woot', 10, ctor=wootctor)
        self.assertEqual( n1[1].get('lul'), 1 )
        g.setNodeProp(n1, 'lul', 2)
        g.setNodeProp(n1, 'foo', 'bar')
        n2 = g.formNode('woot', 20, ctor=wootctor)
        n3 = g.formNode('woot', 10, ctor=wootctor)
        self.assertEqual( n1[0], n3[0] )
        self.assertEqual( n1[1].get('lul'), 2)
        self.assertEqual( n3[1].get('foo'), 'bar')
        self.assertNotEqual( n1[0], n2[0])
| |
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import glob
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name
    self.description = description
    self.solution_version = solution_version
    self.project_version = project_version
    self.flat_sln = flat_sln
    self.uses_vcxproj = uses_vcxproj
    self.path = path
    self.sdk_based = sdk_based
    self.default_toolset = default_toolset

  def ShortName(self):
    """Get the short version name (e.g. '2010e')."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    """Whether projects are flattened into the solution."""
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    return '.vcxproj' if self.uses_vcxproj else '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool."""
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    # |target_arch| should be either 'x86' or 'x64'.
    assert target_arch in ('x86', 'x64')

    # In an SDK command-line environment, use the setup script from the SDK.
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]

    host_is_amd64 = (
        os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
        os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64')

    if target_arch == 'x86':
      if self.short_name == '2013' and host_is_amd64:
        # VS2013 non-Express has a x64-x86 cross that we want to prefer.
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      return [os.path.normpath(
          os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]

    # x64: prefer the 64-on-64 compiler when not an express edition on a
    # 64-bit host; otherwise fall back to the x86->x64 cross compiler.
    arg = 'amd64' if (self.short_name[-1] != 'e' and host_is_amd64) else 'x86_amd64'
    return [os.path.normpath(
        os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
  r"""Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32. Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.

  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  try:
    return _RegistryQueryBase('Sysnative', key, value)
  except OSError as e:
    # Sysnative is absent: retry from System32; re-raise anything else.
    if e.errno != errno.ENOENT:
      raise
    return _RegistryQueryBase('System32', key, value)
def _RegistryGetValueUsingWinReg(key, value):
  """Use the _winreg module to obtain the value of a registry key.

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.  Throws
    ImportError if _winreg is unavailable.
  """
  try:
    import _winreg as winreg  # python 2
  except ImportError:
    import winreg  # python 3

  root, subkey = key.split('\\', 1)
  assert root == 'HKLM'  # Only need HKLM for now.
  try:
    with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
      return winreg.QueryValueEx(hkey, value)[0]
  except WindowsError:
    return None
def _RegistryGetValue(key, value):
  """Use _winreg or reg.exe to obtain the value of a registry key.

  Using _winreg is preferable because it solves an issue on some corporate
  environments where access to reg.exe is locked down. However, we still need
  to fallback to reg.exe for the case where the _winreg module is not available
  (for example in cygwin python).

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  try:
    return _RegistryGetValueUsingWinReg(key, value)
  except ImportError:
    # No winreg module available (e.g. cygwin python): fall back to reg.exe.
    pass

  output = _RegistryQuery(key, value)
  if not output:
    return None

  # reg.exe output looks like "    <name>    REG_SZ    <value>\r\n".
  match = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
  return match.group(1) if match else None
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions python will throw a error.
  """
  if path:
    path = os.path.normpath(path)

  # Per-version construction parameters:
  # (description, solution_version, project_version,
  #  flat_sln, uses_vcxproj, default_toolset)
  specs = {
      '2015':  ('Visual Studio 2015',       '12.00', '14.0', False, True,  'v140'),
      '2013':  ('Visual Studio 2013',       '13.00', '12.0', False, True,  'v120'),
      '2013e': ('Visual Studio 2013',       '13.00', '12.0', True,  True,  'v120'),
      '2012':  ('Visual Studio 2012',       '12.00', '4.0',  False, True,  'v110'),
      '2012e': ('Visual Studio 2012',       '12.00', '4.0',  True,  True,  'v110'),
      '2010':  ('Visual Studio 2010',       '11.00', '4.0',  False, True,  None),
      '2010e': ('Visual C++ Express 2010',  '11.00', '4.0',  True,  True,  None),
      '2008':  ('Visual Studio 2008',       '10.00', '9.00', False, False, None),
      '2008e': ('Visual Studio 2008',       '10.00', '9.00', True,  False, None),
      '2005':  ('Visual Studio 2005',       '9.00',  '8.00', False, False, None),
      '2005e': ('Visual Studio 2005',       '9.00',  '8.00', True,  False, None),
  }

  # Unknown names raise KeyError, matching the original dict lookup.
  (description, solution_version, project_version,
   flat_sln, uses_vcxproj, default_toolset) = specs[str(name)]

  return VisualStudioVersion(str(name),
                             description,
                             solution_version=solution_version,
                             project_version=project_version,
                             flat_sln=flat_sln,
                             uses_vcxproj=uses_vcxproj,
                             path=path,
                             sdk_based=sdk_based,
                             default_toolset=default_toolset)
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.

  Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-10 are considered.
  Possibilities are:
    2005(e) - Visual Studio 2005 (8)
    2008(e) - Visual Studio 2008 (9)
    2010(e) - Visual Studio 2010 (10)
    2012(e) - Visual Studio 2012 (11)
    2013(e) - Visual Studio 2013 (12)
    2015    - Visual Studio 2015 (14)
  Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005',
      '9.0': '2008',
      '10.0': '2010',
      '11.0': '2012',
      '12.0': '2013',
      '14.0': '2015',
  }
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    install_keys = [
        r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
        r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
        r'HKLM\Software\Microsoft\VCExpress\%s' % version,
        r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version,
    ]
    for reg_key in install_keys:
      install_dir = _RegistryGetValue(reg_key, 'InstallDir')
      if not install_dir:
        continue
      install_dir = _ConvertToCygpath(install_dir)
      # Full editions ship devenv.exe; express editions ship *express.exe.
      full_path = os.path.join(install_dir, 'devenv.exe')
      express_glob = os.path.join(install_dir, '*express.exe')
      if not force_express and os.path.exists(full_path):
        versions.append(_CreateVersion(version_to_year[version],
                                       os.path.join(install_dir, '..', '..')))
      elif glob.glob(express_glob):
        versions.append(_CreateVersion(version_to_year[version] + 'e',
                                       os.path.join(install_dir, '..', '..')))

    # The old method above does not work when only SDK is installed.
    sdk_keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
                r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
    for reg_key in sdk_keys:
      vc_dir = _RegistryGetValue(reg_key, version)
      if not vc_dir:
        continue
      vc_dir = _ConvertToCygpath(vc_dir)
      if version != '14.0':  # There is no Express edition for 2015.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
                                       os.path.join(vc_dir, '..'),
                                       sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, the environment variable overrides auto-detection.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')

  # Registry versions to probe, in descending order of preference.
  version_map = {
      'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
      '2005': ('8.0',),
      '2005e': ('8.0',),
      '2008': ('9.0',),
      '2008e': ('9.0',),
      '2010': ('10.0',),
      '2010e': ('10.0',),
      '2012': ('11.0',),
      '2012e': ('11.0',),
      '2013': ('12.0',),
      '2013e': ('12.0',),
      '2015': ('14.0',),
  }

  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to a particular version (e.g. 2010e).')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)

  version = str(version)
  detected = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if detected:
    return detected[0]

  if not allow_fallback:
    raise ValueError('Could not locate Visual Studio installation.')
  # Default to 2005 if we couldn't find anything
  fallback = '2005' if version == 'auto' else version
  return _CreateVersion(fallback, None)
| |
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import random
import math
##############################################
def sepLine(w, x):
    """Return the y-coordinate on the separating line w0 + w1*x + w2*y = 0
    for a given x, using weight vector w = (w0, w1, w2)."""
    return -(w[0] + w[1] * x) / w[2]
#end
def drawSepLine(w, minX, maxX):
    """Plot the separating line defined by weight vector w over the integer
    x range [minX, maxX)."""
    xs = range(minX, maxX)
    ys = [sepLine(w, xv) for xv in xs]
    plt.plot(xs, ys)
#end drawSepLine
##############################################
"""
"""
def theta(x):
    """Logistic sigmoid: 1 / (1 + exp(-x)).

    Fixed: the original used a bare ``except:`` that silently returned 0 for
    ANY error (including TypeError on bad input).  Only OverflowError is a
    legitimate case: math.exp(-x) overflows for large negative x, where the
    sigmoid limit is 0.  (For large positive x, exp(-x) underflows to 0.0
    without raising, correctly yielding 1.0.)
    """
    try:
        return 1.0 / (1 + math.exp(-x))
    except OverflowError:
        return 0.
#end
def h(w, x):
    """Hypothesis function: sigmoid of the inner product <w, x>."""
    z = np.inner(w, x)
    return theta(z)
#end
"""
Get the probability.
Note: the x has the constant item.
The return is 1 or -1.
if 1, then the x belong to w.
"""
def genProbability(x, w):
return h(w, x);
#emd
##############################################
# Training samples: each row is (x, y, class_label).

#diamond (label 1.)
gdiamond=np.array([
    [1.0, 12.0, 1.],
    [1.5, 12.5, 1.],
    [3.5, 11.5, 1.],
    [4.5, 14.0, 1.],
    [5.5, 16.0, 1.],
    [6.0, 11.5, 1.],
    [7.0, 10.5, 1.]
])
#rectangle (label 2.)
grectangle=np.array([
    [9.5, 13.0, 2.],
    [10.0, 11.5, 2.],
    [10.5, 11.5, 2.],
    [11.0, 13.0, 2.],
    [12.0, 12.0, 2.],
    [12.5, 12.5, 2.],
    [13.0, 11.0, 2.],
    [14.0, 10.0, 2.],
    [15.0, 10.5, 2.],
    [15.5, 10.6, 2.]
])
#triangle (label 3.)
gtriangle=np.array([
    [1.0, 2.5, 3.],
    [2.0, 6.0, 3.],
    [3.0, 2.0, 3.],
    [3.0, 5.0, 3.],
    [4.0, 2.2, 3.],
    [4.0, 5.5, 3.],
    [6.0, 2.0, 3.],
    [6.0, 5.5, 3.],
    [6.5, 2.0, 3.],
    [6.7, 0.5, 3.]
])
#star (label 4.)
gstar=np.array([
    [9.5, 8.5, 4.],
    [10.0, 1.5, 4.],
    [11.0, 6.0, 4.],
    [7.7, 6.0, 4.],
    [8.0, 4.5, 4.],
    [8.2, 4.0, 4.],
    [9.0, 1.5, 4.],
    [9.0, 4.5, 4.],
    [9.5, 5.0, 4.],
    [11.0, 1.5, 4.],
])

# Full training set, and a plot window padded 3 units beyond the data.
grtd = np.concatenate((gdiamond,grectangle, gtriangle, gstar))
gminX = (int)(np.min(grtd[:,:1]))-3
gmaxX = (int)(np.max(grtd[:,:1]))+3
gminY = np.min(grtd[:,1:2])-3
gmaxY = np.max(grtd[:,1:2])+3

# Test samples; the third column holds the *expected* label (0/13/24 mark
# points deliberately outside/between the trained classes).
grtestData = np.array([
    [15.0, 15.0, 2.],
    [13.0, 4.0, 4.],
    [8.0, 8.0, 0.],
    [10.0, 9.0, 0.],
    [1.5, 7.0, 13.],
    [2.0, 6.0, 13.],
    [16.0, 7.0, 24.],
])

###plot the training data, one marker style per class
plt.xlim( (gminX, gmaxX) )
plt.ylim( (gminY, gmaxY) )
plt.plot(gdiamond[:,:1], gdiamond[:, 1:2], '.')
plt.plot(grectangle[:,:1], grectangle[:, 1:2], '1')
plt.plot(gtriangle[:,:1], gtriangle[:, 1:2], '+')
plt.plot(gstar[:,:1], gstar[:, 1:2], '*')
################
"""
In many case, sy*np.inner(w, sx)--->+infite number-->thetav--->sum will not update-->grad will not update
How to fix this?
Here we use loop to ...
"""
def gradient_typical(td, w):
sum = 0.
num = len(td)
for idx in range(num):
sample = td[idx]
sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
thetav = theta(-sy*np.inner(w, sx));
#print("thetav is ", -sy*np.inner(w, sx), thetav)
sum += thetav*(-sy*sx)
#end for
#print("The is ", w, sum)
gradAvg = sum/num
return gradAvg
#end
"""
Use random number to ...
"""
def gradient_rand(td, w):
num = len(td)
idx = np.random.randint(0, num)
sample = td[idx]
sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
thetav = theta(-sy*np.inner(w, sx));
gradAvg = thetav*(-sy*sx)
return gradAvg
#end
"""
Here we use xxx to binary classify two class softly.
"""
def sgd(td):
maxIter=1000000
maxIter=500000
maxIter=1000
maxIter=10000
maxIter=100000
eta=0.0005
eta=0.005
eta=0.05
#The this initial value of w. td[0] include y. so we need to minus 1
w=np.zeros( len(td[0])-1 );
#generate gradient threshold
grad = gradient_rand(td, w)
gradThres = np.zeros(grad.shape)
gradThres[:]=0.0001
curIter=0
while(curIter<maxIter):
curIter = curIter +1;
#print("The curIter is ", curIter)
grad = gradient_rand(td, w)
tmp= np.less_equal( np.abs(grad), gradThres)
if(np.all(tmp)):
#Now we get a w that may be work, check this ...
newGrad=gradient_typical(td, w)
newTmp= np.less_equal( np.abs(newGrad), gradThres)
if(np.all(newTmp)):
break;
#end if
#print("The grad is ", grad)
#print("The w is ", w)
#drawSepLine(w, minX, maxX);
w = w - eta * grad
#end while
return w
#end
################
"""
if the y in each element of nrtd is not equal to label,
then set it as -1, thus we form the train data as one versus all.
Note:should set as -1 rather than 0!!!! refer to our current formula.
"""
def formOneVesusAll(td, label):
ntd = td.copy()
labelIdx = len(ntd[0])-1
for e in ntd:
if(e[labelIdx]!=label):
e[labelIdx]=-1 #IMPORTANT
else:
e[labelIdx]=1 #IMPORTANT
#end
return ntd
#end
# Class labels present in the training data; these could be derived from
# grtd[:, 2:3] but are hard-coded here for simplicity.
labels=[1,2,3,4]
glabels = labels
"""
Use the one versus all to calculate all w. store all w in ws
"""
def oneVersusAll(td, ws):
pass;
for label in labels:
nrtd = formOneVesusAll(td, label);
w = sgd(nrtd)
ws.append(w)
print("w for label ", label, " is ", w)
pass;
#end for
#end
################
#add constant (bias) column x0 = 1 to the training data
x0 = np.zeros( (len(grtd), 1) )
x0[:]=1.0
gtd = np.concatenate( (x0, grtd[:,:1], grtd[:,1:2], grtd[:,2:3]), 1 )
# Train one weight vector per class (one-versus-all); results land in gw.
gw=[];
oneVersusAll(gtd, gw);
#plot the separating line for each learned classifier
for w in gw:
    print("w :", w)
    drawSepLine(w, gminX, gmaxX)
#end for
#gw : 1, 2, 3, 4
#label: 1, 2, 3, 4
#probability:
#plot test data
plt.plot(grtestData[:,:1], grtestData[:, 1:2], '_')
#update the test data with the same bias column as the training data
xt0 = np.zeros( (len(grtestData), 1) )
xt0[:]=1.0
gtestData = np.concatenate( (xt0, grtestData[:,:1], grtestData[:,1:2], grtestData[:,2:3]), 1 )
# gp collects (sample, [(label, probability), ...]) pairs per test sample.
gp=[];
#test: score every sample against every per-class classifier
for e in gtestData:
    x = e[:len(e)-1]; y=e[len(e)-1]
    msg = "For "+str(x)+" expented label:"+str(y)+", actual:"
    ps=[];
    for idx in range(len(gw)):
        w = gw[idx]
        label = glabels[idx]
        probability=genProbability(x, w)
        ps.append( (label, probability) )
        msg += str(probability) + ";";
    #end for
    gp.append( (e, ps) )
    print(msg)
#end for
#print final result for test data: the label with the highest probability
for e in gp:
    key = e[0]
    values = e[1]
    midx=0;
    for idx in range(len(values)):
        if(values[idx][1]>values[midx][1]):
            midx = idx
        #end if
    #end for
    print(key, ", (label, p)=", values[midx])
#end for
| |
# AnalogClock's base classes
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
from time import strftime, localtime
import math
import wx
from styles import *
#----------------------------------------------------------------------
# One selection flag per slot in the hand/tick collections, in positional
# order: index 0 -> HOUR, 1 -> MINUTE, 2 -> SECOND (flags come from styles).
_targets = [HOUR, MINUTE, SECOND]
#----------------------------------------------------------------------
class Element:
    """Base class for face, hands and tick marks.

    Holds the geometry/appearance attributes shared by every drawable
    clock part plus the polar<->rectangular helpers used by the rotated
    drawing code.
    """
    def __init__(self, idx=0, pos=None, size=None, offset=0, clocksize=None,
                 scale=1, rotate=False, kind=""):
        self.idx = idx              # position of this element within its set
        self.pos = pos              # centre point (a wx.Point once laid out)
        self.size = size
        self.offset = offset
        self.clocksize = clocksize
        self.scale = scale
        self.rotate = rotate
        self.kind = kind
        self.text = None            # set by text-based tick subclasses
        # Hour ticks are 30 degrees apart, all others 6 degrees apart.
        self.angfac = [6, 30][self.kind == "hours"]
    def _pol2rect(self, m, t):
        """Convert polar (magnitude, angle in degrees) to rectangular."""
        return m * math.cos(math.radians(t)), m * math.sin(math.radians(t))
    def _rect2pol(self, x, y):
        """Convert rectangular coords to polar (magnitude, angle in degrees)."""
        return math.hypot(x, y), math.degrees(math.atan2(y, x))
    def DrawRotated(self, dc, offset=0):
        """Hook for subclasses that support rotated drawing."""
        pass
    def DrawStraight(self, dc, offset=0):
        """Hook for subclasses that support non-rotated drawing."""
        pass
    def Draw(self, dc, offset=0):
        if self.rotate:
            self.DrawRotated(dc, offset)
        else:
            self.DrawStraight(dc, offset)
    def RecalcCoords(self, clocksize, centre, scale):
        """Hook: recompute position/size after a resize."""
        pass
    def GetSize(self):
        return self.size
    def GetOffset(self):
        return self.offset
    def GetIsRotated(self, rotate=None):
        # BUG FIX: the original required an (unused) `rotate` argument,
        # which broke no-arg callers such as TickSet.GetIsRotated();
        # the parameter is kept as an optional no-op for compatibility.
        return self.rotate
    def GetMaxSize(self, scale=1):
        # (was defined twice verbatim; the duplicate has been removed)
        return self.size * scale
    def GetScale(self):
        return self.scale
    def GetPolygon(self):
        return self.polygon
    def SetPosition(self, pos):
        self.pos = pos
    def SetSize(self, size):
        self.size = size
    def SetOffset(self, offset):
        self.offset = offset
    def SetClockSize(self, clocksize):
        self.clocksize = clocksize
    def SetScale(self, scale):
        self.scale = scale
    def SetIsRotated(self, rotate):
        # (was defined twice verbatim; the duplicate has been removed)
        self.rotate = rotate
    def SetPolygon(self, polygon):
        self.polygon = polygon
#----------------------------------------------------------------------
class ElementWithDyer(Element):
    """Base class for clock face and hands.

    Adds a Dyer that owns this element's colours/border and delegates
    all colour accessors to it.
    """
    def __init__(self, **kwargs):
        # NOTE(review): the default Dyer() is constructed even when a
        # "dyer" kwarg is supplied (Python evaluates the default eagerly).
        self.dyer = kwargs.pop("dyer", Dyer())
        Element.__init__(self, **kwargs)
    def GetFillColour(self):
        return self.dyer.GetFillColour()
    def GetBorderColour(self):
        return self.dyer.GetBorderColour()
    def GetBorderWidth(self):
        return self.dyer.GetBorderWidth()
    def GetShadowColour(self):
        return self.dyer.GetShadowColour()
    def SetFillColour(self, colour):
        self.dyer.SetFillColour(colour)
    def SetBorderColour(self, colour):
        self.dyer.SetBorderColour(colour)
    def SetBorderWidth(self, width):
        self.dyer.SetBorderWidth(width)
    def SetShadowColour(self, colour):
        self.dyer.SetShadowColour(colour)
#----------------------------------------------------------------------
class Face(ElementWithDyer):
    """Holds info about the clock face (the background circle)."""
    def __init__(self, **kwargs):
        ElementWithDyer.__init__(self, **kwargs)
    def Draw(self, dc):
        # Paint the face as one circle using this element's dyer colours.
        self.dyer.Select(dc)
        dc.DrawCircle(self.pos.x, self.pos.y, self.radius)
    def RecalcCoords(self, clocksize, centre, scale):
        # Radius fills the smaller clock dimension, shrunk by half the
        # border width so the border is not clipped at the window edge.
        self.radius = min(clocksize.Get()) / 2. - self.dyer.width / 2.
        self.pos = centre
#----------------------------------------------------------------------
class Hand(ElementWithDyer):
    """Holds info about a clock hand."""
    def __init__(self, **kwargs):
        # lenfac: fraction of the clock radius this hand should span.
        self.lenfac = kwargs.pop("lenfac")
        ElementWithDyer.__init__(self, **kwargs)
        # Default hand shape: a thin kite pointing along +y before rotation.
        self.SetPolygon([[-1, 0], [0, -1], [1, 0], [0, 4]])
    def Draw(self, dc, end, offset=0):
        # `end` is (radius, centre, angle-in-radians) computed by HandSet.
        radius, centre, r = end
        angle = math.degrees(r)
        polygon = self.polygon[:]
        # Scale y so the hand tip reaches radius * lenfac; x by size*scale.
        vscale = radius / max([y for x, y in polygon])
        for i, (x, y) in enumerate(polygon):
            x *= self.scale * self.size
            y *= vscale * self.lenfac
            # Rotate each vertex by the hand angle via polar coordinates.
            m, t = self._rect2pol(x, y)
            polygon[i] = self._pol2rect(m, t - angle)
        dc.DrawPolygon(polygon, centre.x + offset, centre.y + offset)
    def RecalcCoords(self, clocksize, centre, scale):
        self.pos = centre
        self.scale = scale
#----------------------------------------------------------------------
class TickSquare(Element):
    """Tick mark drawn as a filled square centred on its position."""
    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)
    def Draw(self, dc, offset=0):
        # The square's side grows with the element size and current scale.
        side = self.size * self.scale
        left = self.pos.x - side / 2.
        top = self.pos.y - side / 2.
        dc.DrawRectangle(left + offset, top + offset, side, side)
#----------------------------------------------------------------------
class TickCircle(Element):
    """Tick mark drawn as a filled circle centred on its position."""
    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)
    def Draw(self, dc, offset=0):
        # The circle's diameter equals the scaled element size.
        r = self.size * self.scale / 2.
        dc.DrawCircle(self.pos.x + offset, self.pos.y + offset, r)
#----------------------------------------------------------------------
class TickPoly(Element):
    """Holds info about a tick mark drawn as a polygon."""
    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)
        # Default shape: a small kite polygon.
        self.SetPolygon([[0, 1], [1, 0], [2, 1], [1, 5]])
    def _calcPolygon(self):
        """Return (scaled polygon, width, height) for the current size/scale."""
        width = max([x for x, y in self.polygon])
        height = max([y for x, y in self.polygon])
        tscale = self.size / max(width, height) * self.scale
        polygon = [(x * tscale, y * tscale) for x, y in self.polygon]
        # Recompute the bounding box after scaling.
        width = max([x for x, y in polygon])
        height = max([y for x, y in polygon])
        return polygon, width, height
    def DrawStraight(self, dc, offset=0):
        polygon, width, height = self._calcPolygon()
        # Centre the polygon's bounding box on the tick position.
        x = self.pos.x - width / 2.
        y = self.pos.y - height / 2.
        dc.DrawPolygon(polygon, x + offset, y + offset)
    def DrawRotated(self, dc, offset=0):
        polygon, width, height = self._calcPolygon()
        # Each tick sits angfac degrees further around the dial.
        angle = 360 - self.angfac * (self.idx + 1)
        r = math.radians(angle)
        # Rotate every vertex by the tick angle via polar coordinates.
        for i in range(len(polygon)):
            m, t = self._rect2pol(*polygon[i])
            t -= angle
            polygon[i] = self._pol2rect(m, t)
        # Shift so the rotated bounding box stays centred on pos.
        x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2.
        y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2.
        dc.DrawPolygon(polygon, x + offset, y + offset)
#----------------------------------------------------------------------
class TickDecimal(Element):
    """Holds info about a tick mark labelled with a decimal number."""
    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)
        # Labels are 1-based: tick idx 0 shows "1".
        self.text = "%s" % (self.idx + 1)
    def DrawStraight(self, dc, offset=0):
        width, height = dc.GetTextExtent(self.text)
        # Centre the text on the tick position.
        x = self.pos.x - width / 2.
        y = self.pos.y - height / 2.
        dc.DrawText(self.text, x + offset, y + offset)
    def DrawRotated(self, dc, offset=0):
        width, height = dc.GetTextExtent(self.text)
        angle = 360 - self.angfac * (self.idx + 1)
        r = math.radians(angle)
        # Offset so the rotated text's bounding box stays centred on pos.
        x = self.pos.x - math.cos(r) * width / 2. - math.sin(r) * height / 2.
        y = self.pos.y - math.cos(r) * height / 2. + math.sin(r) * width / 2.
        dc.DrawRotatedText(self.text, x + offset, y + offset, angle)
#----------------------------------------------------------------------
class TickRoman(TickDecimal):
    """Tick mark labelled with a Roman numeral (supports idx 0..59, I..LX)."""
    def __init__(self, **kwargs):
        TickDecimal.__init__(self, **kwargs)
        # Lookup table: idx selects the (idx + 1)-th Roman numeral.
        self.text = ["I","II","III","IV","V", \
                     "VI","VII","VIII","IX","X", \
                     "XI","XII","XIII","XIV","XV", \
                     "XVI","XVII","XVIII","XIX","XX", \
                     "XXI","XXII","XXIII","XXIV","XXV", \
                     "XXVI","XXVII","XXVIII","XXIX","XXX", \
                     "XXXI","XXXII","XXXIII","XXXIV","XXXV", \
                     "XXXVI","XXXVII","XXXVIII","XXXIX","XL", \
                     "XLI","XLII","XLIII","XLIV","XLV", \
                     "XLVI","XLVII","XLVIII","XLIX","L", \
                     "LI","LII","LIII","LIV","LV", \
                     "LVI","LVII","LVIII","LIX","LX"][self.idx]
#----------------------------------------------------------------------
class TickBinary(TickDecimal):
    """Tick mark labelled with a zero-padded binary number."""
    def __init__(self, **kwargs):
        TickDecimal.__init__(self, **kwargs)
        value = self.idx + 1
        # Binary label padded to at least four digits ("0001" ... "111100").
        self.text = format(value, "b").zfill(4) if value > 0 else "0000"
#----------------------------------------------------------------------
class TickHex(TickDecimal):
    """Tick mark labelled with an upper-case hexadecimal number."""
    def __init__(self, **kwargs):
        TickDecimal.__init__(self, **kwargs)
        # idx is zero-based, so the labels run 1, 2, ... in hex.
        self.text = "%X" % (self.idx + 1)
#----------------------------------------------------------------------
class TickNone(Element):
    """Tick style that renders nothing (blank dial)."""
    def __init__(self, **kwargs):
        Element.__init__(self, **kwargs)
    def Draw(self, dc, offset=0):
        # Intentionally a no-op: this style draws no tick marks.
        pass
#----------------------------------------------------------------------
class Dyer:
    """Stores info about colours and borders of clock Elements."""
    def __init__(self, border=None, width=0, fill=None, shadow=None):
        """
        border (wx.Colour) border colour
        width  (int)       border width
        fill   (wx.Colour) fill colour
        shadow (wx.Colour) shadow colour

        Any colour left as None falls back to a system colour.
        """
        self.border = border or \
                      wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
        self.fill = fill or \
                    wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
        self.shadow = shadow or \
                      wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW)
        self.width = width
    def Select(self, dc, shadow=False):
        """Selects the current settings into the dc."""
        if not shadow:
            dc.SetPen(wx.Pen(self.border, self.width, wx.SOLID))
            dc.SetBrush(wx.Brush(self.fill, wx.SOLID))
            dc.SetTextForeground(self.fill)
        else:
            # Shadow pass: pen, brush and text all use the shadow colour.
            dc.SetPen(wx.Pen(self.shadow, self.width, wx.SOLID))
            dc.SetBrush(wx.Brush(self.shadow, wx.SOLID))
            dc.SetTextForeground(self.shadow)
    def GetFillColour(self):
        return self.fill
    def GetBorderColour(self):
        return self.border
    def GetBorderWidth(self):
        return self.width
    def GetShadowColour(self):
        return self.shadow
    def SetFillColour(self, colour):
        self.fill = colour
    def SetBorderColour(self, colour):
        self.border = colour
    def SetBorderWidth(self, width):
        self.width = width
    def SetShadowColour(self, colour):
        self.shadow = colour
#----------------------------------------------------------------------
class HandSet:
    """Manages the set of hour, minute and second hands."""
    def __init__(self, parent, h, m, s):
        self.parent = parent
        # Order matches _targets: [HOUR, MINUTE, SECOND].
        self.hands = [h, m, s]
        self.radius = 1
        self.centre = wx.Point(1, 1)
    def _draw(self, dc, shadow=False):
        """Draw the enabled hands; with shadow=True draw their shadows."""
        # Current wall-clock time as [hour (12h), minute, second].
        ends = [int(x) for x in strftime("%I %M %S", localtime()).split()]
        # Which hands are enabled in the parent's clock style?
        flags = [self.parent.clockStyle & flag \
                 for flag in self.parent.allHandStyles]
        a_hand = self.hands[0]
        if shadow:
            offset = self.parent.shadowOffset * a_hand.GetScale()
        else:
            offset = 0
        for i, hand in enumerate(self.hands):
            # Is this hand supposed to be drawn?
            if flags[i]:
                idx = ends[i]
                # Is this the hours hand?
                if i == 0:
                    # Hours advance 5 tick positions per hour plus a
                    # fractional advance from the elapsed minutes.
                    idx = idx * 5 + ends[1] / 12 - 1
                # else prevent exceptions on leap seconds
                elif idx <= 0 or idx > 60:
                    idx = 59
                # and adjust idx offset for minutes and non-leap seconds
                else:
                    idx = idx - 1
                angle = math.radians(180 - 6 * (idx + 1))
                hand.dyer.Select(dc, shadow)
                hand.Draw(dc, (self.radius, self.centre, angle), offset)
    def Draw(self, dc):
        if self.parent.clockStyle & SHOW_SHADOWS:
            self._draw(dc, True)
        self._draw(dc)
    def RecalcCoords(self, clocksize, centre, scale):
        self.centre = centre
        for hand in self.hands:
            hand.RecalcCoords(clocksize, centre, scale)
    def SetMaxRadius(self, radius):
        self.radius = radius
    def GetSize(self, target):
        r = []
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                r.append(hand.GetSize())
        return tuple(r)
    def GetFillColour(self, target):
        r = []
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                r.append(hand.GetFillColour())
        return tuple(r)
    def GetBorderColour(self, target):
        r = []
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                r.append(hand.GetBorderColour())
        return tuple(r)
    def GetBorderWidth(self, target):
        r = []
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                r.append(hand.GetBorderWidth())
        return tuple(r)
    def GetShadowColour(self):
        # BUG FIX: the original filtered on an undefined name `target`
        # and always raised NameError.  The shadow colour is shared by
        # all hands (see SetShadowColour), so report the full tuple.
        r = []
        for hand in self.hands:
            r.append(hand.GetShadowColour())
        return tuple(r)
    def SetSize(self, size, target):
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                hand.SetSize(size)
    def SetFillColour(self, colour, target):
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                hand.SetFillColour(colour)
    def SetBorderColour(self, colour, target):
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                hand.SetBorderColour(colour)
    def SetBorderWidth(self, width, target):
        for i, hand in enumerate(self.hands):
            if _targets[i] & target:
                hand.SetBorderWidth(width)
    def SetShadowColour(self, colour):
        for i, hand in enumerate(self.hands):
            hand.SetShadowColour(colour)
#----------------------------------------------------------------------
class TickSet:
    """Manages a set of tick marks (either the hours or the minutes set)."""
    def __init__(self, parent, **kwargs):
        self.parent = parent
        self.dyer = Dyer()
        # Number of elements: 60 minute ticks or 12 hour ticks.
        self.noe = {"minutes": 60, "hours": 12}[kwargs["kind"]]
        self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        style = kwargs.pop("style")
        self.kwargs = kwargs
        self.SetStyle(style)
    def _draw(self, dc, shadow=False):
        """Draw the ticks; with shadow=True draw their shadows instead."""
        dc.SetFont(self.font)
        a_tick = self.ticks[0]
        if shadow:
            offset = self.parent.shadowOffset * a_tick.GetScale()
        else:
            offset = 0
        clockStyle = self.parent.clockStyle
        for idx, tick in self.ticks.items():
            draw = False
            # Are we a set of hours?
            if self.noe == 12:
                # Should we show all hours ticks?
                if clockStyle & SHOW_HOURS_TICKS:
                    draw = True
                # Or is this tick a quarter and should we show only quarters?
                elif clockStyle & SHOW_QUARTERS_TICKS and not (idx + 1) % 3.:
                    draw = True
            # Are we a set of minutes and minutes should be shown?
            elif self.noe == 60 and clockStyle & SHOW_MINUTES_TICKS:
                # If this tick occupies the same position of an hour/quarter
                # tick, should we still draw it anyway?
                if clockStyle & OVERLAP_TICKS:
                    draw = True
                # Right, sir. I promise I won't overlap any tick.
                else:
                    # Ensure that this tick won't overlap an hour tick.
                    if clockStyle & SHOW_HOURS_TICKS:
                        if (idx + 1) % 5.:
                            draw = True
                    # Ensure that this tick won't overlap a quarter tick.
                    elif clockStyle & SHOW_QUARTERS_TICKS:
                        if (idx + 1) % 15.:
                            draw = True
                    # We're not drawing quarters nor hours, so we can draw
                    # all minutes ticks.
                    else:
                        draw = True
            if draw:
                tick.Draw(dc, offset)
    def Draw(self, dc):
        if self.parent.clockStyle & SHOW_SHADOWS:
            self.dyer.Select(dc, True)
            self._draw(dc, True)
        self.dyer.Select(dc)
        self._draw(dc)
    def RecalcCoords(self, clocksize, centre, scale):
        """Recompute the tick radius and the position of every tick."""
        a_tick = self.ticks[0]
        size = a_tick.GetMaxSize(scale)
        maxsize = size
        # Try to find a 'good' max size for text-based ticks.
        if a_tick.text is not None:
            self.font.SetPointSize(size)
            dc = wx.MemoryDC()
            dc.SelectObject(wx.EmptyBitmap(*clocksize.Get()))
            dc.SetFont(self.font)
            maxsize = size
            for tick in self.ticks.values():
                maxsize = max(*(dc.GetTextExtent(tick.text) + (maxsize,)))
        radius = self.radius = min(clocksize.Get()) / 2. - \
                 self.dyer.width / 2. - \
                 maxsize / 2. - \
                 a_tick.GetOffset() * scale - \
                 self.parent.shadowOffset * scale
        # If we are a set of hours, ticks are separated by 30 degrees;
        # if we are a set of minutes, by 6 degrees.
        angfac = [6, 30][self.noe == 12]
        for i, tick in self.ticks.items():
            tick.SetClockSize(clocksize)
            tick.SetScale(scale)
            deg = 180 - angfac * (i + 1)
            angle = math.radians(deg)
            x = centre.x + radius * math.sin(angle)
            y = centre.y + radius * math.cos(angle)
            tick.SetPosition(wx.Point(x, y))
    def GetSize(self):
        return self.kwargs["size"]
    def GetFillColour(self):
        return self.dyer.GetFillColour()
    def GetBorderColour(self):
        return self.dyer.GetBorderColour()
    def GetBorderWidth(self):
        return self.dyer.GetBorderWidth()
    def GetPolygon(self):
        # BUG FIX: the original used self.ticks.values()[0], which fails
        # on Python 3 (dict views are not indexable); fetch tick 0
        # directly, consistent with the other getters.
        a_tick = self.ticks[0]
        return a_tick.GetPolygon()
    def GetFont(self):
        return self.font
    def GetOffset(self):
        a_tick = self.ticks[0]
        return a_tick.GetOffset()
    def GetShadowColour(self):
        return self.dyer.GetShadowColour()
    def GetIsRotated(self):
        a_tick = self.ticks[0]
        return a_tick.GetIsRotated()
    def GetStyle(self):
        return self.style
    def SetSize(self, size):
        self.kwargs["size"] = size
        for tick in self.ticks.values():
            tick.SetSize(size)
    def SetFillColour(self, colour):
        self.dyer.SetFillColour(colour)
    def SetBorderColour(self, colour):
        self.dyer.SetBorderColour(colour)
    def SetBorderWidth(self, width):
        self.dyer.SetBorderWidth(width)
    def SetPolygon(self, polygon):
        for tick in self.ticks.values():
            tick.SetPolygon(polygon)
    def SetFont(self, font):
        self.font = font
    def SetOffset(self, offset):
        self.kwargs["offset"] = offset
        for tick in self.ticks.values():
            tick.SetOffset(offset)
    def SetShadowColour(self, colour):
        self.dyer.SetShadowColour(colour)
    def SetIsRotated(self, rotate):
        self.kwargs["rotate"] = rotate
        for tick in self.ticks.values():
            tick.SetIsRotated(rotate)
    def SetStyle(self, style):
        """Rebuild all ticks using the tick class mapped to `style`."""
        self.style = style
        tickclass = allTickStyles[style]
        self.kwargs["rotate"] = self.parent.clockStyle & ROTATE_TICKS
        self.ticks = {}
        for i in range(self.noe):
            self.kwargs["idx"] = i
            self.ticks[i] = tickclass(**self.kwargs)
#----------------------------------------------------------------------
class Box:
    """Gathers info about the clock face and the two tick sets."""

    # Order matches _targets: index 0 -> HOUR ticks, 1 -> MINUTE ticks.
    _TICKSETS = ("TicksH", "TicksM")

    def __init__(self, parent, Face, TicksM, TicksH):
        self.parent = parent
        self.Face = Face
        self.TicksH = TicksH
        self.TicksM = TicksM

    def _selected(self, target):
        """Yield the tick sets whose _targets flag is set in `target`.

        Factors out the filter loop that was duplicated verbatim in
        every Get*/Set* accessor below.
        """
        for i, attr in enumerate(self._TICKSETS):
            if _targets[i] & target:
                yield getattr(self, attr)

    def GetNiceRadiusForHands(self, centre):
        """Return a hand radius that clears the innermost tick marks."""
        a_tick = self.TicksM.ticks[0]
        scale = a_tick.GetScale()
        bw = max(self.TicksH.dyer.width / 2. * scale,
                 self.TicksM.dyer.width / 2. * scale)
        # The bottom-most minute (idx 59) and hour (idx 11) ticks bound
        # the free space below the centre.
        mgt = self.TicksM.ticks[59]
        my = mgt.pos.y + mgt.GetMaxSize(scale) + bw
        hgt = self.TicksH.ticks[11]
        hy = hgt.pos.y + hgt.GetMaxSize(scale) + bw
        niceradius = centre.y - max(my, hy)
        return niceradius

    def Draw(self, dc):
        # Face first, then ticks on top of it.
        for attr in ["Face", "TicksM", "TicksH"]:
            getattr(self, attr).Draw(dc)

    def RecalcCoords(self, size, centre, scale):
        for attr in ["Face", "TicksH", "TicksM"]:
            getattr(self, attr).RecalcCoords(size, centre, scale)

    def GetTickSize(self, target):
        return tuple(ts.GetSize() for ts in self._selected(target))

    def GetTickFillColour(self, target):
        return tuple(ts.GetFillColour() for ts in self._selected(target))

    def GetTickBorderColour(self, target):
        return tuple(ts.GetBorderColour() for ts in self._selected(target))

    def GetTickBorderWidth(self, target):
        return tuple(ts.GetBorderWidth() for ts in self._selected(target))

    def GetTickPolygon(self, target):
        return tuple(ts.GetPolygon() for ts in self._selected(target))

    def GetTickFont(self, target):
        return tuple(ts.GetFont() for ts in self._selected(target))

    def GetIsRotated(self):
        # Both tick sets share the rotate flag; report the hours set's.
        return self.TicksH.GetIsRotated()

    def GetTickOffset(self, target):
        return tuple(ts.GetOffset() for ts in self._selected(target))

    def GetShadowColour(self):
        # The shadow colour is shared; report the hours set's.
        return self.TicksH.GetShadowColour()

    def GetTickStyle(self, target):
        return tuple(ts.GetStyle() for ts in self._selected(target))

    def SetTickSize(self, size, target):
        for ts in self._selected(target):
            ts.SetSize(size)

    def SetTickFillColour(self, colour, target):
        for ts in self._selected(target):
            ts.SetFillColour(colour)

    def SetTickBorderColour(self, colour, target):
        for ts in self._selected(target):
            ts.SetBorderColour(colour)

    def SetTickBorderWidth(self, width, target):
        for ts in self._selected(target):
            ts.SetBorderWidth(width)

    def SetTickPolygon(self, polygon, target):
        for ts in self._selected(target):
            ts.SetPolygon(polygon)

    def SetTickFont(self, font, target):
        # Each tick set gets its own font object, rebuilt from the
        # native description, so the sets never share a mutable wx.Font.
        fs = font.GetNativeFontInfoDesc()
        for ts in self._selected(target):
            ts.SetFont(wx.FontFromNativeInfoString(fs))

    def SetIsRotated(self, rotate):
        for attr in self._TICKSETS:
            getattr(self, attr).SetIsRotated(rotate)

    def SetTickOffset(self, offset, target):
        for ts in self._selected(target):
            ts.SetOffset(offset)

    def SetShadowColour(self, colour):
        for attr in self._TICKSETS:
            getattr(self, attr).SetShadowColour(colour)

    def SetTickStyle(self, style, target):
        for ts in self._selected(target):
            ts.SetStyle(style)
#----------------------------------------------------------------------
# Relationship between styles and ticks class names.
# Maps each TICKS_* style flag (from styles) to the Element subclass that
# renders that tick style; consumed by TickSet.SetStyle.
allTickStyles = {TICKS_BINARY: TickBinary,
                 TICKS_CIRCLE: TickCircle,
                 TICKS_DECIMAL: TickDecimal,
                 TICKS_HEX: TickHex,
                 TICKS_NONE: TickNone,
                 TICKS_POLY: TickPoly,
                 TICKS_ROMAN: TickRoman,
                 TICKS_SQUARE: TickSquare}
#
##
### eof
| |
import unittest
import numpy as np
import pysal
from pysal.spreg.twosls import BaseTSLS, TSLS
class TestBaseTSLS(unittest.TestCase):
    """Regression tests pinning BaseTSLS results on the columbus data.

    All expected values are hard-coded outputs of a previously validated
    run; any numerical drift in the estimator fails these tests.
    """
    def setUp(self):
        # y: CRIME (dependent); X: constant + INC (exogenous);
        # yd: HOVAL (endogenous); q: DISCBD (instrument).
        db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
        self.y = np.array(db.by_col("CRIME"))
        self.y = np.reshape(self.y, (49,1))
        self.X = []
        self.X.append(db.by_col("INC"))
        self.X = np.array(self.X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.yd = []
        self.yd.append(db.by_col("HOVAL"))
        self.yd = np.array(self.yd).T
        self.q = []
        self.q.append(db.by_col("DISCBD"))
        self.q = np.array(self.q).T
    def test_basic(self):
        # Default estimation: checks every exposed attribute of the fit.
        reg = BaseTSLS(self.y, self.X, self.yd, self.q)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        h_0 = np.array([  1.   ,  19.531,   5.03 ])
        np.testing.assert_array_almost_equal(reg.h[0], h_0)
        hth = np.array([[    49.        ,    704.371999  ,    139.75      ],
                        [   704.371999  ,  11686.67338121,   2246.12800625],
                        [   139.75      ,   2246.12800625,    498.5851    ]])
        np.testing.assert_array_almost_equal(reg.hth, hth, 7)
        hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
                         [-0.00762011,  0.00100135, -0.0023752 ],
                         [-0.01044191, -0.0023752 ,  0.01563276]])
        np.testing.assert_array_almost_equal(reg.hthi, hthi, 7)
        self.assertEqual(reg.k, 3)
        self.assertEqual(reg.kstar, 1)
        self.assertAlmostEqual(reg.mean_y, 35.128823897959187, 7)
        self.assertEqual(reg.n, 49)
        pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
                             [ 0.02580142,  0.08226331, -0.03143731],
                             [-3.13896453, -0.33487872,  0.20690965]])
        np.testing.assert_array_almost_equal(reg.pfora1a2, pfora1a2, 7)
        predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
        np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
        q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
        np.testing.assert_array_equal(reg.q[0:5], q_5)
        self.assertAlmostEqual(reg.sig2n_k, 587.56797852699822, 7)
        self.assertAlmostEqual(reg.sig2n, 551.5944288212637, 7)
        self.assertAlmostEqual(reg.sig2, 551.5944288212637, 7)
        self.assertAlmostEqual(reg.std_y, 16.732092091229699, 7)
        u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
        np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
        self.assertAlmostEqual(reg.utu, 27028.127012241919, 7)
        varb = np.array([[ 0.41526237,  0.01879906, -0.01730372],
                         [ 0.01879906,  0.00362823, -0.00184604],
                         [-0.01730372, -0.00184604,  0.0011406 ]])
        np.testing.assert_array_almost_equal(reg.varb, varb, 7)
        vm = np.array([[ 229.05640809,   10.36945783,   -9.54463414],
                       [  10.36945783,    2.0013142 ,   -1.01826408],
                       [  -9.54463414,   -1.01826408,    0.62914915]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        x_0 = np.array([  1.   ,  19.531])
        np.testing.assert_array_almost_equal(reg.x[0], x_0, 7)
        y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
        np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
        yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35    ], [ 33.200001], [ 23.225   ]])
        np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
        z_0 = np.array([  1.      ,  19.531   ,  80.467003])
        np.testing.assert_array_almost_equal(reg.z[0], z_0, 7)
        zthhthi = np.array([[  1.00000000e+00,  -1.66533454e-16,   4.44089210e-16],
                            [  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
                            [  1.26978671e+01,   1.05598709e+00,   3.70212359e+00]])
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
    def test_n_k(self):
        # sig2n_k=True: variance normalized by (n - k) instead of n.
        reg = BaseTSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 243.99486949,   11.04572682,  -10.16711028],
                       [  11.04572682,    2.13183469,   -1.08467261],
                       [ -10.16711028,   -1.08467261,    0.67018062]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
    def test_white(self):
        # White (heteroskedasticity-robust) covariance estimate.
        reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='white')
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 208.27139316,   15.6687805 ,  -11.53686154],
                       [  15.6687805 ,    2.26882747,   -1.30312033],
                       [ -11.53686154,   -1.30312033,    0.81940656]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
    def test_hac(self):
        # HAC covariance using a triangular kernel weights matrix (k=15).
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
        reg = BaseTSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 231.07254978,   15.42050291,  -11.3941033 ],
                       [  15.01376346,    1.92422887,   -1.11865505],
                       [ -11.34381641,   -1.1279227 ,    0.72053806]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
class TestTSLS(unittest.TestCase):
    """Regression tests pinning the user-facing TSLS class on columbus.

    Unlike TestBaseTSLS, the constant is NOT added to X here: TSLS adds
    it itself.  Also covers diagnostics (pr2, z_stat), spatial tests and
    variable-name bookkeeping.
    """
    def setUp(self):
        # y: CRIME (dependent); X: INC (exogenous); yd: HOVAL (endogenous);
        # q: DISCBD (instrument).
        db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
        self.y = np.array(db.by_col("CRIME"))
        self.y = np.reshape(self.y, (49,1))
        self.X = []
        self.X.append(db.by_col("INC"))
        self.X = np.array(self.X).T
        self.yd = []
        self.yd.append(db.by_col("HOVAL"))
        self.yd = np.array(self.yd).T
        self.q = []
        self.q.append(db.by_col("DISCBD"))
        self.q = np.array(self.q).T
    def test_basic(self):
        # Default estimation: checks all attributes plus diagnostics.
        reg = TSLS(self.y, self.X, self.yd, self.q)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        h_0 = np.array([  1.   ,  19.531,   5.03 ])
        np.testing.assert_array_almost_equal(reg.h[0], h_0)
        hth = np.array([[    49.        ,    704.371999  ,    139.75      ],
                        [   704.371999  ,  11686.67338121,   2246.12800625],
                        [   139.75      ,   2246.12800625,    498.5851    ]])
        np.testing.assert_array_almost_equal(reg.hth, hth, 7)
        hthi = np.array([[ 0.1597275 , -0.00762011, -0.01044191],
                         [-0.00762011,  0.00100135, -0.0023752 ],
                         [-0.01044191, -0.0023752 ,  0.01563276]])
        np.testing.assert_array_almost_equal(reg.hthi, hthi, 7)
        self.assertEqual(reg.k, 3)
        self.assertEqual(reg.kstar, 1)
        self.assertAlmostEqual(reg.mean_y, 35.128823897959187, 7)
        self.assertEqual(reg.n, 49)
        pfora1a2 = np.array([[ 9.58156106, -0.22744226, -0.13820537],
                             [ 0.02580142,  0.08226331, -0.03143731],
                             [-3.13896453, -0.33487872,  0.20690965]])
        np.testing.assert_array_almost_equal(reg.pfora1a2, pfora1a2, 7)
        predy_5 = np.array([[-28.68949467], [ 28.99484984], [ 55.07344824], [ 38.26609504], [ 57.57145851]])
        np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
        q_5 = np.array([[ 5.03], [ 4.27], [ 3.89], [ 3.7 ], [ 2.83]])
        np.testing.assert_array_equal(reg.q[0:5], q_5)
        self.assertAlmostEqual(reg.sig2n_k, 587.56797852699822, 7)
        self.assertAlmostEqual(reg.sig2n, 551.5944288212637, 7)
        self.assertAlmostEqual(reg.sig2, 551.5944288212637, 7)
        self.assertAlmostEqual(reg.std_y, 16.732092091229699, 7)
        u_5 = np.array([[ 44.41547467], [-10.19309584], [-24.44666724], [ -5.87833504], [ -6.83994851]])
        np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
        self.assertAlmostEqual(reg.utu, 27028.127012241919, 7)
        varb = np.array([[ 0.41526237,  0.01879906, -0.01730372],
                         [ 0.01879906,  0.00362823, -0.00184604],
                         [-0.01730372, -0.00184604,  0.0011406 ]])
        np.testing.assert_array_almost_equal(reg.varb, varb, 7)
        vm = np.array([[ 229.05640809,   10.36945783,   -9.54463414],
                       [  10.36945783,    2.0013142 ,   -1.01826408],
                       [  -9.54463414,   -1.01826408,    0.62914915]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        x_0 = np.array([  1.   ,  19.531])
        np.testing.assert_array_almost_equal(reg.x[0], x_0, 7)
        y_5 = np.array([[ 15.72598 ], [ 18.801754], [ 30.626781], [ 32.38776 ], [ 50.73151 ]])
        np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
        yend_5 = np.array([[ 80.467003], [ 44.567001], [ 26.35    ], [ 33.200001], [ 23.225   ]])
        np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
        z_0 = np.array([  1.      ,  19.531   ,  80.467003])
        np.testing.assert_array_almost_equal(reg.z[0], z_0, 7)
        zthhthi = np.array([[  1.00000000e+00,  -1.66533454e-16,   4.44089210e-16],
                            [  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
                            [  1.26978671e+01,   1.05598709e+00,   3.70212359e+00]])
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)
        self.assertAlmostEqual(reg.pr2, 0.27936137128173893, 7)
        z_stat = np.array([[  5.84526447e+00,   5.05764078e-09],
                           [  3.67601567e-01,   7.13170346e-01],
                           [ -1.99468913e+00,   4.60767956e-02]])
        np.testing.assert_array_almost_equal(reg.z_stat, z_stat, 7)
        title = 'TWO STAGE LEAST SQUARES'
        self.assertEqual(reg.title, title)
    def test_n_k(self):
        # sig2n_k=True: variance normalized by (n - k) instead of n.
        reg = TSLS(self.y, self.X, self.yd, self.q, sig2n_k=True)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 243.99486949,   11.04572682,  -10.16711028],
                       [  11.04572682,    2.13183469,   -1.08467261],
                       [ -10.16711028,   -1.08467261,    0.67018062]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
    def test_white(self):
        # White (heteroskedasticity-robust) covariance estimate.
        reg = TSLS(self.y, self.X, self.yd, self.q, robust='white')
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 208.27139316,   15.6687805 ,  -11.53686154],
                       [  15.6687805 ,    2.26882747,   -1.30312033],
                       [ -11.53686154,   -1.30312033,    0.81940656]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        self.assertEqual(reg.robust, 'white')
    def test_hac(self):
        # HAC covariance using a triangular kernel weights matrix (k=5).
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
        reg = TSLS(self.y, self.X, self.yd, self.q, robust='hac', gwk=gwk)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 225.0795089 ,   17.11660041,  -12.22448566],
                       [  17.67097154,    2.47483461,   -1.4183641 ],
                       [ -12.45093722,   -1.40495464,    0.8700441 ]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        self.assertEqual(reg.robust, 'hac')
    def test_spatial(self):
        # spat_diag=True adds the Anselin-Kelejian test against w.
        w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
        reg = TSLS(self.y, self.X, self.yd, self.q, spat_diag=True, w=w)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 229.05640809,   10.36945783,   -9.54463414],
                       [  10.36945783,    2.0013142 ,   -1.01826408],
                       [  -9.54463414,   -1.01826408,    0.62914915]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        ak_test = np.array([ 1.16816972,  0.27977763])
        np.testing.assert_array_almost_equal(reg.ak_test, ak_test, 7)
    def test_names(self):
        # Verifies that user-supplied variable/weights names are stored
        # (with the CONSTANT prepended to name_x automatically).
        w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
        name_x = ['inc']
        name_y = 'crime'
        name_yend = ['hoval']
        name_q = ['discbd']
        name_w = 'queen'
        name_gwk = 'k=5'
        name_ds = 'columbus'
        reg = TSLS(self.y, self.X, self.yd, self.q,
                   spat_diag=True, w=w, robust='hac', gwk=gwk,
                   name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
                   name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
        betas = np.array([[ 88.46579584], [  0.5200379 ], [ -1.58216593]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array([[ 225.0795089 ,   17.11660041,  -12.22448566],
                       [  17.67097154,    2.47483461,   -1.4183641 ],
                       [ -12.45093722,   -1.40495464,    0.8700441 ]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)
        self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
        self.assertListEqual(reg.name_yend, name_yend)
        self.assertListEqual(reg.name_q, name_q)
        self.assertEqual(reg.name_y, name_y)
        self.assertEqual(reg.name_w, name_w)
        self.assertEqual(reg.name_gwk, name_gwk)
        self.assertEqual(reg.name_ds, name_ds)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AllTimeList(ListResource):
    """List resource for an account's all-time usage records."""

    def __init__(self, version, account_sid):
        """
        Initialize the AllTimeList

        :param Version version: Version that contains the resource
        :param account_sid: A 34 character string that uniquely identifies this resource.

        :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
        """
        super(AllTimeList, self).__init__(version)

        # Path Solution
        self._solution = {'account_sid': account_sid, }
        self._uri = '/Accounts/{account_sid}/Usage/Records/AllTime.json'.format(**self._solution)

    def stream(self, category=values.unset, start_date=values.unset,
               end_date=values.unset, include_subaccounts=values.unset, limit=None,
               page_size=None):
        """
        Lazily stream AllTimeInstance records from the API.

        Pages are requested only as the generator is consumed, so no more
        requests are issued than needed to honor ``limit``.

        :param AllTimeInstance.Category category: The usage category of the UsageRecord resources to read
        :param date start_date: Only include usage that has occurred on or after this date
        :param date end_date: Only include usage that occurred on or before this date
        :param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance]
        """
        bounds = self._version.read_limits(limit, page_size)

        first_page = self.page(
            category=category,
            start_date=start_date,
            end_date=end_date,
            include_subaccounts=include_subaccounts,
            page_size=bounds['page_size'],
        )

        return self._version.stream(first_page, bounds['limit'])

    def list(self, category=values.unset, start_date=values.unset,
             end_date=values.unset, include_subaccounts=values.unset, limit=None,
             page_size=None):
        """
        Eagerly fetch AllTimeInstance records from the API as a list.

        Unlike stream(), up to ``limit`` records are loaded into memory
        before this method returns.

        :param AllTimeInstance.Category category: The usage category of the UsageRecord resources to read
        :param date start_date: Only include usage that has occurred on or after this date
        :param date end_date: Only include usage that occurred on or before this date
        :param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance]
        """
        stream_kwargs = dict(
            category=category,
            start_date=start_date,
            end_date=end_date,
            include_subaccounts=include_subaccounts,
            limit=limit,
            page_size=page_size,
        )
        return list(self.stream(**stream_kwargs))

    def page(self, category=values.unset, start_date=values.unset,
             end_date=values.unset, include_subaccounts=values.unset,
             page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of AllTimeInstance records from the API.
        The request is executed immediately.

        :param AllTimeInstance.Category category: The usage category of the UsageRecord resources to read
        :param date start_date: Only include usage that has occurred on or after this date
        :param date end_date: Only include usage that occurred on or before this date
        :param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of AllTimeInstance
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
        """
        params = values.of({
            'Category': category,
            'StartDate': serialize.iso8601_date(start_date),
            'EndDate': serialize.iso8601_date(end_date),
            'IncludeSubaccounts': include_subaccounts,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(method='GET', uri=self._uri, params=params, )

        return AllTimePage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of AllTimeInstance records from the API.
        The request is executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of AllTimeInstance
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
        """
        response = self._version.domain.twilio.request('GET', target_url)
        return AllTimePage(self._version, response, self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.AllTimeList>'
class AllTimePage(Page):
    """Single page of AllTime usage records returned by the API."""

    def __init__(self, version, response, solution):
        """
        Initialize the AllTimePage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: A 34 character string that uniquely identifies this resource.

        :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimePage
        """
        super(AllTimePage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an AllTimeInstance from a raw API payload.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
        """
        return AllTimeInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.AllTimePage>'
class AllTimeInstance(InstanceResource):
    """A single all-time UsageRecord: cumulative usage for one category."""

    class Category(object):
        """Usage categories recognized by the API.

        The values are the exact strings the API accepts/returns; do not
        edit them for spelling (see the `progess` note below).
        """
        A2P_REGISTRATION_FEES = "a2p-registration-fees"
        AGENT_CONFERENCE = "agent-conference"
        ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
        AUTHY_AUTHENTICATIONS = "authy-authentications"
        AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
        AUTHY_MONTHLY_FEES = "authy-monthly-fees"
        AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
        AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
        AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
        # "progess" (sic) -- this is the literal value defined by the API
        CALL_PROGESS_EVENTS = "call-progess-events"
        CALLERIDLOOKUPS = "calleridlookups"
        CALLS = "calls"
        CALLS_CLIENT = "calls-client"
        CALLS_GLOBALCONFERENCE = "calls-globalconference"
        CALLS_INBOUND = "calls-inbound"
        CALLS_INBOUND_LOCAL = "calls-inbound-local"
        CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
        CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
        CALLS_OUTBOUND = "calls-outbound"
        CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
        CALLS_RECORDINGS = "calls-recordings"
        CALLS_SIP = "calls-sip"
        CALLS_SIP_INBOUND = "calls-sip-inbound"
        CALLS_SIP_OUTBOUND = "calls-sip-outbound"
        CALLS_TRANSFERS = "calls-transfers"
        CARRIER_LOOKUPS = "carrier-lookups"
        CONVERSATIONS = "conversations"
        CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
        CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
        CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
        CONVERSATIONS_EVENTS = "conversations-events"
        CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
        CONVERSATIONS_PARTICIPANTS = "conversations-participants"
        CPS = "cps"
        FLEX_USAGE = "flex-usage"
        FRAUD_LOOKUPS = "fraud-lookups"
        GROUP_ROOMS = "group-rooms"
        GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
        GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
        GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
        GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
        GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
        GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
        GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
        GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
        IMP_V1_USAGE = "imp-v1-usage"
        LOOKUPS = "lookups"
        MARKETPLACE = "marketplace"
        MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
        MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
        MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
        MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
        MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
        MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
        MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
        MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
        MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
        MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
        MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
        MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
        MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
        MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
        MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
        MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
        MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
        MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
        MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
        MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
        MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
        MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
        MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
        MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
        MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
        MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
        MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
        MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
        MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
        MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
        MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
        MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
        MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
        MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
        MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
        MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
        MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
        MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
        MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
        MEDIASTORAGE = "mediastorage"
        MMS = "mms"
        MMS_INBOUND = "mms-inbound"
        MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
        MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
        MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
        MMS_OUTBOUND = "mms-outbound"
        MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
        MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
        MONITOR_READS = "monitor-reads"
        MONITOR_STORAGE = "monitor-storage"
        MONITOR_WRITES = "monitor-writes"
        NOTIFY = "notify"
        NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
        NOTIFY_CHANNELS = "notify-channels"
        NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
        PCHAT = "pchat"
        PCHAT_USERS = "pchat-users"
        PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
        PFAX = "pfax"
        PFAX_MINUTES = "pfax-minutes"
        PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
        PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
        PFAX_PAGES = "pfax-pages"
        PHONENUMBERS = "phonenumbers"
        PHONENUMBERS_CPS = "phonenumbers-cps"
        PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
        PHONENUMBERS_LOCAL = "phonenumbers-local"
        PHONENUMBERS_MOBILE = "phonenumbers-mobile"
        PHONENUMBERS_SETUPS = "phonenumbers-setups"
        PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
        PREMIUMSUPPORT = "premiumsupport"
        PROXY = "proxy"
        PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
        PSTNCONNECTIVITY = "pstnconnectivity"
        PV = "pv"
        PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
        PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
        PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
        PV_COMPOSITION_MINUTES = "pv-composition-minutes"
        PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
        PV_ROOM_PARTICIPANTS = "pv-room-participants"
        PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
        PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
        PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
        PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
        PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
        PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
        PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
        PV_ROOMS = "pv-rooms"
        PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
        RECORDINGS = "recordings"
        RECORDINGSTORAGE = "recordingstorage"
        ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
        ROOMS_GROUP_MINUTES = "rooms-group-minutes"
        ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
        SHORTCODES = "shortcodes"
        SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
        SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
        SHORTCODES_MPS = "shortcodes-mps"
        SHORTCODES_RANDOM = "shortcodes-random"
        SHORTCODES_UK = "shortcodes-uk"
        SHORTCODES_VANITY = "shortcodes-vanity"
        SMALL_GROUP_ROOMS = "small-group-rooms"
        SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
        SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
        SMS = "sms"
        SMS_INBOUND = "sms-inbound"
        SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
        SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
        SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
        SMS_MESSAGES_FEATURES = "sms-messages-features"
        SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
        SMS_OUTBOUND = "sms-outbound"
        SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
        SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
        SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
        SPEECH_RECOGNITION = "speech-recognition"
        STUDIO_ENGAGEMENTS = "studio-engagements"
        SYNC = "sync"
        SYNC_ACTIONS = "sync-actions"
        SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
        SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
        TASKROUTER_TASKS = "taskrouter-tasks"
        TOTALPRICE = "totalprice"
        TRANSCRIPTIONS = "transcriptions"
        TRUNKING_CPS = "trunking-cps"
        TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
        TRUNKING_ORIGINATION = "trunking-origination"
        TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
        TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
        TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
        TRUNKING_RECORDINGS = "trunking-recordings"
        TRUNKING_SECURE = "trunking-secure"
        TRUNKING_TERMINATION = "trunking-termination"
        TURNMEGABYTES = "turnmegabytes"
        TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
        TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
        TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
        TURNMEGABYTES_INDIA = "turnmegabytes-india"
        TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
        TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
        TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
        TURNMEGABYTES_USEAST = "turnmegabytes-useast"
        TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
        TWILIO_INTERCONNECT = "twilio-interconnect"
        VERIFY_PUSH = "verify-push"
        VIDEO_RECORDINGS = "video-recordings"
        VOICE_INSIGHTS = "voice-insights"
        VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
        VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
        VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
        VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
        WIRELESS = "wireless"
        WIRELESS_ORDERS = "wireless-orders"
        WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
        WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
        WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
        WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
        WIRELESS_USAGE = "wireless-usage"
        WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
        WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
        WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
        WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
        WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
        WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
        WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
        WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
        WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
        WIRELESS_USAGE_DATA = "wireless-usage-data"
        WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
        WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
        WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
        WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
        WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
        WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
        WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
        WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
        WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
        WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
        WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
        WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
        WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
        WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
        WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
        WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
        WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
        WIRELESS_USAGE_MRC = "wireless-usage-mrc"
        WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
        WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
        WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
        WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
        WIRELESS_USAGE_SMS = "wireless-usage-sms"
        WIRELESS_USAGE_VOICE = "wireless-usage-voice"

    def __init__(self, version, payload, account_sid):
        """
        Initialize the AllTimeInstance

        :returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
        :rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
        """
        super(AllTimeInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'api_version': payload.get('api_version'),
            'as_of': payload.get('as_of'),
            'category': payload.get('category'),
            'count': payload.get('count'),
            'count_unit': payload.get('count_unit'),
            'description': payload.get('description'),
            'end_date': deserialize.iso8601_date(payload.get('end_date')),
            'price': deserialize.decimal(payload.get('price')),
            'price_unit': payload.get('price_unit'),
            'start_date': deserialize.iso8601_date(payload.get('start_date')),
            'subresource_uris': payload.get('subresource_uris'),
            'uri': payload.get('uri'),
            'usage': payload.get('usage'),
            'usage_unit': payload.get('usage_unit'),
        }

        # Context
        self._context = None
        self._solution = {'account_sid': account_sid, }

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account accrued the usage
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def api_version(self):
        """
        :returns: The API version used to create the resource
        :rtype: unicode
        """
        return self._properties['api_version']

    @property
    def as_of(self):
        """
        :returns: Usage records up to date as of this timestamp
        :rtype: unicode
        """
        return self._properties['as_of']

    @property
    def category(self):
        """
        :returns: The category of usage
        :rtype: AllTimeInstance.Category
        """
        return self._properties['category']

    @property
    def count(self):
        """
        :returns: The number of usage events
        :rtype: unicode
        """
        return self._properties['count']

    @property
    def count_unit(self):
        """
        :returns: The units in which count is measured
        :rtype: unicode
        """
        return self._properties['count_unit']

    @property
    def description(self):
        """
        :returns: A plain-language description of the usage category
        :rtype: unicode
        """
        return self._properties['description']

    @property
    def end_date(self):
        """
        :returns: The last date for which usage is included in the UsageRecord
        :rtype: date
        """
        return self._properties['end_date']

    @property
    def price(self):
        """
        :returns: The total price of the usage
        :rtype: unicode
        """
        return self._properties['price']

    @property
    def price_unit(self):
        """
        :returns: The currency in which `price` is measured
        :rtype: unicode
        """
        return self._properties['price_unit']

    @property
    def start_date(self):
        """
        :returns: The first date for which usage is included in this UsageRecord
        :rtype: date
        """
        return self._properties['start_date']

    @property
    def subresource_uris(self):
        """
        :returns: A list of related resources identified by their relative URIs
        :rtype: unicode
        """
        return self._properties['subresource_uris']

    @property
    def uri(self):
        """
        :returns: The URI of the resource, relative to `https://api.twilio.com`
        :rtype: unicode
        """
        return self._properties['uri']

    @property
    def usage(self):
        """
        :returns: The amount of usage
        :rtype: unicode
        """
        return self._properties['usage']

    @property
    def usage_unit(self):
        """
        :returns: The units in which usage is measured
        :rtype: unicode
        """
        return self._properties['usage_unit']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.AllTimeInstance>'
| |
"""
Helpers for working with ``eazy-py``.
"""
import numpy as np
def fix_aperture_corrections(tab, verbose=True, ref_filter=None):
    """
    Reapply total (Kron) aperture corrections to a photometric catalog.

    June 2020: reapply total corrections using fixed bug for the kron total
    corrections where the necessary pixel scale wasn't used.  The
    ``{filt}_tot_corr`` columns are recomputed and the affected
    ``{filt}_tot_{ap}`` / ``{filt}_etot_{ap}`` columns are rescaled in place.

    Parameters
    ----------
    tab : `~astropy.table.Table`
        Photometric catalog with ``ASEC_*`` / ``APER_*`` metadata keywords
        and per-filter ``{filt}_tot_corr`` columns.

    verbose : bool
        Print progress messages.

    ref_filter : str, optional
        Reference filter for the encircled-energy ratio.  Defaults to the
        first entry of ``tab.meta['TOTCFILT']``.

    Returns
    -------
    tab : `~astropy.table.Table`
        The input table, modified in place.

    Raises
    ------
    IOError
        If ``ref_filter`` is not given and ``TOTCFILT`` is not in
        ``tab.meta``.
    """
    from grizli import prep

    # Detection pixel scale: aperture diameter in arcsec / diameter in pixels
    pixel_scale = tab.meta['ASEC_0'] / tab.meta['APER_0']

    # `and`, not bitwise `&`, for the boolean guard
    if ('TOTCFILT' not in tab.meta) and (ref_filter is None):
        raise IOError('No ref_filter specified and TOTCFILT not in tab.meta')

    if ref_filter is None:
        ref_filter = np.atleast_1d(tab.meta['TOTCFILT'])[0].lower()

    keys = list(tab.meta.keys())

    for k in keys:
        if not k.endswith('_PLAM'):
            continue

        filt = k.split('_PLAM')[0].lower()
        if verbose:
            print('Fix aperture corrections: ', filt)

        tot_corr = prep.get_kron_tot_corr(tab, filt, pixel_scale=pixel_scale)

        # Ratio of corrected to previously-stored total correction;
        # must be computed *before* overwriting the column below
        rescale = tot_corr / tab[f'{filt}_tot_corr']
        tab[f'{filt}_tot_corr'] = tot_corr

        # Loop through apertures and fix corr and tot columns if found
        for ka in keys:
            if not ka.startswith('APER_'):
                continue

            ap = ka.split('_')[1]
            if f'{filt}_tot_{ap}' not in tab.colnames:
                continue

            aper_radius = tab.meta[f'ASEC_{ap}'] / 2.
            msg = f' aper #{ap} (R={aper_radius:.2f}")'

            if f'EE_{filt}_{ap}' not in tab.meta:
                # Encircled-energy correction relative to the reference
                # filter; recorded in the metadata so it is applied only once
                ee_ratio = prep.get_filter_ee_ratio(filt,
                                                    ref_filter=ref_filter,
                                                    aper_radius=aper_radius)
                tab.meta[f'EE_{filt}_{ap}'] = 1. / ee_ratio
                msg += ' + ee_filter'

                for col in ['corr', 'ecorr', 'tot', 'etot']:
                    tab[f'{filt}_{col}_{ap}'] *= 1. / ee_ratio

            if verbose:
                print(msg)

            # Rescale total correction
            tab[f'{filt}_tot_{ap}'] *= rescale
            tab[f'{filt}_etot_{ap}'] *= rescale

    return tab
def apply_catalog_corrections(root, total_flux='flux_auto', auto_corr=True, get_external_photometry=False, aperture_indices='all', suffix='_apcorr', verbose=True, apply_background=True, ee_correction=False):
    """
    Aperture and background corrections to photometric catalog

    First correct fluxes in individual filters scaled by the ratio of
    ``total_flux`` / aper_flux in the detection band, where ``total_flux`` is
    ``flux_auto`` by default.

        >>> apcorr = {total_flux} / flux_aper_{ap}
        >>> {filt}_corr_{ap} = {filt}_flux_aper_{ap} * apcorr

    If ``ee_correction = True``, then apply an encircled energy correction for
    the relative flux between the target filter and detection image for a
    *point source* flux within a photometric aperture.  The correction
    function is `~grizli.prep.get_filter_ee_ratio`.

    If ``auto_corr``, compute total fluxes with a correction for flux outside
    the Kron aperture derived for point sources using
    `~grizli.prep.get_kron_tot_corr`.

        >>> {filt}_tot_{ap} = {filt}_corr_{ap} * {filt}_tot_corr

    Note that any background has already been subtracted from
    ``{filt}_flux_aper_{ap}`` in the SEP catalog; the ``apply_background``
    argument is retained for backwards compatibility but has no effect.

    Parameters
    ----------
    root : str
        Catalog prefix; reads ``{root}_phot.fits``.

    Returns
    -------
    cat : `~astropy.table.Table`
        The corrected catalog.  Also written to ``{root}_phot{suffix}.fits``
        when ``suffix`` is non-empty.
    """
    import os
    from grizli import utils, prep

    cat = utils.read_catalog('{0}_phot.fits'.format(root))

    # Filters present in the catalog, identified by their zeropoint keywords
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    if get_external_photometry:
        print('Get external photometry from Vizier')
        try:
            external_limits = 3
            external_timeout = 300
            external_sys_err = 0.03
            ext = get_external_catalog(cat, external_limits=external_limits,
                                       timeout=external_timeout,
                                       sys_err=external_sys_err)
            for c in ext.colnames:
                if c not in cat.colnames:
                    cat[c] = ext[c]

            for k in ext.meta:
                cat.meta[k] = ext.meta[k]
        except Exception:
            # Best effort: external photometry is optional
            print(' - External catalog FAILED')

    # Fix: Take flux_auto when flag==0, flux otherwise
    if (total_flux == 'flux_auto_fix') and (total_flux not in cat.colnames):
        flux = cat['flux_auto']*1.
        flagged = (cat['flag'] > 0)
        flux[flagged] = cat['flux'][flagged]
        cat['flux_auto_fix'] = flux*1.

    # Additional auto correction
    cat.meta['TOTALCOL'] = total_flux, 'Column for total flux'

    # Count the apertures defined in the header (APER_0, APER_1, ...)
    for NAPER in range(100):
        if 'APER_{0}'.format(NAPER) not in cat.meta:
            break

    if aperture_indices == 'all':
        aperture_indices = range(NAPER)

    # EE correction
    cat.meta['EE_CORR'] = ee_correction
    if ee_correction:
        for f in filters:
            ref_filter = np.atleast_1d(cat.meta['TOTCFILT'])[0]
            _ = prep.get_filter_ee_ratio(cat, f.lower(),
                                         ref_filter=ref_filter.lower())

    for i in aperture_indices:
        if verbose:
            print('Compute aperture corrections: i={0}, D={1:.2f}" aperture'.format(i, cat.meta['ASEC_{0}'.format(i)]))

        if 'flux_aper_{0}'.format(i) in cat.colnames:
            cat['apcorr_{0}'.format(i)] = cat[total_flux]/cat['flux_aper_{0}'.format(i)]

            for f in filters:
                # Background is already subtracted from the flux_aper
                # columns in the SEP catalog, so no additional subtraction
                cat['{0}_corr_{1}'.format(f, i)] = cat['{0}_flux_aper_{1}'.format(f, i)]*cat['apcorr_{0}'.format(i)]
                cat['{0}_ecorr_{1}'.format(f, i)] = cat['{0}_fluxerr_aper_{1}'.format(f, i)]*cat['apcorr_{0}'.format(i)]

                if ee_correction:
                    # NOTE(review): this *multiplies* by the {f}_ee_{i}
                    # column while the docstring describes a division --
                    # presumably get_filter_ee_ratio stores the inverse
                    # ratio in that column; confirm against grizli.prep
                    cat[f'{f}_corr_{i}'] *= cat[f'{f}_ee_{i}']

                # Mask out objects with a significant masked fraction
                # of the aperture area
                aper_area = np.pi*(cat.meta['APER_{0}'.format(i)]/2)**2
                mask_thresh = aper_area

                bad = cat['{0}_mask_aper_{1}'.format(f, i)] > 0.2*mask_thresh
                cat['{0}_corr_{1}'.format(f, i)][bad] = -99
                cat['{0}_ecorr_{1}'.format(f, i)][bad] = -99

                tot_col = '{0}_tot_corr'.format(f.lower())
                if auto_corr and (tot_col in cat.colnames):
                    cat['{0}_tot_{1}'.format(f, i)] = cat['{0}_corr_{1}'.format(f, i)]*cat[tot_col]
                    cat['{0}_etot_{1}'.format(f, i)] = cat['{0}_ecorr_{1}'.format(f, i)]*cat[tot_col]

                    cat['{0}_tot_{1}'.format(f, i)][bad] = -99
                    cat['{0}_etot_{1}'.format(f, i)][bad] = -99

    if 'id' not in cat.colnames:
        cat.rename_column('number', 'id')

    if 'z_spec' not in cat.colnames:
        cat['z_spec'] = cat['id']*0.-1

    # Spurious sources, sklearn SVM model trained for a single field
    morph_model = os.path.join(os.path.dirname(utils.__file__),
                               'data/sep_catalog_junk.pkl')

    # Only apply if detection pixel scale is 0.06"
    if 'ASEC_0' in cat.meta:
        try:
            detection_pscale = cat.meta['ASEC_0'][0]/cat.meta['APER_0'][0]
        except Exception:
            # Metadata values may be scalars rather than (value, comment)
            detection_pscale = cat.meta['ASEC_0']/cat.meta['APER_0']

        run_morph_model = np.isclose(detection_pscale, 0.06, atol=0.005)
    else:
        run_morph_model = True

    if os.path.exists(morph_model) and run_morph_model:
        if verbose:
            print('Apply morphological validity class')

        try:
            from sklearn.externals import joblib
            clf = joblib.load(morph_model)
            X = np.hstack([[cat['peak']/cat['flux'],
                            cat['cpeak']/cat['peak']]]).T

            # Predict labels, which were generated for
            # bad_bright, bad_faint, stars, big_galaxies, small_galaxies
            pred = clf.predict_proba(X)

            # Should be >~ 0.9 for valid sources, stars & galaxies
            # in "ir" image
            cat['class_valid'] = pred[:, -3:].sum(axis=1)
            cat['class_valid'].format = '.2f'
        except Exception:
            # Don't fail the whole correction if the classifier can't run
            cat['class_valid'] = 99.
            cat['class_valid'].format = '.2f'
            msg = "Couldn't run morph classification from {0}"
            print(msg.format(morph_model))

    cat['dummy_err'] = 10**(-0.4*(8-23.9))
    cat['dummy_flux'] = cat[total_flux]  # detection band

    if suffix:
        if verbose:
            print('Write {0}_phot{1}.fits'.format(root, suffix))

        cat.write('{0}_phot{1}.fits'.format(root, suffix), overwrite=True)

    return cat
def eazy_photoz(root, force=False, object_only=True, apply_background=True,
                aper_ix=1, apply_prior=False, beta_prior=True,
                get_external_photometry=False, external_limits=3,
                external_sys_err=0.3, external_timeout=300, sys_err=0.05,
                z_step=0.01, z_min=0.01, z_max=12, total_flux='flux_auto',
                auto_corr=True, compute_residuals=False, dummy_prior=False,
                extra_rf_filters=[], quiet=True, aperture_indices='all',
                zpfile='zphot.zeropoint', extra_params={}, extra_translate={},
                force_apcorr=False, ebv=None, absmag_filters=[], **kwargs):
    """
    Run eazy-py photometric redshifts on the aperture-corrected photometric
    catalog ``{root}_phot_apcorr.fits``.

    Writes ``zphot.translate`` mapping catalog columns to eazy filter
    numbers, configures and runs `eazy.photoz.PhotoZ`, and saves the fit
    object to ``{root}.eazypy.self.npy`` for fast reload.

    Returns
    -------
    ``(self, cat, zout)`` — the `~eazy.photoz.PhotoZ` object, the photometric
    catalog, and the redshift output table; or just ``self`` when
    ``object_only``; or ``False`` if the eazy filter/template inputs could
    not be found or linked.

    NOTE(review): ``apply_background``, ``external_limits``,
    ``external_sys_err``, ``external_timeout``, ``z_min``, ``total_flux``,
    ``auto_corr`` and ``**kwargs`` are accepted but not referenced in this
    function body — presumably kept for call-signature compatibility.
    """
    import os
    import eazy
    import numpy as np

    from grizli import utils
    import mastquery.utils

    # Short-circuit: reload previously saved products unless forced
    if (os.path.exists('{0}.eazypy.self.npy'.format(root))) & (not force):
        self = np.load('{0}.eazypy.self.npy'.format(root),
                       allow_pickle=True)[0]
        zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
        cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
        return self, cat, zout

    # Filter name -> eazy FILTER.RES filter number
    trans = {'f098m': 201, 'f105w': 202, 'f110w': 241, 'f125w': 203,
             'f140w': 204, 'f160w': 205, 'f435w': 233, 'f475w': 234,
             'f555w': 235, 'f606w': 236, 'f625w': 237, 'f775w': 238,
             'f814w': 239, 'f850lp': 240, 'f702w': 15, 'f600lpu': 243,
             'f225wu': 207, 'f275wu': 208, 'f336wu': 209, 'f350lpu': 339,
             'f438wu': 211, 'f475wu': 212, 'f475xu': 242, 'f555wu': 213,
             'f606wu': 214, 'f625wu': 215, 'f775wu': 216, 'f814wu': 217,
             'f390wu': 210, 'ch1': 18, 'ch2': 19, 'f336w': 209,
             'f350lp': 339}
    # trans.pop('f814w')

    # Generate the aperture-corrected catalog if it doesn't exist yet
    if (not os.path.exists('{0}_phot_apcorr.fits'.format(root))) | force_apcorr:
        print('Apply catalog corrections')
        apply_catalog_corrections(root, suffix='_apcorr',
                                  aperture_indices=aperture_indices)

    cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))

    # Filters with zeropoint keywords in the catalog metadata
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    # Fall back to looking for `{filt}_tot_{aper_ix}` columns
    if len(filters) == 0:
        for f in trans:
            if f'{f}_tot_{aper_ix}' in cat.colnames:
                filters.append(f.lower())

    # Translate file: flux / error column -> F{num} / E{num}
    fp = open('zphot.translate', 'w')
    for f in filters:
        if f in trans:
            fp.write('{0}_tot_{1} F{2}\n'.format(f, aper_ix, trans[f]))
            fp.write('{0}_etot_{1} E{2}\n'.format(f, aper_ix, trans[f]))

    # Ground-based (HSC) and IRAC columns that may also be present
    extra_filters = {'hscg':314, 'hscr':315, 'hsci':316,
                     'hscz':317, 'hscy':318,
                     'irac_ch1':18, 'irac_ch2':19,
                     'irac_ch3':20, 'irac_ch4':21}

    for c in extra_filters:
        if f'{c}_flux' in cat.colnames:
            fp.write(f'{c}_flux F{extra_filters[c]}\n')
            fp.write(f'{c}_err E{extra_filters[c]}\n')

    # User-supplied extra column translations
    for k in extra_translate:
        fp.write('{0} {1}\n'.format(k, extra_translate[k]))

    # For zeropoint: dummy prior columns ('x' suffix excludes them from fits)
    if dummy_prior:
        fp.write('dummy_flux F205x\n')
        fp.write('dummy_err E205x\n')

    fp.close()

    params = {}
    params['CATALOG_FILE'] = '{0}_phot_apcorr.fits'.format(root)
    params['Z_STEP'] = z_step
    params['MAIN_OUTPUT_FILE'] = '{0}.eazypy'.format(root)
    params['Z_MAX'] = z_max

    # Galactic extinction: query MW dust maps if not supplied by the caller
    if ebv is None:
        try:
            ebv = mastquery.utils.get_mw_dust(np.median(cat['ra']),
                                              np.median(cat['dec']))
        except:
            try:
                ebv = mastquery.utils.get_irsa_dust(np.median(cat['ra']),
                                                    np.median(cat['dec']))
            except:
                print("Couldn't get EBV, fall back to ebv=0.0")
                ebv = 0.

    params['MW_EBV'] = ebv
    params['PRIOR_ABZP'] = 23.9
    params['SYS_ERR'] = sys_err
    params['CAT_HAS_EXTCORR'] = False

    # Pick prior filter, starting from reddest
    for f in ['f435w', 'f606w', 'f814w', 'f105w', 'f110w', 'f125w', 'f140w',
              'f160w'][::-1]:
        if f in filters:
            if dummy_prior:
                params['PRIOR_FILTER'] = 'dummy_flux'
            else:
                params['PRIOR_FILTER'] = trans[f]
            # NOTE(review): `mag` is computed but unused below
            mag = 23.9-2.5*np.log10(cat['{0}_tot_{1}'.format(f, aper_ix)])
            break

    # Prefer the un-reddened v11 template set when its cache file exists
    if os.path.exists('templates/fsps_full/tweak_fsps_QSF_11_v3_noRed.param.fits'):
        params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_11_v3_noRed.param'
    else:
        params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'

    # User-supplied eazy parameter overrides take precedence
    for k in extra_params:
        params[k] = extra_params[k]

    # Only use the zeropoint file if it actually exists
    if zpfile is not None:
        if not os.path.exists(zpfile):
            zpfile = None

    load_products = False

    # Make sure the eazy filter file and template directory are linked in
    if ((not os.path.exists('FILTER.RES.latest')) |
            (not os.path.exists('templates'))):
        try:
            # should work with eazy-py >= 0.2.0
            eazy.symlink_eazy_inputs()  # (path=None)
        except:
            print("""
The filter file ``FILTER.RES.latest`` and ``templates`` directory were not
found in the working directory and the automatic command to retrieve them
failed:
>>> import eazy; eazy.symlink_eazy_inputs(path=None)
Run it with ``path`` pointing to the location of the ``eazy-photoz`` repository.""")
            return False

    self = eazy.photoz.PhotoZ(param_file=None,
                              translate_file='zphot.translate',
                              zeropoint_file=zpfile, params=params,
                              load_prior=True, load_products=load_products)

    if quiet:
        self.param.params['VERBOSITY'] = 1.

    if object_only:
        return self

    # Fit every object with a finite position
    idx = np.arange(self.NOBJ)
    # sample = (mag < 27) #& (self.cat['star_flag'] != 1)
    # sample |= (self.cat['z_spec'] > 0)
    sample = np.isfinite(self.cat['ra'])  # mag)

    # Optionally iterate a second time after computing error residuals
    for iter in range(1+(get_external_photometry & compute_residuals)*1):
        self.fit_catalog(idx[sample], n_proc=10)
        if compute_residuals:
            self.error_residuals()

    self.fit_phoenix_stars()

    # Derived products: z_phot, physical parameters, rest-frame colors, etc.
    self.standard_output(prior=apply_prior, beta_prior=beta_prior,
                         extra_rf_filters=extra_rf_filters,
                         absmag_filters=absmag_filters)

    zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))

    # Cache the fit object for fast reload on the next call
    np.save('{0}.eazypy.self.npy'.format(root), [self])

    return self, cat, zout
def show_from_ds9(ds9, self, zout, use_sky=True, **kwargs):
    """
    Show the SED fit of the catalog object nearest to the current ds9 pan
    position.

    Parameters
    ----------
    ds9 : object with a ``get`` method (e.g., `grizli.ds9.DS9`)
        Used to read the current pan position (``'pan fk5'`` or
        ``'pan image'``).

    self : object
        Photo-z fit object providing a ``cat`` table (``ra``/``dec``,
        ``x_image``/``y_image``, ``id`` columns) and a ``show_fit`` method.

    zout : table-like
        Redshift output with ``z_phot`` and ``mass`` columns.

    use_sky : bool
        Match on sky coordinates; fall back to image coordinates when the
        sky pan position sums to zero.

    Returns
    -------
    fig, id, ix, z_phot
        Figure from ``show_fit``, catalog id, catalog index, and photo-z of
        the nearest match.
    """
    import numpy as np

    xy = None
    if use_sky:
        # Bug fix: np.cast was removed in NumPy 2.0; use asarray with dtype
        xy = np.asarray(ds9.get('pan fk5').split(), dtype=float)
        cosd = np.cos(xy[1]/180*np.pi)
        r = np.sqrt((self.cat['ra']-xy[0])**2*cosd**2 +
                    (self.cat['dec']-xy[1])**2)*3600
        runit = 'arcsec'

    # Bug fix: the original bitwise `|` evaluated `xy.sum()` even when
    # use_sky=False, raising NameError since `xy` was never assigned.
    # Short-circuit `or` only inspects `xy` after the sky branch set it.
    if (not use_sky) or (xy.sum() == 0):
        xy = np.asarray(ds9.get('pan image').split(), dtype=float)
        r = np.sqrt((self.cat['x_image']-xy[0])**2 +
                    (self.cat['y_image']-xy[1])**2)
        runit = 'pix'

    ix = np.argmin(r)
    print('ID: {0}, r={1:.1f} {2}'.format(self.cat['id'][ix], r[ix], runit))
    print(' z={0:.2f} logM={1:.2f}'.format(zout['z_phot'][ix],
                                           np.log10(zout['mass'][ix])))

    fig = self.show_fit(ix, id_is_idx=True, **kwargs)
    return fig, self.cat['id'][ix], ix, zout['z_phot'][ix]
class EazyPhot(object):
    """
    Lightweight wrapper around an `eazy.photoz.PhotoZ` catalog that can
    supply photometry (and optionally p(z)) for individual objects by sky
    position, e.g., for joint photometry + grism fits.
    """

    def __init__(self, photoz, grizli_templates=None, zgrid=None, apcorr=None,
                 include_photometry=True, include_pz=False,
                 source_text='unknown'):
        """
        Parameters
        ----------
        photoz : `~eazypy.photoz.PhotoZ`
            Photoz object.

        grizli_templates : dict, optional
            Grizli template objects (with ``wave``/``flux`` attributes);
            when provided, a template grid is precomputed via
            `initialize_templates`.

        zgrid : array-like, optional
            Redshift grid for the template grid (default: the photoz grid).

        apcorr : `~numpy.ndarray`
            Aperture correction applied to the photometry to match the
            grism spectra. For the internal grizli catalogs, this should
            generally be something like

            >>> apcorr = 'flux_iso' / 'flux_auto'

        include_photometry, include_pz : bool
            Store fluxes / p(z) arrays for later retrieval.

        source_text : str
            Label propagated into the photometry dictionaries.
        """
        try:
            from .. import utils
        except:
            from grizli import utils

        # Missing observations are flagged with -99 below
        not_obs_mask = (photoz.fnu < -90) | (photoz.efnu < 0)

        self.zgrid = photoz.zgrid

        if apcorr is None:
            self.apcorr = np.ones(photoz.NOBJ)
        else:
            self.apcorr = apcorr

        self.source_text = source_text

        # fnu -> f-lambda with zeropoint and MW extinction corrections applied
        self.ext_corr = photoz.ext_corr
        self.flam = photoz.fnu*photoz.to_flam*photoz.zp*photoz.ext_corr
        self.flam[not_obs_mask] = -99

        self.eflam = photoz.efnu*photoz.to_flam*photoz.zp*photoz.ext_corr
        self.eflam[not_obs_mask] = -99

        self.include_photometry = include_photometry

        #self.rdcat = utils.GTable(photoz.cat['ra','dec'])
        self.ra_cat = photoz.cat['ra'].data
        self.dec_cat = photoz.cat['dec'].data

        self.filters = photoz.filters
        self.f_numbers = photoz.f_numbers
        self.param = photoz.param

        # Spectroscopic redshifts; -1 where unavailable
        if 'z_spec' in photoz.cat.colnames:
            self.z_spec = photoz.cat['z_spec']*1
        else:
            self.z_spec = np.zeros(photoz.N)-1

        self.include_pz = include_pz
        if include_pz:
            try:
                self.pz = photoz.pz*1
                self.zgrid = photoz.zgrid
            except:
                # photoz object without a stored p(z) array
                self.pz = None
        else:
            self.pz = None

        if grizli_templates is None:
            self.tempfilt = None
            self.grizli_templates = None
        else:
            self.tempfilt = self.initialize_templates(grizli_templates,
                                                      zgrid=zgrid)
            self.grizli_templates = grizli_templates

    def initialize_templates(self, grizli_templates, zgrid=None):
        """
        Build an `eazy.photoz.TemplateGrid` from grizli template objects,
        integrated through the catalog filter bandpasses on ``zgrid``.
        """
        from eazy import templates as templates_module
        from eazy.photoz import TemplateGrid

        if zgrid is None:
            zgrid = self.zgrid

        template_list = [templates_module.Template(arrays=(grizli_templates[k].wave, grizli_templates[k].flux), name=k) for k in grizli_templates]

        tempfilt = TemplateGrid(zgrid, template_list,
                                RES=self.param['FILTERS_RES'],
                                f_numbers=self.f_numbers, add_igm=True,
                                galactic_ebv=self.param['MW_EBV'],
                                Eb=self.param['SCALE_2175_BUMP'])

        return tempfilt

    def get_phot_dict(self, ra, dec):
        """
        Return catalog info for nearest object to supplied coordinates

        Returns:

        phot - photometry dictionary
        ix - index in catalog of nearest match
        dr - distance to nearest match
        """
        from collections import OrderedDict

        try:
            from .. import utils
        except:
            from grizli import utils

        # Single-row table for the query position, matched against the catalog
        icat = utils.GTable()
        icat['ra'] = [ra]
        icat['dec'] = [dec]

        rdcat = utils.GTable()
        rdcat['ra'] = self.ra_cat
        rdcat['dec'] = self.dec_cat

        ix, dr = rdcat.match_to_catalog_sky(icat)

        phot = OrderedDict()

        apcorr_i = self.apcorr[ix[0]]

        try:
            phot['source'] = self.source_text
        except:
            phot['source'] = 'unknown'

        # 1.e-19 scales to the flux units expected by the spectral fits
        # (presumably 1e-19 erg/s/cm2/A — confirm with caller convention)
        phot['flam'] = self.flam[ix[0], :]*1.e-19*apcorr_i
        phot['eflam'] = self.eflam[ix[0], :]*1.e-19*apcorr_i
        phot['filters'] = self.filters
        phot['tempfilt'] = self.tempfilt

        try:
            phot['ext_corr'] = self.ext_corr
        except:
            phot['ext_corr'] = 1

        if self.include_pz & (self.pz is not None):
            pz = (self.zgrid, self.pz[ix[0], :].flatten())
        else:
            pz = None

        phot['pz'] = pz

        try:
            phot['z_spec'] = self.z_spec[ix[0]]
        except:
            phot['z_spec'] = -1

        # Photometry can be suppressed while still returning metadata / p(z)
        if not self.include_photometry:
            phot['flam'] = None

        return phot, ix[0], dr[0]
def get_external_catalog(phot, filter_file='/usr/local/share/eazy-photoz/filters/FILTER.RES.latest', ZP=23.9, sys_err=0.3, verbose=True, external_limits=3, timeout=300):
    """
    Fetch photometry from vizier

    Queries the Vizier catalogs listed in ``utils.VIZIER_VEGA`` around the
    center of the `phot` catalog, matches sources within 2 arcsec, and
    returns a table of ``F{num}`` / ``E{num}`` flux and error columns keyed
    by the eazy filter numbers found in `filter_file`.

    Unmatched rows get -99, or an ``external_limits``-scaled 90th-percentile
    uncertainty when enough matched errors are available to estimate one.
    """
    import numpy as np
    import astropy.units as u

    try:
        from .. import utils
    except:
        from grizli import utils

    from eazy.filters import FilterFile
    res = FilterFile(filter_file)

    vizier_catalog = list(utils.VIZIER_VEGA.keys())

    # Query radius that encloses the catalog footprint (arcmin)
    ra = np.median(phot['ra'])
    dec = np.median(phot['dec'])
    dr = np.sqrt((phot['ra']-ra)**2*np.cos(phot['dec']/180*np.pi)**2 + (phot['dec']-dec)**2)*60
    radius = 1.5*dr.max()  # arcmin

    tabs = utils.get_Vizier_photometry(ra, dec, templates=None,
                                       radius=radius*60,
                                       vizier_catalog=vizier_catalog,
                                       filter_file=filter_file, MW_EBV=0,
                                       convert_vega=False, raw_query=True,
                                       verbose=True, timeout=timeout)

    extern_phot = utils.GTable()
    N = len(phot)

    for t_i in tabs:
        # Match on whatever coordinate columns this catalog provides
        if 'RAJ2000' in t_i.colnames:
            other_radec = ('RAJ2000', 'DEJ2000')
        elif 'RA_ICRS' in t_i.colnames:
            other_radec = ('RA_ICRS', 'DE_ICRS')
        else:
            other_radec = ('ra', 'dec')

        idx, dr = phot.match_to_catalog_sky(t_i, other_radec=other_radec)
        if (dr < 2*u.arcsec).sum() == 0:
            continue

        # Keep only matches within 2 arcsec
        tab = t_i[dr < 2*u.arcsec]
        idx = idx[dr < 2*u.arcsec]

        err_scale = 1

        # NOTE(review): `flux_scale` and `to_flam` are computed but never
        # applied below — apparently vestigial.
        if (tab.meta['name'] == utils.UKIDSS_LAS_VIZIER):
            flux_scale = 1.33
        else:
            flux_scale = 1.

        convert_vega = utils.VIZIER_VEGA[tab.meta['name']]
        bands = utils.VIZIER_BANDS[tab.meta['name']]

        to_flam = 10**(-0.4*(48.6))*3.e18  # / pivot(Ang)**2

        for ib, b in enumerate(bands):
            # FilterFile.search returns 0-based indices; eazy is 1-based
            f_number = res.search(b, verbose=False)[0]+1
            filt = res.filters[f_number-1]

            # Vega -> AB conversion when the catalog magnitudes are Vega
            if convert_vega:
                to_ab = filt.ABVega()
            else:
                to_ab = 0.

            fcol, ecol = bands[b]

            # Magnitudes -> fnu at the requested zeropoint, with a systematic
            # error floor added in quadrature
            fnu = 10**(-0.4*(tab[fcol]+to_ab-ZP))
            efnu = tab[ecol]*np.log(10)/2.5*fnu*err_scale
            efnu = np.sqrt(efnu**2+(sys_err*fnu)**2)

            fnu.fill_value = -99
            efnu.fill_value = -99

            comment = 'Filter {0} from {1} (N={2})'.format(bands[b][0], t_i.meta['name'], len(idx))
            if verbose:
                print(comment)

            # Fill values for unmatched objects: limits derived from the
            # error distribution when possible, -99 otherwise
            if ((~efnu.mask).sum() > 4) & (external_limits > 0):
                fill = np.percentile(efnu.data[~efnu.mask], [90])
                efill = external_limits * fill
            else:
                fill = -99
                efill = -99

            extern_phot.meta['F{0}'.format(f_number)] = b, comment
            extern_phot['F{0}'.format(f_number)] = fill*np.ones(N)
            extern_phot['F{0}'.format(f_number)][idx] = fnu.filled()

            extern_phot['E{0}'.format(f_number)] = efill*np.ones(N)
            extern_phot['E{0}'.format(f_number)][idx] = efnu.filled()

    return extern_phot
######## Selecting objects
# def select_objects():
# import os
#
# from grizli.pipeline import photoz
# import numpy as np
#
# total_flux = 'flux_auto_fix'
# total_flux = 'flux_auto' # new segmentation masked SEP catalogs
# object_only = False
#
# self, cat, zout = photoz.eazy_photoz(root, object_only=object_only, apply_prior=False, beta_prior=True, aper_ix=2, force=True, get_external_photometry=False, compute_residuals=False, total_flux=total_flux)
#
# if False:
# args = np.load('fit_args.npy', allow_pickle=True)[0]
# phot_obj = photoz.EazyPhot(self, grizli_templates=args['t0'], zgrid=self.zgrid, apcorr=self.idx*0.+1)
#
# flux = self.cat[total_flux]*1.
# hmag = 23.9-2.5*np.log10(cat['f160w_tot_1'])
#
# # Reddest HST band
# lc_clip = self.lc*1
# lc_clip[self.lc > 1.55e4] = 0 # in case ground-based / WISE red band
# ixb = np.argmax(lc_clip)
# sn_red = self.cat[self.flux_columns[ixb]]/self.cat[self.err_columns[ixb]]
#
# grad = np.gradient(self.zgrid)
# cumpz = np.cumsum(self.pz*grad, axis=1)
# cumpz = (cumpz.T/cumpz[:, -1]).T
#
# chi2 = (self.chi_best / self.nusefilt)
#
# iz6 = np.where(self.zgrid > 6.0)[0][0]
# iz7 = np.where(self.zgrid > 7)[0][0]
# iz8 = np.where(self.zgrid > 8)[0][0]
# iz9 = np.where(self.zgrid > 9)[0][0]
#
# highz_sel = (hmag < 27.5) # & (self.cat['class_valid'] > 0.8)
# #highz_sel |= (cumpz[:,iz6] < 0.3) & (self.cat['flux_radius'] > 2.5)
# #highz_sel &= (self.cat['flux_radius'] > 2.5)
# highz_sel &= chi2 < 3
# highz_sel &= (sn_red > 5)
# highz_sel &= self.nusefilt >= 3
#
# flux_ratio = cat['f160w_flux_aper_3']/cat['f160w_flux_aper_0']
# flux_ratio /= cat.meta['APER_3']**2/cat.meta['APER_0']**2
#
# if False:
# sel = highz_sel
# so = np.argsort(cumpz[sel, iz7])
# ids = self.cat['id'][sel][so]
# i = -1
# so = np.argsort(flux_ratio[sel])[::-1]
# ids = self.cat['id'][sel][so]
# i = -1
#
# highz_sel &= (cumpz[:, iz6] > 0) & (flux_ratio < 0.45) & ((cumpz[:, iz6] < 0.3) | (cumpz[:, iz7] < 0.3) | (((cumpz[:, iz8] < 0.4) | (cumpz[:, iz9] < 0.5)) & (flux_ratio < 0.5)))
#
# # Big objects likely diffraction spikes
# # big = (self.cat['flux_radius'] > 10)
# # highz_sel &= ~big
# #flux_ratio = self.cat['flux_aper_0']/self.cat['flux_aper_2']
#
# sel = highz_sel
# so = np.argsort(hmag[sel])
# ids = self.cat['id'][sel][so]
# i = -1
#
# # Red
# uv = -2.5*np.log10(zout['restU']/zout['restV'])
# red_sel = ((zout['z160'] > 1.) & (uv > 1.5)) | ((zout['z160'] > 1.5) & (uv > 1.1))
# red_sel &= (self.zbest < 4) & (hmag < 22) # & (~hmag.mask)
# red_sel &= (zout['mass'] > 10**10.5) # & (self.cat['class_valid'] > 0.8)
# red_sel &= (self.cat['flux_radius'] > 2.5)
# red_sel &= (zout['restV']/zout['restV_err'] > 3)
# red_sel &= (chi2 < 3)
# #red_sel &= (sn_red > 20)
#
# sel = red_sel
#
# so = np.argsort(hmag[sel])
# ids = self.cat['id'][sel][so]
# i = -1
#
# ds9 = None
#
# for j in self.idx[sel][so]:
# id_j, ra, dec = self.cat['id', 'ra', 'dec'][j]
#
# # Photo-z
# fig, data = self.show_fit(id_j, ds9=ds9, show_fnu=True) # highz_sel[j])
# lab = '{0} {1}\n'.format(root, id_j)
# lab += 'H={0:.1f} z={1:.1f}\n'.format(hmag[j], self.zbest[j])
# lab += 'U-V={0:.1f}, logM={1:4.1f}'.format(uv[j], np.log10(zout['mass'][j]))
#
# ax = fig.axes[0]
# ax.text(0.95, 0.95, lab, ha='right', va='top', transform=ax.transAxes, fontsize=9, bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))
# yl = ax.get_ylim()
# ax.set_ylim(yl[0], yl[1]*1.1)
#
# fig.savefig('{0}_{1:05d}.eazy.png'.format(root, id_j), dpi=70)
# plt.close()
#
# # Cutout
# #from grizli_aws.aws_drizzle import drizzle_images
#
# #rgb_params = {'output_format': 'png', 'output_dpi': 75, 'add_labels': False, 'show_ir': False, 'suffix':'.rgb'}
# rgb_params = None
#
# #aws_bucket = 's3://grizli/SelectedObjects/'
# aws_bucket = None
#
# label = '{0}_{1:05d}'.format(root, id_j)
# if not os.path.exists('{0}.rgb.png'.format(label)):
# drizzle_images(label=label, ra=ra, dec=dec, pixscale=0.06, size=8, pixfrac=0.8, theta=0, half_optical_pixscale=False, filters=['f160w', 'f814w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f850lp', 'f775w', 'f606w', 'f475w'], remove=False, rgb_params=rgb_params, master='grizli-jan2019', aws_bucket=aws_bucket)
#
# show_all_thumbnails(label=label, filters=['f775w', 'f814w', 'f098m', 'f105w', 'f110w', 'f125w', 'f140w', 'f160w'], scale_ab=np.clip(hmag[j], 19, 22), close=True)
############
def show_all_thumbnails(label='j022708p4901_00273', filters=['f775w', 'f814w', 'f098m', 'f105w', 'f110w', 'f125w', 'f140w', 'f160w'], scale_ab=21, close=True):
    """
    Show individual filter and RGB thumbnails.

    Reads ``{label}-{filter}*_dr*sci.fits`` drizzled images from the working
    directory, regenerates the ``{label}.rgb.png`` composite with
    ``auto_script.field_rgb``, and writes the combined panel of per-filter
    grayscale cutouts plus the RGB image to ``{label}.thumb.png``.
    """
    import glob
    import matplotlib.pyplot as plt
    from astropy.visualization import make_lupton_rgb
    import astropy.io.fits as pyfits
    from grizli import utils
    from grizli.pipeline import auto_script

    # Collect the first drizzled science image available for each filter
    ims = {}
    for filt in filters:
        hits = glob.glob('{0}-{1}*_dr*sci.fits'.format(label, filt))
        if not hits:
            continue
        ims[filt] = pyfits.open(hits[0])

    # Regenerate the RGB composite from the filters that were found
    slx, sly, filts, fig = auto_script.field_rgb(root=label, xsize=4,
                                                 output_dpi=None,
                                                 HOME_PATH=None,
                                                 show_ir=False, pl=1, pf=1,
                                                 scl=1, rgb_scl=[1, 1, 1],
                                                 ds9=None, force_ir=False,
                                                 filters=ims.keys(),
                                                 add_labels=False,
                                                 output_format='png',
                                                 rgb_min=-0.01, xyslice=None,
                                                 pure_sort=False,
                                                 verbose=True, force_rgb=None,
                                                 suffix='.rgb',
                                                 scale_ab=scale_ab)
    if close:
        plt.close()

    #rgb = np.array(Image.open('{0}.rgb.png'.format(label)))
    rgb = plt.imread('{0}.rgb.png'.format(label))

    # One panel per requested filter plus the RGB composite on the right
    NX = len(filters) + 1
    fig = plt.figure(figsize=[1.5*NX, 1.5])

    ax = fig.add_subplot(1, NX, NX)
    ax.imshow(rgb, origin='upper', interpolation='nearest')
    ax.text(0.5, 0.95, label, ha='center', va='top',
            transform=ax.transAxes, fontsize=7,
            bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))

    for i, filt in enumerate(filters):
        if filt not in ims:
            continue

        # Rescale counts with the header zeropoint before the Lupton stretch
        zp_i = utils.calc_header_zeropoint(ims[filt], ext=0)
        scl = 10**(-0.4*(zp_i-5-scale_ab))
        img = ims[filt][0].data*scl
        image = make_lupton_rgb(img, img, img, stretch=0.1, minimum=-0.01)

        ax = fig.add_subplot(1, NX, i+1)
        ax.imshow(255-image, origin='lower', interpolation='nearest')
        ax.text(0.5, 0.95, filt, ha='center', va='top',
                transform=ax.transAxes, fontsize=7,
                bbox=dict(facecolor='w', edgecolor='None', alpha=0.5))

    # Strip ticks and labels from every panel
    for ax in fig.axes:
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_xticks([])
        ax.set_yticks([])

    fig.tight_layout(pad=0.1)
    fig.savefig('{0}.thumb.png'.format(label))

    if close:
        plt.close()
| |
#!/usr/bin/python
from string import Template
import os
import io
import glob
import shutil
from mimetypes import guess_type
from mimetypes import add_type
# Supported project-template targets; "all" is a meta-value that run()
# expands into every concrete platform.
platforms = ("win32", "android", "macosx", "ios", "cmake", "emscripten", "all")
def relpath(a, b):
    """Return `a` expressed relative to `b`, or `a` unchanged when no
    relative path exists (e.g., different drives on Windows)."""
    try:
        rel = os.path.relpath(a, b)
    except ValueError:
        return a
    return rel
def unixpath(path):
    """Normalize Windows-style backslash separators to forward slashes."""
    return "/".join(path.split("\\"))
def run(args):
    """
    Entry point: either generate one project for ``args.type`` or, with
    ``--hello``, copy the HelloWorld example sources/data into ``args.dest``
    and generate project folders for every supported platform.
    """
    if args.hello:
        dest = args.dest

        # Folder containing this script; empty dirname means the CWD
        local = os.path.dirname(__file__)
        if not local:
            local = "."

        src = local + "/../examples/HelloWorld/src"
        data = local + "/../examples/HelloWorld/data"

        # Best-effort copies: ignore OSError if destinations already exist
        try:
            shutil.copytree(src, dest + "/src")
        except OSError:
            pass

        try:
            shutil.copytree(data, dest + "/data")
        except OSError:
            pass

        # Generate one project folder per concrete platform ("all" is a
        # meta-entry). Note: mutates `args` in place for each _run call.
        for p in platforms:
            if p == "all":
                continue
            args.type = p
            args.src = dest + "/src"
            args.dest = dest + "/proj." + p
            _run(args)
    else:
        _run(args)
def _run(args):
    """
    Generate one project folder by instantiating the ``proj.<type>``
    templates with `string.Template` substitution.

    Collects the project's .cpp/.h sources and data files, prepares the
    per-platform substitution values (including Xcode pbxproj sections for
    macosx/ios), then recursively copies the template tree into
    ``args.dest``, substituting placeholders in text files and in
    file/folder names.
    """
    name = args.name
    project = "proj." + args.type + "/"

    # Substitution values shared by all template files
    values = {"PROJECT": name}

    # Register extensions that guess_type() should treat as text so they
    # get template-substituted instead of binary-copied
    text = (".properties", ".cmd", ".mk", ".java", ".sln", ".vcxproj",
            ".filters", ".user", ".plist", ".pch", ".json", ".pbxproj")
    for t in text:
        add_type("text/plain", t)

    try:
        os.makedirs(args.dest)
    except OSError:
        # folder already exists
        pass

    # Template folder lives next to this script
    templates_path = os.path.dirname(__file__)
    if not templates_path:
        templates_path = "."
    templates_path += "/templates/"

    # Engine root and repository root, expressed relative to the destination
    ox_path = templates_path + "/../../"
    root_path = ox_path + "../"
    root_path = os.path.abspath(root_path)
    ox_path = os.path.abspath(ox_path)
    dest_path = os.path.abspath(args.dest) + "/"
    root_path = relpath(root_path, dest_path) + "/"
    ox_path = relpath(ox_path, dest_path) + "/"

    values["OXYGINE"] = unixpath(ox_path)
    values["ROOT"] = unixpath(root_path)

    def process(template, relto, path, gl, fn=None):
        # Expand `template` once per file in `gl` (paths made relative to
        # `relto`); `fn` may enrich the substitution dict per file.
        res = ""
        for src in gl:
            ext = os.path.splitext(src)[1]
            if ext in (".icf", ".orig"):
                continue
            rel_src_path = relpath(src, relto)
            data = {"FILE": unixpath(rel_src_path), "FULLFILE": src}
            if fn:
                data = fn(data)
            t = Template(template)
            res += t.safe_substitute(data)
        return res

    SRC = ""
    INCLUDE = ""

    data = "../data"
    abs_data = os.path.normpath(args.dest + "/" + data)

    # Data files to bundle, skipping binaries that may live alongside them
    data_files_ = sorted(glob.glob(abs_data + "/*"))
    data_files = []
    for d in data_files_:
        if os.path.splitext(d)[1] in (".dll", ".lib"):
            continue
        data_files.append(d)

    cpp_files = sorted(glob.glob(args.src + "/*.cpp"))
    h_files = sorted(glob.glob(args.src + "/*.h"))

    # Xcode project sections; stay empty unless type is macosx/ios
    values["PBXBuildFile"] = ""
    values["PBXFileReference"] = ""
    values["PBXGroup_src"] = ""
    values["PBXGroupSupporting"] = ""
    values["PBXResourcesBuildPhase"] = ""
    values["PBXSourcesBuildPhase"] = ""

    if args.src:
        relto = dest_path

        if args.type == "android":
            tmsrc = "${FILE} "
            relto = dest_path + "/jni/src/"
            SRC = process(tmsrc, relto, args.src, cpp_files)
            #INCLUDE = process(tminc, relto, args.src, h_files)

        if args.type == "cmake" or args.type == "emscripten":
            tmsrc = "${FILE} "
            relto = dest_path
            SRC = process(tmsrc, relto, args.src, cpp_files)
            INCLUDE = process(tmsrc, relto, args.src, h_files)

        if args.type == "win32":
            tmsrc = """<ClCompile Include="${FILE}" />"""
            tminc = """<ClInclude Include="${FILE}" />"""
            SRC = process(tmsrc, relto, args.src, cpp_files)
            INCLUDE = process(tminc, relto, args.src, h_files)

        if args.type in ("macosx", "ios"):
            import random

            # Fixed seed so generated pbxproj object IDs are reproducible
            r = random.Random()
            r.seed(1)

            class Refs:
                # Maps a file name to a stable random 24-hex-digit pbxproj ID
                def __init__(self):
                    self._refs = []

                def getObject(self, file):
                    for ref in self._refs:
                        if ref[0] == file:
                            return ref[1]
                    obj = ""
                    for i in range(24):
                        obj += r.choice("0123456789ABCDEF")
                    self._refs.append((file, obj))
                    return obj

                # NOTE(review): missing `self` parameter — `obj` binds to
                # the instance, so this reverse lookup cannot work as
                # written; it appears to be unused.
                def getFile(obj):
                    for ref in self._refs:
                        if ref[1] == obj:
                            return ref[0]
                    raise KeyError

            globalRefs = Refs()
            fileRefs = Refs()

            def fn(values):
                # Xcode-specific fields per file: object IDs, display name
                # and lastKnownFileType
                FILE = values["FILE"]
                values["GREF"] = globalRefs.getObject(FILE)
                values["FREF"] = fileRefs.getObject(FILE)
                values["NAME"] = os.path.split(FILE)[1]

                FULL_FILE = values["FULLFILE"]
                tp = "wtf"
                if os.path.isdir(FULL_FILE):
                    tp = "folder"
                else:
                    ext = os.path.splitext(FULL_FILE)[1]
                    if ext == ".cpp":
                        tp = "sourcecode.cpp.cpp"
                    if ext == ".h":
                        tp = "sourcecode.c.h"
                values["TYPE"] = tp

                return values

            # PBXBuildFile section, e.g.:
            # 04A57D60... /* entry_point.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 04A57D3A... /* entry_point.cpp */; };
            tm = "\t\t${GREF} /* ${FILE} in Sources */ = {isa = PBXBuildFile; fileRef = ${FREF} /* ${FILE} */; };\n"
            values["PBXBuildFile"] = process(tm, relto, args.src, cpp_files, fn)
            values["PBXBuildFile"] += process(tm, relto, abs_data, data_files, fn)

            # PBXFileReference section (sources, headers and data folders)
            tm = """\t\t${FREF} /* ${FILE} */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = ${TYPE}; name = ${NAME}; path = ${FILE}; sourceTree = "<group>"; };""" + "\n"
            files = []
            files.extend(cpp_files)
            files.extend(h_files)
            files.extend(data_files)
            values["PBXFileReference"] = process(tm, relto, args.src, files, fn)

            # Source-group entries
            tm = "\t\t\t\t${FREF} /* ${NAME} */, \n"
            files = []
            files.extend(cpp_files)
            files.extend(h_files)
            values["PBXGroup_src"] = process(tm, relto, args.src, files, fn)

            # Supporting (data) group entries reuse the same template
            values["PBXGroupSupporting"] = process(tm, relto, abs_data, data_files, fn)

            # Resources build phase (data folders)
            tm = "\t\t\t\t${GREF} /* ${NAME} */, \n"
            values["PBXResourcesBuildPhase"] = process(tm, relto, abs_data, data_files, fn)

            # Sources build phase (compiled files)
            values["PBXSourcesBuildPhase"] = process(tm, relto, args.src, cpp_files, fn)

    values["SRC"] = SRC
    values["INCLUDE"] = INCLUDE
    values["DATA"] = "../data"

    def process_folder(path, dest):
        # Recursively instantiate one template folder into `dest`
        try:
            os.mkdir(dest)
        except OSError:
            pass

        for src in os.listdir(path):
            src_path = path + src

            # File and folder names themselves may contain ${...} holes
            t = Template(src)
            dest_local = t.substitute(**values)

            if os.path.isdir(src_path):
                process_folder(src_path + "/", dest + dest_local + "/")
                continue

            dest_path = dest + dest_local

            from mimetypes import guess_type
            print("src " + src_path)
            tp = guess_type(src_path)
            if not tp[0]:
                # Unknown type: skip the file entirely
                continue

            print(tp[0])
            q = tp[0].split("/")
            if q[0] == "text" or q[1] in ("xml", "x-msdos-program", "x-sh"):
                # Text file: run template substitution on the contents
                print("creating file: " + dest_path)
                src_data = open(src_path, "r").read()
                t = Template(src_data)
                dest_data = t.safe_substitute(**values)

                if args.type == "ios" or args.type == "macosx":
                    # Force Unix newlines for Xcode project files.
                    # Bug fix: the old `str(dest_data, encoding='utf-8')` /
                    # `unicode(...)` fallback raised NameError on Python 3
                    # (`unicode` doesn't exist there). Write the text
                    # directly; under Python 2, io.open requires unicode,
                    # so decode byte strings on TypeError.
                    dest_file = io.open(dest_path, "w", newline="\n")
                    try:
                        dest_file.write(dest_data)
                    except TypeError:
                        dest_file.write(dest_data.decode('utf-8'))
                else:
                    dest_file = open(dest_path, "w")
                    dest_file.write(dest_data)

                shutil.copystat(src_path, dest_path)
            else:
                # Binary file: copy verbatim
                print("copying file: " + dest_path)
                shutil.copyfile(src_path, dest_path)

    top_path = templates_path + project + "/"
    process_folder(top_path, args.dest + "/")
if __name__ == "__main__":
    import argparse

    # Command-line interface for the template generator; `run` dispatches
    # to single-platform or full HelloWorld generation.
    parser = argparse.ArgumentParser(description="oxygine projects template generator")
    parser.add_argument("-t", "--type", help="choose your IDE/build tools",
                        choices=platforms, default="win32")
    parser.add_argument("-s", "--src",
                        help="folder with already created source files",
                        default="")
    parser.add_argument("-d", "--dest", help="destination location",
                        default=".")
    parser.add_argument("--hello",
                        help="generates full copy of HelloWorld example. It includes all platforms, data and src folder",
                        action="store_true", default=False)
    parser.add_argument("name")

    args = parser.parse_args()
    run(args)
| |
"""Incremental Feature Dependency Discovery"""
# iFDD implementation based on ICML 2011 paper
from copy import deepcopy
import numpy as np
from rlpy.Tools import printClass, PriorityQueueWithNovelty
from rlpy.Tools import powerset, combinations, addNewElementForAllActions
from rlpy.Tools import plt
from .Representation import Representation
import warnings
from collections import defaultdict
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = "Alborz Geramifard"
class iFDD_feature(object):
    """
    A feature used for linear function approximation, either one of the
    initial (basic) features or the conjunction of existing features.

    index: location of the feature in the feature vector.
    f_set: set of basic features whose conjunction creates this feature.
    p1, p2: parent feature indices (-1 for basic features). Multiple parent
        pairs may lead to the same feature: given basic features X, Y, Z,
        both P1=XY,P2=XZ and P1=X,P2=XZ produce XYZ. The parents are kept
        for visualizations only.
    """

    # Unique 0-based index corresponding to the feature's vector position
    index = None
    # Frozenset of the basic feature indices this feature conjoins
    f_set = None
    # Parent feature indices; basic features carry -1 for both
    p1 = None
    p2 = None

    def __init__(self, potential):
        """Build from an `iFDD_potential` or from a bare integer index."""
        if isinstance(potential, iFDD_potential):
            # Promotion of a discovered potential feature
            self.index = potential.index
            self.f_set = deepcopy(potential.f_set)
            self.p1 = potential.p1
            self.p2 = potential.p2
        else:
            # Basic (initial) feature identified by its integer index
            self.index = potential
            self.f_set = frozenset([potential])
            self.p1 = -1
            self.p2 = -1

    def __deepcopy__(self, memo):
        """Clone via the integer constructor, then restore parents and set."""
        clone = iFDD_feature(self.index)
        clone.p1 = self.p1
        clone.p2 = self.p2
        clone.f_set = deepcopy(self.f_set)
        return clone

    def show(self):
        """Print this feature's attributes."""
        printClass(self)
class iFDD_potential(object):
    """
    A potential feature that can be promoted to a permanent iFDD feature.

    Mirrors `iFDD_feature`, with additional accumulated TD-error statistics
    that measure the relevance of actually creating the feature.
    """

    relevance = None  # Sum of errors corresponding to this feature
    f_set = None  # Set of features it corresponds to [Immutable]
    # If this feature has been discovered set this to its index else 0
    index = None
    p1 = None  # Parent 1 feature index
    p2 = None  # Parent 2 feature index
    count = None  # Number of times this feature was visited

    def __init__(self, f_set, parent1, parent2):
        """
        Parameters
        ----------
        f_set : frozenset
            Basic feature indices whose conjunction this potential represents.
        parent1, parent2 : int
            Indices of the two existing features whose combination proposed
            this potential.
        """
        self.f_set = deepcopy(f_set)
        self.index = -1  # -1 means it has not been discovered yet
        self.p1 = parent1
        self.p2 = parent2
        self.cumtderr = 0
        self.cumabstderr = 0
        self.count = 1

    def __deepcopy__(self, memo):
        new_p = iFDD_potential(self.f_set, self.p1, self.p2)
        new_p.cumtderr = self.cumtderr
        new_p.cumabstderr = self.cumabstderr
        # Bug fix: also carry over `index` and `count`, which the
        # constructor resets to -1/1. Dropping `count` skewed the iFDD+
        # relevance (abs(sum(TD-error))/sqrt(visit count)) after a deepcopy.
        new_p.index = self.index
        new_p.count = self.count
        return new_p

    def show(self):
        """Print this potential's attributes."""
        printClass(self)
class iFDD(Representation):
    ''' The incremental Feature Dependency Discovery Representation based on
    [Geramifard et al. 2011 ICML paper]. This representation starts with a set of given
    binary features and adds new features as the conjunction of existing features. Given n
    features, iFDD can expand the set of features up to 2^n-1 features (i.e. the conjunction
    of each subset of the n features can be considered as a new feature).
    '''
    # It is a good starting point to see how relevances grow if threshold is
    # set to infinity.
    PRINT_MAX_RELEVANCE = False
    discovery_threshold = None  # psi in the paper
    # boolean specifying the use of the trick mentioned in the paper so that
    # features are getting sparser with more feature discoveries (i.e. use
    # greedy algorithm for feature activation)
    sparsify = None
    # dictionary mapping initial feature sets to iFDD_feature
    iFDD_features = None
    # dictionary mapping initial feature sets to iFDD_potential
    iFDD_potentials = None
    # dictionary mapping each feature index (ID) to its feature object
    featureIndex2feature = None
    debug = 0  # Print more stuff
    # dictionary mapping initial active feature set phi_0(s) to its
    # corresponding active features at phi(s). Based on Tuna's Trick to speed
    # up iFDD
    cache = None
    # this should only increase speed. If results are different something is
    # wrong
    useCache = 0
    # Number of features to be expanded in the batch setting
    maxBatchDiscovery = 0
    # Minimum value of feature relevance for the batch setting
    batchThreshold = 0
    # ICML 11 iFDD would add sum of abs(TD-errors) while the iFDD plus uses
    # the abs(sum(TD-Error))/sqrt(potential feature presence count)
    iFDDPlus = 0
    # This is a priority queue based on the size of the features (Largest ->
    # Smallest). For same size features, it is also sorted based on the newest
    # -> oldest. Each element is the pointer to feature object.
    sortediFDDFeatures = None
    # A Representation that provides the initial set of features for iFDD
    initial_representation = None
    # Helper parameter to get a sense of appropriate threshold on the
    # relevance for discovery
    maxRelevance = -np.inf
    # As Christoph mentioned adding new features may affect the phi for all
    # states. This idea was to make sure both conditions for generating active
    # features generate the same result.
    use_chirstoph_ordered_features = True
    def __init__(
            self, domain, discovery_threshold, initial_representation,
            sparsify=True, discretization=20, debug=0, useCache=0,
            maxBatchDiscovery=1, batchThreshold=0, iFDDPlus=1, seed=1):
        """Set up an empty iFDD representation on top of
        *initial_representation*, which supplies the basic binary features.

        :param discovery_threshold: psi; minimum relevance to promote a
            potential to a feature.
        :param iFDDPlus: probability of using the iFDD+ relevance
            (abs(sum td)/sqrt(count)) instead of the ICML-2011 one.
        """
        self.iFDD_features = {}
        self.iFDD_potentials = {}
        self.featureIndex2feature = {}
        self.cache = {}
        self.discovery_threshold = discovery_threshold
        self.sparsify = sparsify
        self.setBinsPerDimension(domain, discretization)
        # Start with exactly the basic features of the wrapped representation.
        self.features_num = initial_representation.features_num
        self.debug = debug
        self.useCache = useCache
        self.maxBatchDiscovery = maxBatchDiscovery
        self.batchThreshold = batchThreshold
        self.sortediFDDFeatures = PriorityQueueWithNovelty()
        self.initial_representation = initial_representation
        self.iFDDPlus = iFDDPlus
        self.isDynamic = True
        self.addInitialFeatures()
        super(iFDD, self).__init__(domain, discretization, seed)
    def phi_nonTerminal(self, s):
        """ Based on Tuna's Master Thesis 2012 """
        # Boolean feature vector of the expanded representation.
        F_s = np.zeros(self.features_num, 'bool')
        F_s_0 = self.initial_representation.phi_nonTerminal(
            s)
        activeIndices = np.where(F_s_0 != 0)[0]
        if self.useCache:
            finalActiveIndices = self.cache.get(frozenset(activeIndices))
            if finalActiveIndices is None:
                # run regular and update the cache (the cache entry is
                # written inside findFinalActiveFeatures itself)
                finalActiveIndices = self.findFinalActiveFeatures(
                    activeIndices)
        else:
            finalActiveIndices = self.findFinalActiveFeatures(
                activeIndices)
        F_s[finalActiveIndices] = 1
        return F_s
    def findFinalActiveFeatures(self, intialActiveFeatures):
        """
        Given the active indices of phi_0(s) find the final active indices of phi(s) based on discovered features.

        With sparsify on, once a discovered (larger) feature covers part of the
        initial set, the covered indices are removed so smaller sub-features
        are not activated redundantly.
        """
        finalActiveFeatures = []
        k = len(intialActiveFeatures)
        initialSet = set(intialActiveFeatures)
        if 2 ** k <= self.features_num:
            # k can be big which can cause this part to be very slow
            # if k is large then find active features by enumerating on the
            # discovered features.
            if self.use_chirstoph_ordered_features:
                # Enumerate candidate subsets from largest to smallest size,
                # and within a size from newest (largest id) to oldest.
                for i in range(k, 0, -1):
                    if len(initialSet) == 0:
                        break
                    # generate list of all combinations with i elements
                    cand_i = [(c, self.iFDD_features[frozenset(c)].index)
                              for c in combinations(initialSet, i)
                              if frozenset(c) in self.iFDD_features]
                    # sort (recent features (big ids) first)
                    cand_i.sort(key=lambda x: x[1], reverse=True)
                    # idx = -1
                    for candidate, ind in cand_i:
                        # the next block is for testing only
                        # cur_idx = self.iFDD_features[frozenset(candidate)].index
                        # if idx > 0:
                        #     assert(idx > cur_idx)
                        # idx = cur_idx
                        if len(initialSet) == 0:
                            # No more initial features to be mapped to extended
                            # ones
                            break
                        # This was missing from ICML 2011 paper algorithm.
                        # Example: [0,1,20], [0,20] is discovered, but if [0]
                        # is checked before [1] it will be added even though it
                        # is already covered by [0,20]
                        if initialSet.issuperset(set(candidate)):
                            feature = self.iFDD_features.get(
                                frozenset(candidate))
                            if feature is not None:
                                finalActiveFeatures.append(feature.index)
                                if self.sparsify:
                                    # print "Sets:", initialSet, feature.f_set
                                    initialSet = initialSet - feature.f_set
                                    # print "Remaining Set:", initialSet
            else:
                # Enumerate all subsets of the initial set, largest first.
                for candidate in powerset(initialSet, ascending=0):
                    if len(initialSet) == 0:
                        # No more initial features to be mapped to extended
                        # ones
                        break
                    # This was missing from ICML 2011 paper algorithm. Example:
                    # [0,1,20], [0,20] is discovered, but if [0] is checked
                    # before [1] it will be added even though it is already
                    # covered by [0,20]
                    if initialSet.issuperset(set(candidate)):
                        feature = self.iFDD_features.get(frozenset(candidate))
                        if feature is not None:
                            finalActiveFeatures.append(feature.index)
                            if self.sparsify:
                                # print "Sets:", initialSet, feature.f_set
                                initialSet = initialSet - feature.f_set
                                # print "Remaining Set:", initialSet
        else:
            # print "********** Using Alternative: %d > %d" % (2**k, self.features_num)
            # Loop on all features sorted on their size and then novelty and
            # activate features
            for feature in self.sortediFDDFeatures.toList():
                if len(initialSet) == 0:
                    # No more initial features to be mapped to extended ones
                    break
                if initialSet.issuperset(set(feature.f_set)):
                    finalActiveFeatures.append(feature.index)
                    if self.sparsify:
                        # print "Sets:", initialSet, feature.f_set
                        initialSet = initialSet - feature.f_set
                        # print "Remaining Set:", initialSet
        if self.useCache:
            # Memoize the mapping for phi_nonTerminal's fast path.
            self.cache[frozenset(intialActiveFeatures)] = finalActiveFeatures
        return finalActiveFeatures
def post_discover(self, s, terminal, a, td_error, phi_s):
"""
returns the number of added features
"""
# Indices of non-zero elements of vector phi_s
activeFeatures = phi_s.nonzero()[0]
discovered = 0
for g_index, h_index in combinations(activeFeatures, 2):
discovered += self.inspectPair(g_index, h_index, td_error)
return discovered
    def inspectPair(self, g_index, h_index, td_error):
        # Inspect feature f = g union h where g_index and h_index are the indices of features g and h
        # If the relevance is > Threshold add it to the list of features
        # Returns True if a new feature is added
        g = self.featureIndex2feature[g_index].f_set
        h = self.featureIndex2feature[h_index].f_set
        f = g.union(h)
        feature = self.iFDD_features.get(f)
        if not self.iFDDPlus:
            # Plain iFDD (ICML 2011) accumulates absolute errors only.
            td_error = abs(td_error)
        if feature is not None:
            # Already exists
            return False
        # Look it up in potentials
        potential = self.iFDD_potentials.get(f)
        if potential is None:
            # Generate a new potential and put it in the dictionary
            potential = iFDD_potential(f, g_index, h_index)
            self.iFDD_potentials[f] = potential
        potential.cumtderr += td_error
        potential.cumabstderr += abs(td_error)
        potential.count += 1
        # Check for discovery: iFDDPlus in [0, 1] acts as the probability of
        # using the iFDD+ relevance on this inspection.
        if self.random_state.rand() < self.iFDDPlus:
            relevance = abs(potential.cumtderr) / np.sqrt(potential.count)
        else:
            relevance = potential.cumabstderr
        if relevance >= self.discovery_threshold:
            # Reset the relevance tracker; the feature set just changed.
            self.maxRelevance = -np.inf
            self.addFeature(potential)
            return True
        else:
            self.updateMaxRelevance(relevance)
            return False
    def show(self):
        # Debug dump of the whole representation state.
        self.showFeatures()
        self.showPotentials()
        self.showCache()
    def updateWeight(self, p1_index, p2_index):
        # Add a new weight corresponding to the new added feature for all actions.
        # The new weight is set to zero if sparsify = False, and equal to the
        # sum of weights corresponding to the parents if sparsify = True
        a = self.domain.actions_num
        # Number of features before adding the new one
        f = self.features_num - 1
        if self.sparsify:
            # Seed the new weight with the sum of its parents' weights so the
            # value function is (approximately) preserved.
            newElem = (self.weight_vec[p1_index::f] +
                       self.weight_vec[p2_index::f]).reshape((-1, 1))
        else:
            newElem = None
        self.weight_vec = addNewElementForAllActions(self.weight_vec, a, newElem)
        # We don't want to reuse the hashed phi because the phi function changed!
        self.hashed_s = None
    def addInitialFeatures(self):
        """Register every basic feature of the initial representation as a
        singleton iFDD feature."""
        for i in xrange(self.initial_representation.features_num):
            feature = iFDD_feature(i)
            # shout(self,self.iFDD_features[frozenset([i])].index)
            self.iFDD_features[frozenset([i])] = feature
            self.featureIndex2feature[feature.index] = feature
            # priority is 1/number of initial features corresponding to the
            # feature
            priority = 1
            self.sortediFDDFeatures.push(priority, feature)
def addFeature(self, potential):
# Add it to the list of features
# Features_num is always one more than the max index (0-based)
potential.index = self.features_num
self.features_num += 1
feature = iFDD_feature(potential)
self.iFDD_features[potential.f_set] = feature
# Expand the size of the weight_vec
self.updateWeight(feature.p1, feature.p2)
# Update the index to feature dictionary
self.featureIndex2feature[feature.index] = feature
# print "IN IFDD, New Feature = %d => Total Features = %d" % (feature.index, self.features_num)
# Update the sorted list of features
# priority is 1/number of initial features corresponding to the feature
priority = 1 / (len(potential.f_set) * 1.)
self.sortediFDDFeatures.push(priority, feature)
# If you use cache, you should invalidate entries that their initial
# set contains the set corresponding to the new feature
if self.useCache:
for initialActiveFeatures in self.cache.keys():
if initialActiveFeatures.issuperset(feature.f_set):
if self.sparsify:
self.cache.pop(initialActiveFeatures)
else:
# If sparsification is not used, simply add the new
# feature id to all cached values that have feature set
# which is a super set of the features corresponding to
# the new discovered feature
self.cache[initialActiveFeatures].append(feature.index)
if self.debug:
self.show()
    def batchDiscover(self, td_errors, phi, states):
        # Discovers features using iFDD in batch setting.
        # TD_Error: p-by-1 (How much error observed for each sample)
        # phi: features corresponding to all samples
        # NOTE(review): the original comment said "each column corresponds to
        # one sample" but the code indexes rows, phi[i, :] -- confirm layout.
        # self.batchThreshold is the minimum relevance value for the feature to
        # be expanded
        SHOW_PLOT = 0  # Shows the histogram of relevances
        maxDiscovery = self.maxBatchDiscovery
        n = self.features_num  # number of features
        p = len(td_errors)  # Number of samples
        counts = np.zeros((n, n))
        relevances = np.zeros((n, n))
        # Accumulate pairwise excitation and (signed or absolute) TD-errors.
        for i in xrange(p):
            phiphiT = np.outer(phi[i, :], phi[i, :])
            if self.iFDDPlus:
                relevances += phiphiT * td_errors[i]
            else:
                relevances += phiphiT * abs(td_errors[i])
            counts += phiphiT
        # Remove Diagonal and upper part of the relevances as they are useless
        relevances = np.triu(relevances, 1)
        non_zero_index = np.nonzero(relevances)
        if self.iFDDPlus:
            # Calculate relevances based on theoretical results of ICML 2013
            # potential submission
            relevances[non_zero_index] = np.divide(
                np.abs(relevances[non_zero_index]),
                np.sqrt(counts[non_zero_index]))
        else:
            # Based on Geramifard11_ICML Paper
            relevances[non_zero_index] = relevances[non_zero_index]
        # Find indexes to non-zero excited pairs
        # F1 and F2 are the parents of the potentials
        (F1, F2) = relevances.nonzero()
        relevances = relevances[F1, F2]
        if len(relevances) == 0:
            # No feature to add
            self.logger.debug("iFDD Batch: Max Relevance = 0")
            return False
        if SHOW_PLOT:
            # NOTE(review): plt is assumed to be imported at module level.
            e_vec = relevances.flatten()
            e_vec = e_vec[e_vec != 0]
            e_vec = np.sort(e_vec)
            plt.ioff()
            plt.plot(e_vec, linewidth=3)
            plt.show()
        # Sort based on relevances
        # We want high to low hence the reverse: [::-1]
        sortedIndices = np.argsort(relevances)[::-1]
        max_relevance = relevances[sortedIndices[0]]
        # Add top <maxDiscovery> features
        self.logger.debug(
            "iFDD Batch: Max Relevance = {0:g}".format(max_relevance))
        added_feature = False
        new_features = 0
        for j in xrange(len(relevances)):
            if new_features >= maxDiscovery:
                break
            max_index = sortedIndices[j]
            f1 = F1[max_index]
            f2 = F2[max_index]
            relevance = relevances[max_index]
            if relevance > self.batchThreshold:
                # print "Inspecting",
                # f1,f2,'=>',self.getStrFeatureSet(f1),self.getStrFeatureSet(f2)
                # np.inf as the td_error forces inspectPair to discover.
                if self.inspectPair(f1, f2, np.inf):
                    self.logger.debug(
                        'New Feature %d: %s, Relevance = %0.3f' %
                        (self.features_num - 1, self.getStrFeatureSet(self.features_num - 1), relevances[max_index]))
                    new_features += 1
                    added_feature = True
            else:
                # Because the list is sorted, there is no use to look at the
                # others
                break
        return (
            # A signal to see if the representation has been expanded or not
            added_feature
        )
    def showFeatures(self):
        # Debug print of all discovered features, newest last.
        print "Features:"
        print "-" * 30
        print " index\t| f_set\t| p1\t| p2\t | Weights (per action)"
        print "-" * 30
        for feature in reversed(self.sortediFDDFeatures.toList()):
            # for feature in self.iFDD_features.itervalues():
            # print " %d\t| %s\t| %s\t| %s\t| %s" %
            # (feature.index,str(list(feature.f_set)),feature.p1,feature.p2,str(self.weight_vec[feature.index::self.features_num]))
            print " %d\t| %s\t| %s\t| %s\t| Omitted" % (feature.index, self.getStrFeatureSet(feature.index), feature.p1, feature.p2)
    def showPotentials(self):
        # Debug print of all pending potentials.
        # NOTE(review): potential.relevance is never assigned on instances in
        # this file (only the class attribute None exists; instances track
        # cumtderr/cumabstderr), so "%0.2f" % None would raise TypeError here
        # -- confirm before relying on this method.
        print "Potentials:"
        print "-" * 30
        print " index\t| f_set\t| relevance\t| count\t| p1\t| p2"
        print "-" * 30
        for _, potential in self.iFDD_potentials.iteritems():
            print " %d\t| %s\t| %0.2f\t| %d\t| %s\t| %s" % (potential.index, str(np.sort(list(potential.f_set))), potential.relevance, potential.count, potential.p1, potential.p2)
    def showCache(self):
        # Debug print of the phi_0(s) -> active-feature cache.
        if self.useCache:
            print "Cache:"
            if len(self.cache) == 0:
                print 'EMPTY!'
                return
            print "-" * 30
            print " initial\t| Final"
            print "-" * 30
            for initial, active in self.cache.iteritems():
                print " %s\t| %s" % (str(list(initial)), active)
def updateMaxRelevance(self, newRelevance):
# Update a global max relevance and outputs it if it is updated
if self.maxRelevance < newRelevance:
self.maxRelevance = newRelevance
if self.PRINT_MAX_RELEVANCE:
self.logger.debug(
"iFDD Batch: Max Relevance = {0:g}".format(newRelevance))
def getFeature(self, f_id):
# returns a feature given a feature id
if f_id in self.featureIndex2feature.keys():
return self.featureIndex2feature[f_id]
else:
print "F_id %d is not valid" % f_id
return None
def getStrFeatureSet(self, f_id):
# returns a string that corresponds to the set of features specified by
# the given feature_id
if f_id in self.featureIndex2feature.keys():
return str(sorted(list(self.featureIndex2feature[f_id].f_set)))
else:
print "F_id %d is not valid" % f_id
return None
    def featureType(self):
        # iFDD features are conjunctions of binary features, hence boolean.
        return bool
def __deepcopy__(self, memo):
ifdd = iFDD(
self.domain,
self.discovery_threshold,
self.initial_representation,
self.sparsify,
self.discretization,
self.debug,
self.useCache,
self.maxBatchDiscovery,
self.batchThreshold,
self.iFDDPlus)
for s, f in self.iFDD_features.items():
new_f = deepcopy(f)
new_s = deepcopy(s)
ifdd.iFDD_features[new_s] = new_f
ifdd.featureIndex2feature[new_f.index] = new_f
for s, p in self.iFDD_potentials.items():
new_s = deepcopy(s)
new_p = deepcopy(p)
ifdd.iFDD_potentials[new_s] = deepcopy(new_p)
ifdd.cache = deepcopy(self.cache)
ifdd.sortediFDDFeatures = deepcopy(self.sortediFDDFeatures)
ifdd.features_num = self.features_num
ifdd.weight_vec = deepcopy(self.weight_vec)
return ifdd
class iFDDK_potential(iFDD_potential):
    # Potential with the extended statistics needed by iFDD(kappa), including
    # an eligibility trace and lazily-updated relevance estimates.
    f_set = None  # Set of features it corresponds to [Immutable]
    index = None  # If this feature has been discovered set this to its index else 0
    p1 = None  # Parent 1 feature index
    p2 = None  # Parent 2 feature index
    a = None  # tE[phi |\delta|] estimate
    b = None  # tE[phi \delta] estimate
    c = 0.  # || phi ||^2_d estimate
    n_crho = 0  # rho episode index of last update
    e = None  # eligibility trace
    nu = 0.  # w value of last statistics update
    x_a = 0.  # y_a value of last statistics update
    x_b = 0.  # y_b value of last statistics update
    l = 0  # t value of last statistics update
    def __init__(self, f_set, parent1, parent2):
        """Create an undiscovered potential; statistics use extended
        precision (float128) where the platform supports it."""
        self.f_set = deepcopy(f_set)
        self.index = -1  # -1 means it has not been discovered yet
        self.p1 = parent1
        self.p2 = parent2
        try:
            # float128 is unavailable on some platforms (e.g. Windows).
            self.hp_dtype = np.dtype('float128')
        except TypeError:
            self.hp_dtype = np.dtype('float64')
        self.a = np.array(0., dtype=self.hp_dtype)
        self.b = np.array(0., dtype=self.hp_dtype)
        self.e = np.array(0., dtype=self.hp_dtype)
    def relevance(self, kappa=None, plus=None):
        """Relevance estimate; *plus* selects the iFDD+ form, otherwise it is
        drawn stochastically from *kappa*."""
        if plus is None:
            assert(kappa is not None)
            # NOTE(review): no random_state attribute is assigned anywhere on
            # this class in this file, so the kappa path would raise
            # AttributeError unless a caller injects one -- confirm. All
            # visible call sites pass plus= explicitly.
            plus = self.random_state.rand() >= kappa
        if plus:
            return np.abs(self.b) / np.sqrt(self.c)
        else:
            return self.a / np.sqrt(self.c)
    def update_statistics(self, rho, td_error, lambda_, discount_factor, phi, n_rho):
        """Eager (per-step) statistics update with importance weight *rho*."""
        # phi = phi_s[self.p1] * phi_s[self.p2]
        if n_rho > self.n_crho:
            # A new rho episode started: reset the eligibility trace.
            self.e = 0
        self.e = rho * (lambda_ * discount_factor * self.e + phi)
        self.a += np.abs(td_error) * self.e
        self.b += td_error * self.e
        self.c += phi ** 2
        self.n_crho = n_rho
    def update_lazy_statistics(self, rho, td_error, lambda_, discount_factor, phi, y_a, y_b, t_rho, w, t, n_rho):
        """Lazy update: first catch up on the steps since the last update of
        this potential (using the global accumulators y_a/y_b), then apply
        the current transition."""
        # phi = phi_s[self.p1] * phi_s[self.p2]
        if lambda_ > 0:
            # catch up on old updates
            gl = np.power(np.array(discount_factor * lambda_, dtype=self.hp_dtype), t_rho[n_rho] - self.l)
            sa, sb = self.a.copy(), self.b.copy()
            self.a += self.e * (y_a[self.n_crho] - self.x_a) * np.exp(-self.nu) * gl
            self.b += self.e * (y_b[self.n_crho] - self.x_b) * np.exp(-self.nu) * gl
            if not np.isfinite(self.a):
                # Roll back on numerical overflow.
                self.a = sa
                warnings.warn("Overflow in potential relevance estimate")
            if not np.isfinite(self.b):
                self.b = sb
                warnings.warn("Overflow in potential relevance estimate")
            if n_rho > self.n_crho:
                # New rho episode: the trace does not carry over.
                self.e = 0
                # TODO clean up y_a and y_b
            else:
                self.e *= gl * (lambda_ * discount_factor) ** (t - 1 - t_rho[n_rho]) * np.exp(w - self.nu)
        # updates based on current transition
        # np.seterr(under="warn")
        self.e = rho * (lambda_ * discount_factor * self.e + phi)
        self.a += np.abs(td_error) * self.e
        self.b += td_error * self.e
        # np.seterr(under="raise")
        self.c += phi ** 2
        # save current values for next catch-up update
        self.l = t
        self.x_a = y_a[n_rho].copy()
        self.x_b = y_b[n_rho].copy()
        self.nu = w
        self.n_crho = n_rho
    def __deepcopy__(self, memo):
        # NOTE(review): this deliberately(?) constructs a base iFDD_potential,
        # not an iFDDK_potential; the copy therefore loses the relevance()
        # method and hp_dtype -- confirm this is intended.
        new_p = iFDD_potential(self.f_set, self.p1, self.p2)
        for i in ["a", "b", "c", "n_crho", "e", "nu", "x_a", "x_b", "l"]:
            new_p.__dict__[i] = self.__dict__[i]
        return new_p
def _set_comp_lt(a, b):
if len(a) < len(b):
return -1
elif len(a) > len(b):
return 1
l1 = list(a)
l2 = list(b)
l1.sort()
l2.sort()
for i in xrange(len(l1)):
if l1[i] < l2[i]:
return -1
if l1[i] > l2[i]:
return +1
return 0
class iFDDK(iFDD):
    """iFDD(kappa) algorithm with support for eligibility traces.
    The iFDD(kappa) algorithm is a stochastic mixture of iFDD and iFDD+
    to retain the best properties of both algorithms."""
    w = 0  # log(rho) trace
    n_rho = 0  # index for rho episodes
    t = 0  # global time step counter
    t_rho = defaultdict(int)  # start time step of each rho episode
    def __init__(
        self, domain, discovery_threshold, initial_representation, sparsify=True,
            discretization=20, debug=0, useCache=0, kappa=1e-5, lambda_=0., lazy=False):
        """Set up iFDD(kappa).

        :param kappa: probability of using the plain-iFDD relevance instead
            of the iFDD+ one on a given step.
        :param lambda_: eligibility trace decay.
        :param lazy: if True, potential statistics are caught up lazily.
        """
        try:
            # float128 is unavailable on some platforms (e.g. Windows).
            self.hp_dtype = np.dtype('float128')
        except TypeError:
            self.hp_dtype = np.dtype('float64')
        # Global accumulators used by the lazy catch-up updates.
        self.y_a = defaultdict(lambda: np.array(0., dtype=self.hp_dtype))
        self.y_b = defaultdict(lambda: np.array(0., dtype=self.hp_dtype))
        self.lambda_ = lambda_
        self.kappa = kappa
        self.discount_factor = domain.discount_factor
        self.lazy = lazy  # lazy updating?
        super(
            iFDDK, self).__init__(domain, discovery_threshold, initial_representation,
                                  sparsify=sparsify, discretization=discretization, debug=debug,
                                  useCache=useCache)
    def episodeTerminated(self):
        # Start a new rho episode: reset the log-rho trace and record the
        # time step at which the new episode begins.
        self.n_rho += 1
        self.w = 0
        self.t_rho[self.n_rho] = self.t
    def showPotentials(self):
        # Debug print of all pending potentials, ordered by set size then
        # contents (Python 2 only: iterkeys and sorted(cmp=...)).
        print "Potentials:"
        print "-" * 30
        print " index\t| f_set\t| relevance\t| count\t| p1\t| p2"
        print "-" * 30
        k = sorted(self.iFDD_potentials.iterkeys(), cmp=_set_comp_lt)
        for f_set in k:
            potential = self.iFDD_potentials[f_set]
            print " %d\t| %s\t| %g\t| %d\t| %s\t| %s" % (potential.index, str(np.sort(list(potential.f_set))), potential.relevance(plus=True), potential.c, potential.p1, potential.p2)
    def post_discover(self, s, terminal, a, td_error, phi_s, rho=1):
        """
        returns the number of added features

        :param rho: importance sampling weight for off-policy discovery.
        """
        self.t += 1
        discovered = 0
        # Stochastic mixture: with probability (1 - kappa) use iFDD+ relevance.
        plus = self.random_state.rand() >= self.kappa
        # if self.t == 22:
        #     import ipdb; ipdb.set_trace()
        activeFeatures = phi_s.nonzero()[
            0]  # Indices of non-zero elements of vector phi_s
        # print "Active Features:", activeFeatures, phi_s
        if not self.lazy:
            # Eager mode: every potential is updated on every step.
            dd = defaultdict(float)
            for g_index, h_index in combinations(activeFeatures, 2):
                # create potential if necessary
                dd[self.get_potential(g_index, h_index).f_set] = phi_s[g_index] * phi_s[h_index]
            # NOTE: entries are deleted from iFDD_potentials inside this loop;
            # on Python 2 .items() is a snapshot list, so this is safe here.
            for f, potential in self.iFDD_potentials.items():
                potential.update_statistics(
                    rho, td_error, self.lambda_, self.discount_factor, dd[f], self.n_rho)
                # print plus, potential.relevance(plus=plus)
                if potential.relevance(plus=plus) >= self.discovery_threshold:
                    self.addFeature(potential)
                    del self.iFDD_potentials[f]
                    discovered += 1
            # print "t", self.t, "td_error", td_error, discovered
            # self.showCache()
            # print [ f.f_set for f in self.sortediFDDFeatures.toList()]
            # self.showPotentials()
            return discovered
        # Lazy mode: only potentials of currently active pairs are updated.
        for g_index, h_index in combinations(activeFeatures, 2):
            discovered += self.inspectPair(g_index, h_index, td_error, phi_s, rho, plus)
        if rho > 0:
            self.w += np.log(rho)
        else:
            # cut e-traces: zero rho ends the current rho episode
            self.n_rho += 1
            self.w = 0
            self.t_rho[self.n_rho] = self.t
        # "rescale" to avoid numerical problems
        # if self.t_rho[self.n_rho] + 300 < self.t:
        #    self.y_a[self.n_rho] *= (self.discount_factor * self.lambda_) ** (-300)
        #    self.y_b[self.n_rho] *= (self.discount_factor * self.lambda_) ** (-300)
        #    self.t_rho[self.n_rho] += 300
        if self.lambda_ > 0:
            # Maintain the global accumulators consumed by lazy catch-ups.
            self.y_a[self.n_rho] += np.exp(self.w) * (self.discount_factor * self.lambda_) ** (
                self.t - self.t_rho[self.n_rho]) * np.abs(td_error)
            self.y_b[self.n_rho] += np.exp(self.w) * (self.discount_factor * self.lambda_) ** (
                self.t - self.t_rho[self.n_rho]) * td_error
            assert(np.isfinite(self.y_a[self.n_rho]))
            assert(np.isfinite(self.y_b[self.n_rho]))
        # print "t", self.t, "td_error", td_error, discovered
        # self.showCache()
        # print [ f.f_set for f in self.sortediFDDFeatures.toList()]
        # self.showPotentials()
        return discovered
    def get_potential(self, g_index, h_index):
        """Return (creating if necessary) the potential for the union of the
        feature sets of g_index and h_index.

        Returns None when the union is already a discovered feature.
        """
        g = self.featureIndex2feature[g_index].f_set
        h = self.featureIndex2feature[h_index].f_set
        f = g.union(h)
        feature = self.iFDD_features.get(f)
        if feature is not None:
            # Already exists
            return None
        # Look it up in potentials
        potential = self.iFDD_potentials.get(f)
        if potential is None:
            # Generate a new potential and put it in the dictionary
            potential = iFDDK_potential(f, g_index, h_index)
            self.iFDD_potentials[f] = potential
        return potential
def inspectPair(self, g_index, h_index, td_error, phi_s, rho, plus):
# Inspect feature f = g union h where g_index and h_index are the indices of features g and h
# If the relevance is > Threshold add it to the list of features
# Returns True if a new feature is added
potential = self.get_potential(g_index, h_index)
phi = phi_s[g_index] * phi_s[h_index]
potential.update_lazy_statistics(
rho, td_error, self.lambda_, self.discount_factor, phi, self.y_a,
self.y_b, self.t_rho, self.w, self.t, self.n_rho)
# Check for discovery
relevance = potential.relevance(plus=plus)
if relevance >= self.discovery_threshold:
self.maxRelevance = -np.inf
self.addFeature(potential)
del self.iFDD_potentials[potential.f_set]
return 1
else:
self.updateMaxRelevance(relevance)
return 0
| |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Common client utilities"""
import getpass
import logging
import os
import six
import time
from oslo_utils import importutils
from openstackclient.common import exceptions
def log_method(log, level=logging.DEBUG):
    """Decorator factory: log a method call and its arguments on entry."""
    def decorator(func):
        name = func.__name__
        @six.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Only format the arguments when the message will actually be
            # emitted at this level.
            if log.isEnabledFor(level):
                rendered = [str(arg) for arg in args]
                rendered += ["%s=%s" % (k, v)
                             for k, v in six.iteritems(kwargs)]
                log.log(level, "%s(%s)", name, ", ".join(rendered))
            return func(self, *args, **kwargs)
        return wrapper
    return decorator
def find_resource(manager, name_or_id, **kwargs):
    """Helper for the _find_* methods.

    :param manager: A client manager class
    :param name_or_id: The resource we are trying to find
    :param kwargs: To be used in calling .find()
    :rtype: The found resource
    :raises exceptions.CommandError: when no (or more than one) resource
        matches

    This method will attempt to find a resource in a variety of ways.
    Primarily .get() methods will be called with `name_or_id` as an integer
    value, and tried again as a string value.

    If both fail, then a .find() is attempted, which is essentially calling
    a .list() function with a 'name' query parameter that is set to
    `name_or_id`.

    Lastly, if any kwargs are passed in, they will be treated as additional
    query parameters. This is particularly handy in the case of finding
    resources in a domain.
    """
    # Try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id), **kwargs)
    # FIXME(dtroyer): The exception to catch here is dependent on which
    #                 client library the manager passed in belongs to.
    #                 Eventually this should be pulled from a common set
    #                 of client exceptions.
    except Exception as ex:
        if type(ex).__name__ == 'NotFound':
            pass
        else:
            raise
    # Try directly using the passed value
    try:
        return manager.get(name_or_id, **kwargs)
    except Exception:
        pass
    # (The previous "if len(kwargs) == 0: kwargs = {}" here was a no-op:
    # **kwargs is always a fresh dict, empty or not.)
    try:
        # Prepare the kwargs for calling find
        if 'NAME_ATTR' in manager.resource_class.__dict__:
            # novaclient does this for oddball resources
            kwargs[manager.resource_class.NAME_ATTR] = name_or_id
        else:
            kwargs['name'] = name_or_id
    except Exception:
        pass
    # finally try to find entity by name
    try:
        return manager.find(**kwargs)
    # FIXME(dtroyer): The exception to catch here is dependent on which
    #                 client library the manager passed in belongs to.
    #                 Eventually this should be pulled from a common set
    #                 of client exceptions.
    except Exception as ex:
        if type(ex).__name__ == 'NotFound':
            msg = "No %s with a name or ID of '%s' exists." % \
                (manager.resource_class.__name__.lower(), name_or_id)
            raise exceptions.CommandError(msg)
        if type(ex).__name__ == 'NoUniqueMatch':
            msg = "More than one %s exists with the name '%s'." % \
                (manager.resource_class.__name__.lower(), name_or_id)
            raise exceptions.CommandError(msg)
        else:
            pass
    # Last resort: scan the full listing for an exact id/name match.
    # NOTE(review): this assumes the listed resources support .get() like a
    # mapping -- confirm against the client libraries in use.
    try:
        for resource in manager.list():
            # short circuit and return the first match
            if (resource.get('id') == name_or_id or
                    resource.get('name') == name_or_id):
                return resource
        else:
            # we found no match, keep going to bomb out
            pass
    except Exception:
        # in case the list fails for some reason
        pass
    # if we hit here, we've failed, report back this error:
    msg = "Could not find resource %s" % name_or_id
    raise exceptions.CommandError(msg)
def format_dict(data):
    """Return a formatted string of key value pairs

    :param data: a dict
    :rtype: a string formatted to key='value'
    """
    # Render each pair as key='value' in sorted key order and join with
    # comma-space; an empty dict yields an empty string.
    return ", ".join(
        "%s='%s'" % (key, six.text_type(data[key])) for key in sorted(data))
def format_list(data):
    """Return a comma-separated, sorted rendering of *data*.

    :param data: a list of strings
    :rtype: a string formatted to a,b,c
    """
    ordered = sorted(data)
    return ', '.join(ordered)
def get_field(item, field):
    """Return *field* from *item*: by key for dicts, by attribute otherwise.

    :raises exceptions.CommandError: when the field is absent
    """
    try:
        if isinstance(item, dict):
            value = item[field]
        else:
            value = getattr(item, field)
    except Exception:
        raise exceptions.CommandError(
            "Resource doesn't have field %s" % field)
    return value
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single item resource (e.g. Server, Project, etc)
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    :param formatters: dictionary mapping field names to callables
        to format the values
    """
    # Mutable default arguments ([] / {}) are shared across calls; use an
    # immutable tuple and a None sentinel instead.
    if formatters is None:
        formatters = {}
    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        # Missing attributes render as an empty string rather than raising.
        data = getattr(item, field_name, '')
        if field in formatters:
            row.append(formatters[field](data))
        else:
            row.append(data)
    return tuple(row)
def get_dict_properties(item, fields, mixed_case_fields=(), formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single dict resource
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    :param formatters: dictionary mapping field names to callables
        to format the values
    """
    # Mutable default arguments ([] / {}) are shared across calls; use an
    # immutable tuple and a None sentinel instead.
    if formatters is None:
        formatters = {}
    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        # Missing keys render as an empty string rather than raising.
        data = item[field_name] if field_name in item else ''
        if field in formatters:
            row.append(formatters[field](data))
        else:
            row.append(data)
    return tuple(row)
def sort_items(items, sort_str):
    """Sort items based on sort keys and sort directions given by sort_str.

    :param items: a list or generator object of items
    :param sort_str: a string defining the sort rules, the format is
        '<key1>:[direction1],<key2>:[direction2]...', direction can be 'asc'
        for ascending or 'desc' for descending, if direction is not given,
        it's ascending by default
    :return: sorted items
    """
    if not sort_str:
        return items
    # items may be a generator object, transform it to a list
    items = list(items)
    # Apply the keys right-to-left so the leftmost key dominates
    # (stable sort makes successive passes preserve earlier orderings).
    for spec in reversed(sort_str.strip().split(',')):
        key, sep, direction = spec.partition(':')
        reverse = False
        if sep:
            if not key:
                raise exceptions.CommandError(
                    "empty string is not a valid sort key")
            if direction not in ['asc', 'desc']:
                shown = direction if direction else "empty string"
                raise exceptions.CommandError(
                    "%s is not a valid sort direction for sort key %s, "
                    "use asc or desc instead" % (shown, key))
            reverse = (direction == 'desc')
        items.sort(key=lambda item: get_field(item, key),
                   reverse=reverse)
    return items
def string_to_bool(arg):
    """Return True when *arg* is a truthy word ('t', 'true', 'yes', '1'),
    ignoring case and surrounding whitespace."""
    normalized = arg.strip().lower()
    return normalized in {'t', 'true', 'yes', '1'}
def env(*vars, **kwargs):
    """Search for the first defined of possibly many env vars

    Returns the first environment variable defined in vars, or
    returns the default defined in kwargs.
    """
    for name in vars:
        # Empty-string values are treated the same as unset.
        found = os.environ.get(name)
        if found:
            return found
    return kwargs.get('default', '')
def get_client_class(api_name, version, version_map):
    """Returns the client class for the requested API version

    :param api_name: the name of the API, e.g. 'compute', 'image', etc
    :param version: the requested API version
    :param version_map: a dict of client classes keyed by version
    :rtype: a client class for the requested API version
    :raises exceptions.UnsupportedVersion: for unknown versions
    """
    try:
        client_path = version_map[str(version)]
    except (KeyError, ValueError):
        supported = ', '.join(version_map.keys())
        raise exceptions.UnsupportedVersion(
            "Invalid %s client version '%s'. must be one of: %s"
            % (api_name, version, supported))
    return importutils.import_class(client_path)
def wait_for_status(status_f,
                    res_id,
                    status_field='status',
                    success_status=('active',),
                    error_status=('error',),
                    sleep_time=5,
                    callback=None):
    """Wait for status change on a resource during a long-running operation

    :param status_f: a status function that takes a single id argument
    :param res_id: the resource id to watch
    :param status_field: the status attribute in the returned resource object
    :param success_status: statuses (lowercase) for successful completion
    :param error_status: statuses (lowercase) indicating an error
    :param sleep_time: wait this long between polls (seconds)
    :param callback: called per sleep cycle, useful to display progress
    :rtype: True on success, False when an error status is reached
    """
    # NOTE: the defaults are tuples rather than lists to avoid the shared
    # mutable default-argument pitfall; membership tests are unchanged.
    while True:
        res = status_f(res_id)
        # A missing status attribute is treated as "still in progress".
        status = getattr(res, status_field, '').lower()
        if status in success_status:
            return True
        if status in error_status:
            return False
        if callback:
            progress = getattr(res, 'progress', None) or 0
            callback(progress)
        time.sleep(sleep_time)
def wait_for_delete(manager,
                    res_id,
                    status_field='status',
                    sleep_time=5,
                    timeout=300,
                    callback=None):
    """Wait for resource deletion

    :param manager: the manager from which we can get the resource
    :param res_id: the resource id to watch
    :param status_field: the status attribute in the returned resource object,
        this is used to check for error states while the resource is being
        deleted
    :param sleep_time: wait this long between checks (seconds)
    :param timeout: check until this long (seconds)
    :param callback: called per sleep cycle, useful to display progress; this
        function is passed a progress value during each iteration of the wait
        loop
    :rtype: True on success, False if the resource has gone to error state or
        the timeout has been reached
    """
    elapsed = 0
    while elapsed < timeout:
        try:
            # might not be a bad idea to re-use find_resource here if it was
            # a bit more friendly in the exceptions it raised so we could just
            # handle a NotFound exception here without parsing the message
            resource = manager.get(res_id)
        except Exception as exc:
            # Name-based check avoids importing every client's NotFound class.
            if type(exc).__name__ == 'NotFound':
                return True
            raise
        if getattr(resource, status_field, '').lower() == 'error':
            return False
        if callback:
            callback(getattr(resource, 'progress', None) or 0)
        time.sleep(sleep_time)
        elapsed += sleep_time
    # Ran out of time without the resource disappearing.
    return False
def get_effective_log_level():
    """Returns the lowest logging level considered by logging handlers

    Retrieve and return the smallest log level set among the root
    logger's handlers (in case of multiple handlers).
    """
    root_handlers = logging.getLogger().handlers
    # CRITICAL is the ceiling: with no handlers we report CRITICAL, exactly
    # as the accumulator-based loop would.
    return min([logging.CRITICAL] + [handler.level for handler in root_handlers])
def get_password(stdin, prompt=None, confirm=True):
    """Prompt interactively for a password on *stdin*.

    :param stdin: stream to check for a terminal; prompting only happens
        when it is a tty
    :param prompt: optional prompt text (defaults to "User Password:")
    :param confirm: when True, ask twice and require both entries to match
    :return: the entered password
    :raises exceptions.CommandError: when stdin is not a terminal or on EOF
    """
    message = prompt or "User Password:"
    # Guard clause: refuse to prompt when there is no terminal attached.
    if not (hasattr(stdin, 'isatty') and stdin.isatty()):
        raise exceptions.CommandError("There was a request to be prompted for a"
                                      " password and a terminal was not detected.")
    try:
        while True:
            password = getpass.getpass(message)
            if not confirm:
                return password
            if password == getpass.getpass("Repeat " + message):
                return password
            print("The passwords entered were not the same")
    except EOFError:  # Ctl-D
        raise exceptions.CommandError("Error reading password.")
def read_blob_file_contents(blob_file):
    """Read and return the stripped contents of *blob_file*.

    :param blob_file: path of the file to read
    :return: the file contents with surrounding whitespace removed
    :raises exceptions.CommandError: when the file cannot be read
    """
    try:
        with open(blob_file) as handle:
            return handle.read().strip()
    except IOError:
        msg = "Error occurred trying to read from file %s"
        raise exceptions.CommandError(msg % blob_file)
def build_kwargs_dict(arg_name, value):
    """Return a dictionary containing `arg_name` if `value` is set.

    Falsy values ('' , 0, None, empty containers) yield an empty dict.
    """
    return {arg_name: value} if value else {}
def is_ascii(string):
    """Return True when *string* contains only ASCII characters.

    :param string: a byte string or a text string
    :return: True if the value round-trips through the 'ascii' codec

    The original implementation called ``string.decode('ascii')``
    unconditionally, which only works for byte strings and raises
    AttributeError for ``str`` input on Python 3; text strings are now
    checked by encoding instead.
    """
    try:
        if isinstance(string, bytes):
            string.decode('ascii')
        else:
            string.encode('ascii')
        return True
    except (UnicodeDecodeError, UnicodeEncodeError):
        return False
| |
# -*- coding: utf-8 -*-
from datetime import timedelta
import arrow
import mock
from django import http
from django import test
from django.conf import settings
from django.utils import timezone
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.apps.core import models as core_models
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_dbcache.models import AssignmentGroupCachedData
from devilry.devilry_deadlinemanagement.views import manage_deadline_view
from devilry.devilry_group import devilry_group_baker_factories as group_baker
from devilry.devilry_group import models as group_models
from devilry.utils import datetimeutils
from devilry.utils.datetimeutils import from_isoformat_noseconds, isoformat_withseconds
class ExaminerTestCaseMixin(test.TestCase, cradmin_testhelpers.TestCaseMixin):
    """Shared setup for examiner deadline-management view tests.

    Provides mock cradmin instance/app factories that report the
    'examiner' devilry role and restrict the accessible groups to those
    the examiner user has access to.
    """
    viewclass = manage_deadline_view.ManageDeadlineAllGroupsView
    handle_deadline = 'new-attempt'

    def setUp(self):
        # The cached-data triggers must exist for AssignmentGroup.cached_data.
        AssignmentGroupDbCacheCustomSql().initialize()

    def _get_mock_instance(self, assignment):
        # Mock cradmin instance reporting the 'examiner' role for *assignment*.
        instance = mock.MagicMock()
        instance.get_devilryrole_type.return_value = 'examiner'
        instance.assignment = assignment
        return instance

    def _get_mock_app(self, user=None):
        # Mock cradmin app whose group queryset is limited to groups the
        # examiner *user* has access to.
        app = mock.MagicMock()
        app.get_devilryrole.return_value = 'examiner'
        accessible_groups = core_models.AssignmentGroup.objects\
            .filter_examiner_has_access(user=user)
        app.get_accessible_group_queryset.return_value = accessible_groups
        return app
class TestManageDeadlineNewAttemptAllGroupsView(ExaminerTestCaseMixin):
    """Tests for ManageDeadlineAllGroupsView in 'new-attempt' mode.

    In this mode only groups whose feedback is published (corrected) are
    included; unpublished groups are excluded and reported in the info box.

    NOTE: the original assertions used ``.__str__().decode('utf-8')``, a
    Python 2 leftover that raises AttributeError on Python 3 (``str`` has
    no ``decode``); they now use ``str(...)``.
    """
    viewclass = manage_deadline_view.ManageDeadlineAllGroupsView
    handle_deadline = 'new-attempt'

    def test_info_box_not_showing_when_zero_groups_were_excluded(self):
        """No info box when every group is eligible for a new attempt."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))

    def test_info_box_showing_when_one_group_was_excluded(self):
        """Info box reports one excluded (unpublished) group."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '1 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_info_box_showing_when_multiple_groups_were_excluded(self):
        """Info box reports the count of all excluded (unpublished) groups."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup3)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup3)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '2 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_all_groups_added_to_form_hidden(self):
        """All eligible groups appear as hidden selected_items inputs."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup3)
        testgroup4 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup4)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup3, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup4, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        # str(...) replaces the py2-only ``.__str__().decode('utf-8')``.
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_1')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_2')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_3')))
        self.assertIn('value="{}"'.format(testgroup1.id), str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('value="{}"'.format(testgroup2.id), str(mockresponse.selector.one('#id_selected_items_1')))
        self.assertIn('value="{}"'.format(testgroup3.id), str(mockresponse.selector.one('#id_selected_items_2')))
        self.assertIn('value="{}"'.format(testgroup4.id), str(mockresponse.selector.one('#id_selected_items_3')))

    def test_all_only_one_group_added_to_form_hidden(self):
        """Only the eligible (published) group appears as a hidden input."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        # str(...) replaces the py2-only ``.__str__().decode('utf-8')``.
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('value="{}"'.format(testgroup1.id), str(mockresponse.selector.one('#id_selected_items_0')))

    def test_post_only_groups_added_as_form_hidden_input(self):
        """Posting gives a new attempt only to the selected group."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'You have been given a new attempt.',
                    'selected_items': [testgroup1.id]
                }
            }
        )
        feedbacksets = group_models.FeedbackSet.objects.all()
        # Two initial feedbacksets plus the new attempt for group1.
        self.assertEqual(3, feedbacksets.count())
        group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id)
        group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id)
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(new_deadline, group1.cached_data.last_feedbackset.deadline_datetime)
        self.assertNotEqual(new_deadline, group2.cached_data.last_feedbackset.deadline_datetime)

    def test_post_groups_unpublished_raises_error(self):
        """Selecting an unpublished group for a new attempt raises 404."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http302_postrequest(
                cradmin_role=testassignment,
                cradmin_instance=self._get_mock_instance(testassignment),
                cradmin_app=self._get_mock_app(user=testuser),
                requestuser=testuser,
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline
                },
                requestkwargs={
                    'data': {
                        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                        'comment_text': 'You have been given a new attempt.',
                        'selected_items': [testgroup1.id, testgroup2.id]
                    }
                }
            )
        # No new feedbacksets were created.
        self.assertEqual(2, group_models.FeedbackSet.objects.count())
class TestManageDeadlineMoveDeadlineAllGroupsView(ExaminerTestCaseMixin):
    """Tests for ManageDeadlineAllGroupsView in 'move-deadline' mode.

    In this mode only groups whose last feedbackset is unpublished are
    included; published groups are excluded and reported in the info box.

    NOTE: the original assertions used ``.__str__().decode('utf-8')``, a
    Python 2 leftover that raises AttributeError on Python 3 (``str`` has
    no ``decode``); they now use ``str(...)``.
    """
    viewclass = manage_deadline_view.ManageDeadlineAllGroupsView
    handle_deadline = 'move-deadline'

    def test_info_box_not_showing_when_zero_groups_were_excluded(self):
        """No info box when every group's deadline can be moved."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))

    def test_info_box_showing_when_one_group_was_excluded(self):
        """Info box reports one excluded (already published) group."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '1 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_info_box_showing_when_multiple_groups_were_excluded(self):
        """Info box reports the count of all excluded (published) groups."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        group_baker.feedbackset_first_attempt_published(group=testgroup3)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup3)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '2 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_all_groups_added_to_form_hidden(self):
        """All eligible groups appear as hidden selected_items inputs."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup3)
        testgroup4 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup4)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup3, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup4, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        # str(...) replaces the py2-only ``.__str__().decode('utf-8')``.
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_1')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_2')))
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_3')))
        self.assertIn('value="{}"'.format(testgroup1.id), str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('value="{}"'.format(testgroup2.id), str(mockresponse.selector.one('#id_selected_items_1')))
        self.assertIn('value="{}"'.format(testgroup3.id), str(mockresponse.selector.one('#id_selected_items_2')))
        self.assertIn('value="{}"'.format(testgroup4.id), str(mockresponse.selector.one('#id_selected_items_3')))

    def test_all_only_one_group_added_to_form_hidden(self):
        """Only the eligible (unpublished) group appears as a hidden input."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        # str(...) replaces the py2-only ``.__str__().decode('utf-8')``.
        self.assertIn('type="hidden"', str(mockresponse.selector.one('#id_selected_items_0')))
        self.assertIn('value="{}"'.format(testgroup1.id), str(mockresponse.selector.one('#id_selected_items_0')))

    def test_post_only_groups_added_as_form_hidden_input(self):
        """Posting moves the deadline only for the selected group."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'Deadline has been moved',
                    'selected_items': [testgroup1.id]
                }
            }
        )
        feedbacksets = group_models.FeedbackSet.objects.all()
        # Moving a deadline must not create new feedbacksets.
        self.assertEqual(2, feedbacksets.count())
        group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id)
        group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id)
        self.assertEqual(group1.cached_data.last_feedbackset, group1.cached_data.first_feedbackset)
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(new_deadline, group1.cached_data.last_feedbackset.deadline_datetime)
        self.assertNotEqual(new_deadline, group2.cached_data.last_feedbackset.deadline_datetime)

    def test_post_groups_published_raises_error(self):
        """Selecting a published group for a deadline move raises 404."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http302_postrequest(
                cradmin_role=testassignment,
                cradmin_instance=self._get_mock_instance(testassignment),
                cradmin_app=self._get_mock_app(user=testuser),
                requestuser=testuser,
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline
                },
                requestkwargs={
                    'data': {
                        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                        'comment_text': 'Deadline has been moved',
                        'selected_items': [testgroup1.id, testgroup2.id]
                    }
                }
            )
        # No new feedbacksets were created.
        self.assertEqual(2, group_models.FeedbackSet.objects.count())

    def test_post_only_moves_deadline_for_feedbacksets_that_are_last(self):
        """Only each group's last (unpublished) feedbackset gets the new deadline."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        last_deadline = timezone.now()
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        group_baker.feedbackset_new_attempt_unpublished(group=testgroup1, deadline_datetime=last_deadline)
        group_baker.feedbackset_new_attempt_unpublished(group=testgroup2, deadline_datetime=last_deadline)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(last_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'Deadline has been moved',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        self.assertEqual(4, group_models.FeedbackSet.objects.count())
        cached_data_group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id).cached_data
        cached_data_group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id).cached_data
        self.assertEqual(cached_data_group1.first_feedbackset.deadline_datetime, testassignment.first_deadline)
        self.assertEqual(cached_data_group2.first_feedbackset.deadline_datetime, testassignment.first_deadline)
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        # Fixed copy-paste duplication: the original asserted on
        # cached_data_group2 four times and never checked group1.
        self.assertEqual(cached_data_group1.last_feedbackset.deadline_datetime, new_deadline)
        self.assertEqual(cached_data_group2.last_feedbackset.deadline_datetime, new_deadline)
        self.assertEqual(cached_data_group1.last_feedbackset.last_updated_by, testuser)
        self.assertEqual(cached_data_group2.last_feedbackset.last_updated_by, testuser)

    def test_post_only_moves_deadline_for_feedbacksets_that_are_last_first_attempt_and_new_attempt(self):
        """The last feedbackset is moved whether it is a first or a new attempt."""
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        last_deadline = timezone.now().replace(microsecond=0)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1, deadline_datetime=last_deadline)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        group_baker.feedbackset_new_attempt_unpublished(group=testgroup2, deadline_datetime=last_deadline)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(last_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'Deadline has been moved',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        self.assertEqual(3, group_models.FeedbackSet.objects.count())
        cached_data_group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id).cached_data
        cached_data_group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id).cached_data
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(cached_data_group1.last_feedbackset.deadline_datetime, new_deadline)
        self.assertEqual(cached_data_group1.last_feedbackset.last_updated_by, testuser)
        self.assertEqual(cached_data_group2.first_feedbackset.deadline_datetime, testassignment.first_deadline)
class TestManageDeadlineNewAttemptFromPreviousView(ExaminerTestCaseMixin):
    """
    Tests posting data from another view, and the actual posting in this view.

    Exercises ``ManageDeadlineFromPreviousView`` in ``new-attempt`` mode:
    only groups whose last feedbackset is published (corrected) can be given
    a new attempt; uncorrected groups are excluded from the operation.
    """
    viewclass = manage_deadline_view.ManageDeadlineFromPreviousView
    handle_deadline = 'new-attempt'

    def test_info_box_not_showing_when_zero_group_were_excluded(self):
        # Both groups are corrected (published), so nothing is excluded and
        # the info box must not be rendered.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))

    def test_info_box_showing_when_one_group_was_excluded(self):
        # testgroup2 is uncorrected and therefore excluded from the
        # new-attempt operation; the info box reports one excluded group.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '1 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_info_box_showing_when_multiple_groups_were_excluded(self):
        # Two of the three groups are uncorrected and therefore excluded.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup3)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup3)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '2 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_post_from_previous_view_selected_groups_are_hidden(self):
        # By adding the post_type_received_data to the key, we are simulating that the
        # post comes from a different view.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_postrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'post_type_received_data': '',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        # NOTE(review): `.__str__().decode('utf-8')` only works if the
        # element's __str__ returns bytes; with a str-returning __str__ this
        # raises AttributeError -- confirm against the htmls element API.
        self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
        self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8'))
        self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
        self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8'))

    def test_post_new_attempt(self):
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                # Use the class attribute (== 'new-attempt' here) for
                # consistency with the other methods in this class.
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'You have been given a new attempt.',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        # One new feedbackset per group (2 existing + 2 new) and one comment
        # per new attempt.
        self.assertEqual(4, group_models.FeedbackSet.objects.count())
        self.assertEqual(2, group_models.GroupComment.objects.count())
        group_comments = group_models.GroupComment.objects.all()
        last_feedbackset_group1 = AssignmentGroupCachedData.objects.get(group_id=testgroup1.id).last_feedbackset
        last_feedbackset_group2 = AssignmentGroupCachedData.objects.get(group_id=testgroup2.id).last_feedbackset
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(last_feedbackset_group1.deadline_datetime, new_deadline)
        self.assertEqual(last_feedbackset_group2.deadline_datetime, new_deadline)
        self.assertEqual(last_feedbackset_group1.last_updated_by, testuser)
        self.assertEqual(last_feedbackset_group2.last_updated_by, testuser)
        self.assertEqual('You have been given a new attempt.', group_comments[0].text)
        self.assertEqual('You have been given a new attempt.', group_comments[1].text)

    def test_post_groups_published_raises_error(self):
        # A mixed selection (one corrected, one uncorrected group) must be
        # rejected with Http404, leaving the feedbackset count unchanged.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http302_postrequest(
                cradmin_role=testassignment,
                cradmin_instance=self._get_mock_instance(testassignment),
                cradmin_app=self._get_mock_app(user=testuser),
                requestuser=testuser,
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline
                },
                requestkwargs={
                    'data': {
                        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                        'comment_text': 'You have been given a new attempt.',
                        'selected_items': [testgroup1.id, testgroup2.id]
                    }
                }
            )
        self.assertEqual(2, group_models.FeedbackSet.objects.count())
class TestManageDeadlineMoveDeadlineFromPreviousView(ExaminerTestCaseMixin):
    """
    Tests ``ManageDeadlineFromPreviousView`` in ``move-deadline`` mode:
    posting data from another view, and the actual posting in this view.
    """
    # View under test and the deadline-handling mode passed via viewkwargs.
    viewclass = manage_deadline_view.ManageDeadlineFromPreviousView
    handle_deadline = 'move-deadline'

    def test_info_box_not_showing_when_zero_group_were_excluded(self):
        # Both groups are uncorrected (unpublished), so no group is excluded
        # and the info box must not be rendered.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))

    def test_info_box_showing_when_one_group_was_excluded(self):
        # testgroup2 is already corrected (published), so it is excluded and
        # the info box reports one excluded group.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '1 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_info_box_showing_when_multiple_groups_were_excluded(self):
        # Two of the three groups are corrected (published) and excluded.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup3 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        group_baker.feedbackset_first_attempt_published(group=testgroup3)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup3)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            }
        )
        self.assertTrue(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
        self.assertIn(
            '2 group(s) excluded',
            mockresponse.selector.one('.devilry-deadline-management-info-box').alltext_normalized)

    def test_post_from_previous_view_selected_groups_are_hidden(self):
        # By adding the post_type_received_data to the key, we are simulating that the
        # post comes from a different view.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_postrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline
            },
            requestkwargs={
                'data': {
                    'post_type_received_data': '',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        # NOTE(review): `.__str__().decode('utf-8')` only works if the
        # element's __str__ returns bytes; with a str-returning __str__ this
        # raises AttributeError -- confirm against the htmls element API.
        self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
        self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8'))
        self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
        self.assertIn('value="{}"'.format(testgroup2.id), mockresponse.selector.one('#id_selected_items_1').__str__().decode('utf-8'))

    def test_post_new_attempt(self):
        # NOTE(review): this test is identical to the one in
        # TestManageDeadlineNewAttemptFromPreviousView and hard-codes
        # 'new-attempt' instead of using self.handle_deadline
        # ('move-deadline' in this class) -- confirm this duplication is
        # intentional.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': 'new-attempt'
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'You have been given a new attempt.',
                    'selected_items': [testgroup1.id, testgroup2.id]
                }
            }
        )
        # One new feedbackset per group (2 existing + 2 new) and one comment
        # per new attempt.
        self.assertEqual(4, group_models.FeedbackSet.objects.count())
        self.assertEqual(2, group_models.GroupComment.objects.count())
        group_comments = group_models.GroupComment.objects.all()
        last_feedbackset_group1 = AssignmentGroupCachedData.objects.get(group_id=testgroup1.id).last_feedbackset
        last_feedbackset_group2 = AssignmentGroupCachedData.objects.get(group_id=testgroup2.id).last_feedbackset
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(last_feedbackset_group1.deadline_datetime, new_deadline)
        self.assertEqual(last_feedbackset_group2.deadline_datetime, new_deadline)
        self.assertEqual(last_feedbackset_group1.last_updated_by, testuser)
        self.assertEqual(last_feedbackset_group2.last_updated_by, testuser)
        self.assertEqual('You have been given a new attempt.', group_comments[0].text)
        self.assertEqual('You have been given a new attempt.', group_comments[1].text)

    def test_post_groups_published_raises_error(self):
        # A mixed selection (one published, one unpublished group) must be
        # rejected with Http404, leaving the feedbackset count unchanged.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http302_postrequest(
                cradmin_role=testassignment,
                cradmin_instance=self._get_mock_instance(testassignment),
                cradmin_app=self._get_mock_app(user=testuser),
                requestuser=testuser,
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline
                },
                requestkwargs={
                    'data': {
                        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                        'comment_text': 'You have been given a new attempt.',
                        'selected_items': [testgroup1.id, testgroup2.id]
                    }
                }
            )
        self.assertEqual(2, group_models.FeedbackSet.objects.count())
class TestManageDeadlineNewAttemptSingleGroup(ExaminerTestCaseMixin):
    """
    Tests ``ManageDeadlineSingleGroupView`` in ``new-attempt`` mode: the view
    operates on exactly one group (``group_id`` in viewkwargs), and renders
    the suggested-deadline shortcuts.
    """
    viewclass = manage_deadline_view.ManageDeadlineSingleGroupView
    handle_deadline = 'new-attempt'

    def test_info_box_not_showing_when_groups_should_be_excluded(self):
        # The exclusion info box is not rendered by the single-group view,
        # even though another group on the assignment is uncorrected.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup1)
        baker.make('core.Examiner',
                   relatedexaminer__user=examiner_user,
                   assignmentgroup=testgroup2)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            requestuser=examiner_user,
            cradmin_app=self._get_mock_app(examiner_user),
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup1.id
            }
        )
        self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))

    def test_all_groups_added_to_form_hidden(self):
        # The selected group is rendered as a hidden form field.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup1.id
            }
        )
        # NOTE(review): `.__str__().decode('utf-8')` only works if the
        # element's __str__ returns bytes -- confirm against the htmls API.
        self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
        self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))

    def test_post(self):
        # Posting a new attempt for the single group creates a second
        # feedbackset carrying the cleaned deadline.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        self.mock_http302_postrequest(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup.id
            },
            requestkwargs={
                'data': {
                    'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                    'comment_text': 'You have been given a new attempt.',
                    'selected_items': [testgroup.id]
                }
            }
        )
        self.assertEqual(2, group_models.FeedbackSet.objects.count())
        cached_data_group = core_models.AssignmentGroup.objects.get(id=testgroup.id).cached_data
        # The first feedbackset keeps the assignment deadline; only the new
        # attempt gets the posted deadline.
        self.assertEqual(cached_data_group.first_feedbackset.deadline_datetime, testassignment.first_deadline)
        new_deadline = new_deadline.replace(second=59)  # Deadline is cleaned to seconds as 59.
        self.assertEqual(cached_data_group.last_feedbackset.deadline_datetime, new_deadline)
        self.assertEqual(cached_data_group.last_feedbackset.last_updated_by, testuser)

    def test_post_multiple_groups_raises_error(self):
        # The single-group view rejects a post selecting more than one group
        # with Http404, and no deadlines are changed.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup1)
        group_baker.feedbackset_first_attempt_published(group=testgroup2)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
        baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http302_postrequest(
                cradmin_role=testassignment,
                cradmin_instance=self._get_mock_instance(testassignment),
                cradmin_app=self._get_mock_app(user=testuser),
                requestuser=testuser,
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline,
                    'group_id': testgroup1.id
                },
                requestkwargs={
                    'data': {
                        'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
                        'comment_text': 'You have been given a new attempt.',
                        'selected_items': [testgroup1.id, testgroup2.id]
                    }
                }
            )
        self.assertEqual(2, group_models.FeedbackSet.objects.count())
        cached_data_group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id).cached_data
        cached_data_group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id).cached_data
        self.assertNotEqual(cached_data_group1.last_feedbackset.deadline_datetime, new_deadline)
        self.assertNotEqual(cached_data_group2.last_feedbackset.deadline_datetime, new_deadline)

    def test_get_earliest_possible_deadline_last_deadline_in_past(self):
        # With the current deadline in the past, the first suggested deadline
        # must be in the future and within the next 8 days.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        last_feedbackset_last_deadline = testassignment.first_deadline
        group_baker.feedbackset_first_attempt_published(group=testgroup)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(last_feedbackset_last_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup.id
            }
        )
        earliest_date = mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline')[0] \
            .get('cradmin-legacy-setfieldvalue')
        converted_datetime = from_isoformat_noseconds(earliest_date)
        self.assertTrue(converted_datetime > timezone.now())
        self.assertTrue(converted_datetime < timezone.now() + timezone.timedelta(days=8))

    def test_get_all_suggested_deadlines_deadline_in_future(self):
        # Every suggested deadline is the current deadline at 23:59:59
        # shifted by 7, 14, 21, ... days.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        last_deadline = group_models.FeedbackSet.clean_deadline(timezone.now() + timedelta(days=1)).replace(second=0)
        testfeedbackset = group_baker.feedbackset_first_attempt_published(group=testgroup,
                                                                          deadline_datetime=last_deadline)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(testfeedbackset.deadline_datetime),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup.id
            }
        )
        added_days = 7
        feedbackset_current_deadline = arrow.get(testfeedbackset.deadline_datetime).to(settings.TIME_ZONE).replace(
            hour=23, minute=59, second=59, microsecond=0)
        for element in mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline'):
            # Replacing seconds simply ensures that the seconds correspond, since
            # the given isoformat does not contain seconds but the compared deadline
            # datetime does.
            suggested_date = from_isoformat_noseconds(element.get('cradmin-legacy-setfieldvalue')).replace(second=59)
            suggested_deadline_from_current_deadline = feedbackset_current_deadline.shift(days=+added_days).datetime
            self.assertEqual(
                suggested_date,
                suggested_deadline_from_current_deadline)
            added_days += 7

    def test_get_earliest_possible_deadline_last_deadline_in_future(self):
        # With the deadline 30 days ahead, the first suggestion is that
        # deadline at 23:59:59 plus 7 days.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        last_feedbackset_last_deadline = group_models.FeedbackSet.clean_deadline(
            timezone.now() + timezone.timedelta(days=30)).replace(second=0)
        # Created for its side effect only -- the returned feedbackset is not
        # needed, so the result is deliberately not bound to a variable.
        group_baker.feedbackset_first_attempt_published(
            group=testgroup,
            deadline_datetime=last_feedbackset_last_deadline)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(last_feedbackset_last_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup.id
            }
        )
        earliest_date = mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline')[0] \
            .get('cradmin-legacy-setfieldvalue')
        # Replacing seconds simply ensures that the seconds correspond, since
        # the given isoformat does not contain seconds but the compared deadline
        # datetime does.
        converted_datetime = from_isoformat_noseconds(earliest_date).replace(second=59)
        now_with_same_time_as_deadline = arrow.get(last_feedbackset_last_deadline).to(settings.TIME_ZONE).replace(
            hour=23,
            minute=59,
            second=59,
            microsecond=0
        ).shift(days=+7).datetime
        self.assertEqual(now_with_same_time_as_deadline, converted_datetime)

    def test_get_earliest_possible_deadline_uses_multiple_feedbacksets(self):
        # When the group has several feedbacksets, the suggestions are based
        # on the deadline of the last one.
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        now = timezone.now()
        group_baker.feedbackset_first_attempt_published(group=testgroup)
        last_feedbackset_last_deadline = group_models.FeedbackSet.clean_deadline(now + timezone.timedelta(days=30))\
            .replace(second=0)
        # Created for its side effect only -- the returned feedbackset is not
        # needed, so the result is deliberately not bound to a variable.
        group_baker.feedbackset_new_attempt_published(
            group=testgroup,
            deadline_datetime=last_feedbackset_last_deadline)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self._get_mock_instance(testassignment),
            cradmin_app=self._get_mock_app(user=testuser),
            requestuser=testuser,
            viewkwargs={
                'deadline': datetimeutils.datetime_to_url_string(last_feedbackset_last_deadline),
                'handle_deadline': self.handle_deadline,
                'group_id': testgroup.id
            }
        )
        earliest_date = mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline')[0] \
            .get('cradmin-legacy-setfieldvalue')
        # Replacing seconds simply ensures that the seconds correspond, since
        # the given isoformat does not contain seconds but the compared deadline
        # datetime does.
        converted_datetime = from_isoformat_noseconds(earliest_date).replace(second=59)
        now_with_same_time_as_deadline = arrow.get(last_feedbackset_last_deadline).to(settings.TIME_ZONE).replace(
            hour=23,
            minute=59,
            second=59,
            microsecond=0
        ).shift(days=+7).datetime
        self.assertEqual(now_with_same_time_as_deadline, converted_datetime)
class TestManageDeadlineMoveDeadlineSingleGroup(ExaminerTestCaseMixin):
viewclass = manage_deadline_view.ManageDeadlineSingleGroupView
handle_deadline = 'move-deadline'
    def test_move_deadline_last_attempt_is_graded(self):
        """
        A GET in ``move-deadline`` mode for a group whose last feedbackset is
        already published (graded) is expected to raise ``Http404``.
        """
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
        group_baker.feedbackset_first_attempt_published(group=testgroup)
        examiner_user = baker.make(settings.AUTH_USER_MODEL)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=examiner_user)
        # new_deadline is built but never posted -- the GET itself is
        # expected to fail before any deadline data would be used.
        new_deadline = timezone.now() + timezone.timedelta(days=3)
        new_deadline = new_deadline.replace(microsecond=0)
        with self.assertRaises(http.Http404):
            self.mock_http200_getrequest_htmls(
                # NOTE(review): sibling tests pass the assignment as
                # cradmin_role; passing testgroup here looks inconsistent --
                # confirm it is intentional.
                cradmin_role=testgroup,
                cradmin_instance=self._get_mock_instance(testassignment),
                requestuser=examiner_user,
                cradmin_app=self._get_mock_app(examiner_user),
                viewkwargs={
                    'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
                    'handle_deadline': self.handle_deadline,
                    'group_id': testgroup.id
                }
            )
def test_info_box_not_showing_when_groups_should_be_excluded(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_OFF)
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
group_baker.feedbackset_first_attempt_published(group=testgroup2)
examiner_user = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup1)
baker.make('core.Examiner',
relatedexaminer__user=examiner_user,
assignmentgroup=testgroup2)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
requestuser=examiner_user,
cradmin_app=self._get_mock_app(examiner_user),
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'group_id': testgroup1.id
}
)
self.assertFalse(mockresponse.selector.exists('.devilry-deadline-management-info-box'))
def test_all_groups_added_to_form_hidden(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(user=testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'group_id': testgroup1.id
}
)
self.assertIn('type="hidden"', mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
self.assertIn('value="{}"'.format(testgroup1.id), mockresponse.selector.one('#id_selected_items_0').__str__().decode('utf-8'))
def test_post(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
new_deadline = timezone.now() + timezone.timedelta(days=3)
new_deadline = new_deadline.replace(microsecond=0)
self.mock_http302_postrequest(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(user=testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'group_id': testgroup.id
},
requestkwargs={
'data': {
'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
'comment_text': 'You have been given a new attempt.',
'selected_items': [testgroup.id]
}
}
)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
cached_data_group = core_models.AssignmentGroup.objects.get(id=testgroup.id).cached_data
self.assertEqual(cached_data_group.last_feedbackset, cached_data_group.first_feedbackset)
new_deadline = new_deadline.replace(second=59) # Deadline is cleaned to seconds as 59.
self.assertEqual(cached_data_group.last_feedbackset.deadline_datetime, new_deadline)
self.assertEqual(cached_data_group.last_feedbackset.last_updated_by, testuser)
def test_post_multiple_groups_raises_error(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup1 = baker.make('core.AssignmentGroup', parentnode=testassignment)
testgroup2 = baker.make('core.AssignmentGroup', parentnode=testassignment)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup1)
group_baker.feedbackset_first_attempt_unpublished(group=testgroup2)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup1, relatedexaminer__user=testuser)
baker.make('core.Examiner', assignmentgroup=testgroup2, relatedexaminer__user=testuser)
new_deadline = timezone.now() + timezone.timedelta(days=3)
new_deadline = new_deadline.replace(microsecond=0)
with self.assertRaises(http.Http404):
self.mock_http302_postrequest(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(user=testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testassignment.first_deadline),
'handle_deadline': self.handle_deadline,
'group_id': testgroup1.id
},
requestkwargs={
'data': {
'new_deadline': isoformat_withseconds(timezone.localtime(new_deadline)),
'comment_text': 'You have been given a new attempt.',
'selected_items': [testgroup1.id, testgroup2.id]
}
}
)
self.assertEqual(2, group_models.FeedbackSet.objects.count())
cached_data_group1 = core_models.AssignmentGroup.objects.get(id=testgroup1.id).cached_data
cached_data_group2 = core_models.AssignmentGroup.objects.get(id=testgroup2.id).cached_data
self.assertNotEqual(cached_data_group1.last_feedbackset.deadline_datetime, new_deadline)
self.assertNotEqual(cached_data_group2.last_feedbackset.deadline_datetime, new_deadline)
def test_get_earliest_suggested_deadline_count(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
# The final deadline for the first feedbackset
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testfeedbackset.deadline_datetime),
'handle_deadline': self.handle_deadline,
'group_id': testgroup.id
}
)
self.assertEqual(mockresponse.selector.count('.devilry-deadlinemanagement-suggested-deadline'), 4)
def test_get_earliest_suggested_deadline_last_deadline_in_past(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testfeedbackset.deadline_datetime),
'handle_deadline': self.handle_deadline,
'group_id': testgroup.id
}
)
earliest_date = mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline')[0]\
.get('cradmin-legacy-setfieldvalue')
converted_datetime = from_isoformat_noseconds(earliest_date)
current_feedbackset_deadline = arrow.get(testfeedbackset.deadline_datetime).to(settings.TIME_ZONE)
now_with_same_time_as_deadline = arrow.utcnow().to(settings.TIME_ZONE).replace(
hour=current_feedbackset_deadline.hour,
minute=current_feedbackset_deadline.minute,
second=current_feedbackset_deadline.second,
microsecond=current_feedbackset_deadline.microsecond
).shift(days=+7).datetime
self.assertEqual(now_with_same_time_as_deadline, converted_datetime)
def test_get_all_suggested_deadlines_deadline_in_future(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
last_deadline = group_models.FeedbackSet.clean_deadline(timezone.now() + timedelta(days=1)).replace(second=0)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup,
deadline_datetime=last_deadline)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(testfeedbackset.deadline_datetime),
'handle_deadline': self.handle_deadline,
'group_id': testgroup.id
}
)
added_days = 7
feedbackset_current_deadline = arrow.get(testfeedbackset.deadline_datetime).to(settings.TIME_ZONE).replace(
hour=23, minute=59, second=59, microsecond=0)
for element in mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline'):
# Replacing seconds simply ensures that the seconds correspond, since
# the given isoformat does not contain seconds but the compared deadline
# datetime does.
suggested_date = from_isoformat_noseconds(element.get('cradmin-legacy-setfieldvalue')).replace(second=59)
suggested_deadline_from_current_deadline = feedbackset_current_deadline.shift(days=+added_days).datetime
self.assertEqual(
suggested_date,
suggested_deadline_from_current_deadline)
added_days += 7
def test_get_earliest_possible_deadline_last_deadline_in_future(self):
testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
# The final deadline for the first feedbackset
first_feedbackset_last_deadline = group_models.FeedbackSet.clean_deadline(
timezone.now() + timezone.timedelta(days=10)).replace(second=0)
testfeedbackset1 = group_baker.feedbackset_first_attempt_published(
group=testgroup,
deadline_datetime=first_feedbackset_last_deadline)
# The current final deadline for the last feedbackset
last_feedbackset_last_deadline = group_models.FeedbackSet.clean_deadline(
timezone.now() + timezone.timedelta(days=30)).replace(second=0)
testfeedbackset2 = group_baker.feedbackset_new_attempt_published(
group=testgroup,
deadline_datetime=last_feedbackset_last_deadline)
testuser = baker.make(settings.AUTH_USER_MODEL)
baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=testuser)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_instance=self._get_mock_instance(testassignment),
cradmin_app=self._get_mock_app(user=testuser),
requestuser=testuser,
viewkwargs={
'deadline': datetimeutils.datetime_to_url_string(last_feedbackset_last_deadline),
'handle_deadline': self.handle_deadline,
'group_id': testgroup.id
}
)
earliest_date = mockresponse.selector.list('.devilry-deadlinemanagement-suggested-deadline')[0]\
.get('cradmin-legacy-setfieldvalue')
# Replacing seconds simply ensures that the seconds correspond, since
# the given isoformat does not contain seconds but the compared deadline
# datetime does.
converted_datetime = from_isoformat_noseconds(earliest_date).replace(second=59)
now_with_same_time_as_deadline = arrow.get(testfeedbackset2.deadline_datetime).to(settings.TIME_ZONE).replace(
hour=23,
minute=59,
second=59,
microsecond=0
).shift(days=+7).datetime
self.assertEqual(now_with_same_time_as_deadline, converted_datetime)
| |
#!/usr/bin/env python
#
# Website info gathering
#
# TODO collect WHOIS information
# TODO add choice to select different report types about website
# TODO add choice to select different report format (text, json, html) about website
# TODO add sitemap-image support (http://support.google.com/webmasters/bin/answer.py?hl=en&answer=178636&topic=20986&ctx=topic)
# TODO add sitemap-video support ({http://www.google.com/schemas/sitemap-video/1.0}video)
__author__ = 'Andrey Usov <https://github.com/ownport/pywsinfo>'
__version__ = '0.1'
import re
import sys
import socket
import requests
import urlparse
import datetime
from gzip import GzipFile
from cStringIO import StringIO
try:
import xml.etree.cElementTree as xml_parser
except ImportError:
import xml.etree.ElementTree as xml_parser
# default setting for python-requests
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) pywsinfo/{}'.format(__version__)
# NOTE(review): `requests.defaults` is the pre-1.0 python-requests API;
# modern releases removed it, so this line pins the project to an old
# requests version -- confirm before upgrading the dependency.
requests.defaults.defaults['base_headers']['User-Agent'] = USER_AGENT
# XML namespace prefixes of the known sitemap schema versions; presumably
# used to strip/match tag names when parsing sitemap.xml (not referenced
# in this chunk of the file).
NAMESPACES = (
    '{http://www.google.com/schemas/sitemap/0.84}',
    '{http://www.google.com/schemas/sitemap/0.9}',
    '{http://www.sitemaps.org/schemas/sitemap/0.9}',
)
def validate_url(url):
    ''' Return True if *url* looks like a valid absolute http(s) URL.

    Previously an unimplemented stub that always returned None (falsy);
    implementing it keeps the documented contract ("returns True if url
    validated") while remaining backward compatible for falsy checks.
    '''
    if not url:
        return False
    # scheme://host[:port][/path] -- host may not contain whitespace,
    # '/', ':', '?' or '#'; path may not contain whitespace.
    return re.match(r'^https?://[^\s/:?#]+(:\d+)?(/[^\s]*)?$', url, re.I) is not None
def parse_url(url):
    ''' Split a website *url* into its base URL and bare host name.

    Path, query and fragment are discarded; an explicit ``:port`` is
    stripped from the host.  Returns ``{'source_url': ..., 'host': ...}``.
    '''
    parts = urlparse.urlparse(url)
    base_url = urlparse.urlunsplit((parts.scheme, parts.netloc, '', '', ''))
    netloc = parts.netloc
    if ':' not in netloc:
        hostname = netloc
    else:
        hostname, _port = netloc.split(':')
    return {'source_url': base_url, 'host': hostname}
def nslookup(host):
    ''' Return the list of IPv4 addresses *host* resolves to.

    Returns an empty list when resolution fails (unknown host, resolver
    unreachable) instead of raising.
    '''
    # gethostbyname_ex() -> (hostname, aliaslist, ipaddrlist); we only
    # want the address list (gethostbyname() returns a single address).
    try:
        return socket.gethostbyname_ex(host)[2]
    except socket.error:
        # Only swallow resolver/socket failures (gaierror/herror derive
        # from socket.error).  The previous bare ``except:`` also hid
        # unrelated errors such as KeyboardInterrupt.
        return []
# Entity escape
#
# Character              Escape Code
# ---------------------+-----------
# Ampersand (&)          &amp;
# Single Quote (')       &apos;
# Double Quote (")       &quot;
# Greater Than (>)       &gt;
# Less Than (<)          &lt;
def parse_html_head(content):
    ''' Extract title, keywords and description from an HTML document.

    Returns a dict with any of the keys 'title', 'keywords' (list of
    stripped strings) and 'description' that could be found.
    '''
    # TODO extract links to RSS/Atom feeds, e.g.
    # <link rel="alternate" type="application/rss+xml" href="..." title="...">
    # TODO extract info about generators, e.g.
    # <meta name="generator" content="WordPress 3.4.2" />
    info = dict()
    # flatten the document so the non-greedy regexes can span "lines"
    flat = content.replace('\r', '').replace('\n', '')
    # isolate the HEAD section for meta parsing
    head_section = ''.join(re.findall(r'<head(.+?)</head>', flat, re.I))
    # the title is searched in the whole document, not just the head
    page_title = ''.join(re.findall(r'<title>(.+?)</title>', flat, re.I))
    if page_title:
        info['title'] = page_title.strip()
    # walk every <meta ...> tag and pick out keywords/description
    for meta_attrs in re.findall(r'<meta(.+?)[/]?>', head_section, re.I):
        attrs = dict(re.findall(r'(\w+)\s*=\s*"(.+?)"', meta_attrs, re.I))
        if 'name' not in attrs or 'content' not in attrs:
            continue
        meta_name = attrs['name'].lower()
        if meta_name == 'keywords':
            info['keywords'] = [part.strip() for part in attrs['content'].split(',')]
        if meta_name == 'description':
            info['description'] = attrs['content']
    return info
# -----------------------------------------------
# WebsiteInfo
# -----------------------------------------------
class WebsiteInfo(object):
    ''' Gathers information about a website (DNS, headers, HTML head
    metadata, robots.txt and sitemap links) into a details dict. '''
    def __init__(self, website_url, debug=False):
        # debug flag is stored but not used in this chunk of the file
        self._debug = debug
        self._url = website_url
        self._homepage_content = None
        # seed details with {'source_url': ..., 'host': ...}
        self._details = parse_url(self._url)
    def _make_request(self, method, url):
        ''' make request to website; return a dict with final_url,
        status_code, content-type and optionally server/powered-by/content '''
        result = dict()
        resp = requests.request(method, url, timeout=60)
        result['final_url'] = resp.url
        result['status_code'] = resp.status_code
        if 'server' in resp.headers:
            result['server'] = resp.headers['server']
        if 'powered-by' in resp.headers:
            # NOTE(review): the condition checks 'powered-by' but the value
            # is read from 'x-powered-by' -- one of the two header names
            # looks wrong; confirm which header the servers actually send.
            result['powered-by'] = resp.headers['x-powered-by']
        result['content-type'] = resp.headers.get('content-type', '')
        if resp.content:
            result['content'] = resp.content
        return result
    def _check_robots_txt(self):
        ''' robots.txt '''
        # not implemented; robots.txt handling currently lives in gather()
        pass
    def _check_sitemap(self):
        ''' sitemap.xml '''
        # not implemented; sitemap detection currently lives in gather()
        pass
    def gather(self):
        ''' run website info retrieval '''
        # nslookup
        # NOTE(review): the key is misspelled ('ip_addreses'); kept as-is
        # since consumers of details() may depend on it.
        self._details['ip_addreses'] = nslookup(self._details['host'])
        # first request to web site. Try to detect
        if len(self._details['ip_addreses']) == 0:
            raise RuntimeError('Cannot resolve IP addresses for host')
        resp = self._make_request('GET', self._details['source_url'])
        for k in resp.keys():
            if k == 'content':
                # merge parsed <head> metadata (title/keywords/description)
                head_params = parse_html_head(resp[k])
                for k in head_params:
                    self._details[k] = head_params[k]
            elif k == 'content-type':
                pass
            else:
                self._details[k] = resp[k]
        # check robots.txt
        if self._details.get('status_code') == 200:
            robots_url = urlparse.urljoin(self._details['final_url'],'/robots.txt')
            resp = self._make_request('GET', robots_url)
            if (resp['status_code'] == 200 and \
                resp['content-type'].startswith('text/plain') and \
                'content' in resp):
                self._details['robots.txt'] = resp['content']
                # extract link to sitemap.xml
                for line in self._details['robots.txt'].split('\n'):
                    if line.startswith('Sitemap'):
                        if 'sitemaps' not in self._details:
                            self._details['sitemaps'] = list()
                        sitemap_url = line.replace('Sitemap:', '').strip()
                        if not sitemap_url.startswith('http'):
                            sitemap_url = urlparse.urljoin(self._details['final_url'], sitemap_url)
                        self._details['sitemaps'].append(sitemap_url)
            # check default sitemap
            if 'sitemaps' not in self._details:
                sitemaps_url = urlparse.urljoin(self._details['final_url'],'/sitemap.xml')
                resp = self._make_request('HEAD', sitemaps_url)
                if resp['status_code'] == 200:
                    # NOTE(review): stored as a bare string here but as a
                    # list in the robots.txt branch -- inconsistent type
                    # for the 'sitemaps' key; confirm consumers handle both.
                    self._details['sitemaps'] = sitemaps_url
        # latest update datetime
        # TODO change format datetime to 'YYYY-mm-DDTHH:MM:SSZ'
        self._details['last_update'] = str(datetime.datetime.now())
    def details(self):
        ''' return details '''
        return self._details
    def report(self, output=None):
        ''' website report
        supported formats, defined by output:
        None print result to standard output
        <filename>.json save report to json file
        <filename>.kvlite save report to kvlite database. If the record exists,
        update information
        '''
        # TODO json format
        # TODO kvlite format
        import pprint
        pprint.pprint(self._details)
if __name__ == '__main__':
    # Command line entry point: gather info for a single website URL.
    import argparse
    cli = argparse.ArgumentParser(description='website info extraction')
    cli.add_argument('-u', '--url', help='website url')
    cli.add_argument('-d', '--debug', action='store_true', help='activate debug')
    options = cli.parse_args()
    if not options.url:
        cli.print_help()
    else:
        website = WebsiteInfo(options.url, debug=options.debug)
        website.gather()
        website.report()
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import datetime
import json
import logging
import mock
import six
import zlib
from sentry import tagstore
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils import timezone
from exam import fixture
from gzip import GzipFile
from raven import Client
from six import StringIO
from sentry.models import (Group, Event)
from sentry.testutils import TestCase, TransactionTestCase
from sentry.testutils.helpers import get_auth_header
from sentry.utils.settings import (validate_settings, ConfigurationError, import_string)
# Per-backend fixtures for DepdendencyTest.validate_dependency().
# Each value is the positional argument tuple
# (settings key, importable package name, dependency type label,
#  backend dotted path, settings value to install).
DEPENDENCY_TEST_DATA = {
    "postgresql": (
        'DATABASES', 'psycopg2.extensions', "database engine",
        "django.db.backends.postgresql_psycopg2", {
            'default': {
                'ENGINE': "django.db.backends.postgresql_psycopg2",
                'NAME': 'test',
                'USER': 'root',
                'PASSWORD': '',
                'HOST': 'localhost',
                'PORT': ''
            }
        }
    ),
    "mysql": (
        'DATABASES', 'MySQLdb', "database engine", "django.db.backends.mysql", {
            'default': {
                'ENGINE': "django.db.backends.mysql",
                'NAME': 'test',
                'USER': 'root',
                'PASSWORD': '',
                'HOST': 'localhost',
                'PORT': ''
            }
        }
    ),
    "oracle": (
        'DATABASES', 'cx_Oracle', "database engine", "django.db.backends.oracle", {
            'default': {
                'ENGINE': "django.db.backends.oracle",
                'NAME': 'test',
                'USER': 'root',
                'PASSWORD': '',
                'HOST': 'localhost',
                'PORT': ''
            }
        }
    ),
    "memcache": (
        'CACHES', 'memcache', "caching backend",
        "django.core.cache.backends.memcached.MemcachedCache", {
            'default': {
                'BACKEND': "django.core.cache.backends.memcached.MemcachedCache",
                'LOCATION': '127.0.0.1:11211',
            }
        }
    ),
    "pylibmc": (
        'CACHES', 'pylibmc', "caching backend", "django.core.cache.backends.memcached.PyLibMCCache",
        {
            'default': {
                'BACKEND': "django.core.cache.backends.memcached.PyLibMCCache",
                'LOCATION': '127.0.0.1:11211',
            }
        }
    ),
}
def get_fixture_path(name):
    """Return the absolute path of *name* inside this test module's fixtures/ dir."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures', name)
def load_fixture(name):
    """Return the text content of fixture file *name*."""
    with open(get_fixture_path(name)) as handle:
        return handle.read()
class AssertHandler(logging.Handler):
    """logging.Handler that turns any emitted record into a test failure.

    Attached to the 'sentry.errors' logger so internal errors surface as
    AssertionErrors instead of being silently logged.
    """

    def emit(self, entry):
        # Bug fix: a LogRecord has no `.message` attribute until a formatter
        # calls record.getMessage(), so `entry.message` raised AttributeError
        # and masked the real log message.  getMessage() also interpolates
        # the record's args into the format string.
        raise AssertionError(entry.getMessage())
class RavenIntegrationTest(TransactionTestCase):
    """
    This mocks the test server and specifically tests behavior that would
    happen between Raven <--> Sentry over HTTP communication.
    """
    def setUp(self):
        self.user = self.create_user('coreapi@example.com')
        self.project = self.create_project()
        # default project key (public/secret pair) used to build the DSN
        self.pk = self.project.key_set.get_or_create()[0]
        self.configure_sentry_errors()
    def configure_sentry_errors(self):
        # Route 'sentry.errors' through AssertHandler so any internal error
        # logged during the test raises and fails the test immediately.
        assert_handler = AssertHandler()
        sentry_errors = logging.getLogger('sentry.errors')
        sentry_errors.addHandler(assert_handler)
        sentry_errors.setLevel(logging.DEBUG)
        def remove_handler():
            sentry_errors.handlers.pop(sentry_errors.handlers.index(assert_handler))
        self.addCleanup(remove_handler)
    def sendRemote(self, url, data, headers={}):
        # Stand-in for raven's HTTP transport: replays the payload against
        # the in-process store endpoint via the Django test client.
        # NOTE(review): mutable default argument, and pop() mutates the
        # caller's dict -- acceptable in a test double but worth confirming.
        content_type = headers.pop('Content-Type', None)
        headers = dict(
            ('HTTP_' + k.replace('-', '_').upper(), v) for k, v in six.iteritems(headers)
        )
        if isinstance(data, six.text_type):
            data = data.encode('utf-8')
        resp = self.client.post(
            reverse('sentry-api-store', args=[self.pk.project_id]),
            data=data,
            content_type=content_type,
            **headers
        )
        assert resp.status_code == 200, resp.content
    @mock.patch('raven.base.Client.send_remote')
    def test_basic(self, send_remote):
        send_remote.side_effect = self.sendRemote
        client = Client(
            dsn='http://%s:%s@localhost:8000/%s' %
            (self.pk.public_key, self.pk.secret_key, self.pk.project_id)
        )
        with self.tasks():
            client.captureMessage(message='foo')
        # NOTE(review): `is 1` relies on CPython small-int caching; `== 1`
        # would be the correct comparison.
        assert send_remote.call_count is 1
        assert Group.objects.count() == 1
        group = Group.objects.get()
        assert group.event_set.count() == 1
        instance = group.event_set.get()
        assert instance.data['sentry.interfaces.Message']['message'] == 'foo'
class SentryRemoteTest(TestCase):
    """End-to-end tests for the HTTP store endpoint: payload encodings,
    auth protocol versions, origin/referer checks and timestamp parsing."""
    @fixture
    def path(self):
        # URL of the store endpoint (project resolved from auth headers)
        return reverse('sentry-api-store')
    def test_minimal(self):
        kwargs = {'message': 'hello', 'tags': {'foo': 'bar'}}
        resp = self._postWithHeader(kwargs)
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
        # tag key/value recorded at project, group-key and group-value level
        assert tagstore.get_tag_key(self.project.id, None, 'foo') is not None
        assert tagstore.get_tag_value(self.project.id, None, 'foo', 'bar') is not None
        assert tagstore.get_group_tag_key(
            self.project.id, instance.group_id, None, 'foo') is not None
        assert tagstore.get_group_tag_value(
            instance.project_id,
            instance.group_id,
            None,
            'foo',
            'bar') is not None
    def test_timestamp(self):
        # epoch-seconds timestamp one hour in the past
        timestamp = timezone.now().replace(
            microsecond=0, tzinfo=timezone.utc
        ) - datetime.timedelta(hours=1)
        kwargs = {u'message': 'hello', 'timestamp': timestamp.strftime('%s.%f')}
        resp = self._postWithSignature(kwargs)
        assert resp.status_code == 200, resp.content
        instance = Event.objects.get()
        assert instance.message == 'hello'
        assert instance.datetime == timestamp
        group = instance.group
        assert group.first_seen == timestamp
        assert group.last_seen == timestamp
    def test_timestamp_as_iso(self):
        # same as test_timestamp but with an ISO-8601 formatted value
        timestamp = timezone.now().replace(
            microsecond=0, tzinfo=timezone.utc
        ) - datetime.timedelta(hours=1)
        kwargs = {u'message': 'hello', 'timestamp': timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')}
        resp = self._postWithSignature(kwargs)
        assert resp.status_code == 200, resp.content
        instance = Event.objects.get()
        assert instance.message == 'hello'
        assert instance.datetime == timestamp
        group = instance.group
        assert group.first_seen == timestamp
        assert group.last_seen == timestamp
    def test_ungzipped_data(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithSignature(kwargs)
        assert resp.status_code == 200
        instance = Event.objects.get()
        assert instance.message == 'hello'
    @override_settings(SENTRY_ALLOW_ORIGIN='sentry.io')
    def test_correct_data_with_get(self):
        kwargs = {'message': 'hello'}
        resp = self._getWithReferer(kwargs)
        assert resp.status_code == 200, resp.content
        instance = Event.objects.get()
        assert instance.message == 'hello'
    @override_settings(SENTRY_ALLOW_ORIGIN='*')
    def test_get_without_referer_allowed(self):
        # wildcard origin: a GET without a Referer header must be accepted
        self.project.update_option('sentry:origins', '')
        kwargs = {'message': 'hello'}
        resp = self._getWithReferer(kwargs, referer=None, protocol='4')
        assert resp.status_code == 200, resp.content
    @override_settings(SENTRY_ALLOW_ORIGIN='sentry.io')
    def test_correct_data_with_post_referer(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithReferer(kwargs)
        assert resp.status_code == 200, resp.content
        instance = Event.objects.get()
        assert instance.message == 'hello'
    @override_settings(SENTRY_ALLOW_ORIGIN='sentry.io')
    def test_post_without_referer(self):
        self.project.update_option('sentry:origins', '')
        kwargs = {'message': 'hello'}
        resp = self._postWithReferer(kwargs, referer=None, protocol='4')
        assert resp.status_code == 200, resp.content
    @override_settings(SENTRY_ALLOW_ORIGIN='*')
    def test_post_without_referer_allowed(self):
        self.project.update_option('sentry:origins', '')
        kwargs = {'message': 'hello'}
        resp = self._postWithReferer(kwargs, referer=None, protocol='4')
        assert resp.status_code == 200, resp.content
    @override_settings(SENTRY_ALLOW_ORIGIN='google.com')
    def test_post_with_invalid_origin(self):
        # referer matches neither the project origins nor the global setting
        self.project.update_option('sentry:origins', 'sentry.io')
        kwargs = {'message': 'hello'}
        resp = self._postWithReferer(
            kwargs,
            referer='https://getsentry.net',
            protocol='4'
        )
        assert resp.status_code == 403, resp.content
    def test_signature(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithSignature(kwargs)
        assert resp.status_code == 200, resp.content
        instance = Event.objects.get()
        assert instance.message == 'hello'
    def test_content_encoding_deflate(self):
        kwargs = {'message': 'hello'}
        message = zlib.compress(json.dumps(kwargs))
        key = self.projectkey.public_key
        secret = self.projectkey.secret_key
        with self.tasks():
            resp = self.client.post(
                self.path,
                message,
                content_type='application/octet-stream',
                HTTP_CONTENT_ENCODING='deflate',
                HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
            )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_content_encoding_gzip(self):
        kwargs = {'message': 'hello'}
        message = json.dumps(kwargs)
        fp = StringIO()
        # NOTE(review): if GzipFile() itself raised, the finally block would
        # hit a NameError on `f`; construct outside the try to be safe.
        try:
            f = GzipFile(fileobj=fp, mode='w')
            f.write(message)
        finally:
            f.close()
        key = self.projectkey.public_key
        secret = self.projectkey.secret_key
        with self.tasks():
            resp = self.client.post(
                self.path,
                fp.getvalue(),
                content_type='application/octet-stream',
                HTTP_CONTENT_ENCODING='gzip',
                HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
            )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_protocol_v2_0_without_secret_key(self):
        # protocol 2.0 allows store requests authenticated by public key only
        kwargs = {'message': 'hello'}
        resp = self._postWithHeader(
            data=kwargs,
            key=self.projectkey.public_key,
            protocol='2.0',
        )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_protocol_v3(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithHeader(
            data=kwargs,
            key=self.projectkey.public_key,
            secret=self.projectkey.secret_key,
            protocol='3',
        )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_protocol_v4(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithHeader(
            data=kwargs,
            key=self.projectkey.public_key,
            secret=self.projectkey.secret_key,
            protocol='4',
        )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_protocol_v5(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithHeader(
            data=kwargs,
            key=self.projectkey.public_key,
            secret=self.projectkey.secret_key,
            protocol='5',
        )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
    def test_protocol_v6(self):
        kwargs = {'message': 'hello'}
        resp = self._postWithHeader(
            data=kwargs,
            key=self.projectkey.public_key,
            secret=self.projectkey.secret_key,
            protocol='6',
        )
        assert resp.status_code == 200, resp.content
        event_id = json.loads(resp.content)['id']
        instance = Event.objects.get(event_id=event_id)
        assert instance.message == 'hello'
class DepdendencyTest(TestCase):
    """Verify validate_settings() rejects configs whose backend package
    is not importable.

    NOTE(review): the class name is misspelled ("Depdendency"); left
    unchanged because renaming could break external references.
    """
    def raise_import_error(self, package):
        # Build an import_string stand-in that fails only for *package*
        # and delegates every other import to the real implementation.
        def callable(package_name):
            if package_name != package:
                return import_string(package_name)
            raise ImportError("No module named %s" % (package, ))
        return callable
    @mock.patch('django.conf.settings', mock.Mock())
    @mock.patch('sentry.utils.settings.import_string')
    def validate_dependency(
        self, key, package, dependency_type, dependency, setting_value, import_string
    ):
        # Simulate *package* being uninstalled and assert that a config
        # requiring it fails validation (args come from DEPENDENCY_TEST_DATA).
        import_string.side_effect = self.raise_import_error(package)
        with self.settings(**{key: setting_value}):
            with self.assertRaises(ConfigurationError):
                validate_settings(settings)
    def test_validate_fails_on_postgres(self):
        self.validate_dependency(*DEPENDENCY_TEST_DATA['postgresql'])
    def test_validate_fails_on_mysql(self):
        self.validate_dependency(*DEPENDENCY_TEST_DATA['mysql'])
    def test_validate_fails_on_oracle(self):
        self.validate_dependency(*DEPENDENCY_TEST_DATA['oracle'])
    def test_validate_fails_on_memcache(self):
        self.validate_dependency(*DEPENDENCY_TEST_DATA['memcache'])
    def test_validate_fails_on_pylibmc(self):
        self.validate_dependency(*DEPENDENCY_TEST_DATA['pylibmc'])
def get_fixtures(name):
    """Load the '<name>_input.json' / '<name>_output.json' CSP fixture pair.

    Returns ``(raw_input_bytes, parsed_output)``; either element is None
    when the corresponding file does not exist.
    """
    base = os.path.join(os.path.dirname(__file__), 'fixtures/csp', name)
    try:
        with open(base + '_input.json', 'rb') as infile:
            raw_report = infile.read()
    except IOError:
        raw_report = None
    try:
        with open(base + '_output.json', 'rb') as outfile:
            expected = json.load(outfile)
    except IOError:
        expected = None
    return raw_report, expected
class CspReportTest(TestCase):
    """Tests for ingesting browser CSP violation reports from fixtures."""
    def assertReportCreated(self, input, output):
        # POST the raw report and verify exactly one Event was stored with
        # the expected message, tags and data subset.
        resp = self._postCspWithHeader(input)
        assert resp.status_code == 201, resp.content
        assert Event.objects.count() == 1
        e = Event.objects.all()[0]
        Event.objects.bind_nodes([e], 'data')
        assert output['message'] == e.data['sentry.interfaces.Message']['message']
        for key, value in six.iteritems(output['tags']):
            assert e.get_tag(key) == value
        self.assertDictContainsSubset(output['data'], e.data, e.data)
    def assertReportRejected(self, input):
        # malformed/unacceptable reports are rejected with 400 or 403
        resp = self._postCspWithHeader(input)
        assert resp.status_code in (400, 403), resp.content
    def test_chrome_blocked_asset(self):
        self.assertReportCreated(*get_fixtures('chrome_blocked_asset'))
    def test_firefox_missing_effective_uri(self):
        input, _ = get_fixtures('firefox_blocked_asset')
        self.assertReportRejected(input)
| |
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from unicodedata import normalize
from . import bitcoin, ecc, constants
from .bitcoin import *
from .ecc import string_to_number, number_to_string
from .crypto import pw_decode, pw_encode
from .util import (PrintError, InvalidPassword, hfu, WalletFileException,
BitcoinException)
from .mnemonic import Mnemonic, load_wordlist
from .plugin import run_hook
class KeyStore(PrintError):
    """Abstract base class for all keystore flavours.

    Subclasses override the capability predicates and implement
    get_pubkey_derivation() so transactions can be matched to keys.
    """

    def has_seed(self):
        return False

    def is_watching_only(self):
        return False

    def can_import(self):
        return False

    def may_have_password(self):
        """Returns whether the keystore can be encrypted with a password."""
        raise NotImplementedError()

    def get_tx_derivations(self, tx):
        """Map each x_pubkey of *tx* that this keystore can still sign
        to its key derivation."""
        derivations = {}
        for txin in tx.inputs():
            required = txin.get('num_sig')
            if required is None:
                continue
            sigs = txin['signatures']
            present = [sig for sig in sigs if sig]
            if len(present) == required:
                # input already has all its signatures
                continue
            for idx, x_pubkey in enumerate(txin['x_pubkeys']):
                if sigs[idx] is not None:
                    # this pubkey already signed
                    continue
                derivation = self.get_pubkey_derivation(x_pubkey)
                if derivation:
                    derivations[x_pubkey] = derivation
        return derivations

    def can_sign(self, tx):
        if self.is_watching_only():
            return False
        return bool(self.get_tx_derivations(tx))

    def ready_to_sign(self):
        return not self.is_watching_only()
class Software_KeyStore(KeyStore):
    """Keystore whose (encrypted) secrets live in the wallet file itself."""

    def __init__(self):
        KeyStore.__init__(self)

    def may_have_password(self):
        return not self.is_watching_only()

    def sign_message(self, sequence, message, password):
        """Sign *message* with the private key at *sequence*."""
        secret, compressed = self.get_private_key(sequence, password)
        return ecc.ECPrivkey(secret).sign_message(message, compressed)

    def decrypt_message(self, sequence, message, password):
        """Decrypt *message* with the private key at *sequence*."""
        secret, compressed = self.get_private_key(sequence, password)
        return ecc.ECPrivkey(secret).decrypt_message(message)

    def sign_transaction(self, tx, password):
        if self.is_watching_only():
            return
        # A wrong password raises here, before any signing work is done.
        self.check_password(password)
        # Replace derivations with the actual private keys, then sign.
        keypairs = self.get_tx_derivations(tx)
        for x_pubkey, derivation in keypairs.items():
            keypairs[x_pubkey] = self.get_private_key(derivation, password)
        if keypairs:
            tx.sign(keypairs)
class Imported_KeyStore(Software_KeyStore):
    # keystore for imported private keys
    #
    # self.keypairs maps pubkey-hex -> password-encrypted serialized privkey.

    def __init__(self, d):
        Software_KeyStore.__init__(self)
        self.keypairs = d.get('keypairs', {})

    def is_deterministic(self):
        return False

    def get_master_public_key(self):
        # imported keys have no single master key
        return None

    def dump(self):
        """Serialize to a wallet-file dict."""
        return {
            'type': 'imported',
            'keypairs': self.keypairs,
        }

    def can_import(self):
        return True

    def check_password(self, password):
        # Decrypting any stored key validates the password (get_private_key
        # raises InvalidPassword on mismatch).
        pubkey = list(self.keypairs.keys())[0]
        self.get_private_key(pubkey, password)

    def import_privkey(self, sec, password):
        """Add WIF key *sec*, encrypted under *password*; returns (txin_type, pubkey)."""
        txin_type, privkey, compressed = deserialize_privkey(sec)
        pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
        # re-serialize the key so the internal storage format is consistent
        serialized_privkey = serialize_privkey(
            privkey, compressed, txin_type, internal_use=True)
        # NOTE: if the same pubkey is reused for multiple addresses (script types),
        # there will only be one pubkey-privkey pair for it in self.keypairs,
        # and the privkey will encode a txin_type but that txin_type cannot be trusted.
        # Removing keys complicates this further.
        self.keypairs[pubkey] = pw_encode(serialized_privkey, password)
        return txin_type, pubkey

    def delete_imported_key(self, key):
        self.keypairs.pop(key)

    def get_private_key(self, pubkey, password):
        """Return (privkey_bytes, compressed); raises InvalidPassword on mismatch."""
        sec = pw_decode(self.keypairs[pubkey], password)
        txin_type, privkey, compressed = deserialize_privkey(sec)
        # this checks the password
        if pubkey != ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed):
            raise InvalidPassword()
        return privkey, compressed

    def get_pubkey_derivation(self, x_pubkey):
        if x_pubkey[0:2] in ['02', '03', '04']:
            if x_pubkey in self.keypairs.keys():
                return x_pubkey
        elif x_pubkey[0:2] == 'fd':
            # NOTE(review): self.addresses is never defined on this class; if
            # this branch is ever reached it raises AttributeError. Presumably
            # dead code from an older design — verify before relying on it.
            addr = bitcoin.script_to_address(x_pubkey[2:])
            if addr in self.addresses:
                return self.addresses[addr].get('pubkey')

    def update_password(self, old_password, new_password):
        """Re-encrypt every stored key under *new_password* ('' means none)."""
        self.check_password(old_password)
        if new_password == '':
            new_password = None
        for k, v in self.keypairs.items():
            b = pw_decode(v, old_password)
            c = pw_encode(b, new_password)
            self.keypairs[k] = c
class Deterministic_KeyStore(Software_KeyStore):
    """Software keystore derived from a seed (optionally with a passphrase)."""

    def __init__(self, d):
        Software_KeyStore.__init__(self)
        self.seed = d.get('seed', '')
        self.passphrase = d.get('passphrase', '')

    def is_deterministic(self):
        return True

    def dump(self):
        """Serialize seed/passphrase (only the fields that are set)."""
        serialized = {}
        for key, value in (('seed', self.seed), ('passphrase', self.passphrase)):
            if value:
                serialized[key] = value
        return serialized

    def has_seed(self):
        return bool(self.seed)

    def is_watching_only(self):
        return not self.has_seed()

    def add_seed(self, seed):
        if self.seed:
            raise Exception("a seed exists")
        self.seed = self.format_seed(seed)

    def get_seed(self, password):
        return pw_decode(self.seed, password)

    def get_passphrase(self, password):
        if not self.passphrase:
            return ''
        return pw_decode(self.passphrase, password)
class Xpub:
    """Mixin holding a BIP32 extended public key plus pubkey derivation logic."""

    def __init__(self):
        self.xpub = None
        # Lazily derived and cached xpubs for the receive (0) and change (1)
        # branches, filled on first use by derive_pubkey().
        self.xpub_receive = None
        self.xpub_change = None

    def get_master_public_key(self):
        return self.xpub

    def derive_pubkey(self, for_change, n):
        """Return the pubkey hex at branch *for_change*, index *n*."""
        xpub = self.xpub_change if for_change else self.xpub_receive
        if xpub is None:
            xpub = bip32_public_derivation(self.xpub, "", "/%d"%for_change)
            if for_change:
                self.xpub_change = xpub
            else:
                self.xpub_receive = xpub
        return self.get_pubkey_from_xpub(xpub, (n,))

    @classmethod
    def get_pubkey_from_xpub(self, xpub, sequence):
        # NOTE: classmethod whose first parameter is conventionally named `cls`.
        _, _, _, _, c, cK = deserialize_xpub(xpub)
        for i in sequence:
            cK, c = CKD_pub(cK, c, i)
        return bh2u(cK)

    def get_xpubkey(self, c, i):
        # 'ff' marker + base58check-decoded xpub bytes (hex) + two 2-byte
        # little-endian integers (branch, index).
        s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (c, i)))
        return 'ff' + bh2u(bitcoin.DecodeBase58Check(self.xpub)) + s

    @classmethod
    def parse_xpubkey(self, pubkey):
        """Inverse of get_xpubkey(): returns (xpub, [branch, index])."""
        assert pubkey[0:2] == 'ff'
        pk = bfh(pubkey)
        pk = pk[1:]
        # first 78 bytes are the raw extended key; re-encode as base58check
        xkey = bitcoin.EncodeBase58Check(pk[0:78])
        dd = pk[78:]
        s = []
        while dd:
            # each element is a 2-byte little-endian integer
            n = int(bitcoin.rev_hex(bh2u(dd[0:2])), 16)
            dd = dd[2:]
            s.append(n)
        assert len(s) == 2
        return xkey, s

    def get_pubkey_derivation(self, x_pubkey):
        # Only 'ff'-prefixed xpubkeys that match our own xpub are ours.
        if x_pubkey[0:2] != 'ff':
            return
        xpub, derivation = self.parse_xpubkey(x_pubkey)
        if self.xpub != xpub:
            return
        return derivation
class BIP32_KeyStore(Deterministic_KeyStore, Xpub):
    """Deterministic keystore backed by a BIP32 extended key pair."""

    def __init__(self, d):
        Xpub.__init__(self)
        Deterministic_KeyStore.__init__(self, d)
        self.xpub = d.get('xpub')
        self.xprv = d.get('xprv')

    def format_seed(self, seed):
        # collapse all whitespace runs to single spaces
        return ' '.join(seed.split())

    def dump(self):
        d = Deterministic_KeyStore.dump(self)
        d['type'] = 'bip32'
        d['xpub'] = self.xpub
        d['xprv'] = self.xprv
        return d

    def get_master_private_key(self, password):
        return pw_decode(self.xprv, password)

    def check_password(self, password):
        """Raise InvalidPassword unless the decrypted xprv matches our xpub."""
        xprv = pw_decode(self.xprv, password)
        # index 4 of the deserialized key is the chain code; it must agree
        if deserialize_xprv(xprv)[4] != deserialize_xpub(self.xpub)[4]:
            raise InvalidPassword()

    def update_password(self, old_password, new_password):
        """Re-encrypt seed, passphrase and xprv under *new_password*."""
        self.check_password(old_password)
        if new_password == '':
            new_password = None

        def reencrypt(blob):
            return pw_encode(pw_decode(blob, old_password), new_password)

        if self.has_seed():
            self.seed = reencrypt(self.seed)
        if self.passphrase:
            self.passphrase = reencrypt(self.passphrase)
        if self.xprv is not None:
            self.xprv = reencrypt(self.xprv)

    def is_watching_only(self):
        return self.xprv is None

    def add_xprv(self, xprv):
        self.xprv = xprv
        self.xpub = bitcoin.xpub_from_xprv(xprv)

    def add_xprv_from_seed(self, bip32_seed, xtype, derivation):
        """Set our key pair from a BIP32 root seed plus a derivation path."""
        root_xprv, _root_xpub = bip32_root(bip32_seed, xtype)
        xprv, _xpub = bip32_private_derivation(root_xprv, "m/", derivation)
        self.add_xprv(xprv)

    def get_private_key(self, sequence, password):
        xprv = self.get_master_private_key(password)
        _, _, _, _, chain_code, secret = deserialize_xprv(xprv)
        pk = bip32_private_key(sequence, secret, chain_code)
        return pk, True
class Old_KeyStore(Deterministic_KeyStore):
    """Electrum 1.x keystore: a hex seed stretched into a single 128-hex-char
    uncompressed master public key (pre-BIP32 scheme)."""

    def __init__(self, d):
        Deterministic_KeyStore.__init__(self, d)
        self.mpk = d.get('mpk')

    def get_hex_seed(self, password):
        """Decrypt the stored seed and return it as ascii-hex bytes."""
        return pw_decode(self.seed, password).encode('utf8')

    def dump(self):
        d = Deterministic_KeyStore.dump(self)
        d['mpk'] = self.mpk
        d['type'] = 'old'
        return d

    def add_seed(self, seedphrase):
        # Store the seed (unencrypted at this point, hence password None),
        # then derive the master public key from it.
        Deterministic_KeyStore.add_seed(self, seedphrase)
        s = self.get_hex_seed(None)
        self.mpk = self.mpk_from_seed(s)

    def add_master_public_key(self, mpk):
        self.mpk = mpk

    def format_seed(self, seed):
        """Accept the seed either as hex or as an old-style mnemonic."""
        from . import old_mnemonic, mnemonic
        seed = mnemonic.normalize_text(seed)
        # see if seed was entered as hex
        if seed:
            try:
                bfh(seed)
                return str(seed)
            except Exception:
                pass
        words = seed.split()
        seed = old_mnemonic.mn_decode(words)
        if not seed:
            raise Exception("Invalid seed")
        return seed

    def get_seed(self, password):
        """Return the seed rendered as old-style mnemonic words."""
        from . import old_mnemonic
        s = self.get_hex_seed(password)
        return ' '.join(old_mnemonic.mn_encode(s))

    @classmethod
    def mpk_from_seed(klass, seed):
        secexp = klass.stretch_key(seed)
        privkey = ecc.ECPrivkey.from_secret_scalar(secexp)
        # uncompressed pubkey hex without the leading '04' byte
        return privkey.get_public_key_hex(compressed=False)[2:]

    @classmethod
    def stretch_key(self, seed):
        # Electrum 1.x key stretching: 100000 rounds of sha256(x + seed).
        # NOTE: classmethod whose first parameter is conventionally `cls`.
        x = seed
        for i in range(100000):
            x = hashlib.sha256(x + seed).digest()
        return string_to_number(x)

    @classmethod
    def get_sequence(self, mpk, for_change, n):
        # scalar tweak binding (n, for_change) to this master public key
        return string_to_number(Hash(("%d:%d:"%(n, for_change)).encode('ascii') + bfh(mpk)))

    @classmethod
    def get_pubkey_from_mpk(self, mpk, for_change, n):
        # child pubkey = master pubkey + tweak * G
        z = self.get_sequence(mpk, for_change, n)
        master_public_key = ecc.ECPubkey(bfh('04'+mpk))
        public_key = master_public_key + z*ecc.generator()
        return public_key.get_public_key_hex(compressed=False)

    def derive_pubkey(self, for_change, n):
        return self.get_pubkey_from_mpk(self.mpk, for_change, n)

    def get_private_key_from_stretched_exponent(self, for_change, n, secexp):
        # child privkey = (stretched seed + tweak) mod curve order
        secexp = (secexp + self.get_sequence(self.mpk, for_change, n)) % ecc.CURVE_ORDER
        pk = number_to_string(secexp, ecc.CURVE_ORDER)
        return pk

    def get_private_key(self, sequence, password):
        """Return (privkey_bytes, False) for sequence (for_change, n);
        always uncompressed keys in this legacy scheme."""
        seed = self.get_hex_seed(password)
        self.check_seed(seed)
        for_change, n = sequence
        secexp = self.stretch_key(seed)
        pk = self.get_private_key_from_stretched_exponent(for_change, n, secexp)
        return pk, False

    def check_seed(self, seed):
        """Raise InvalidPassword unless *seed* regenerates our mpk."""
        secexp = self.stretch_key(seed)
        master_private_key = ecc.ECPrivkey.from_secret_scalar(secexp)
        master_public_key = master_private_key.get_public_key_bytes(compressed=False)[1:]
        if master_public_key != bfh(self.mpk):
            print_error('invalid password (mpk)', self.mpk, bh2u(master_public_key))
            raise InvalidPassword()

    def check_password(self, password):
        seed = self.get_hex_seed(password)
        self.check_seed(seed)

    def get_master_public_key(self):
        return self.mpk

    def get_xpubkey(self, for_change, n):
        # 'fe' marker + mpk hex + two 2-byte little-endian ints (branch, index)
        s = ''.join(map(lambda x: bitcoin.int_to_hex(x,2), (for_change, n)))
        return 'fe' + self.mpk + s

    @classmethod
    def parse_xpubkey(self, x_pubkey):
        """Inverse of get_xpubkey(): returns (mpk, [for_change, n])."""
        assert x_pubkey[0:2] == 'fe'
        pk = x_pubkey[2:]
        mpk = pk[0:128]
        dd = pk[128:]
        s = []
        while dd:
            # each element is a 2-byte little-endian integer (4 hex chars)
            n = int(bitcoin.rev_hex(dd[0:4]), 16)
            dd = dd[4:]
            s.append(n)
        assert len(s) == 2
        return mpk, s

    def get_pubkey_derivation(self, x_pubkey):
        if x_pubkey[0:2] != 'fe':
            return
        mpk, derivation = self.parse_xpubkey(x_pubkey)
        if self.mpk != mpk:
            return
        return derivation

    def update_password(self, old_password, new_password):
        """Re-encrypt the seed under *new_password* ('' means no password).
        The mpk is public and needs no re-encryption."""
        self.check_password(old_password)
        if new_password == '':
            new_password = None
        if self.has_seed():
            decoded = pw_decode(self.seed, old_password)
            self.seed = pw_encode(decoded, new_password)
class Hardware_KeyStore(KeyStore, Xpub):
    """Keystore whose private keys live on an external hardware device;
    signing is delegated to a device plugin."""
    # Derived classes must set:
    #   - device
    #   - DEVICE_IDS
    #   - wallet_type
    #restore_wallet_class = BIP32_RD_Wallet
    max_change_outputs = 1

    def __init__(self, d):
        Xpub.__init__(self)
        KeyStore.__init__(self)
        # Errors and other user interaction is done through the wallet's
        # handler. The handler is per-window and preserved across
        # device reconnects
        self.xpub = d.get('xpub')
        self.label = d.get('label')
        self.derivation = d.get('derivation')
        self.handler = None
        run_hook('init_keystore', self)

    def set_label(self, label):
        self.label = label

    def may_have_password(self):
        # secrets are guarded by the device, not by a wallet password
        return False

    def is_deterministic(self):
        return True

    def dump(self):
        # NOTE(review): self.hw_type is not assigned in this class; presumably
        # set by the device-specific subclass or plugin — verify.
        return {
            'type': 'hardware',
            'hw_type': self.hw_type,
            'xpub': self.xpub,
            'derivation':self.derivation,
            'label':self.label,
        }

    def unpaired(self):
        '''A device paired with the wallet was disconnected. This can be
        called in any thread context.'''
        self.print_error("unpaired")

    def paired(self):
        '''A device paired with the wallet was (re-)connected. This can be
        called in any thread context.'''
        self.print_error("paired")

    def can_export(self):
        return False

    def is_watching_only(self):
        '''The wallet is not watching-only; the user will be prompted for
        pin and passphrase as appropriate when needed.'''
        assert not self.has_seed()
        return False

    def get_password_for_storage_encryption(self):
        # Derive a deterministic storage-encryption password from a fixed
        # derivation path on the device, so no interactive secret is needed.
        # NOTE(review): self.plugin is set externally (not in __init__).
        from .storage import get_derivation_used_for_hw_device_encryption
        client = self.plugin.get_client(self)
        derivation = get_derivation_used_for_hw_device_encryption()
        xpub = client.get_xpub(derivation, "standard")
        password = self.get_pubkey_from_xpub(xpub, ())
        return password

    def has_usable_connection_with_device(self):
        """True only if a plugin is attached and its client reports a usable
        connection; never raises for a missing plugin."""
        if not hasattr(self, 'plugin'):
            return False
        client = self.plugin.get_client(self, force_pair=False)
        if client is None:
            return False
        return client.has_usable_connection_with_device()

    def ready_to_sign(self):
        return super().ready_to_sign() and self.has_usable_connection_with_device()
def bip39_normalize_passphrase(passphrase):
    """NFKD-normalize a BIP39 passphrase; None is treated as empty."""
    return normalize('NFKD', passphrase or '')


def bip39_to_seed(mnemonic, passphrase):
    """Derive the 64-byte BIP39 seed from *mnemonic* and *passphrase*.

    Per BIP39: both inputs are NFKD-normalized and the seed is
    PBKDF2-HMAC-SHA512(mnemonic, 'mnemonic' + passphrase, 2048 rounds).
    (The original also imported `hmac` here, unused — removed.)
    """
    import hashlib  # kept local, as in the original
    PBKDF2_ROUNDS = 2048
    mnemonic = normalize('NFKD', ' '.join(mnemonic.split()))
    passphrase = bip39_normalize_passphrase(passphrase)
    return hashlib.pbkdf2_hmac('sha512', mnemonic.encode('utf-8'),
                               b'mnemonic' + passphrase.encode('utf-8'),
                               iterations=PBKDF2_ROUNDS)
# returns tuple (is_checksum_valid, is_wordlist_valid)
def bip39_is_checksum_valid(mnemonic):
    """Validate a BIP39 mnemonic against the English wordlist.

    Returns (is_checksum_valid, is_wordlist_valid): an unknown word yields
    (False, False); a word count other than 12/15/18/21/24 yields (False, True).
    """
    words = [ normalize('NFKD', word) for word in mnemonic.split() ]
    words_len = len(words)
    wordlist = load_wordlist("english.txt")
    n = len(wordlist)
    checksum_length = 11*words_len//33  # checksum bits (1 per 3 words)
    entropy_length = 32*checksum_length  # entropy bits (32x the checksum bits)
    i = 0
    words.reverse()
    while words:
        w = words.pop()
        try:
            k = wordlist.index(w)
        except ValueError:
            return False, False
        # fold the 11-bit word indices into one big integer, first word highest
        i = i*n + k
    if words_len not in [12, 15, 18, 21, 24]:
        return False, True
    # the low checksum_length bits are the checksum, the rest is entropy
    entropy = i >> checksum_length
    checksum = i % 2**checksum_length
    h = '{:x}'.format(entropy)
    # left-pad to entropy_length/4 hex digits so fromhex sees full bytes
    while len(h) < entropy_length/4:
        h = '0'+h
    b = bytearray.fromhex(h)
    hashed = int(hfu(hashlib.sha256(b).digest()), 16)
    # checksum = top checksum_length bits of sha256(entropy)
    calculated_checksum = hashed >> (256 - checksum_length)
    return checksum == calculated_checksum, True
def from_bip39_seed(seed, passphrase, derivation, xtype=None):
    """Build a BIP32 keystore from a BIP39 mnemonic.

    If *xtype* is not given it is inferred from the derivation path.
    """
    bip32_seed = bip39_to_seed(seed, passphrase)
    if xtype is None:
        xtype = xtype_from_derivation(derivation)
    keystore = BIP32_KeyStore({})
    keystore.add_xprv_from_seed(bip32_seed, xtype, derivation)
    return keystore
def xtype_from_derivation(derivation: str) -> str:
    """Returns the script type to be used for this derivation."""
    prefix_to_xtype = (
        ("m/84'", 'p2wpkh'),
        ("m/49'", 'p2wpkh-p2sh'),
        ("m/44'", 'standard'),
        ("m/45'", 'standard'),
    )
    for prefix, xtype in prefix_to_xtype:
        if derivation.startswith(prefix):
            return xtype
    indices = list(bip32_derivation(derivation))
    if len(indices) >= 4 and indices[0] == 48 + BIP32_PRIME:
        # m / purpose' / coin_type' / account' / script_type' / change / address_index
        script_type = PURPOSE48_SCRIPT_TYPES_INV.get(indices[3] - BIP32_PRIME)
        if script_type is not None:
            return script_type
    return 'standard'
# extended pubkeys
def is_xpubkey(x_pubkey):
    """True if *x_pubkey* is a serialized BIP32 xpubkey ('ff' prefix)."""
    return x_pubkey.startswith('ff')
def parse_xpubkey(x_pubkey):
    """Module-level convenience wrapper around BIP32_KeyStore.parse_xpubkey."""
    assert x_pubkey[0:2] == 'ff'
    return BIP32_KeyStore.parse_xpubkey(x_pubkey)
def xpubkey_to_address(x_pubkey):
    """Resolve an extended pubkey string to a (pubkey, address) pair.

    'fd' entries carry a raw script (no pubkey); 'ff'/'fe' entries are
    BIP32 / legacy master-key derivations; plain pubkeys pass through.
    """
    prefix = x_pubkey[0:2]
    if prefix == 'fd':
        address = bitcoin.script_to_address(x_pubkey[2:])
        return x_pubkey, address
    if prefix in ('02', '03', '04'):
        pubkey = x_pubkey
    elif prefix == 'ff':
        xpub, s = BIP32_KeyStore.parse_xpubkey(x_pubkey)
        pubkey = BIP32_KeyStore.get_pubkey_from_xpub(xpub, s)
    elif prefix == 'fe':
        mpk, s = Old_KeyStore.parse_xpubkey(x_pubkey)
        pubkey = Old_KeyStore.get_pubkey_from_mpk(mpk, s[0], s[1])
    else:
        raise BitcoinException("Cannot parse pubkey. prefix: {}"
                               .format(x_pubkey[0:2]))
    if pubkey:
        address = public_key_to_p2pkh(bfh(pubkey))
    return pubkey, address
def xpubkey_to_pubkey(x_pubkey):
    """Return just the pubkey hex for *x_pubkey*; the address is discarded."""
    pubkey, _address = xpubkey_to_address(x_pubkey)
    return pubkey
# Registry of hardware-keystore factories, keyed by hw_type string.
hw_keystores = {}

def register_keystore(hw_type, constructor):
    """Register *constructor* as the factory for *hw_type* devices."""
    hw_keystores[hw_type] = constructor

def hardware_keystore(d):
    """Instantiate a hardware keystore from its serialized dict *d*."""
    hw_type = d['hw_type']
    constructor = hw_keystores.get(hw_type)
    if constructor is None:
        raise WalletFileException('unknown hardware type: {}. hw_keystores: {}'.format(hw_type, list(hw_keystores.keys())))
    return constructor(d)
def load_keystore(storage, name):
    """Instantiate the keystore stored under *name* in *storage*.

    Raises WalletFileException for a missing or unrecognized 'type' field.
    """
    d = storage.get(name, {})
    t = d.get('type')
    if not t:
        raise WalletFileException(
            'Wallet format requires update.\n'
            'Cannot find keystore for name {}'.format(name))
    constructors = {
        'old': Old_KeyStore,
        'imported': Imported_KeyStore,
        'bip32': BIP32_KeyStore,
        'hardware': hardware_keystore,
    }
    constructor = constructors.get(t)
    if constructor is None:
        raise WalletFileException(
            'Unknown type {} for keystore named {}'.format(t, name))
    return constructor(d)
def is_old_mpk(mpk: str) -> bool:
    """Return True if *mpk* looks like a valid Electrum-1.x master public key:
    128 hex chars encoding a point on the curve (uncompressed, no 04 prefix).

    BUG FIX: the bare `except:` clauses also swallowed SystemExit and
    KeyboardInterrupt; narrowed to `except Exception` (still broad enough to
    preserve the original False-on-any-bad-input behavior, e.g. TypeError).
    """
    try:
        int(mpk, 16)  # hex sanity check
    except Exception:
        return False
    if len(mpk) != 128:
        return False
    try:
        ecc.ECPubkey(bfh('04' + mpk))  # must be a valid curve point
    except Exception:
        return False
    return True
def is_address_list(text):
    """True if *text* is a non-empty whitespace-separated list of addresses."""
    candidates = text.split()
    if not candidates:
        return False
    return all(bitcoin.is_address(addr) for addr in candidates)
def get_private_keys(text):
    """Parse *text* (one key per line, inner whitespace ignored) into a list
    of private keys; returns None unless every non-blank line is a valid key."""
    candidates = [''.join(line.split()) for line in text.split('\n')]
    candidates = [c for c in candidates if c]
    if candidates and all(bitcoin.is_private_key(c) for c in candidates):
        return candidates
def is_private_key_list(text):
    """True if *text* parses as a non-empty list of private keys."""
    return get_private_keys(text) is not None
# These predicates were lambda assignments (PEP 8 E731); plain defs keep the
# same public names and behavior while giving proper tracebacks/docstrings.
def is_mpk(x):
    """True for an old-style MPK or a BIP32 xpub."""
    return is_old_mpk(x) or is_xpub(x)

def is_private(x):
    """True for a seed phrase, an xprv, or a list of private keys."""
    return is_seed(x) or is_xprv(x) or is_private_key_list(x)

def is_master_key(x):
    """True for any master-key text: old MPK, xprv, or xpub."""
    return is_old_mpk(x) or is_xprv(x) or is_xpub(x)

def is_private_key(x):
    """True for an xprv or a list of private keys."""
    return is_xprv(x) or is_private_key_list(x)

def is_bip32_key(x):
    """True for either half of a BIP32 pair (xprv or xpub)."""
    return is_xprv(x) or is_xpub(x)
def bip44_derivation(account_id, bip43_purpose=44):
    """Return the BIP44-style account path m/purpose'/coin_type'/account'."""
    coin_type = constants.net.BIP44_COIN_TYPE
    return "m/{:d}'/{:d}'/{:d}'".format(bip43_purpose, coin_type, int(account_id))
def purpose48_derivation(account_id: int, xtype: str) -> str:
    """Return the BIP48 account path for multisig script type *xtype*."""
    # m / purpose' / coin_type' / account' / script_type' / change / address_index
    bip43_purpose = 48
    coin_type = constants.net.BIP44_COIN_TYPE
    script_type_int = PURPOSE48_SCRIPT_TYPES.get(xtype)
    if script_type_int is None:
        raise Exception('unknown xtype: {}'.format(xtype))
    return "m/%d'/%d'/%d'/%d'" % (bip43_purpose, coin_type, int(account_id),
                                  script_type_int)
def from_seed(seed, passphrase, is_p2sh):
    """Create a keystore of the appropriate class for *seed*.

    Old-style seeds yield an Old_KeyStore; standard/segwit seeds yield a
    BIP32_KeyStore derived at the conventional path for the script type.
    """
    t = seed_type(seed)
    if t == 'old':
        keystore = Old_KeyStore({})
        keystore.add_seed(seed)
        return keystore
    if t not in ('standard', 'segwit'):
        raise BitcoinException('Unexpected seed type {}'.format(t))
    keystore = BIP32_KeyStore({})
    keystore.add_seed(seed)
    keystore.passphrase = passphrase
    bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
    if t == 'standard':
        der, xtype = "m/", 'standard'
    elif is_p2sh:
        der, xtype = "m/1'/", 'p2wsh'
    else:
        der, xtype = "m/0'/", 'p2wpkh'
    keystore.add_xprv_from_seed(bip32_seed, xtype, der)
    return keystore
def from_private_key_list(text):
    """Build an Imported_KeyStore from newline-separated private keys.

    BUG FIX: the original called keystore.import_key(), a method that does
    not exist on Imported_KeyStore (which defines import_privkey), so this
    function always raised AttributeError.
    """
    keystore = Imported_KeyStore({})
    for x in get_private_keys(text):
        keystore.import_privkey(x, None)
    return keystore
def from_old_mpk(mpk):
    """Build a watching-only Old_KeyStore from a 1.x master public key."""
    keystore = Old_KeyStore({})
    keystore.add_master_public_key(mpk)
    return keystore
def from_xpub(xpub):
    """Build a watching-only BIP32 keystore from an xpub."""
    keystore = BIP32_KeyStore({})
    keystore.xpub = xpub
    return keystore
def from_xprv(xprv):
    """Build a spending BIP32 keystore from an xprv (xpub is derived)."""
    keystore = BIP32_KeyStore({})
    keystore.xprv = xprv
    keystore.xpub = bitcoin.xpub_from_xprv(xprv)
    return keystore
def from_master_key(text):
    """Create a keystore from any supported master-key text
    (xprv, old-style MPK, or xpub)."""
    if is_xprv(text):
        return from_xprv(text)
    if is_old_mpk(text):
        return from_old_mpk(text)
    if is_xpub(text):
        return from_xpub(text)
    raise BitcoinException('Invalid master key')
| |
from __future__ import unicode_literals
from django.views.generic import View
from django.shortcuts import render_to_response, RequestContext, Http404, HttpResponseRedirect
from gge_proxy_manager.models import ProductionJob, ProductionLog, Player
from django.core.urlresolvers import reverse
from intern.forms.my import MyProductionJobForm, MyRecruitmentJobForm
from django.contrib import messages
from . import PlayerMixin
from django.utils.timezone import now, timedelta
from django.db.models import Sum
import json
import random
def set_active(request, jobs):
    """Bulk-enable the selected jobs and notify the user."""
    jobs.update(is_active=True)
    messages.success(request, "Jobs wurden aktiviert")
def set_inactive(request, jobs):
    """Bulk-disable the selected jobs and notify the user."""
    jobs.update(is_active=False)
    messages.success(request, "Jobs wurden deaktiviert")
def enable_burst_mode(request, jobs):
    """Switch burst mode on for the selected jobs and notify the user."""
    jobs.update(burst_mode=True)
    messages.success(request, "Burstmode wurde eingeschaltet")
def disable_burst_mode(request, jobs):
    """Switch burst mode off for the selected jobs and notify the user."""
    jobs.update(burst_mode=False)
    messages.success(request, "Burstmode wurde ausgeschaltet")
def clone(request, jobs):
    """Duplicate each selected job as a new, inactive row."""
    for job in jobs.all():
        job.pk = None  # saving with pk=None inserts a copy
        job.is_active = False
        job.save()
    messages.success(request, "Jobs wurden kopiert")
def delete(request, jobs):
    """Delete each selected job individually (so per-object delete logic runs)."""
    for job in jobs.all():
        job.delete()
    messages.success(request, "Jobs wurden entfernt")
class ProductionJobMixin(object):
    """Restricts jobs to the requesting player, ordered for display."""

    def get_queryset(self):
        owner = self.player_or_404()
        return (ProductionJob.objects
                .filter(player=owner)
                .order_by('castle__gge_id', 'unit__gge_id'))
class ProductionFormMixin(object):
    """Builds a bound job form, optionally pre-filled from an existing job."""

    def get_form(self, id, unit_type):
        """Return (form, job); *job* is a fresh instance unless *id* names an
        existing job owned by the player (404 otherwise)."""
        player = self.player_or_404()
        initial = {}
        job = ProductionJob()
        if id:
            try:
                job = ProductionJob.objects.get(pk=id, player=player)
            except ProductionJob.DoesNotExist:
                raise Http404
            # seed the form with the existing job's values
            initial = {field: getattr(job, field) for field in (
                "castle", "unit", "food_balance_limit", "valid_until",
                "gold_limit", "wood_limit", "stone_limit",
                "is_active", "burst_mode")}
        data = self.request.POST if self.request.POST else None
        form = self.form_class(data, player=player, unit_type=unit_type,
                               initial=initial)
        return form, job
class JobActionMixin(object):
    """Dispatches bulk actions posted from the job list views."""

    ACTIONS = {
        'set_active': set_active,
        'set_inactive': set_inactive,
        'enable_burst_mode': enable_burst_mode,
        'disable_burst_mode': disable_burst_mode,
        'clone': clone,
        'delete': delete,
    }

    def post(self, request):
        selected_ids = request.POST.getlist("ids")
        action = request.POST.get("action", None)
        if 'all' in selected_ids:
            # 'all' is a UI pseudo-id, not a primary key
            selected_ids.remove('all')
        handler = self.ACTIONS.get(action) if action else None
        if handler is not None:
            handler(request, self.get_queryset().filter(pk__in=selected_ids))
        return HttpResponseRedirect(request.get_full_path())
class MyProductionJobListView(View, PlayerMixin, ProductionJobMixin, JobActionMixin):
    """Lists the player's tool-production jobs."""
    template_name = 'my/production_job_list.html'

    def get(self, request):
        tool_jobs = self.get_queryset().filter(unit__type='tool')
        return render_to_response(
            self.template_name,
            {'object_list': tool_jobs},
            context_instance=RequestContext(request)
        )
class MyProductionJobFormView(View, PlayerMixin, ProductionJobMixin, ProductionFormMixin):
    """Create/edit form for a single tool-production job."""
    template_name = 'my/production_job_form.html'
    form_class = MyProductionJobForm

    def get(self, request, id):
        form, _job = self.get_form(id, 'tool')
        return render_to_response(
            self.template_name,
            {'form': form},
            context_instance=RequestContext(request)
        )

    def post(self, request, id):
        form, job = self.get_form(id, 'tool')
        if form.is_valid():
            job.player = self.player_or_404()
            # copy all validated fields onto the job, in the original order
            for field in ("castle", "unit", "valid_until", "food_balance_limit",
                          "gold_limit", "wood_limit", "stone_limit",
                          "is_active", "burst_mode"):
                setattr(job, field, form.cleaned_data.get(field))
            job.save()
            return HttpResponseRedirect(reverse('intern:my_production_job_list'))
        return render_to_response(
            self.template_name,
            {'form': form},
            context_instance=RequestContext(request)
        )
class MyRecruitmentJobListView(View, PlayerMixin, ProductionJobMixin, JobActionMixin):
    """Lists the player's soldier-recruitment jobs."""
    template_name = 'my/recruitment_job_list.html'

    def get(self, request):
        soldier_jobs = self.get_queryset().filter(unit__type='soldier')
        return render_to_response(
            self.template_name,
            {'object_list': soldier_jobs},
            context_instance=RequestContext(request)
        )
class MyRecruitmentJobFormView(View, PlayerMixin, ProductionJobMixin, ProductionFormMixin):
    """Create/edit form for a single soldier-recruitment job."""
    template_name = 'my/production_job_form.html'
    form_class = MyRecruitmentJobForm

    def get(self, request, id):
        form, _job = self.get_form(id, 'soldier')
        return render_to_response(
            self.template_name,
            {'form': form},
            context_instance=RequestContext(request)
        )

    def post(self, request, id):
        form, job = self.get_form(id, 'soldier')
        if form.is_valid():
            job.player = self.player_or_404()
            # copy all validated fields onto the job, in the original order
            for field in ("castle", "unit", "valid_until", "food_balance_limit",
                          "gold_limit", "wood_limit", "stone_limit",
                          "is_active", "burst_mode"):
                setattr(job, field, form.cleaned_data.get(field))
            job.save()
            return HttpResponseRedirect(reverse('intern:my_recruitment_job_list'))
        return render_to_response(
            self.template_name,
            {'form': form},
            context_instance=RequestContext(request)
        )
class MyJobStatisticView(View):
    """Renders weekly production/recruitment statistics as Chart.js data.

    BUG FIX: get_production_amount used aggregate(...).get('amount__sum', 0),
    but Django's aggregate() always includes the key (with value None when no
    rows match), so the default never applied and days without production
    yielded nulls in the charts; None is now coerced to 0.
    """
    template_name = 'my/job_statistic.html'
    colors = [
        '#adfe09',
        '#28b3a9',
        '#cbce58',
        '#df5058',
        '#e3f3a5',
        '#ad115d',
        '#94e3d8',
        '#be9710',
        '#4174b1',
    ]

    def get_production_amount(self, f):
        """Daily produced amounts for the last 7 days (today first).

        *f* is a dict of ProductionLog filter kwargs; the date bounds are
        added to it in place, matching the original call pattern.
        """
        n = now()
        amounts = []
        for d in range(0, 7):
            date = n - timedelta(hours=24 * d)
            f['produced__gte'] = date.replace(hour=0, minute=0, second=0)
            f['produced__lte'] = date.replace(hour=23, minute=59, second=59)
            total = ProductionLog.objects.filter(**f).aggregate(Sum('amount'))['amount__sum']
            amounts.append(total or 0)  # None (no rows) -> 0
        return amounts

    def active_player_ids(self, f):
        """Ids of players with at least one matching production-log entry."""
        res = ProductionLog.objects.filter(**f).values_list('player', flat=True)
        return set(res)

    @staticmethod
    def _dataset(label, color, data, fill_color=None):
        """Build one Chart.js dataset dict; *color* styles line and points."""
        ds = {'label': label}
        if fill_color is not None:
            ds['fillColor'] = fill_color
        ds.update({
            'strokeColor': color,
            'pointColor': color,
            'pointStrokeColor': "#fff",
            'pointHighlightFill': "#fff",
            'pointHighlightStroke': color,
            'data': data,
        })
        return ds

    def get(self, request):
        if not request.player:
            raise Http404()
        # Requesting player's own recruitment/production series.
        datasets = [
            self._dataset('Rekrutierung', "rgba(220,220,220,1)",
                          self.get_production_amount(dict(player=request.player, unit__type='soldier')),
                          fill_color="rgba(220,220,220,0.2)"),
            self._dataset('Produktion', "rgba(151,187,205,1)",
                          self.get_production_amount(dict(player=request.player, unit__type='tool')),
                          fill_color="rgba(151,187,205,0.2)"),
        ]
        # Global (all players) series.
        total_datasets = [
            self._dataset('Rekrutierung', "rgba(220,220,220,1)",
                          self.get_production_amount(dict(unit__type='soldier')),
                          fill_color="rgba(220,220,220,0.2)"),
            self._dataset('Produktion', "rgba(151,187,205,1)",
                          self.get_production_amount(dict(unit__type='tool')),
                          fill_color="rgba(151,187,205,0.2)"),
        ]
        player_tool_datasets = []
        player_soldier_datasets = []
        try:
            player_ids = self.active_player_ids(
                dict(produced__gte=now() - timedelta(hours=24 * 7)))
            players = [Player.objects.get(pk=player_id) for player_id in player_ids]
            for player in players:
                # one random color per player, shared by both per-player charts
                color = "#" + ''.join(random.choice("0123456789abcdef") for _ in range(6))
                player_tool_datasets.append(self._dataset(
                    player.name, color,
                    self.get_production_amount(dict(player=player, unit__type='tool'))))
                player_soldier_datasets.append(self._dataset(
                    player.name, color,
                    self.get_production_amount(dict(player=player, unit__type='soldier'))))
        except NotImplementedError:
            # fall back to empty per-player charts rather than failing the page
            player_soldier_datasets = []
            player_tool_datasets = []
        labels = ["heute", "gestern", "vorgestern", "vor 3 Tagen", "vor 4 Tagen", "vor 5 Tagen", "vor 6 Tagen"]
        return render_to_response(
            self.template_name,
            {
                'chart_data': json.dumps({
                    'labels': labels,
                    'datasets': datasets
                }),
                'chart_two_data': json.dumps({
                    'labels': labels,
                    'datasets': player_tool_datasets
                }),
                'chart_three_data': json.dumps({
                    'labels': labels,
                    'datasets': player_soldier_datasets
                }),
                'chart_total_data': json.dumps({
                    'labels': labels,
                    'datasets': total_datasets
                })
            },
            context_instance=RequestContext(request)
        )
| |
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseBadRequest
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from donomo.archive.api import api_impl
from donomo.archive.models import Page, Document
from donomo.archive.utils.http import http_method_dispatcher
from donomo.billing.models import Account, Invoice
from recaptcha import RecaptchaForm, RecaptchaFieldPlaceholder, RecaptchaWidget
from registration.models import RegistrationProfile
import os
import logging
# NOTE(review): rebinding `logging` to a Logger shadows the stdlib module for
# the rest of this file; logging.debug() still works (Logger has it), but any
# later logging.getLogger()/basicConfig() call would fail.
logging = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
def trial(request):
    """Trial sign-up endpoint (Python 2 code: `except Exception, e`, has_key).

    GET renders the trial page; POST expects an 'email' field plus uploaded
    files, creates (or finds) the trial account and queues the files.
    NOTE(review): exceptions are only printed, so failures still return the
    success text; and if the try block fails before `created` is bound, the
    final HttpResponse raises NameError — verify intended behavior.
    """
    if request.method == 'GET':
        return render_to_response('account/trial.html',
            context_instance = RequestContext(request))
    elif request.method == 'POST':
        if not request.POST.has_key('email'):
            return HttpResponseBadRequest('no email') #TODO: validation
        try:
            user, created = get_or_create_trial_account(request.POST['email'])
            if not created:
                # existing account: trial is one-shot per email
                return HttpResponseForbidden("Trial for %s has expired" % user.email)
            # now that the user object is created, submit the files for processing
            api_impl.process_uploaded_files(request.FILES, user)
        except Exception, e:
            print str(e);
        return HttpResponse('User created: ' + str(created))
    else:
        return HttpResponse('method not supported')
def get_or_create_trial_account(email):
    """Return (user, created) for *email*.

    A new trial user is created inactive with an unusable password; an
    existing user is returned with created=False.
    """
    try:
        existing = User.objects.get(email__exact = email)
    except User.DoesNotExist:
        user = User.objects.create_user(email, email)
        user.set_unusable_password()
        user.is_active = False
        user.save()
        return (user, True)
    return (existing, False)
@login_required()
def account_delete(request, username):
    """
    Deletes user's account.

    BUG FIX: the original compared `username == user`, where `user` is an
    undefined name, so every request raised NameError; compare the URL
    username against the authenticated user's username instead.
    """
    #TODO: delete all user's data on solr, s3, etc.
    if request.user.is_authenticated() and username == request.user.username:
        request.user.delete()
        return logout(request)
    else:
        return HttpResponse('forbidden')
def signin(request):
    """Log a user in: GET shows the login view, POST authenticates.

    IMPROVEMENT: the `cond and a or b` idiom (which silently picks `b`
    whenever `a` is falsy) is replaced with explicit conditionals; the
    responses and messages are unchanged.
    """
    if request.method == 'GET':
        return auth.views.login(request)
    elif request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
        if user is None:
            if request.is_ajax():
                return HttpResponseForbidden('{"error": "invalid login for %s"}' % username)
            return render_to_response('account/invalid_login.html', locals())
        if not user.is_active:
            if request.is_ajax():
                return HttpResponseForbidden('{"error": "account %s is disabled"}' % user.username)
            return render_to_response('account/account_disabled.html')
        auth.login(request, user)
        return HttpResponse() if request.is_ajax() else HttpResponseRedirect('/')
@login_required()
def logout(request):
    """
    Log the current user out via ``django.contrib.auth.views.logout``.

    The redirect target comes from the ``next`` request parameter when
    present.
    """
    logging.debug(RequestContext(request))
    # NOTE(review): ``request.REQUEST`` is deprecated in modern Django;
    # also, pulling ``template_name`` out of a RequestContext assumes a
    # context processor defines it -- confirm before changing.
    return auth.logout(request,
                       next_page=request.REQUEST.get('next', None),
                       template_name = RequestContext(request)['template_name'])
def account_detail(request, username=None):
    """Dispatch HEAD/GET requests for the account detail view."""
    handlers = {
        'HEAD': head_account_detail,
        'GET': get_account_detail,
    }
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request, username)
def head_account_detail(request, username=None):
    """
    Returns HttpResponse 200 if the account exists, 404 otherwise.
    Used to verify the existence of the account.
    Can be invoked by anonymous request.
    """
    response = HttpResponse(content_type='text/plain')
    try:
        account = User.objects.get(username__exact=username)
    except User.DoesNotExist:
        response.status_code = 404
    else:
        # Surface the activation flag in a custom header on success.
        response['donomo-account-active'] = account.is_active
    return response
@login_required()
def get_account_detail(request, username=None):
    """
    Renders account management UI
    """
    # TODO replace with generic views once
    # http://code.djangoproject.com/ticket/3639 is resolved
    if username is not None and username != request.user.username:
        return HttpResponse('forbidden: username %s' % username)
    if request.method != 'GET':
        return HttpResponse('forbidden')
    context_data = {
        'page_count': Page.objects.filter(owner=request.user).count(),
        'document_count': Document.objects.filter(owner=request.user).count(),
        'remaining_storage_days': "30",
    }
    return render_to_response('account/userprofile_form.html',
                              context_data,
                              context_instance=RequestContext(request))
@login_required()
def account_export(request, username):
    """
    Export a user's account data (not implemented yet).

    Only the account owner or a staff member may request an export.
    """
    # TODO replace with generic views once
    # http://code.djangoproject.com/ticket/3639 is resolved
    # BUG FIX: Django's User model exposes ``is_staff``; the original
    # ``request.user.staff`` raised AttributeError and broke the
    # staff-override path.
    if username != request.user.username and not request.user.is_staff:
        return HttpResponse('forbidden: username %s' % username)
    if request.method == 'GET':
        return HttpResponse('Not implemented yet')
    else:
        return HttpResponse('forbidden')
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
# Shared widget attributes for the registration form fields below.
attrs_dict = { 'class': 'required' }
class RegistrationForm(RecaptchaForm):
    """
    Form for registering a new user account.
    Requires the password to be entered twice to catch typos.
    Subclasses should feel free to add any additional validation they
    need, but should either preserve the base ``save()`` or implement
    a ``save()`` which accepts the ``profile_callback`` keyword
    argument and passes it through to
    ``RegistrationProfile.objects.create_inactive_user()``.
    """
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_(u'Email address (must already exist)'),
                             help_text=_(u"You'll use this address to log in Donomo. We'll use this address to send you notifications when your documents are processed. We will never share it with third parties without your permission."))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_(u'Enter Password'))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_(u'Retype Password'))
    captcha = RecaptchaFieldPlaceholder(widget=RecaptchaWidget(theme='white'),
                                        label=_(u'Word Verification'),
                                        help_text=_(u"Type the characters you see in the picture"))

    def clean_email(self):
        """
        Validate that the email is not already in use.

        BUG FIX: this hook was previously named ``clean_username``, but the
        form declares no ``username`` field, so Django never invoked it and
        duplicate emails slipped through.  ``clean_<fieldname>`` hooks only
        run for declared fields.
        """
        try:
            User.objects.get(username__iexact=self.cleaned_data['email'])
        except User.DoesNotExist:
            return self.cleaned_data['email']
        raise forms.ValidationError(_(u'This email is already registered. Please choose another.'))

    # Kept so any external code calling the old name keeps working.
    clean_username = clean_email

    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data

    def save(self, profile_callback=None):
        """
        Create the new ``User`` and ``RegistrationProfile``, and
        returns the ``User``.
        This is essentially a light wrapper around
        ``RegistrationProfile.objects.create_inactive_user()``,
        feeding it the form data and a profile callback (see the
        documentation on ``create_inactive_user()`` for details) if
        supplied.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(
            username=self.cleaned_data['email'],
            password=self.cleaned_data['password1'],
            email=self.cleaned_data['email'],
            profile_callback=profile_callback)
        return new_user
class SiteProfileNotAvailable(Exception):
    """Raised when settings.AUTH_PROFILE_MODULE is absent or unloadable."""
    pass
def create_profile_model(user):
    """
    This method is supplied as "profile_callback" and will be called just
    after the new user object is created.

    Returns the profile instance for ``user``, creating it with the
    initial trial balance when it does not exist.  Raises
    ``SiteProfileNotAvailable`` when the profile model cannot be resolved.
    """
    from django.db import models
    from django.core.exceptions import ImproperlyConfigured
    if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
        raise SiteProfileNotAvailable
    try:
        # AUTH_PROFILE_MODULE is expected as "app_label.ModelName".
        app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
        model = models.get_model(app_label, model_name)
        profile, created = model._default_manager.get_or_create(
            user=user,
            defaults={"balance": Account.BALANCE_ON_CREATION })
        logging.debug("%s is created? %s" %(profile, created))
        return profile
    except (ImportError, ImproperlyConfigured):
        raise SiteProfileNotAvailable
def register(request,
             success_url=None,
             form_class=RegistrationForm,
             profile_callback=create_profile_model,
             template_name='registration/registration_form.html',
             extra_context=None):
    """
    Allow a new user to register an account.
    Following successful registration, issue a redirect; by default,
    this will be whatever URL corresponds to the named URL pattern
    ``registration_complete``, which will be
    ``/accounts/register/complete/`` if using the included URLConf. To
    change this, point that named pattern at another URL, or pass your
    preferred URL as the keyword argument ``success_url``.
    By default, ``registration.forms.RegistrationForm`` will be used
    as the registration form; to change this, pass a different form
    class as the ``form_class`` keyword argument. The form class you
    specify must have a method ``save`` which will create and return
    the new ``User``, and that method must accept the keyword argument
    ``profile_callback`` (see below).
    To enable creation of a site-specific user profile object for the
    new user, pass a function which will create the profile object as
    the keyword argument ``profile_callback``. See
    ``RegistrationManager.create_inactive_user`` in the file
    ``models.py`` for details on how to write this function.
    By default, use the template
    ``registration/registration_form.html``; to change this, pass the
    name of a template as the keyword argument ``template_name``.
    **Required arguments**
    None.
    **Optional arguments**
    ``form_class``
        The form class to use for registration.
    ``extra_context``
        A dictionary of variables to add to the template context. Any
        callable object in this dictionary will be called to produce
        the end result which appears in the context.
    ``profile_callback``
        A function which will be used to create a site-specific
        profile instance for the new ``User``.
    ``success_url``
        The URL to redirect to on successful registration.
    ``template_name``
        A custom template to use.
    **Context:**
    ``form``
        The registration form.
    Any extra variables supplied in the ``extra_context`` argument
    (see above).
    **Template:**
    registration/registration_form.html or ``template_name`` keyword
    argument.
    """
    remote_ip = request.META['REMOTE_ADDR']
    if request.method == 'POST':
        form = form_class(remote_ip, data=request.POST, files=request.FILES)
        if form.is_valid():
            new_user = form.save(profile_callback=profile_callback)
            # success_url needs to be dynamically generated here; setting a
            # a default value using reverse() will cause circular-import
            # problems with the default URLConf for this application, which
            # imports this file.
            return HttpResponseRedirect(success_url or reverse('registration_complete'))
    else:
        form = form_class(remote_ip)
    if extra_context is None:
        extra_context = {}
    context = RequestContext(request)
    for key, value in extra_context.items():
        # BUG FIX: ``callable(v) and v() or v`` returned the callable object
        # itself whenever its result was falsy; evaluate explicitly.
        context[key] = value() if callable(value) else value
    return render_to_response(template_name,
                              { 'form': form },
                              context_instance=context)
| |
from direct.fsm import ClassicFSM, State
from toontown.shtiker.OptionsPageGUI import OptionButton
from toontown.toonbase.TTLocalizer import Controls, RemapPrompt, RemapPopup
from toontown.toonbase.ToontownGlobals import OptionsPageHotkey
from toontown.toontowngui import TTDialog
class ControlRemap:
    """
    Modal dialog that lets the player rebind the eight movement/UI keys.

    Each OptionButton shows a control's current binding; clicking one
    opens a "press a key" popup and the next button press becomes the new
    binding.  Closing the dialog with OK writes the bindings into
    ``settings['keymap']`` and reloads the control managers; Cancel
    discards them.
    """
    # Indices into ``controlsToBeSaved`` for each remappable control.
    UP = 0
    LEFT = 1
    DOWN = 2
    RIGHT = 3
    JUMP = 4
    ACTION_BUTTON = 5
    OPTIONS_PAGE_HOTKEY = 6
    CHAT_HOTKEY = 7

    def __init__(self):
        # Two-choice (OK/Cancel) dialog that hosts the remap buttons;
        # 'doneRemapping' fires when either choice is made.
        self.dialog = TTDialog.TTGlobalDialog(
            dialogName='ControlRemap', doneEvent='doneRemapping', style=TTDialog.TwoChoice,
            text=RemapPrompt, text_wordwrap=24,
            text_pos=(0, 0, -0.8), suppressKeys = True, suppressMouse = True
        )
        # Stretch the dialog background to fit the two rows of buttons,
        # compensating for the current aspect ratio.
        scale = self.dialog.component('image0').getScale()
        scale.setX(((scale[0] * 2.5) / base.getAspectRatio()) * 1.2)
        scale.setZ(scale[2] * 2.5)
        self.dialog.component('image0').setScale(scale)
        # Grid layout origin and per-button label offset.
        button_x = -0.6
        button_y = 0.4
        labelPos = (0, 0, 0.1)
        # One button per control: the button text is the current binding and
        # clicking starts the wait-for-key flow for that control index.
        self.upKey = OptionButton(
            parent=self.dialog,
            text=base.MOVE_UP,
            pos=(button_x, 0.0, button_y),
            command=self.enterWaitForKey, extraArgs=[self.UP],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[0])
        self.leftKey = OptionButton(
            parent=self.dialog,
            text=base.MOVE_LEFT,
            pos=(button_x + 0.4, 0.0, button_y),
            command=self.enterWaitForKey, extraArgs=[self.LEFT],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[1])
        self.downKey = OptionButton(
            parent=self.dialog,
            text=base.MOVE_DOWN,
            pos=(button_x + 0.8, 0.0, button_y),
            command=self.enterWaitForKey, extraArgs=[self.DOWN],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[2])
        self.rightKey = OptionButton(
            parent=self.dialog,
            text=base.MOVE_RIGHT,
            pos=(button_x + 1.2, 0.0, button_y),
            command=self.enterWaitForKey, extraArgs=[self.RIGHT],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[3])
        self.jumpKey = OptionButton(
            parent=self.dialog,
            text=base.JUMP,
            pos=(button_x, 0.0, button_y - 0.3),
            command=self.enterWaitForKey, extraArgs=[self.JUMP],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[4])
        self.actionKey = OptionButton(
            parent=self.dialog,
            text=base.ACTION_BUTTON,
            pos=(button_x + 0.4, 0.0, button_y - 0.3),
            command=self.enterWaitForKey, extraArgs=[self.ACTION_BUTTON],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[5])
        self.optionsKey = OptionButton(
            parent=self.dialog,
            text=OptionsPageHotkey,
            pos=(button_x + 0.8, 0.0, button_y - 0.3),
            command=self.enterWaitForKey, extraArgs=[self.OPTIONS_PAGE_HOTKEY],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[6])
        self.chatHotkey = OptionButton(
            parent=self.dialog,
            text=base.CHAT_HOTKEY,
            pos=(button_x + 1.2, 0.0, button_y - 0.3),
            command=self.enterWaitForKey, extraArgs=[self.CHAT_HOTKEY],
            wantLabel=True, labelOrientation='top', labelPos=labelPos,
            labelText=Controls[7])
        # Working copy of the bindings; only persisted on OK (enterSave).
        self.controlsToBeSaved = {
            self.UP: base.MOVE_UP,
            self.LEFT: base.MOVE_LEFT,
            self.DOWN: base.MOVE_DOWN,
            self.RIGHT: base.MOVE_RIGHT,
            self.JUMP: base.JUMP,
            self.ACTION_BUTTON: base.ACTION_BUTTON,
            self.OPTIONS_PAGE_HOTKEY: OptionsPageHotkey,
            self.CHAT_HOTKEY: base.CHAT_HOTKEY
        }
        self.popupDialog = None
        self.dialog.show()
        # NOTE(review): the FSM registers enterWaitForKey/exitWaitForKey as
        # state handlers, but those methods require extra arguments that a
        # plain FSM transition would not pass -- in practice the buttons call
        # enterWaitForKey directly; confirm the FSM path is actually used.
        self.fsm = ClassicFSM.ClassicFSM(
            'ControlRemapDialog',
            [
                State.State('off', self.enterShow, self.exitShow, ['waitForKey']),
                State.State('waitForKey', self.enterWaitForKey, self.exitWaitForKey, ['off']),
            ], 'off', 'off')
        self.fsm.enterInitialState()
        self.dialog.accept('doneRemapping', self.exit)
        # Suspend global hotkeys and chat focus while remapping.
        messenger.send('disable-hotkeys')
        base.localAvatar.chatMgr.disableBackgroundFocus()

    def enterShow(self):
        # FSM 'off' state entry; nothing to do.
        pass

    def exitShow(self):
        # FSM 'off' state exit; nothing to do.
        pass

    def enterWaitForKey(self, controlNum):
        # Dim the screen, hide the main dialog and show the "press a key"
        # popup, then listen for the next raw button press.
        base.transitions.fadeScreen(0.9)
        self.dialog.hide()
        if self.popupDialog:
            self.popupDialog.cleanup()
        self.popupDialog = TTDialog.TTDialog(style=TTDialog.NoButtons,
                                             text=RemapPopup, suppressMouse=True, suppressKeys=True)
        scale = self.popupDialog.component('image0').getScale()
        scale.setX((scale[0] * 3.5) / base.getAspectRatio())
        scale.setZ(scale[2] * 3)
        self.popupDialog.setScale(scale)
        self.popupDialog.show()
        # Route every button-down event to a per-control event name so that
        # registerKey receives (controlNum, keyName).
        base.buttonThrowers[0].node().setButtonDownEvent('buttonPress-' + str(controlNum))
        self.dialog.accept('buttonPress-' + str(controlNum), self.registerKey, [controlNum])

    def registerKey(self, controlNum, keyName):
        # Record the pressed key, refresh the matching button's label and
        # return to the main dialog.
        self.popupDialog.cleanup()
        self.controlsToBeSaved[controlNum] = keyName
        if controlNum == self.UP:
            self.upKey['text'] = keyName
        elif controlNum == self.LEFT:
            self.leftKey['text'] = keyName
        elif controlNum == self.DOWN:
            self.downKey['text'] = keyName
        elif controlNum == self.RIGHT:
            self.rightKey['text'] = keyName
        elif controlNum == self.JUMP:
            self.jumpKey['text'] = keyName
        elif controlNum == self.ACTION_BUTTON:
            self.actionKey['text'] = keyName
        elif controlNum == self.OPTIONS_PAGE_HOTKEY:
            self.optionsKey['text'] = keyName
        elif controlNum == self.CHAT_HOTKEY:
            self.chatHotkey['text'] = keyName
        self.dialog.show()
        self.exitWaitForKey(controlNum, keyName)

    def exitWaitForKey(self, controlNum, keyName):
        # Stop listening for the per-control button press event.
        # NOTE(review): ``keyName`` is unused here -- confirm before removal.
        self.dialog.ignore('buttonPress-' + str(controlNum))

    def exit(self):
        # 'doneRemapping' handler: OK saves, anything else cancels.
        if self.dialog.doneStatus == 'ok':
            self.enterSave()
        else:
            self.enterCancel()

    def enterSave(self):
        # Persist the working bindings into the settings keymap and reload
        # everything that consumes it.
        keymap = settings.get('keymap', {})
        keymap['MOVE_UP'] = self.controlsToBeSaved[self.UP]
        keymap['MOVE_LEFT'] = self.controlsToBeSaved[self.LEFT]
        keymap['MOVE_DOWN'] = self.controlsToBeSaved[self.DOWN]
        keymap['MOVE_RIGHT'] = self.controlsToBeSaved[self.RIGHT]
        keymap['JUMP'] = self.controlsToBeSaved[self.JUMP]
        keymap['ACTION_BUTTON'] = self.controlsToBeSaved[self.ACTION_BUTTON]
        keymap['OPTIONS_PAGE_HOTKEY'] = self.controlsToBeSaved[self.OPTIONS_PAGE_HOTKEY]
        keymap['CHAT_HOTKEY'] = self.controlsToBeSaved[self.CHAT_HOTKEY]
        settings['keymap'] = keymap
        base.reloadControls()
        base.localAvatar.controlManager.reload()
        base.localAvatar.chatMgr.reloadWASD()
        self.unload()
        # NOTE(review): disabling the control manager after unload, followed
        # by a stray ``pass``, looks like leftover code -- verify intent.
        base.localAvatar.controlManager.disable()
        pass

    def exitSave(self):
        pass

    def enterCancel(self):
        # Discard the working bindings; nothing was persisted.
        self.unload()

    def exitCancel(self):
        pass

    def unload(self):
        # Tear down dialogs and restore global hotkeys.
        if self.popupDialog:
            self.popupDialog.cleanup()
        del self.popupDialog
        self.dialog.cleanup()
        del self.dialog
        messenger.send('enable-hotkeys')
| |
"""
BOOTMACHINE: A-Z transmutation of aluminium into rhodium.
"""
import copy
import getpass
import logging
import sys
import telnetlib
from fabric.api import env, local, run, sudo
from fabric.decorators import parallel, task
from fabric.colors import blue, cyan, green, magenta, red, white, yellow # noqa
from fabric.contrib.files import exists
from fabric.context_managers import settings as fabric_settings
from fabric.operations import reboot
from fabric.utils import abort
import settings
from bootmachine import known_hosts
from bootmachine.settings_validator import validate_settings
# Fail fast on malformed settings before any fabric task runs.
validate_settings(settings)
# Increase reconnection attempts when rebooting
env.connection_attempts = 12
def import_module(module):
    """
    Allows custom providers, configurators and distros.
    Import the provider, configurator, or distro module via a string.
    ex. ``bootmachine.contrib.providers.rackspace_openstack_v2``
    ex. ``bootmachine.contrib.configurators.salt``
    ex. ``bootmachine.contrib.distros.arch_201208``
    """
    try:
        __import__(module)
    except ImportError:
        abort("Unable to import the module: {0}".format(module))
    else:
        # __import__ returns the top-level package; fetch the actual
        # (possibly dotted) module from sys.modules instead.
        return sys.modules[module]
# import provider and configurator here so their fabric tasks are properly
# namespaced; both module paths come from the project settings.
provider = import_module(settings.PROVIDER_MODULE)
configurator = import_module(settings.CONFIGURATOR_MODULE)
@task(default=True)
def bootmachine():
    """
    Boot, bootstrap and configure all servers as per the settings.
    Usage:
        fab bootmachine
    """
    # set environment variables
    master()
    env.new_server_booted = False
    # boot new servers in serial to avoid api overlimit
    boot()
    # Probe all servers first; when none report failure the fleet is already
    # provisioned and the bootstrap steps can be skipped entirely.
    output = local("fab each ssh_test", capture=True)
    if "CONFIGURATOR FAIL!" not in output:
        print(green("all servers are fully provisioned."))
        return
    local("fab each bootstrap_distro")
    print(green("the distro is bootstrapped on all servers."))
    local("fab each bootstrap_configurator")
    print(green("the configurator is bootstrapped on all servers."))
    configure()
    # change the following output with caution
    # runtests.sh depends on exactly the following successful output
    print(green("all servers are fully provisioned."))
@task
def boot():
    """
    Boot servers as per the config.
    Usage:
        fab boot
    """
    if not hasattr(env, "bootmachine_servers"):
        master()
    # Idiom fix: use ``not in`` (was ``not x in y``) and hoist the booted
    # name list out of the loop (the original rebuilt it every iteration).
    booted_names = [s.name for s in env.bootmachine_servers]
    if any(srv["servername"] not in booted_names for srv in settings.SERVERS):
        # As soon as any configured server is missing, the provider boots
        # the whole set in one call.
        provider.bootem(settings.SERVERS)
        print(green("new server(s) have been booted."))
        env.new_server_booted = True
    print(green("all servers are booted."))
@task
@parallel
def bootstrap_distro():
    """
    Bootstraps the distro.
    In parallel for speed.
    Usage:
        fab each bootstrap_distro
    """
    if not hasattr(env, "bootmachine_servers"):
        abort("bootstrap_distro(): Try `fab each bootstrap_distro`")
    __set_ssh_vars(env)
    # The marker file makes this task idempotent across runs.
    if exists("/root/.bootmachine_distro_bootstrapped", use_sudo=True):
        print(green("{ip_addr} distro is already bootstrapped, skipping.".format(ip_addr=env.host)))
        return
    print(cyan("... {ip_addr} distro has begun bootstrapping .".format(ip_addr=env.host)))
    # upgrade distro
    server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]
    distro = import_module(server.distro_module)
    distro.bootstrap()
    sudo("touch /root/.bootmachine_distro_bootstrapped")
    print(green("{0} distro is bootstrapped.".format(server.name)))
@task
@parallel
def bootstrap_configurator():
    """
    Bootstraps the configurator.
    Installs the configurator and starts its processes.
    Does not run the configurator.
    Assumes the distro has been bootstrapped on all servers.
    In parallel for speed.
    Usage:
        fab each bootstrap_configurator
    """
    if not hasattr(env, "bootmachine_servers"):
        abort("bootstrap_configurator(): Try `fab each bootstrap_configurator`")
    __set_ssh_vars(env)
    # The marker file makes this task idempotent across runs.
    if exists("/root/.bootmachine_configurator_bootstrapped", use_sudo=True):
        print(green("{ip_addr} configurator is already bootstrapped, skipping.".format(
            ip_addr=env.host)))
        return
    print(cyan("... {ip_addr} configurator has begun bootstrapping .".format(ip_addr=env.host)))
    # bootstrap configurator
    server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]
    distro = import_module(server.distro_module)
    configurator.install(distro)
    configurator.setup(distro)
    configurator.start(distro)
    sudo("touch /root/.bootmachine_configurator_bootstrapped")
    print(green("{0} configurator is bootstrapped.".format(server.name)))
@task
def configure():
    """
    Configure all unconfigured servers.
    Assumes the distro and configurator have been bootstrapped on all
    servers.
    Usage:
        fab configure
    """
    master()
    configurator.launch()
    # run the configurator from the master server, maximum of 5x
    attempts = 0
    __set_unconfigured_servers()
    while env.unconfigured_servers:
        if attempts != 0:
            local("fab master configurator.restartall")
        if attempts == 5:
            abort("unable to configure the servers")
        attempts += 1
        print(yellow("attempt #{0} for {1}".format(attempts, env.unconfigured_servers)))
        configurator.configure()
        for server in env.unconfigured_servers:
            # if configuration was a success, reboot.
            # for example, a reboot is required when rebuilding a custom kernel
            server = __set_ssh_vars(server)  # mask the server
            # A server now answering on SSH_PORT is considered configured.
            if server.port == int(settings.SSH_PORT):
                reboot_server(server.name)
            else:
                print(red("after #{0} attempts, server {1} is still unconfigured".format(
                    attempts, server.name)))
        __set_ssh_vars(env)  # back to default
        __set_unconfigured_servers()
    # last, ensure that SSH is configured (locked down) for each server
    output = local("fab each ssh_test", capture=True)
    if "CONFIGURATOR FAIL!" in output:
        print(red("configurator failure."))
@task
def reboot_server(name):
    """
    Simply reboot a server by name.
    The trick here is to change the env vars to that of the server
    to be rebooted. Perform the reboot and change env vars back
    to their original value.
    Usage:
        fab reboot_server:name
    """
    __shared_setup()
    try:
        server = [s for s in env.bootmachine_servers if s.name == name][0]
    except IndexError:
        abort("The server '{0}' was not found.".format(name))
    original_user = env.user
    original_host_string = env.host_string
    # Probe port 22 first (fresh server, root login); fall back to the
    # configured SSH port with the local username when 22 is closed.
    try:
        env.port = 22
        telnetlib.Telnet(server.public_ip, env.port)
        env.user = "root"
    except IOError:
        env.port = int(settings.SSH_PORT)
        env.user = getpass.getuser()
        telnetlib.Telnet(server.public_ip, env.port)
    env.host_string = "{0}:{1}".format(server.public_ip, env.port)
    env.keepalive = 30  # keep the ssh key active, see fabric issue #402
    with fabric_settings(warn_only=True):
        reboot()
    env.user = original_user
    env.host_string = original_host_string
@task
@parallel
def ssh_test():
    """
    Prove that ssh is open on `settings.SSH_PORT`.

    Echoes 'CONFIGURATOR SUCCESS!' remotely when the hardened port works,
    and 'CONFIGURATOR FAIL!' locally otherwise (the orchestrating tasks
    grep for these exact strings).
    Usage:
        fab each ssh_test
    """
    for server in env.bootmachine_servers:
        if server.status != "ACTIVE":
            abort("The server '{0}' is in the '{1}' state.".format(server.name, server.status))
    __set_ssh_vars(env)
    if ":{0}".format(settings.SSH_PORT) not in env.host_string:
        local("echo 'CONFIGURATOR FAIL!'")
        return
    try:
        run("echo 'CONFIGURATOR SUCCESS!'")
        sudo("echo 'CONFIGURATOR SUCCESS!'")
    except Exception:
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt, masking fabric aborts; catch ordinary errors only.
        local("echo 'CONFIGURATOR FAIL!'")
@task
def each():
    """
    Set the env variables for a command to be run on all servers.
    Warning::
        Bootmachine assumes 'one' master and therefore
        currently does not support a multi-master configuration.
    """
    __shared_setup()
    # Append every server's public IP to both host lists, skipping
    # addresses that are already present.
    for srv in env.bootmachine_servers:
        for host_list in (env.hosts, env.all_hosts):
            if srv.public_ip not in host_list:
                host_list.append(srv.public_ip)
@task
def master():
    """
    Set the env variables for a command only to be run on the master server.
    """
    __shared_setup()
    # Point fabric's connection vars at the master server only.
    master_name = env.master_server.name
    for srv in env.bootmachine_servers:
        if srv.name != master_name:
            continue
        env.port = srv.port
        env.user = srv.user
        env.hosts.append(srv.public_ip)
        env.host = srv.public_ip
        env.host_string = "{0}:{1}".format(srv.public_ip, srv.port)
def __shared_setup():
    """
    Set the env variables common to both master() and each().

    Refreshes the provider's server list, records the master server on
    ``env`` and primes ssh connection details for every server.
    """
    provider.set_bootmachine_servers()
    for server in env.bootmachine_servers:
        if server.name == settings.MASTER:
            env.master_server = server
        server = __set_ssh_vars(server)
        # the following prevent prompts and warnings related to ssh keys by:
        # a) skipping false man-in-the-middle warnings
        # b) adding hosts to ~/.ssh/known_hosts
        known_hosts.add(server.user, server.public_ip, server.port)
def __set_ssh_vars(valid_object):
    """
    This method takes a valid_object, either the env or a server,
    and based on the results of telnet, it sets port, user,
    host_string variables for ssh. It also sets a configured
    variable if the SSH_PORT matches that in the settings. This
    would only match if the server is properly configured.
    """
    if valid_object == env:
        public_ip = env.host
    else:
        public_ip = valid_object.public_ip
    # Port 22 answering means the server is still in its stock state
    # (root login); otherwise fall back to the configured SSH_PORT.
    try:
        port = 22
        telnetlib.Telnet(public_ip, port)
    except IOError:
        port = int(settings.SSH_PORT)
        telnetlib.Telnet(public_ip, port)
    valid_object.port = port
    if valid_object.port == 22:
        valid_object.configured = False
        valid_object.user = "root"
    else:
        valid_object.configured = True
        valid_object.user = getpass.getuser()
    valid_object.host_string = "{0}:{1}".format(public_ip, port)
    return valid_object
def __set_unconfigured_servers():
    """Collect servers not yet answering on SSH_PORT into env."""
    configured_port = int(settings.SSH_PORT)
    env.unconfigured_servers = [
        srv for srv in env.bootmachine_servers if srv.port != configured_port
    ]
# Resolve issue with paramiko which occasionally causes bootmachine to hang
# http://forum.magiksys.net/viewtopic.php?f=5&t=82
# NOTE(review): this sets paramiko to DEBUG verbosity; confirm that level
# (rather than e.g. WARN) is what the workaround requires.
logging.getLogger("paramiko").setLevel(logging.DEBUG)
| |
from dataserv_client import common
import os
import tempfile
import unittest
import datetime
import json
import psutil
from future.moves.urllib.request import urlopen
from dataserv_client import cli
from dataserv_client import api
from btctxstore import BtcTxStore
from dataserv_client import exceptions
# Local dataserv node the tests talk to; it must be running beforehand.
url = "http://127.0.0.1:5000"
common.SHARD_SIZE = 1024 * 128  # monkey patch shard size to 128K
class AbstractTestSetup(object):
    """Shared fixture: gives every test case a fresh BtcTxStore."""
    def setUp(self):
        self.btctxstore = BtcTxStore()
        # debug output the server online list
        # print(urlopen(url + '/api/online/json').read().decode('utf8'))
class TestClientRegister(AbstractTestSetup, unittest.TestCase):
    """Registration flows against the dataserv node at ``url``."""

    def test_register_payout(self):
        # Register with a throwaway config, then verify the server's
        # online list shows this farmer with the expected payout fields.
        client = api.Client(url=url, config_path=tempfile.mktemp())
        config = client.config()
        self.assertTrue(client.register())
        result = json.loads(
            urlopen(url + '/api/online/json').read().decode('utf8')
        )
        result = [farmers for farmers in result['farmers']
                  if farmers['btc_addr'] == config['payout_address']]
        # last_seen/reg_time are server-assigned; echo them back into the
        # expected payload rather than predicting them.
        last_seen = result[0]['last_seen']
        reg_time = result[0]['reg_time']
        result = json.dumps(result, sort_keys=True)
        expected = json.dumps([{
            'height': 0,
            'btc_addr': config['payout_address'],
            'last_seen': last_seen,
            'payout_addr': config['payout_address'],
            'reg_time': reg_time,
            'uptime': 100.0
        }], sort_keys=True)
        self.assertEqual(result, expected)

    def test_register(self):  # register without creating a config
        client = api.Client(url=url)
        self.assertTrue(client.register())

    def test_already_registered(self):
        # A second register() for the same address must raise.
        def callback():
            client = api.Client(url=url, config_path=tempfile.mktemp())
            client.register()
            client.register()
        self.assertRaises(exceptions.AddressAlreadyRegistered, callback)

    def test_invalid_farmer(self):
        # A bogus API path must surface as ServerNotFound.
        def callback():
            client = api.Client(url=url + "/xyz",
                                config_path=tempfile.mktemp())
            client.register()
        self.assertRaises(exceptions.ServerNotFound, callback)
class TestClientPing(AbstractTestSetup, unittest.TestCase):
    """Ping behaviour for registered clients and bad server URLs."""

    def test_ping(self):
        client = api.Client(url=url, config_path=tempfile.mktemp())
        self.assertTrue(client.register())
        self.assertTrue(client.ping())

    def test_invalid_farmer(self):
        # Pinging a bogus API path must surface as ServerNotFound.
        def callback():
            client = api.Client(url=url + "/xyz",
                                config_path=tempfile.mktemp())
            client.ping()
        self.assertRaises(exceptions.ServerNotFound, callback)
class TestClientPoll(AbstractTestSetup, unittest.TestCase):
    """poll() should ping repeatedly with the requested delay."""

    def test_poll(self):
        client = api.Client(url=url, config_path=tempfile.mktemp())
        client.register()
        before = datetime.datetime.now()
        self.assertTrue(client.poll(delay=2, limit=2))
        after = datetime.datetime.now()
        # check that poll did 2 pings with 2 sec delay
        self.assertTrue(datetime.timedelta(seconds=2) <= (after - before))
class TestInvalidArgument(AbstractTestSetup, unittest.TestCase):
    """Each out-of-range argument must raise exceptions.InvalidInput."""

    def test_invalid_retry_limit(self):
        def callback():
            api.Client(connection_retry_limit=-1,
                       config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_invalid_retry_delay(self):
        def callback():
            api.Client(connection_retry_delay=-1,
                       config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_invalid_negativ_max_size(self):
        def callback():
            api.Client(max_size=-1, config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_invalid_zero_max_size(self):
        def callback():
            api.Client(max_size=0, config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_invalid_negativ_min_free_size(self):
        def callback():
            api.Client(min_free_size=-1, config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_invalid_zero_min_free_size(self):
        def callback():
            api.Client(min_free_size=0, config_path=tempfile.mktemp())
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_build_invalid_negative_workers(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.build(workers=-1)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_farm_invalid_zero_workers(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.farm(workers=0)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_build_invalid_negative_set_height_interval(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.build(set_height_interval=-1)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_farm_invalid_zero_set_height_interval(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.farm(set_height_interval=0)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_farm_invalid_negative_set_height_interval(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.farm(set_height_interval=-1)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_build_invalid_zero_set_height_interval(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.build(set_height_interval=0)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_poll_invalid_negativ_delay(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.poll(delay=-1, limit=0)
        self.assertRaises(exceptions.InvalidInput, callback)

    def test_audit_invalid_negativ_delay(self):
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.audit(delay=-1, limit=0)
        self.assertRaises(exceptions.InvalidInput, callback)
class TestConnectionRetry(AbstractTestSetup, unittest.TestCase):
    """Retry limits/delays must bound how long register() keeps trying."""

    def test_no_retry(self):
        def callback():
            client = api.Client(url="http://invalid.url",
                                connection_retry_limit=0,
                                connection_retry_delay=0,
                                config_path=tempfile.mktemp())
            client.register()
        before = datetime.datetime.now()
        self.assertRaises(exceptions.ConnectionError, callback)
        after = datetime.datetime.now()
        # With zero retries the failure must come back quickly.
        self.assertTrue(datetime.timedelta(seconds=15) > (after - before))

    def test_retry_server_not_found(self):
        def callback():
            client = api.Client(url="http://ServerNotFound.url",
                                config_path=tempfile.mktemp(),
                                connection_retry_limit=2,
                                connection_retry_delay=2)
            client.register()
        before = datetime.datetime.now()
        self.assertRaises(exceptions.ConnectionError, callback)
        after = datetime.datetime.now()
        # Two retries at 2s each must take at least ~4 seconds.
        self.assertTrue(datetime.timedelta(seconds=4) < (after - before))

    def test_retry_invalid_url(self):
        def callback():
            client = api.Client(url="http://127.0.0.257",
                                config_path=tempfile.mktemp(),
                                connection_retry_limit=2,
                                connection_retry_delay=2)
            client.register()
        before = datetime.datetime.now()
        self.assertRaises(exceptions.ConnectionError, callback)
        after = datetime.datetime.now()
        self.assertTrue(datetime.timedelta(seconds=4) < (after - before))

    def test_retry_high_retry_limit(self):
        # quiet=True suppresses per-attempt output for the large limit.
        def callback():
            client = api.Client(url="http://127.0.0.257",
                                config_path=tempfile.mktemp(),
                                connection_retry_limit=2000,
                                connection_retry_delay=0,
                                quiet=True)
            client.register()
        self.assertRaises(exceptions.ConnectionError, callback)
class TestClientBuild(AbstractTestSetup, unittest.TestCase):
    """build() should generate shards up to max_size / free-space limits."""
    # NOTE(review): tempfile.mktemp() is deprecated/race-prone; mkdtemp or
    # NamedTemporaryFile would be safer -- confirm before changing.

    def test_build(self):
        # 256K max with 128K shards -> some shards are generated.
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            max_size=1024 * 256)  # 256K
        client.register()
        generated = client.build(cleanup=True)
        self.assertTrue(len(generated))
        # 512K max with 128K shards -> exactly 4 shards, and the server's
        # online list must report height 4 for this farmer.
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            max_size=1024 * 512)  # 512K
        config = client.config()
        client.register()
        generated = client.build(cleanup=True)
        self.assertTrue(len(generated) == 4)
        result = json.loads(
            urlopen(url + '/api/online/json').read().decode('utf8')
        )
        result = [farmers for farmers in result['farmers']
                  if farmers['btc_addr'] == config['payout_address']]
        last_seen = result[0]['last_seen']
        reg_time = result[0]['reg_time']
        result = json.dumps(result, sort_keys=True)
        expected = json.dumps([{
            'height': 4,
            'btc_addr': config['payout_address'],
            'last_seen': last_seen,
            'payout_addr': config['payout_address'],
            'reg_time': reg_time,
            'uptime': 100.0
        }], sort_keys=True)
        self.assertEqual(result, expected)

    def test_build_min_free_space(self):
        store_path = tempfile.mktemp()
        os.mkdir(store_path)
        # Leave only ~256K of headroom so the free-space guard stops the
        # build before max_size (2M) is reached.
        my_free_size = psutil.disk_usage(store_path).free - (1024 * 256)  # 256
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            store_path=store_path,
                            max_size=1024 * 1024 * 2,
                            min_free_size=my_free_size)  # 256
        config = client.config()
        client.register()
        generated = client.build()
        self.assertTrue(len(generated) > 0)  # build at least 1 shard
        self.assertTrue(len(generated) < 16)  # stopped because of free space
        result = json.loads(
            urlopen(url + '/api/online/json').read().decode('utf8')
        )
        result = [farmers for farmers in result['farmers']
                  if farmers['btc_addr'] == config['payout_address']]
        last_seen = result[0]['last_seen']
        reg_time = result[0]['reg_time']
        result = json.dumps(result, sort_keys=True)
        expected = json.dumps([{
            'height': len(generated),
            'btc_addr': config['payout_address'],
            'last_seen': last_seen,
            'payout_addr': config['payout_address'],
            'reg_time': reg_time,
            'uptime': 100.0
        }], sort_keys=True)
        self.assertEqual(result, expected)
class TestClientFarm(AbstractTestSetup, unittest.TestCase):
    """Tests for api.Client.farm timing and server-side reporting."""

    def test_farm(self):
        """Farming with delay=2, limit=2 succeeds and takes >= 2 seconds."""
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            max_size=1024 * 256)  # 256K
        started = datetime.datetime.now()
        self.assertTrue(client.farm(delay=2, limit=2))  # farm reports success
        elapsed = datetime.datetime.now() - started
        # two pings with a 2 second delay take at least 2 seconds
        self.assertTrue(datetime.timedelta(seconds=2) <= elapsed)

    def test_farm_registered(self):
        """After registering, farming reports a height of 2 to the server."""
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            max_size=1024 * 256)  # 256K
        config = client.config()
        client.register()
        started = datetime.datetime.now()
        self.assertTrue(client.farm(delay=2, limit=2))  # farm reports success
        elapsed = datetime.datetime.now() - started
        # two pings with a 2 second delay take at least 2 seconds
        self.assertTrue(datetime.timedelta(seconds=2) <= elapsed)
        payload = json.loads(
            urlopen(url + '/api/online/json').read().decode('utf8')
        )
        entries = [entry for entry in payload['farmers']
                   if entry['btc_addr'] == config['payout_address']]
        expected = json.dumps([{
            'height': 2,
            'btc_addr': config['payout_address'],
            'last_seen': entries[0]['last_seen'],
            'payout_addr': config['payout_address'],
            'reg_time': entries[0]['reg_time'],
            'uptime': 100.0
        }], sort_keys=True)
        self.assertEqual(json.dumps(entries, sort_keys=True), expected)
class TestClientAudit(AbstractTestSetup, unittest.TestCase):
    """Tests for api.Client.audit."""

    def test_audit(self):
        """A registered client can perform a single audit pass."""
        client = api.Client(url=url,
                            config_path=tempfile.mktemp(),
                            max_size=1024 * 256)  # 256K
        client.register()
        audit_ok = client.audit(delay=1, limit=1)
        self.assertTrue(audit_ok)
class TestClientCliArgs(AbstractTestSetup, unittest.TestCase):
    """Exercises the command line interface for every subcommand."""

    def test_version(self):
        argv = ["--nop2p", "--config_path=" + tempfile.mktemp(), "version"]
        self.assertTrue(cli.main(argv))

    def test_poll(self):
        config_path = tempfile.mktemp()
        common = ["--nop2p", "--url=" + url, "--config_path=" + config_path]
        cli.main(common + ["register"])
        # delay/limit of 0: no pings are needed just to validate the args
        self.assertTrue(cli.main(common + ["poll", "--delay=0", "--limit=0"]))

    def test_register(self):
        argv = ["--nop2p", "--url=" + url,
                "--config_path=" + tempfile.mktemp(), "register"]
        self.assertTrue(cli.main(argv))

    def test_build(self):
        config_path = tempfile.mktemp()
        cli.main(["--nop2p", "--url=" + url,
                  "--config_path=" + config_path, "register"])
        argv = ["--nop2p",
                "--url=" + url,
                "--config_path=" + config_path,
                "--max_size=" + str(1024 * 256),       # 256K
                "--min_free_size=" + str(1024 * 256),  # 256K
                "build",
                "--workers=4",
                "--cleanup",
                "--rebuild",
                "--repair",
                "--set_height_interval=3"]
        self.assertTrue(cli.main(argv))

    def test_audit(self):
        config_path = tempfile.mktemp()
        common = ["--nop2p", "--url=" + url, "--config_path=" + config_path]
        cli.main(common + ["register"])
        # delay/limit of 0: no audits are needed just to validate the args
        self.assertTrue(cli.main(common + ["audit", "--delay=0", "--limit=0"]))

    def test_farm(self):
        argv = ["--nop2p",
                "--url=" + url,
                "--config_path=" + tempfile.mktemp(),
                "--max_size=" + str(1024 * 256),       # 256K
                "--min_free_size=" + str(1024 * 256),  # 256K
                "farm",
                "--workers=4",
                "--cleanup",
                "--rebuild",
                "--repair",
                "--set_height_interval=3",
                "--delay=0",
                "--limit=0"]  # no pings needed just to validate the args
        self.assertTrue(cli.main(argv))

    def test_ping(self):
        config_path = tempfile.mktemp()
        common = ["--nop2p", "--url=" + url, "--config_path=" + config_path]
        self.assertTrue(cli.main(common + ["register"]))
        self.assertTrue(cli.main(common + ["ping"]))

    def test_no_command_error(self):
        # argparse exits when no subcommand is supplied
        self.assertRaises(SystemExit, cli.main, [])

    def test_input_error(self):
        def run_with_bad_limit():
            config_path = tempfile.mktemp()
            common = ["--nop2p", "--url=" + url,
                      "--config_path=" + config_path]
            cli.main(common + ["register"])
            cli.main(common + ["poll", "--delay=5", "--limit=xyz"])
        self.assertRaises(ValueError, run_with_bad_limit)
class TestConfig(AbstractTestSetup, unittest.TestCase):
    """Tests for reading, writing and validating the client configuration."""

    def test_show(self):
        """Setting wallet and payout address is reflected in the config."""
        payout_wif = self.btctxstore.create_key()
        hwif = self.btctxstore.create_wallet()
        payout_address = self.btctxstore.get_address(payout_wif)
        client = api.Client(config_path=tempfile.mktemp())
        config = client.config(set_wallet=hwif,
                               set_payout_address=payout_address)
        self.assertEqual(config["wallet"], hwif)
        self.assertEqual(config["payout_address"], payout_address)

    def test_validation(self):
        """An invalid payout address raises InvalidAddress."""
        def callback():
            client = api.Client(config_path=tempfile.mktemp())
            client.config(set_payout_address="invalid")
        self.assertRaises(exceptions.InvalidAddress, callback)

    def test_persistance(self):
        """The same config file loads identically for every client."""
        config_path = tempfile.mktemp()
        a = api.Client(config_path=config_path).config()
        b = api.Client(config_path=config_path).config()
        c = api.Client(config_path=config_path).config()
        # BUG FIX: the original used assertEqual(a, b, c), where the third
        # positional argument is the failure *message* -- b and c were
        # never compared. Compare the pairs explicitly instead.
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertIsNotNone(c["wallet"])
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
from approver.models import Person, Project, Keyword, ClinicalArea, ClinicalSetting, BigAim, Descriptor, Contact
from approver.constants import SESSION_VARS
from approver.utils import extract_tags, update_tags, extract_model
import approver.utils as utils
from approver.utilities import send_email
from approver.constants import similarity_factors, email_from_address, base_url
import approver.templates.email_template as email_builder # get_email_body_person_added, get_email_subject_person_added
from django.contrib.auth.models import User
from django.utils import timezone, dateparse
from django.db.models.query import QuerySet
from django.urls import reverse
from approver.workflows import contact_person
def create_or_update_project(current_user, project_form, project_id=None):
    """
    Create a new project, or update an existing one, from a project form.

    When project_id matches an existing Project it is updated in place;
    otherwise a fresh project owned by the current user is created.
    Returns the resulting Project instance.
    """
    if not project_exists(project_id):
        return create_new_project_from_user_form(current_user, project_form)
    project = Project.objects.get(id=project_id)
    update_project_from_project_form(project, project_form, current_user)
    return project
def create_new_project_from_user_form(current_user, form):
    """
    Create and persist a new Project owned by the current user's person,
    then populate it from the submitted form.

    Returns the new Project instance.
    """
    # The original computed now = timezone.now() here but never used it.
    person = current_user.person
    new_project = Project(owner=person, title=form.get('title'))
    new_project.save(last_modified_by=current_user)
    update_project_from_project_form(new_project, form, current_user)
    return new_project
def update_project_from_project_form(project, project_form, editing_user):
    """
    Update an existing project in place from the submitted project_form.

    Copies the scalar fields, resolves the big aim, refreshes the
    collaborator/advisor relations, re-tags the keyword and clinical
    collections, sends notification emails, and saves the project.
    This will not work if the project does not yet exist.
    """
    # The original also computed now = timezone.now(), which was never used.
    parse_date = dateparse.parse_date
    project.title = project_form.get('title')
    project.description = project_form.get('description')
    project.overall_goal = project_form.get('overall_goal')
    project.measures = project_form.get('measures')
    project.proposed_start_date = parse_date(project_form.get('proposed_start_date'))
    project.proposed_end_date = parse_date(project_form.get('proposed_end_date'))
    project.big_aim = extract_model(BigAim, "name", project_form.get('select-big_aim'))
    clinical_area = extract_tags(project_form, 'clinical_area')
    clinical_setting = extract_tags(project_form, 'clinical_setting')
    mesh_keyword = extract_tags(project_form, 'mesh_keyword')
    project.collaborator = contact_person.get_collaborators_from_form(project_form, editing_user)
    project.advisor = contact_person.get_advisors_from_form(project_form, editing_user)
    project = update_tags(model=project,
                          tag_property='mesh_keyword',
                          tags=mesh_keyword,
                          tag_model=Descriptor,
                          tagging_user=editing_user)
    project = update_tags(model=project,
                          tag_property='clinical_area',
                          tags=clinical_area,
                          tag_model=ClinicalArea,
                          tagging_user=editing_user)
    project = update_tags(model=project,
                          tag_property='clinical_setting',
                          tags=clinical_setting,
                          tag_model=ClinicalSetting,
                          tagging_user=editing_user)
    # Notify newly-added advisors/collaborators, then confirm to the owner.
    email_advs_and_collabs(project, editing_user)
    email_confirmation(project)
    project.save(editing_user)
def get_project_or_none(project_id):
    """
    Return the Project with the given id, or None if it does not exist
    (or the id is not a valid lookup value).
    """
    try:
        return Project.objects.get(id=project_id)
    except (Project.DoesNotExist, ValueError, TypeError):
        # Narrowed from a bare `except:` which also hid programming errors.
        return None
def project_exists(project_id):
    """
    Return True if a Project with the given id exists.
    """
    # .exists() issues a cheap EXISTS query instead of materializing the
    # whole queryset just to take its len().
    return Project.objects.filter(id=project_id).exists()
def current_user_is_superuser(current_user):
    """Return whether the current user's person record is flagged admin."""
    person = current_user.person
    return person.is_admin
def current_user_is_project_owner(current_user, project):
    """
    Return True when the current user's person is the project's owner,
    compared by person id.
    """
    owner_id = project.owner.id
    return owner_id == current_user.person.id
def is_current_project_editable(current_user, project):
    """
    Return True when the current user may edit the project: superusers
    always can; owners can while the project is still editable.
    """
    if current_user_is_superuser(current_user):
        return True
    return (current_user_is_project_owner(current_user, project)
            and project.get_is_editable())
def current_user_is_project_advisor_or_collaborator(current_user, project):
    """
    Return True when the current user's person is listed as an advisor
    or a collaborator on the project.
    """
    person = current_user.person
    # Single boolean expression replaces the original if/elif/else ladder
    # that returned literal True/False; short-circuiting is preserved.
    return person in project.advisor.all() or person in project.collaborator.all()
def current_user_can_perform_project_delete(current_user, project):
    """
    Delete *project* on behalf of *current_user* when permitted.

    Returns 'Deleted Project' on success, otherwise an error message
    explaining why the deletion was refused.
    """
    # BUG FIX: guard against None first -- the original checked the admin
    # flag (and called project.delete) before the None check, and its None
    # branch referenced an undefined name `project_id` (NameError).
    if project is None:
        return 'Project does not exist.'
    if current_user.person.is_admin:
        project.delete(current_user)
        return 'Deleted Project'
    if not current_user_is_project_owner(current_user, project):
        return 'You are not authorized to delete this project.'
    if not project.get_is_editable():
        return 'You are not allowed to delete/edit this project.'
    project.delete(current_user)
    return 'Deleted Project'
def current_user_can_archive_project(current_user,project):
    """Only Super User can archive projects"""
    # NOTE(review): despite the docstring, no permission check is performed
    # here -- presumably the caller enforces the super-user rule; verify.
    project.archived = True
    project.save(current_user)
    return 'Archived Project'
def current_user_can_unarchive_project(current_user,project):
    """Only Super User can unarchive projects"""
    # NOTE(review): despite the docstring, no permission check is performed
    # here -- presumably the caller enforces the super-user rule; verify.
    project.archived = False
    project.save(current_user)
    return 'UnArchived Project'
def get_approved_projects():
    """
    Return a queryset of every project whose approval date has been set.
    """
    return Project.objects.filter(approval_date__isnull=False)
def get_similar_projects(project):
    """
    Score every approved project against *project* and return a list of
    (id, project, score) tuples, most similar first; zero scores omitted.
    """
    scored = []
    for candidate in get_approved_projects():
        score = _calculate_similarity_score(project, candidate)
        if score != 0:
            scored.append((candidate.id, candidate, score))
    scored.sort(key=lambda entry: entry[2], reverse=True)
    return scored
def _calculate_similarity_score(project, member):
    '''
    Weighted sum of per-field Jaccard similarities between two projects.
    Needs to be improved based on priority; the factors could sum to 100
    so the result scales from 0 to 100 (like a percentage).
    '''
    def comparable(first, second):
        return first is not None and second is not None

    total = 0.0
    if comparable(project.title, member.title):
        total += similarity_factors['title'] * _jaccard_similarity(project.title, member.title)
    if comparable(project.mesh_keyword, member.mesh_keyword):
        total += similarity_factors['keyword'] * _jaccard_similarity(project.mesh_keyword.all(), member.mesh_keyword.all())
    if comparable(project.description, member.description):
        total += similarity_factors['description'] * _jaccard_similarity(project.description, member.description)
    if comparable(project.big_aim, member.big_aim):
        total += similarity_factors['big_aim'] * _jaccard_similarity(project.big_aim.name, member.big_aim.name)
    if comparable(project.clinical_setting, member.clinical_setting):
        total += similarity_factors['clinical_setting'] * _jaccard_similarity(project.clinical_setting.all(), member.clinical_setting.all())
    if comparable(project.clinical_area, member.clinical_area):
        total += similarity_factors['clinical_area'] * _jaccard_similarity(project.clinical_area.all(), member.clinical_area.all())
    if comparable(project.category, member.category):
        total += similarity_factors['category'] * _jaccard_similarity(project.category.all(), member.category.all())
    return total
def _get_set_for_query(queryset):
res = set()
for element in queryset.all():
res.add(getattr(element, element.tag_property_name))
return res
def _jaccard_similarity(doc1, doc2):
    """
    Jaccard similarity |A ∩ B| / |A ∪ B| between two documents.

    Each document is either a Django QuerySet of tagged models or a plain
    string (split on whitespace into a bag of words). Returns a float in
    [0.0, 1.0]; 0.0 when there is no overlap.
    """
    if isinstance(doc1, QuerySet):
        a = _get_set_for_query(doc1)
        b = _get_set_for_query(doc2)
    else:
        a = set(doc1.split())
        b = set(doc2.split())
    intersection = a & b
    if not intersection:
        # Also covers two empty documents, avoiding division by zero below.
        # (The original returned the int 0 here instead of a float.)
        return 0.0
    return len(intersection) / float(len(a | b))
def email_advs_and_collabs(project, editing_user):
    """
    Notify advisors and collaborators who have not been emailed yet,
    then record the full set of notified people on the project.
    """
    already_notified = set(project.sent_email_list.all())
    advisors = set(project.advisor.all())
    collaborators = set(project.collaborator.all())
    __generate_email(advisors - already_notified, editing_user, 'advisor', project)
    __generate_email(collaborators - already_notified, editing_user, 'collaborator', project)
    project.sent_email_list = advisors | collaborators
def __generate_email(to_person_set, editing_user, role, project):
    """
    Send the 'person added' notification to every person in the set,
    describing who added them, in which role, and linking the project.
    """
    project_url = base_url + reverse('approver:projects', args=[project.id])
    subject = email_builder.get_email_subject_person_added()
    body = email_builder.get_email_body_person_added(
        first_name=editing_user.person.first_name,
        last_name=editing_user.person.last_name,
        role=role,
        project_title=project.title,
        project_url=project_url,
    )
    for recipient in to_person_set:
        send_email(subject, body, email_from_address, recipient.email_address)
def email_confirmation(project):
    """Email the project owner a confirmation linking to the saved project."""
    project_url = base_url + reverse('approver:projects', args=[project.id])
    subject = email_builder.get_email_subject_confirmation()
    body = email_builder.get_email_sent_confirmation_body(project.title, project_url)
    send_email(subject, body, email_from_address, project.owner.email_address)
| |
from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
import DistributedPhysicsWorldAI
from direct.fsm.FSM import FSM
from toontown.ai.ToonBarrier import *
from toontown.golf import GolfGlobals
import random
from toontown.golf import GolfHoleBase
class DistributedGolfHoleAI(DistributedPhysicsWorldAI.DistributedPhysicsWorldAI, FSM, GolfHoleBase.GolfHoleBase):
    """AI-side controller for a single golf hole.

    Owns the ODE physics ball, tracks whose turn it is, and drives the
    per-hole state machine (WaitTee -> WaitSwing -> WaitPlayback) until
    every golfer has holed out, then hands control back to the course.
    """
    # Legal transitions for the per-hole FSM.
    defaultTransitions = {'Off': ['Cleanup', 'WaitTee'],
     'WaitTee': ['WaitSwing',
                 'Cleanup',
                 'WaitTee',
                 'WaitPlayback'],
     'WaitSwing': ['WaitPlayback',
                   'Cleanup',
                   'WaitSwing',
                   'WaitTee'],
     'WaitPlayback': ['WaitSwing',
                      'Cleanup',
                      'WaitTee',
                      'WaitPlayback'],
     'Cleanup': ['Off']}
    # Class-level counter used only to build the FSM name below.
    id = 0
    # NOTE(review): bare `directNotify` presumably relies on Panda3D
    # injecting it into builtins; the DirectNotifyGlobal import is
    # otherwise unused here -- verify.
    notify = directNotify.newCategory('DistributedGolfHoleAI')

    def __init__(self, zoneId, golfCourse, holeId):
        """Set up per-hole bookkeeping for the golfers on *golfCourse*."""
        FSM.__init__(self, 'Golf_%s_FSM' % self.id)
        DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.__init__(self, simbase.air)
        GolfHoleBase.GolfHoleBase.__init__(self)
        self.zoneId = zoneId
        self.golfCourse = golfCourse
        self.holeId = holeId
        # Copy so later mutation does not affect the course's list.
        self.avIdList = golfCourse.avIdList[:]
        # One watched flag per (up to 4) golfer slots.
        self.watched = [0,
         0,
         0,
         0]
        self.barrierPlayback = None
        # Client currently trusted to simulate/record the swing result.
        self.trustedPlayerId = None
        self.activeGolferIndex = None
        self.activeGolferId = None
        self.holeInfo = GolfGlobals.HoleInfo[self.holeId]
        # -1 means the golfer has not picked a tee yet.
        self.teeChosen = {}
        for avId in self.avIdList:
            self.teeChosen[avId] = -1
        # Last known ball position per golfer.
        self.ballPos = {}
        for avId in self.avIdList:
            self.ballPos[avId] = Vec3(0, 0, 0)
        self.playStarted = False
        return

    def curGolfBall(self):
        """Return the shared physics ball geometry."""
        return self.ball

    def generate(self):
        """Create the ball and rays, and place the ball on a starting tee."""
        DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.generate(self)
        self.ball = self.createBall()
        self.createRays()
        # Prefer the second tee when the level defines more than one.
        if len(self.teePositions) > 1:
            startPos = self.teePositions[1]
        else:
            startPos = self.teePositions[0]
        startPos += Vec3(0, 0, GolfGlobals.GOLF_BALL_RADIUS)
        self.ball.setPosition(startPos)

    def delete(self):
        """Tear down the terrain model and any outstanding playback barrier."""
        self.notify.debug('__delete__')
        DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.delete(self)
        self.notify.debug('calling self.terrainModel.removeNode')
        self.terrainModel.removeNode()
        self.notify.debug('self.barrierPlayback is %s' % self.barrierPlayback)
        if self.barrierPlayback:
            self.notify.debug('calling self.barrierPlayback.cleanup')
            self.barrierPlayback.cleanup()
            self.notify.debug('calling self.barrierPlayback = None')
            self.barrierPlayback = None
        self.activeGolferId = None
        return

    def setZoneId(self, zoneId):
        """Record the zone this hole lives in."""
        self.zoneId = zoneId

    def setAvatarReadyHole(self):
        """Client RPC: the sending avatar is ready to start this hole."""
        self.notify.debugStateCall(self)
        avId = self.air.getAvatarIdFromSender()
        self.golfCourse.avatarReadyHole(avId)

    def startPlay(self):
        """Begin play on this hole by picking the first golfer."""
        self.notify.debug('startPlay')
        self.playStarted = True
        self.numGolfers = len(self.golfCourse.getGolferIds())
        self.selectNextGolfer()

    def selectNextGolfer(self):
        """Advance to the next golfer still playing and prompt their action.

        Skips golfers no longer playing (bounded by a safety counter), then
        asks the chosen golfer either to pick a tee or to swing.
        """
        self.notify.debug('selectNextGolfer, old golferIndex=%s old golferId=%s' % (self.activeGolferIndex, self.activeGolferId))
        if self.golfCourse.isCurHoleDone():
            return
        if self.activeGolferIndex == None:
            self.activeGolferIndex = 0
            self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
        else:
            self.activeGolferIndex += 1
            if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()):
                self.activeGolferIndex = 0
            self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
        safety = 0
        # Bounded scan for the next golfer who is still playing.
        while safety < 50 and not self.golfCourse.checkGolferPlaying(self.golfCourse.getGolferIds()[self.activeGolferIndex]):
            self.activeGolferIndex += 1
            self.notify.debug('Index %s' % self.activeGolferIndex)
            if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()):
                self.activeGolferIndex = 0
            self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
            safety += 1
        if safety != 50:
            golferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
            if self.teeChosen[golferId] == -1:
                self.sendUpdate('golferChooseTee', [golferId])
                self.request('WaitTee')
            else:
                self.sendUpdate('golfersTurn', [golferId])
                self.request('WaitSwing')
        else:
            self.notify.debug('safety')
        self.notify.debug('selectNextGolfer, new golferIndex=%s new golferId=%s' % (self.activeGolferIndex, self.activeGolferId))
        return

    def clearWatched(self):
        """Reset watched flags: 0 for each active golfer, 1 for unused slots."""
        self.watched = [1,
         1,
         1,
         1]
        for index in xrange(len(self.golfCourse.getGolferIds())):
            self.watched[index] = 0

    def setWatched(self, avId):
        """Mark the given golfer's slot as having watched the playback."""
        for index in xrange(len(self.golfCourse.getGolferIds())):
            if self.golfCourse.getGolferIds()[index] == avId:
                self.watched[index] = 1

    def checkWatched(self):
        """Return True when every active golfer has been marked watched."""
        if 0 not in self.watched:
            return True
        else:
            return False

    def turnDone(self):
        """Client RPC: the sender finished watching; release them from the barrier."""
        self.notify.debug('Turn Done')
        avId = self.air.getAvatarIdFromSender()
        if self.barrierPlayback:
            self.barrierPlayback.clear(avId)

    def ballInHole(self, golferId = None):
        """Record that a golfer's ball is in the hole and move play along."""
        self.notify.debug('ballInHole')
        if golferId:
            avId = golferId
        else:
            avId = self.air.getAvatarIdFromSender()
        self.golfCourse.setBallIn(avId)
        if self.golfCourse.isCurHoleDone():
            self.notify.debug('ballInHole doing nothing')
        else:
            self.notify.debug('ballInHole calling self.selectNextGolfer')
            self.selectNextGolfer()

    def getHoleId(self):
        """Return this hole's id."""
        return self.holeId

    def finishHole(self):
        """Tell the course this hole is complete."""
        self.notify.debug('finishHole')
        self.golfCourse.holeOver()

    def getGolferIds(self):
        """Return the avatar ids playing this hole."""
        return self.avIdList

    def loadLevel(self):
        """Load the terrain, parse its locators, and collect all tee positions."""
        GolfHoleBase.GolfHoleBase.loadLevel(self)
        optionalObjects = self.terrainModel.findAllMatches('**/optional*')
        requiredObjects = self.terrainModel.findAllMatches('**/required*')
        self.parseLocators(optionalObjects, 1)
        self.parseLocators(requiredObjects, 0)
        self.teeNodePath = self.terrainModel.find('**/tee0')
        if self.teeNodePath.isEmpty():
            # Fallback position when the model defines no tee0.
            teePos = Vec3(0, 0, 10)
        else:
            teePos = self.teeNodePath.getPos()
            teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
            self.notify.debug('teeNodePath heading = %s' % self.teeNodePath.getH())
        self.teePositions = [teePos]
        teeIndex = 1
        teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
        # Gather tee1, tee2, ... until a lookup comes back empty.
        while not teeNode.isEmpty():
            teePos = teeNode.getPos()
            teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
            self.teePositions.append(teePos)
            self.notify.debug('teeNodeP heading = %s' % teeNode.getH())
            teeIndex += 1
            teeNode = self.terrainModel.find('**/tee%d' % teeIndex)

    def createLocatorDict(self):
        """Map locator number -> node path for the hard-surface locators."""
        self.locDict = {}
        locatorNum = 1
        curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)
        while not curNodePath.isEmpty():
            self.locDict[locatorNum] = curNodePath
            locatorNum += 1
            curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)

    def loadBlockers(self):
        """Keep the blockers configured for this hole and remove the rest."""
        loadAll = simbase.config.GetBool('golf-all-blockers', 0)
        self.createLocatorDict()
        self.blockerNums = self.holeInfo['blockers']
        for locatorNum in self.locDict:
            if locatorNum in self.blockerNums or loadAll:
                locator = self.locDict[locatorNum]
                locatorParent = locator.getParent()
                locator.getChildren().wrtReparentTo(locatorParent)
            else:
                self.locDict[locatorNum].removeNode()
        self.hardSurfaceNodePath.flattenStrong()

    def createBall(self):
        """Create the golf-ball sphere in the physics world and return its geom."""
        golfBallGeom = self.createSphere(self.world, self.space, GolfGlobals.GOLF_BALL_DENSITY, GolfGlobals.GOLF_BALL_RADIUS, 1)[1]
        return golfBallGeom

    def preStep(self):
        """Physics pre-step hook; defers to GolfHoleBase."""
        GolfHoleBase.GolfHoleBase.preStep(self)

    def postStep(self):
        """Physics post-step hook; defers to GolfHoleBase."""
        GolfHoleBase.GolfHoleBase.postStep(self)

    def postSwing(self, cycleTime, power, x, y, z, dirX, dirY):
        """Client RPC: store the sender's swing; act on it if we have the
        common object data already."""
        avId = self.air.getAvatarIdFromSender()
        self.storeAction = [avId,
         cycleTime,
         power,
         x,
         y,
         z,
         dirX,
         dirY]
        if self.commonHoldData:
            self.doAction()

    def postSwingState(self, cycleTime, power, x, y, z, dirX, dirY, curAimTime, commonObjectData):
        """Client RPC: store swing + world state and delegate the physics
        simulation/recording of the result to one trusted client."""
        self.notify.debug('postSwingState')
        if not self.golfCourse.getStillPlayingAvIds():
            return
        avId = self.air.getAvatarIdFromSender()
        self.storeAction = [avId,
         cycleTime,
         power,
         x,
         y,
         z,
         dirX,
         dirY]
        self.commonHoldData = commonObjectData
        self.trustedPlayerId = self.choosePlayerToSimulate()
        self.sendUpdateToAvatarId(self.trustedPlayerId, 'assignRecordSwing', [avId,
         cycleTime,
         power,
         x,
         y,
         z,
         dirX,
         dirY,
         commonObjectData])
        self.golfCourse.addAimTime(avId, curAimTime)

    def choosePlayerToSimulate(self):
        """Pick which still-playing client is trusted to simulate the swing."""
        stillPlaying = self.golfCourse.getStillPlayingAvIds()
        playerId = 0
        if simbase.air.config.GetBool('golf-trust-driver-first', 0):
            if stillPlaying:
                playerId = stillPlaying[0]
        else:
            # NOTE(review): random.choice raises on an empty list; callers
            # appear to check getStillPlayingAvIds first -- verify.
            playerId = random.choice(stillPlaying)
        return playerId

    def ballMovie2AI(self, cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData):
        """Client RPC: receive the recorded ball movie from the trusted
        client, update the ball position/score, and fan the movie out to
        the other players; ignore movies from untrusted senders."""
        sentFromId = self.air.getAvatarIdFromSender()
        if sentFromId == self.trustedPlayerId:
            # Use the second-to-last frame as the final resting position.
            lastFrameNum = len(movie) - 2
            if lastFrameNum < 0:
                lastFrameNum = 0
            lastFrame = movie[lastFrameNum]
            lastPos = Vec3(lastFrame[1], lastFrame[2], lastFrame[3])
            self.ballPos[avId] = lastPos
            self.golfCourse.incrementScore(avId)
            for id in self.golfCourse.getStillPlayingAvIds():
                if not id == sentFromId:
                    self.sendUpdateToAvatarId(id, 'ballMovie2Client', [cycleTime,
                     avId,
                     movie,
                     spinMovie,
                     ballInFrame,
                     ballTouchedHoleFrame,
                     ballFirstTouchedHoleFrame,
                     commonObjectData])
            if self.state == 'WaitPlayback' or self.state == 'WaitTee':
                self.notify.warning('ballMovie2AI requesting from %s to WaitPlayback' % self.state)
            self.request('WaitPlayback')
        elif self.trustedPlayerId == None:
            return
        else:
            # Untrusted sender: fall back to simulating on the AI itself.
            self.doAction()
        self.trustedPlayerId = None
        return

    def performReadyAction(self):
        """Simulate the stored swing on the AI, broadcast the resulting
        movie to all clients, and update the golfer's ball position."""
        avId = self.storeAction[0]
        if self.state == 'WaitPlayback':
            self.notify.debugStateCall(self)
            self.notify.debug('ignoring the postSwing for avId=%d since we are in WaitPlayback' % avId)
            return
        if avId == self.activeGolferId:
            self.golfCourse.incrementScore(self.activeGolferId)
        else:
            self.notify.warning('activGolferId %d not equal to sender avId %d' % (self.activeGolferId, avId))
        if avId not in self.golfCourse.drivingToons:
            position = self.ballPos[avId]
        else:
            position = Vec3(self.storeAction[3], self.storeAction[4], self.storeAction[5])
        self.useCommonObjectData(self.commonHoldData)
        newPos = self.trackRecordBodyFlight(self.ball, self.storeAction[1], self.storeAction[2], position, self.storeAction[6], self.storeAction[7])
        if self.state == 'WaitPlayback' or self.state == 'WaitTee':
            self.notify.warning('performReadyAction requesting from %s to WaitPlayback' % self.state)
        self.request('WaitPlayback')
        self.sendUpdate('ballMovie2Client', [self.storeAction[1],
         avId,
         self.recording,
         self.aVRecording,
         self.ballInHoleFrame,
         self.ballTouchedHoleFrame,
         self.ballFirstTouchedHoleFrame,
         self.commonHoldData])
        self.ballPos[avId] = newPos
        self.trustedPlayerId = None
        return

    def postResult(self, cycleTime, avId, recording, aVRecording, ballInHoleFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame):
        """Client RPC stub; results are handled via ballMovie2AI instead."""
        pass

    def enterWaitSwing(self):
        pass

    def exitWaitSwing(self):
        pass

    def enterWaitTee(self):
        pass

    def exitWaitTee(self):
        pass

    def enterWaitPlayback(self):
        """Build a barrier that waits (up to 120s) for every still-playing
        client to finish watching the ball movie."""
        self.notify.debug('enterWaitPlayback')
        stillPlayingList = self.golfCourse.getStillPlayingAvIds()
        self.barrierPlayback = ToonBarrier('waitClientsPlayback', self.uniqueName('waitClientsPlayback'), stillPlayingList, 120, self.handleWaitPlaybackDone, self.handlePlaybackTimeout)

    def hasCurGolferReachedMaxSwing(self):
        """Return True when the active golfer is out of swings for this hole
        (unless they have the unlimited-swing cheat enabled)."""
        strokes = self.golfCourse.getCurHoleScore(self.activeGolferId)
        maxSwing = self.holeInfo['maxSwing']
        retval = strokes >= maxSwing
        if retval:
            av = simbase.air.doId2do.get(self.activeGolferId)
            if av:
                if av.getUnlimitedSwing():
                    retval = False
        return retval

    def handleWaitPlaybackDone(self):
        """All clients finished playback: either score the ball in, or move
        to the next golfer."""
        if self.isCurBallInHole(self.activeGolferId) or self.hasCurGolferReachedMaxSwing():
            if self.activeGolferId:
                self.ballInHole(self.activeGolferId)
        else:
            self.selectNextGolfer()

    def isCurBallInHole(self, golferId):
        """Return True when the golfer's ball rests close enough to any hole."""
        retval = False
        for holePos in self.holePositions:
            displacement = self.ballPos[golferId] - holePos
            length = displacement.length()
            self.notify.debug('hole %s length=%s' % (holePos, length))
            if length <= GolfGlobals.DistanceToBeInHole:
                retval = True
                break
        return retval

    def exitWaitPlayback(self):
        """Tear down the playback barrier when leaving the state."""
        self.notify.debug('exitWaitPlayback')
        if hasattr(self, 'barrierPlayback') and self.barrierPlayback:
            self.barrierPlayback.cleanup()
            self.barrierPlayback = None
        return

    def enterCleanup(self):
        pass

    def exitCleanup(self):
        pass

    def handlePlaybackTimeout(self, task = None):
        """Barrier timed out: proceed as if every client finished playback."""
        self.notify.debug('handlePlaybackTimeout')
        self.handleWaitPlaybackDone()

    def getGolfCourseDoId(self):
        """Return the distributed-object id of the owning golf course."""
        return self.golfCourse.doId

    def avatarDropped(self, avId):
        """Handle a disconnecting avatar: release barriers, reassign the
        trusted simulation if needed, and advance the turn."""
        self.notify.warning('avId %d dropped, self.state=%s' % (avId, self.state))
        if self.barrierPlayback:
            self.barrierPlayback.clear(avId)
        else:
            if avId == self.trustedPlayerId:
                self.doAction()
            if avId == self.activeGolferId and not self.golfCourse.haveAllGolfersExited():
                self.selectNextGolfer()

    def setAvatarTee(self, chosenTee):
        """Client RPC: record the sender's tee choice, place their ball
        there, and hand them the swing."""
        golferId = self.air.getAvatarIdFromSender()
        self.teeChosen[golferId] = chosenTee
        self.ballPos[golferId] = self.teePositions[chosenTee]
        self.sendUpdate('setAvatarFinalTee', [golferId, chosenTee])
        self.sendUpdate('golfersTurn', [golferId])
        self.request('WaitSwing')

    def setBox(self, pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2):
        """Relay a moving-box state (position, orientation, velocities) to clients."""
        self.sendUpdate('sendBox', [pos0,
         pos1,
         pos2,
         quat0,
         quat1,
         quat2,
         quat3,
         anV0,
         anV1,
         anV2,
         lnV0,
         lnV1,
         lnV2])

    def parseLocators(self, objectCollection, optional = 0):
        """Instantiate locators from the model; optional movers are only
        created when listed in this hole's 'optionalMovers' config."""
        if optional and objectCollection.getNumPaths():
            if 'optionalMovers' in self.holeInfo:
                for optionalMoverId in self.holeInfo['optionalMovers']:
                    searchStr = 'optional_mover_' + str(optionalMoverId)
                    for objIndex in xrange(objectCollection.getNumPaths()):
                        object = objectCollection.getPath(objIndex)
                        if searchStr in object.getName():
                            self.fillLocator(objectCollection, objIndex)
                            break
        else:
            for index in xrange(objectCollection.getNumPaths()):
                self.fillLocator(objectCollection, index)

    def fillLocator(self, objectCollection, index):
        """Decode a locator's name (X/Y sizes, travel distance, and kind
        encoded as underscore-separated tokens, 'p' standing in for '.')
        and create the matching common object."""
        path = objectCollection[index]
        pathName = path.getName()
        pathArray = pathName.split('_')
        sizeX = None
        sizeY = None
        move = None
        type = None
        for subString in pathArray:
            if subString[:1] == 'X':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                sizeX = float(dataString)
            elif subString[:1] == 'Y':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                sizeY = float(dataString)
            elif subString[:1] == 'd':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                move = float(dataString)
            elif subString == 'mover':
                type = 4
            elif subString == 'windmillLocator':
                type = 3

        if type == 4 and move and sizeX and sizeY:
            self.createCommonObject(4, path.getPos(), path.getHpr(), sizeX, sizeY, move)
        elif type == 3:
            self.createCommonObject(3, path.getPos(), path.getHpr())
        return
| |
"""
This allows creation of a community of groups without a graphical user interface.
WARNING: As these routines run in administrative mode, no access control is used.
Care must be taken to generate reasonable metadata, specifically, concerning
who owns what. Non-sensical options are possible to create.
This code is not a design pattern for actually interacting with communities.
WARNING: This command cannot be executed via 'hsctl' because that does not
preserve the quoting needed for community names with embedded spaces.
Please connect to the bash shell for the hydroshare container before running this command.
"""
from django.core.management.base import BaseCommand
from django.core.files import File
from hs_access_control.models.community import Community
from hs_access_control.models.privilege import PrivilegeCodes, \
UserGroupPrivilege, UserCommunityPrivilege, GroupCommunityPrivilege
from hs_access_control.management.utilities import community_from_name_or_id, \
group_from_name_or_id, user_from_name
from hs_access_control.models.invite import GroupCommunityRequest
import os
from pprint import pprint
def usage():
    """Print the command-line syntax summary for access_community."""
    help_lines = (
        "access_community usage:",
        "  access_community [{cname} [{request} [{options}]]]",
        "Where:",
        "  {cname} is a community name. Use '' to embed spaces.",
        "  {request} is one of:",
        "      list: print the configuration of a community.",
        "      create: create the community.",
        "      update: update metadata for community.",
        "      remove: remove community.",
        "      rename: rename community.",
        "  Options for create and update include:",
        "      --owner={username}: set an owner for the community.",
        "      --description='{description}': set the description to the text provided.",
        "      --purpose='{purpose}': set the purpose to the text provided.",
        "  group {gname} {request} {options}: group commands.",
        "      {gname}: group name.",
        "      {request} is one of:",
        "          add: add the group to the community.",
        "          update: update community metadata for the group.",
        "          remove: remove the group from the community.",
        "          invite: invite the group to join the community.",
        "          request: request from group owner to join the community.",
        "          approve: approve a request or invitation.",
        "          decline: decline a request or invitation.",
        "  owner {oname} {request}: owner commands",
        "      {oname}: owner name.",
        "      {request} is one of:",
        "          [blank]: list community owners",
        "          add: add an owner for the community.",
        "          remove: remove an owner from the community.",
        "      banner {path-to-banner}: upload a banner.",
    )
    for help_line in help_lines:
        print(help_line)
class Command(BaseCommand):
    help = """Manage communities of groups."""

    def add_arguments(self, parser):
        # a command phrase, e.g. "{cname} group {gname} add"
        parser.add_argument('command', nargs='*', type=str)
        parser.add_argument(
            '--syntax',
            action='store_true',  # True for presence, False for absence
            dest='syntax',  # value is options['syntax']
            help='print help message',
        )
        parser.add_argument(
            '--owner',
            dest='owner',
            help='owner of community (does not affect quota)'
        )
        parser.add_argument(
            '--description',
            dest='description',
            help='description of community'
        )
        parser.add_argument(
            '--purpose',
            dest='purpose',
            help='purpose of community'
        )

    def handle(self, *args, **options):
        """Dispatch one community-management request.

        See usage() for the full command grammar. Exits the process with
        status 0 after informational listings and 1 on errors.
        """
        if options['syntax']:
            usage()
            exit(1)

        # first token: community name (None lists all communities)
        if len(options['command']) > 0:
            cname = options['command'][0]
        else:
            cname = None

        # second token: subcommand (None is equivalent to 'list')
        if len(options['command']) > 1:
            command = options['command'][1]
        else:
            command = None

        # resolve owner: used in several update commands as grantor
        if options['owner'] is not None:
            oname = options['owner']
        else:
            oname = 'admin'
        owner = user_from_name(oname)
        if owner is None:
            usage()
            exit(1)

        # not specifying a community lists active communities
        if cname is None:
            print("All communities:")
            for c in Community.objects.all():
                print("  '{}' (id={})".format(c.name, str(c.id)))
            usage()
            exit(0)

        if command is None or command == 'list':
            community = community_from_name_or_id(cname)
            if community is None:
                usage()
                exit(1)
            print("community '{}' (id={}):".format(community.name, community.id))
            print("  description: {}".format(community.description))
            print("  purpose: {}".format(community.purpose))
            print("  owners:")
            for ucp in UserCommunityPrivilege.objects.filter(community=community,
                                                             privilege=PrivilegeCodes.OWNER):
                print("    {} (grantor {})".format(ucp.user.username, ucp.grantor.username))
            print("  member groups:")
            for gcp in GroupCommunityPrivilege.objects.filter(community=community):
                if gcp.privilege == PrivilegeCodes.CHANGE:
                    others = "can edit community resources"
                else:
                    others = "can view community resources"
                print("    '{}' (id={}) (grantor={}):"
                      .format(gcp.group.name, gcp.group.id, gcp.grantor.username))
                print("       {}.".format(others))
                print("    '{}' (id={}) owners are:".format(gcp.group.name, str(gcp.group.id)))
                for ugp in UserGroupPrivilege.objects.filter(group=gcp.group,
                                                             privilege=PrivilegeCodes.OWNER):
                    print("       {}".format(ugp.user.username))
            print("  invitations and requests:")
            for gcr in GroupCommunityRequest.objects.filter(community=community, redeemed=False):
                if gcr.group_owner is None:
                    print("    '{}' (id={}) invited (by community owner={}):"
                          .format(gcr.group.name, gcr.group.id, gcr.community_owner.username))
                else:
                    print("    '{}' (id={}) requested membership (by group owner={}):"
                          .format(gcr.group.name, gcr.group.id, gcr.group_owner.username))
            exit(0)

        # These are idempotent actions. Creating a community twice does nothing.
        if command == 'update' or command == 'create':
            community = community_from_name_or_id(cname)
            if community is not None:
                # BUGFIX: use the resolved community directly; the old
                # Community.objects.get(name=cname) re-fetch raised
                # DoesNotExist when an id was given instead of a name.
                if options['description'] is not None:
                    community.description = options['description']
                    community.save()
                if options['purpose'] is not None:
                    community.purpose = options['purpose']
                    community.save()
                UserCommunityPrivilege.update(user=owner,
                                              community=community,
                                              privilege=PrivilegeCodes.OWNER,
                                              grantor=owner)
            else:  # if it does not exist, create it
                if options['description'] is not None:
                    description = options['description']
                else:
                    description = "No description"
                purpose = options['purpose']
                print("creating community '{}' with owner '{}' and description '{}'"
                      .format(cname, owner, description))
                owner.uaccess.create_community(cname, description, purpose=purpose)

        elif command == 'remove':
            # at this point, community must exist
            community = community_from_name_or_id(cname)
            if community is None:
                print("community '{}' does not exist".format(cname))
                exit(1)
            print("removing community '{}' (id={})".format(community.name, community.id))
            community.delete()

        elif command == 'rename':
            # at this point, community must exist
            community = community_from_name_or_id(cname)
            if community is None:
                print("community '{}' does not exist".format(cname))
                exit(1)
            nname = options['command'][2]
            print("renaming community '{}' (id={}) to '{}'".format(community.name, community.id, nname))
            community.name = nname
            community.save()

        elif command == 'owner':
            # at this point, community must exist
            community = community_from_name_or_id(cname)
            if community is None:
                usage()
                exit(1)
            if len(options['command']) < 3:
                # list owners
                print("owners of community '{}' (id={})".format(community.name, str(community.id)))
                for ucp in UserCommunityPrivilege.objects.filter(community=community,
                                                                 privilege=PrivilegeCodes.OWNER):
                    print("    {}".format(ucp.user.username))
                exit(0)
            oname = options['command'][2]
            owner = user_from_name(oname)
            if owner is None:
                usage()
                exit(1)
            if len(options['command']) < 4:
                print("user {} owns community '{}' (id={})"
                      .format(owner.username, community.name, str(community.id)))
                # BUGFIX: exit after the listing; the code previously fell
                # through and raised IndexError on options['command'][3].
                exit(0)
            action = options['command'][3]
            if action == 'add':
                print("adding {} as owner of {} (id={})"
                      .format(owner.username, community.name, str(community.id)))
                UserCommunityPrivilege.share(user=owner, community=community,
                                             privilege=PrivilegeCodes.OWNER, grantor=owner)
            elif action == 'remove':
                print("removing {} as owner of {} (id={})"
                      .format(owner.username, community.name, str(community.id)))
                UserCommunityPrivilege.unshare(user=owner, community=community, grantor=owner)
            else:
                print("unknown owner action '{}'".format(action))
                usage()
                exit(1)

        elif command == 'group':
            # at this point, community must exist
            community = community_from_name_or_id(cname)
            if community is None:
                usage()
                exit(1)
            # not specifying a group should list groups
            if len(options['command']) < 3:
                # BUGFIX: supply the missing .format() argument; the header
                # previously printed a literal '{}'.
                print("Community '{}' groups:".format(community.name))
                for gcp in GroupCommunityPrivilege.objects.filter(community=community):
                    if gcp.privilege == PrivilegeCodes.CHANGE:
                        others = "can edit community resources"
                    else:
                        others = "can view community resources"
                    print("    '{}' (grantor {}):".format(gcp.group.name, gcp.grantor.username))
                    print("        {}.".format(others))
                exit(0)
            gname = options['command'][2]
            group = group_from_name_or_id(gname)
            if group is None:
                usage()
                exit(1)
            if len(options['command']) < 4:
                print("community groups: no action specified.")
                usage()
                exit(1)
            action = options['command'][3]

            if action == 'update' or action == 'add':
                # membership is always granted at VIEW privilege
                privilege = PrivilegeCodes.VIEW
                try:
                    print("Updating group '{}' (id={}) status in community '{}' (id={})."
                          .format(gname, str(group.id), cname, str(community.id)))
                    gcp = GroupCommunityPrivilege.objects.get(group=group, community=community)
                    # pass privilege changes through the privilege system to record provenance.
                    if gcp.privilege != privilege or owner != gcp.grantor:
                        GroupCommunityPrivilege.share(group=group, community=community,
                                                      privilege=privilege, grantor=owner)
                    else:
                        print("Group '{}' is already a member of community '{}'"
                              .format(gname, cname))
                except GroupCommunityPrivilege.DoesNotExist:
                    print("Adding group '{}' (id={}) to community '{}' (id={})"
                          .format(gname, str(group.id), cname, str(community.id)))
                    # create the privilege record
                    GroupCommunityPrivilege.share(group=group, community=community,
                                                  privilege=privilege, grantor=owner)

            elif action == 'invite':
                privilege = PrivilegeCodes.VIEW
                try:
                    print("Inviting group '{}' (id={}) to community '{}' (id={})."
                          .format(gname, str(group.id), cname, str(community.id)))
                    gcp = GroupCommunityPrivilege.objects.get(group=group, community=community)
                    # pass privilege changes through the privilege system to record provenance.
                    if gcp.privilege != privilege or owner != gcp.grantor:
                        community_owner = community.first_owner
                        message, _ = GroupCommunityRequest.create_or_update(
                            community=community, requester=community_owner, group=group)
                        print(message)
                    else:
                        print("Group '{}' is already a member of community '{}'"
                              .format(group.name, community.name))
                except GroupCommunityPrivilege.DoesNotExist:
                    print("Adding group '{}' (id={}) to community '{}' (id={})"
                          .format(gname, str(group.id), cname, str(community.id)))
                    community_owner = community.first_owner
                    message, _ = GroupCommunityRequest.create_or_update(
                        community=community, requester=community_owner, group=group)
                    print(message)

            elif action == 'request':
                privilege = PrivilegeCodes.VIEW
                print("Requesting that group '{}' (id={}) join community '{}' (id={})."
                      .format(gname, str(group.id), cname, str(community.id)))
                try:
                    gcp = GroupCommunityPrivilege.objects.get(group=group, community=community)
                    # pass privilege changes through the privilege system to record provenance.
                    if gcp.privilege != privilege or owner != gcp.grantor:
                        group_owner = group.gaccess.first_owner
                        message, _ = GroupCommunityRequest.create_or_update(
                            community=community, requester=group_owner, group=group)
                        print(message)
                    else:
                        print("Group '{}' is already a member of community '{}'"
                              .format(group.name, community.name))
                except GroupCommunityPrivilege.DoesNotExist:
                    group_owner = group.gaccess.first_owner
                    message, _ = GroupCommunityRequest.create_or_update(
                        community=community, requester=group_owner, group=group)
                    # CONSISTENCY: report the outcome like the other paths do;
                    # the message was previously discarded.
                    print(message)

            elif action == 'approve':
                try:
                    gcr = GroupCommunityRequest.objects.get(community=community, group=group)
                except GroupCommunityRequest.DoesNotExist:
                    print("GroupCommunityRequest for community '{}' and group '{}' does not exist."
                          .format(cname, gname))
                    # BUGFIX: stop here; falling through raised NameError on gcr.
                    exit(1)
                if gcr.redeemed:
                    print("request connecting '{}' and '{}' is already redeemed."
                          .format(community.name, group.name))
                    exit(1)
                elif gcr.community_owner is None:
                    community_owner = community.first_owner
                    print("owner '{}' of community '{}' approves request from group '{}'"
                          .format(community_owner.username, cname, gname))
                    message, _ = gcr.approve(responder=community_owner)
                    print(message)
                else:
                    group_owner = group.gaccess.first_owner
                    print("owner '{}' of group '{}' approves invitation from community '{}'"
                          .format(group_owner.username, gname, cname))
                    message, _ = gcr.approve(responder=group_owner)
                    print(message)

            elif action == 'decline':
                try:
                    gcr = GroupCommunityRequest.objects.get(community=community, group=group)
                except GroupCommunityRequest.DoesNotExist:
                    print("GroupCommunityRequest for community '{}' and group '{}' does not exist."
                          .format(cname, gname))
                    # BUGFIX: stop here; falling through raised NameError on gcr.
                    exit(1)
                if gcr.redeemed:
                    print("request connecting '{}' and '{}' is already redeemed."
                          .format(community.name, group.name))
                    exit(1)
                elif gcr.community_owner is None:
                    community_owner = community.first_owner
                    print("owner '{}' of community '{}' declines request from group '{}'"
                          .format(community_owner.username, cname, gname))
                    message, _ = gcr.decline(responder=community_owner)
                    print(message)
                else:
                    group_owner = group.gaccess.first_owner
                    print("owner '{}' of group '{}' declines invitation from community '{}'"
                          .format(group_owner.username, gname, cname))
                    message, _ = gcr.decline(responder=group_owner)
                    print(message)

            elif action == 'remove':
                print("removing group '{}' (id={}) from community '{}' (id={})"
                      .format(group.name, str(group.id), community.name, str(community.id)))
                GroupCommunityPrivilege.unshare(group=group, community=community, grantor=owner)

            else:
                print("unknown group command '{}'.".format(action))
                usage()
                exit(1)

        elif command == 'banner':
            # upload a banner
            community = community_from_name_or_id(cname)
            if community is None:
                usage()
                exit(1)
            if len(options['command']) > 2:
                pname = options['command'][2]
                nname = os.path.basename(pname)
                # BUGFIX: close the banner file after saving (was leaked).
                with open(pname, 'rb') as banner_file:
                    community.picture.save(nname, File(banner_file))
            else:
                print("no file name given for banner image")
                usage()
                exit(1)

        else:
            print("unknown command '{}'.".format(command))
            usage()
            exit(1)
| |
"""Database support module for the benchbuild study."""
import logging
from sqlalchemy.exc import IntegrityError
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
def validate(func):
    """Decorator that skips *func* when the given run already failed.

    The wrapped function is invoked as ``func(run, session, *args,
    **kwargs)``; if ``run.status == 'failed'`` the call is cancelled and
    ``None`` is returned instead.

    Args:
        func: A persistence function taking ``(run, session, ...)``.

    Returns:
        The wrapped function, with the original's metadata preserved.
    """
    from functools import wraps

    @wraps(func)  # keep __name__/__doc__ of the wrapped function intact
    def validate_run_func(run, session, *args, **kwargs):
        if run.status == 'failed':
            LOG.debug("Run failed. Execution of '%s' cancelled", str(func))
            return None
        return func(run, session, *args, **kwargs)
    return validate_run_func
def create_run(cmd, project, exp, grp):
    """
    Create a new 'run' in the database.

    This creates a new transaction in the database and creates a new
    run in this transaction. Afterwards we return both the transaction as
    well as the run itself. The user is responsible for committing it when
    the time comes.

    Args:
        cmd: The command that has been executed.
        project: The project this run belongs to.
        exp: The experiment this run belongs to.
        grp: The run_group (uuid) we belong to.

    Returns:
        The inserted tuple representing the run and the session opened with
        the new run. Don't forget to commit it at some point.
    """
    # Imported lazily so the schema (and DB engine) is only set up on use.
    from benchbuild.utils import schema as s
    session = s.Session()
    run = s.Run(command=str(cmd),
                project_name=project.name,
                project_group=project.group,
                experiment_name=exp.name,
                run_group=str(grp),
                experiment_group=exp.id)
    session.add(run)
    # Commit immediately so the run gets its database id assigned.
    session.commit()
    return (run, session)
def create_run_group(prj, experiment):
    """
    Open a new 'run_group' in the database.

    A fresh session is opened, a run_group tied to the project's current
    run UUID and the given experiment is inserted and committed, and both
    objects are handed back. The caller is responsible for committing any
    further changes on the returned session.

    Args:
        prj: The project for which we open the run_group.
        experiment: The experiment this group belongs to.

    Returns:
        A tuple (group, session) containing the newly created run_group
        and its transaction object.
    """
    from benchbuild.utils import schema as s

    session = s.Session()
    run_group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)
    session.add(run_group)
    session.commit()
    return (run_group, session)
def persist_project(project):
    """
    Persist this project in the benchbuild database.

    Inserts a new Project row when none matches the project's name and
    group, otherwise updates the existing row(s) in place.

    Args:
        project: The project we want to persist.

    Returns:
        A tuple (query, session) of the matching-project query and the
        open transaction (already committed).
    """
    from benchbuild.utils.schema import Project, Session

    session = Session()
    matches = session.query(Project) \
        .filter(Project.name == project.name) \
        .filter(Project.group_name == project.group)

    # Source URI is optional on project objects.
    try:
        source_url = project.src_uri
    except AttributeError:
        source_url = 'unknown'

    # One value mapping serves both the insert and the update path.
    values = {
        "name": project.name,
        "description": project.__doc__,
        "src_url": source_url,
        "domain": str(project.domain),
        "group_name": str(project.group),
        "version": str(project.variant)
    }

    if matches.count() == 0:
        entry = Project()
        for attr, value in values.items():
            setattr(entry, attr, value)
        session.add(entry)
    else:
        matches.update(values)

    session.commit()
    return (matches, session)
def persist_experiment(experiment):
    """
    Persist this experiment in the benchbuild database.

    Creates the experiment row if it does not exist yet, otherwise updates
    its name and description. On an IntegrityError (e.g. a concurrent
    insert of the same experiment id) the transaction is rolled back and
    the whole operation is retried.

    Args:
        experiment: The experiment we want to persist.

    Returns:
        A tuple (experiment_row, session).
    """
    from benchbuild.utils.schema import Experiment, Session

    session = Session()
    cfg_exp = experiment.id
    LOG.debug("Using experiment ID stored in config: %s", cfg_exp)
    exps = session.query(Experiment).filter(Experiment.id == cfg_exp)
    desc = str(CFG["experiment_description"])
    name = experiment.name

    if exps.count() == 0:
        newe = Experiment()
        newe.id = cfg_exp
        newe.name = name
        newe.description = desc
        session.add(newe)
        ret = newe
    else:
        exps.update({'name': name, 'description': desc})
        ret = exps.first()
    try:
        session.commit()
    except IntegrityError:
        session.rollback()
        # BUGFIX: return the retry's result. Previously the recursive call's
        # result was discarded and the stale tuple from the rolled-back
        # session was returned.
        return persist_experiment(experiment)
    return (ret, session)
@validate
def persist_time(run, session, timings):
    """
    Persist the run results in the database.

    Each timing triple is stored as three Metric rows: user, system and
    real time in seconds.

    Args:
        run: The run we attach this timing results to.
        session: The db transaction we belong to.
        timings: The timing measurements we want to store.
    """
    from benchbuild.utils import schema as s

    metric_names = ("time.user_s", "time.system_s", "time.real_s")
    for timing in timings:
        for position, metric in enumerate(metric_names):
            session.add(s.Metric(name=metric, value=timing[position],
                                 run_id=run.id))
def persist_perf(run, session, svg_path):
    """
    Persist the flamegraph in the database.

    The flamegraph exists as a SVG image on disk; its contents are read
    and stored as a Metadata row attached to the run.

    Args:
        run: The run we attach these perf measurements to.
        session: The db transaction we belong to.
        svg_path: The path to the SVG file we want to store.
    """
    from benchbuild.utils import schema as s

    with open(svg_path, 'r') as svg_file:
        flamegraph_svg = svg_file.read()
        session.add(
            s.Metadata(name="perf.flamegraph", value=flamegraph_svg,
                       run_id=run.id))
def persist_compilestats(run, session, stats):
    """
    Persist the run results in the database.

    Every stat record is tagged with the run's id and staged in the
    session; committing is left to the caller.

    Args:
        run: The run we attach the compilestats to.
        session: The db transaction we belong to.
        stats: The stats we want to store in the database.
    """
    for entry in stats:
        entry.run_id = run.id
        session.add(entry)
def persist_config(run, session, cfg):
    """
    Persist the configuration in as key-value pairs.

    One Config row is staged per configuration entry; committing is left
    to the caller.

    Args:
        run: The run we attach the config to.
        session: The db transaction we belong to.
        cfg: The configuration we want to persist.
    """
    from benchbuild.utils import schema as s

    for key in cfg:
        session.add(s.Config(name=key, value=cfg[key], run_id=run.id))
| |
"""Adds LRU cache management and automatic data timeout to Python's Shelf.
Classes:
LRUShelf: A shelf with LRU cache management.
TimeoutShelf: A shelf with automatic data timeout features.
LRUTimeoutShelf: A shelf with LRU cache management and data timeout.
Functions:
open: Open a database file as a persistent dictionary.
"""
from collections import deque
from shelve import Shelf
import sys
from time import time
# True when running under Python 3; used to add a Python-2-only keys()
# override in TimeoutShelf.
is_py3 = sys.version_info[0] > 2
# Default maximum number of entries kept by the LRU mixin.
DEFAULT_MAXSIZE = 300
# Default data expiry for the timeout mixin, in seconds.
DEFAULT_TIMEOUT = 300  # 5 minutes
class _LRUMixin(object):
"""Adds LRU cache management to containers, e.g. :class:`~shelve.Shelf`.
This mixin will keep a container under a given size by discarding the
least recently used items when the container overflows.
.. NOTE::
The queue that keeps track of which keys are the least recently used
is not stored in the container itself. This means that even if the
container is persistent, the LRU queue will not persist with the data.
For this mixin to work well, all dict methods that involve setting a key,
getting a value, or deleting a key need to be routed through this class'
:meth:`__setitem__`, :meth:`__getitem__`, and :meth:`__delitem__`. The
built-in dict class won't do this by default, so it is better to inherit
from UserDict if you want to make a custom dictionary. If you subclass
dict, you might want to also inherit from
:class:`~collections.abc.MutableMapping` so the _LRUMixin will work
properly. Otherwise, you will need to manually code methods such as
``update()``, ``copy()``, ``keys()``, ``values()``, etc. So, it's best to
stick with :class:`~collections.abc.MutableMapping` or
:class:`~collections.UserDict` if possible.
"""
def __init__(self, *args, **kwargs):
"""Initialize LRU size management for a container.
Keyword arguments:
maxsize: The maximum size the container should be. Defaults to
module-level DEFAULT_MAXSIZE.
"""
self.maxsize = kwargs.get('maxsize', DEFAULT_MAXSIZE)
if 'maxsize' in kwargs:
del kwargs['maxsize']
if self.maxsize is None:
raise TypeError("maxsize must be a non-negative integer")
super(_LRUMixin, self).__init__(*args, **kwargs)
self._queue = deque() # create a queue of keys
for key in list(self.keys()): # populate queue with existing keys
self._remove_add_key(key)
def _remove_add_key(self, key):
"""Move a key to the end of the linked list and discard old entries."""
if not hasattr(self, '_queue'):
return # haven't initialized yet, so don't bother
if key in self._queue:
self._queue.remove(key)
self._queue.append(key)
if self.maxsize == 0:
return
while len(self._queue) > self.maxsize:
del self[self._queue[0]]
def __getitem__(self, key):
value = super(_LRUMixin, self).__getitem__(key)
self._remove_add_key(key)
return value
def __setitem__(self, key, value):
super(_LRUMixin, self).__setitem__(key, value)
self._remove_add_key(key)
def __delitem__(self, key):
super(_LRUMixin, self).__delitem__(key)
if hasattr(self, '_queue'):
self._queue.remove(key)
class _TimeoutMixin(object):
    """A mixin that adds automatic data timeout to mapping containers.

    If you try to access an expired key, a KeyError will be raised, just like
    when you try to access a non-existent key.

    For this mixin to work well, all dict methods that involve setting a key,
    getting a value, deleting a key, iterating over the container, or getting
    the length or formal representation need to be routed through this class'
    :meth:`__setitem__`, :meth:`__getitem__`, :meth:`__delitem__`,
    :meth:`__iter__`, :meth:`__len__`, and :meth:`__repr__`. The built-in dict
    class won't do this by default, so it is better to inherit from
    :class:`~collections.UserDict` if you want to make a custom dictionary. If
    you subclass dict, you might want to also inherit from
    :class:`~collections.abc.MutableMapping` so the _TimeoutMixin will work
    properly. Otherwise, you will need to manually code methods such as
    ``update()``, ``copy()``, ``keys()``, ``values()``, etc. So, it's
    best to stick with :class:`~collections.abc.MutableMapping` or
    :class:`~collections.UserDict` if possible.

    Attributes:
        timeout: The default timeout value in seconds.
            A zero means that keys won't timeout by default.
        _index: The timeout index mapping (maps keys to timeout values).
        _INDEX: The key name used for the timeout index.
    """

    #: The timeout index key name. This key is considered protected and access
    #: to it is blocked.
    _INDEX = 'f1dd04ff3d4d9adfabd43a3f9fda9b4b78302b21'

    def __init__(self, *args, **kwargs):
        """Initialize the timeout features of the mapping container.

        After calling the base class' __init__() method, the timeout index
        is read from the container or created if it doesn't exist. Then, any
        existing expired values are deleted.

        Keyword arguments:
            timeout: The default timeout value in seconds to use. If
                not present, the module-level constant timeout value
                is used.
        """
        self.timeout = kwargs.get('timeout', DEFAULT_TIMEOUT)
        if 'timeout' in kwargs:
            del kwargs['timeout']
        if self.timeout is None:
            # Explicit None is rejected; "never expire" is spelled timeout=0.
            raise TypeError("timeout must be a non-negative integer")
        super(_TimeoutMixin, self).__init__(*args, **kwargs)
        try:
            # Reload a previously persisted timeout index if present.
            self._index = super(_TimeoutMixin, self).__getitem__(self._INDEX)
        except KeyError:
            self._index = {}
            super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        else:
            for key in self:
                pass  # Force keys to expire using __iter__().

    def _is_expired(self, key):
        """Check if a key is expired. If so, delete the key."""
        if not hasattr(self, '_index'):
            return False  # haven't initialized yet, so don't bother
        try:
            timeout = self._index[key]
        except KeyError:
            # Key unknown to the index (e.g. written before __init__
            # finished): adopt it now with the default deadline instead of
            # treating it as expired.
            if self.timeout:
                self._index[key] = int(time() + self.timeout)
            else:
                self._index[key] = None  # None marks "never expires"
            return False
        if timeout is None or timeout >= time():
            return False
        del self[key]  # key expired, so delete it from container
        return True

    def __getitem__(self, key):
        if key == self._INDEX:
            raise KeyError("cannot access protected key '%s'" % self._INDEX)
        try:
            if not self._is_expired(key):
                return super(_TimeoutMixin, self).__getitem__(key)
        except KeyError:
            pass
        # Expired and missing keys are indistinguishable to callers.
        raise KeyError(key)

    def set(self, key, func, *args, **kwargs):
        """Return key's value if it exists, otherwise call given function.

        :param key: The key to lookup/set.
        :param func: A function to use if the key doesn't exist.

        All other arguments and keyword arguments are passed to *func*.
        """
        if key in self:
            return self[key]
        self[key] = value = func(*args, **kwargs)
        return value

    def settimeout(self, key, value, timeout):
        """Set a key with a timeout value (in seconds).

        :meth:`settimeout` is used to override the shelf's timeout value.

        :param timeout: The timeout value in seconds for the given key.
            ``0`` means that the key will never expire.
        :type timeout: integer
        """
        self[key] = value
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        # Overwrite the default deadline that __setitem__ just recorded.
        self._index[key] = int(time() + timeout) if timeout else None

    def __setitem__(self, key, value):
        if key == self._INDEX:
            raise TypeError("reserved key name '%s'" % self._INDEX)
        super(_TimeoutMixin, self).__setitem__(key, value)
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        self._index[key] = int(time() + self.timeout) if self.timeout else None

    def __delitem__(self, key):
        if key == self._INDEX:
            raise KeyError("cannot delete protected key '%s'" % self._INDEX)
        super(_TimeoutMixin, self).__delitem__(key)
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        del self._index[key]

    def __iter__(self):
        # Hide the index entry and purge expired keys as a side effect.
        for key in super(_TimeoutMixin, self).__iter__():
            if key == self._INDEX:
                continue
            if not self._is_expired(key):
                yield key

    def __contains__(self, key):
        """Hide the timeout index from __contains__."""
        if key == self._INDEX:
            return False
        return super(_TimeoutMixin, self).__contains__(key)

    def __len__(self):
        """Hide the timeout index from the object's length."""
        # NOTE(review): expired-but-not-yet-purged keys are still counted.
        return super(_TimeoutMixin, self).__len__() - 1

    def __repr__(self):
        """Remove the timeout index from the object representation."""
        for key in self:  # delete expired data via __iter__()
            pass
        # Temporarily pop the index so the base repr doesn't show it, then
        # restore it afterwards — the order of these three lines matters.
        super(_TimeoutMixin, self).__delitem__(self._INDEX)  # hide the index
        _repr = super(_TimeoutMixin, self).__repr__()
        super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        return _repr

    def sync(self):
        """Sync the timeout index entry with the shelf.

        NOTE(review): assumes Shelf internals (``writeback``, ``cache``,
        ``dict``) — this mixin is shelf-specific despite its generic name.
        """
        if self.writeback and self.cache:
            # Drop the index before flushing the writeback cache so a stale
            # cached copy cannot clobber the live one, then re-add it with
            # writeback temporarily disabled.
            super(_TimeoutMixin, self).__delitem__(self._INDEX)
            super(_TimeoutMixin, self).sync()
            self.writeback = False
            super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
            self.writeback = True
        if hasattr(self.dict, 'sync'):
            self.dict.sync()

    def __del__(self):
        """Sync timeout index when object is deleted.

        NOTE(review): relies on the base class providing ``__del__``
        (Python 2's Shelf does); confirm behavior on Python 3.
        """
        super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        super(_TimeoutMixin, self).__del__()

    def __exit__(self, *exc_info):
        """Sync timeout index on exit."""
        self.sync()
        super(_TimeoutMixin, self).__exit__(*exc_info)
class _NewOldMixin(object):
"""Makes certain dict methods follow MRO to the container."""
def __init__(self, *args, **kwargs):
self._class = kwargs.pop('old_class')
self._class.__init__(self, *args, **kwargs)
def __getitem__(self, key):
return self._class.__getitem__(self, key)
def __setitem__(self, key, value):
return self._class.__setitem__(self, key, value)
def __delitem__(self, key):
return self._class.__delitem__(self, key)
def __iter__(self):
return self._class.__iter__(self)
def __len__(self):
return self._class.__len__(self)
class LRUShelf(_LRUMixin, _NewOldMixin, Shelf):
    """A :class:`~shelve.Shelf` with LRU cache management.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """
    def __init__(self, *args, **kwargs):
        """Initialize LRU cache management.

        :param maxsize: The maximum size the container is allowed to grow to.
            ``0`` means that no size limit is enforced.
        :type maxsize: integer
        """
        # old_class=Shelf makes _NewOldMixin route terminal mapping
        # operations to Shelf itself.
        super(LRUShelf, self).__init__(*args, old_class=Shelf, **kwargs)
class TimeoutShelf(_TimeoutMixin, _NewOldMixin, Shelf):
    """A :class:`~shelve.Shelf` whose entries expire automatically.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """

    def __init__(self, *args, **kwargs):
        """Create the shelf and set up the data timeout index.

        :param timeout: The default timeout value for data (in seconds).
            ``0`` means that the data never expires.
        :type timeout: integer
        """
        # old_class=Shelf makes _NewOldMixin route terminal mapping
        # operations to Shelf itself.
        super(TimeoutShelf, self).__init__(*args, old_class=Shelf, **kwargs)

    if not is_py3:
        def keys(self):
            """Return unexpired keys, hiding the internal timeout index.

            Expired keys are purged from the shelf as a side effect.
            """
            _keys = self.dict.keys()
            if self._INDEX in _keys:
                _keys.remove(self._INDEX)
            return [key for key in _keys if not self._is_expired(key)]
class LRUTimeoutShelf(_LRUMixin, TimeoutShelf):
    """A :class:`~shelve.Shelf` with LRU cache management and data timeout.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """
    def __init__(self, *args, **kwargs):
        """Initialize LRU cache management and data timeout index.

        :param maxsize: The maximum size the container is allowed to grow to.
            ``0`` means that no size limit is enforced.
        :type maxsize: integer
        :param timeout: The default timeout value for data (in seconds). ``0``
            means that the data never expires.
        :type timeout: integer
        """
        # TimeoutShelf supplies old_class=Shelf; this just chains both
        # mixins' keyword handling.
        super(LRUTimeoutShelf, self).__init__(*args, **kwargs)
def open(filename, flag='c', protocol=None, writeback=False,
         maxsize=DEFAULT_MAXSIZE, timeout=DEFAULT_TIMEOUT):
    """Open a database file as a persistent dictionary.

    The persistent dictionary file is opened using :func:`dbm.open`, so
    performance will depend on which :mod:`dbm` modules are installed.
    :func:`open` chooses to open a :class:`Shelf <shelve.Shelf>`,
    :class:`LRUShelf`, :class:`TimeoutShelf`, or :class:`LRUTimeoutShelf`
    depending on the values of keyword arguments *maxsize* and *timeout*.
    A :data:`None` value for *maxsize* and *timeout* will disable the LRU
    cache management and automatic data timeout features respectively.

    :param filename: The base filename for the underlying database that is
        passed to :func:`dbm.open`.
    :param flag: The flag to pass to :func:`dbm.open`.
    :param protocol: The pickle protocol to pass to :func:`pickle.dump`.
    :param writeback: Whether or not to write back all accessed entries on
        :meth:`Shelf.sync <shelve.Shelf.sync>` and
        :meth:`Shelf.close <shelve.Shelf.close>`
    :type writeback: bool
    :param maxsize: The maximum size the container is allowed to grow to.
        ``0`` means that no size limit is enforced. :data:`None` means that
        LRU cache management is disabled.
    :type maxsize: integer or :data:`None`
    :param timeout: The default timeout value for data (in seconds). ``0``
        means that the data never expires. :data:`None` means that automatic
        timeout features will be disabled.
    :type timeout: integer or :data:`None`
    :return: A shelf
    :rtype: :class:`~shelve.Shelf`, :class:`LRUShelf`, :class:`TimeoutShelf`,
        or :class:`LRUTimeoutShelf`
    """
    import dbm  # local import: only needed when open() is actually used

    # FIX: don't shadow the builtin `dict` with the dbm handle.
    db = dbm.open(filename, flag)
    if maxsize is None and timeout is None:
        return Shelf(db, protocol, writeback)
    if maxsize is None:
        return TimeoutShelf(db, protocol, writeback, timeout=timeout)
    if timeout is None:
        return LRUShelf(db, protocol, writeback, maxsize=maxsize)
    return LRUTimeoutShelf(db, protocol, writeback, timeout=timeout,
                           maxsize=maxsize)
| |
"""
These tests were brought over from UrbanSim.
"""
from __future__ import division
import os.path
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from patsy import dmatrix
from choicemodels import mnl
@pytest.fixture
def num_alts():
    # Number of alternatives per choice set in the bundled test data
    # (4 fishing modes / 4 travel modes).
    return 4
@pytest.fixture(scope='module', params=[
    # Each param tuple is:
    # (data csv, choosers csv, patsy formula, choice column,
    #  expected estimation coefficients, expected simulation probabilities)
    ('fish.csv',
     'fish_choosers.csv',
     'price + catch - 1',
     'mode',
     pd.Series([-0.02047652, 0.95309824], index=['price', 'catch']),
     pd.DataFrame([
         [0.2849598, 0.2742482, 0.1605457, 0.2802463],
         [0.1498991, 0.4542377, 0.2600969, 0.1357664]],
         columns=['beach', 'boat', 'charter', 'pier'])),
    ('fish.csv',
     'fish_choosers.csv',
     'price:income + catch:income + catch * price - 1',
     'mode',
     pd.Series([
         9.839876e-01, -2.659466e-02, 6.933946e-07, -1.324231e-04,
         7.646750e-03],
         index=[
             'catch', 'price', 'price:income', 'catch:income', 'catch:price']),
     pd.DataFrame([
         [0.2885868, 0.2799776, 0.1466286, 0.2848070],
         [0.1346205, 0.4855238, 0.2593983, 0.1204575]],
         columns=['beach', 'boat', 'charter', 'pier'])),
    ('travel_mode.csv',
     'travel_choosers.csv',
     'wait + travel + vcost - 1',
     'choice',
     pd.Series([
         -0.033976668, -0.002192951, 0.008890669],
         index=['wait', 'travel', 'vcost']),
     pd.DataFrame([
         [0.2776876, 0.1584818, 0.1049530, 0.4588777],
         [0.1154490, 0.1653297, 0.1372684, 0.5819528]],
         columns=['air', 'train', 'bus', 'car'])),
    ('travel_mode.csv',
     'travel_choosers.csv',
     'wait + travel + income:vcost + income:gcost - 1',
     'choice',
     pd.Series([
         -3.307586e-02, -2.518762e-03, 1.601746e-04, 3.745822e-05],
         index=['wait', 'travel', 'income:vcost', 'income:gcost']),
     pd.DataFrame([
         [0.2862046, 0.1439074, 0.1044490, 0.4654390],
         [0.1098313, 0.1597317, 0.1344395, 0.5959975]],
         columns=['air', 'train', 'bus', 'car']))])
def test_data(request):
    """Unpack one parametrized scenario into a dict keyed by role."""
    data, choosers, form, col, est_expected, sim_expected = request.param
    return {
        'data': data,
        'choosers': choosers,
        'formula': form,
        'column': col,
        'est_expected': est_expected,
        'sim_expected': sim_expected
    }
def get_df(request):
    """Load the estimation table named by ``request['data']`` from the
    test ``data`` directory next to this module."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return pd.read_csv(os.path.join(data_dir, request['data']))
@pytest.fixture
def df(test_data):
    """Estimation data loaded for the current scenario."""
    return get_df(test_data)
def get_choosers(request):
    """Load the choosers table named by ``request['choosers']`` from the
    test ``data`` directory next to this module."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return pd.read_csv(os.path.join(data_dir, request['choosers']))
@pytest.fixture
def choosers(test_data):
    """Chooser data loaded for the current scenario."""
    return get_choosers(test_data)
def get_chosen(df, num_alts, request):
    """
    Build the (num_choosers, num_alts) 0/1 matrix of observed choices.

    Parameters
    ----------
    df : pandas.DataFrame
        Long-format table with one row per (chooser, alternative) pair.
    num_alts : int
        Number of alternatives per chooser; ``len(df)`` is assumed to be a
        multiple of this.
    request : mapping
        Provides ``'column'``, the name of the choice indicator column.

    Returns
    -------
    numpy.ndarray of int, shape (len(df) // num_alts, num_alts)
    """
    # Integer floor division avoids the float round-trip that
    # ``int(len(df) / num_alts)`` performed under
    # ``from __future__ import division``; the result is identical
    # whenever len(df) is a multiple of num_alts.
    return df[request['column']].values.astype('int').reshape(
        (len(df) // num_alts, num_alts))
@pytest.fixture
def chosen(df, num_alts, test_data):
    """Observed-choice matrix for the current scenario."""
    return get_chosen(df, num_alts, test_data)
@pytest.fixture
def dm(df, test_data):
    """Patsy design matrix for the estimation data."""
    return dmatrix(test_data['formula'], data=df, return_type='dataframe')
@pytest.fixture
def choosers_dm(choosers, test_data):
    """Patsy design matrix for the chooser data."""
    return dmatrix(
        test_data['formula'], data=choosers, return_type='dataframe')
@pytest.fixture
def fit_coeffs(dm, chosen, num_alts):
    """Coefficient vector from fitting the MNL model on the scenario data."""
    _, fit = mnl.mnl_estimate(dm.values, chosen, num_alts)
    return fit.Coefficient.values
def test_mnl_estimate(dm, chosen, num_alts, test_data):
    """Estimated coefficients should match the scenario's expected values."""
    _, fit = mnl.mnl_estimate(dm.values, chosen, num_alts)
    estimated = pd.Series(fit.Coefficient.values, index=dm.columns)
    estimated, expected = estimated.align(test_data['est_expected'])
    npt.assert_allclose(estimated.values, expected.values, rtol=1e-4)
# def test_mnl_simulate(dm, fit_coeffs, num_alts, test_data, choosers_dm):
# # check that if all the alternatives have the same numbers
# # we get an even probability distribution
# data = np.array(
# [[10 ** (x + 1) for x in range(len(dm.columns))]] * num_alts)
#
# probs = mnl.mnl_simulate(
# data, fit_coeffs, num_alts, returnprobs=True)
#
# npt.assert_allclose(probs, [[1 / num_alts] * num_alts])
#
# # now test with real data
# probs = mnl.mnl_simulate(
# choosers_dm.values, fit_coeffs, num_alts, returnprobs=True)
# results = pd.DataFrame(probs, columns=test_data['sim_expected'].columns)
# results, expected = results.align(test_data['sim_expected'])
# npt.assert_allclose(results.values, expected.values, rtol=1e-4)
def test_alternative_specific_coeffs(num_alts):
    """Estimate an MNL with alternative-specific intercept and income
    columns (built by hand rather than with patsy) and compare the fitted
    coefficients against known-good values."""
    # One row per alternative; three dummy columns, the first alternative
    # getting all zeros.
    template = np.array(
        [[0, 0, 0],
         [1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]])
    fish = get_df({'data': 'fish.csv'})
    fish_choosers = get_choosers({'choosers': 'fish_choosers.csv'})
    fish_chosen = get_chosen(fish, num_alts, {'column': 'mode'})
    # construct design matrix with columns repeated for 3 / 4 of alts
    num_choosers = len(fish['chid'].unique())
    intercept_df = pd.DataFrame(
        np.tile(template, (num_choosers, 1)),
        columns=[
            'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)'])
    income_df = pd.DataFrame(
        np.tile(template, (num_choosers, 1)),
        columns=[
            'boat:income', 'charter:income', 'pier:income'])
    # scale each row's dummies by that observation's income
    for idx, row in fish.iterrows():
        income_df.loc[idx] = income_df.loc[idx] * row['income']
    dm = pd.concat([intercept_df, income_df], axis=1)
    # construct choosers design matrix
    num_choosers = len(fish_choosers['chid'].unique())
    intercept_df = pd.DataFrame(
        np.tile(template, (num_choosers, 1)),
        columns=[
            'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)'])
    income_df = pd.DataFrame(
        np.tile(template, (num_choosers, 1)),
        columns=[
            'boat:income', 'charter:income', 'pier:income'])
    for idx, row in fish_choosers.iterrows():
        income_df.loc[idx] = income_df.loc[idx] * row['income']
    choosers_dm = pd.concat([intercept_df, income_df], axis=1)
    # test estimation
    expected = pd.Series([
        7.389208e-01, 1.341291e+00, 8.141503e-01, 9.190636e-05,
        -3.163988e-05, -1.434029e-04],
        index=[
            'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)',
            'boat:income', 'charter:income', 'pier:income'])
    log_like, fit = mnl.mnl_estimate(dm.values, fish_chosen, num_alts)
    result = pd.Series(fit.Coefficient.values, index=dm.columns)
    result, expected = result.align(expected)
    npt.assert_allclose(result.values, expected.values, rtol=1e-4)
    # # test simulation
    # expected = pd.DataFrame([
    #     [0.1137676, 0.2884583, 0.4072931, 0.190481],
    #     [0.1153440, 0.3408657, 0.3917253, 0.152065]],
    #     columns=['beach', 'boat', 'charter', 'pier'])
    #
    # fit_coeffs = fit.Coefficient.values
    # probs = mnl.mnl_simulate(
    #     choosers_dm.values, fit_coeffs, num_alts, returnprobs=True)
    # results = pd.DataFrame(probs, columns=expected.columns)
    # results, expected = results.align(expected)
    # npt.assert_allclose(results.values, expected.values, rtol=1e-4)
| |
"""
Base and utility classes for tseries type pandas objects.
"""
import warnings
from datetime import datetime, timedelta
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.tools.timedeltas import to_timedelta
import numpy as np
from pandas.core.dtypes.common import (
is_integer, is_float,
is_bool_dtype, _ensure_int64,
is_scalar, is_dtype_equal,
is_list_like)
from pandas.core.dtypes.generic import (
ABCIndex, ABCSeries,
ABCPeriodIndex, ABCIndexClass)
from pandas.core.dtypes.missing import isna
from pandas.core import common as com, algorithms
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.common import AbstractMethodError
import pandas.io.formats.printing as printing
from pandas._libs import lib, iNaT, NaT
from pandas._libs.period import Period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.tseries.frequencies as frequencies
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
class DatelikeOps(object):
    """ common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex """

    def strftime(self, date_format):
        # self.format() returns a list of strings; expose it as an ndarray
        # of the platform's text type.
        return np.asarray(self.format(date_format=date_format),
                          dtype=compat.text_type)
    # Docstring is assigned dynamically so the doc URL is interpolated once.
    strftime.__doc__ = """
    Return an array of formatted strings specified by date_format, which
    supports the same string format as the python standard library. Details
    of the string format can be found in `python string format doc <{0}>`__

    Parameters
    ----------
    date_format : str
        date format string (e.g. "%Y-%m-%d")

    Returns
    -------
    ndarray of formatted strings
    """.format("https://docs.python.org/2/library/datetime.html"
               "#strftime-and-strptime-behavior")
class TimelikeOps(object):
    """ common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex """

    # Template docstring shared by round/floor/ceil via Appender.
    _round_doc = (
        """
        %s the index to the specified freq

        Parameters
        ----------
        freq : freq string/object

        Returns
        -------
        index of same type

        Raises
        ------
        ValueError if the freq cannot be converted
        """)

    def _round(self, freq, rounder):
        # Shared implementation for round/floor/ceil: convert the index to
        # i8 nanosecond values, snap them to multiples of the frequency's
        # nanosecond unit using ``rounder``, and rebuild the index.
        from pandas.tseries.frequencies import to_offset
        unit = to_offset(freq).nanos
        # round the local times
        values = _ensure_datetimelike_to_i8(self)
        if unit < 1000 and unit % 1000 != 0:
            # for nano rounding, work with the last 6 digits separately
            # due to float precision
            buff = 1000000
            result = (buff * (values // buff) + unit *
                      (rounder((values % buff) / float(unit))).astype('i8'))
        elif unit >= 1000 and unit % 1000 != 0:
            # unit >= 1us but not a whole multiple of 1000ns: float
            # rounding of the full i8 values can lose precision, so warn.
            msg = 'Precision will be lost using frequency: {}'
            warnings.warn(msg.format(freq))
            result = (unit * rounder(values / float(unit)).astype('i8'))
        else:
            result = (unit * rounder(values / float(unit)).astype('i8'))
        # restore NaT positions, then drop freq/tz before re-localizing
        result = self._maybe_mask_results(result, fill_value=NaT)
        attribs = self._get_attributes_dict()
        if 'freq' in attribs:
            attribs['freq'] = None
        if 'tz' in attribs:
            attribs['tz'] = None
        return self._ensure_localized(
            self._shallow_copy(result, **attribs))

    @Appender(_round_doc % "round")
    def round(self, freq, *args, **kwargs):
        return self._round(freq, np.round)

    @Appender(_round_doc % "floor")
    def floor(self, freq):
        return self._round(freq, np.floor)

    @Appender(_round_doc % "ceil")
    def ceil(self, freq):
        return self._round(freq, np.ceil)
class DatetimeIndexOpsMixin(object):
    """ common ops mixin to support a unified interface datetimelike Index """

    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if self.is_(other):
            return True
        if not isinstance(other, ABCIndexClass):
            return False
        elif not isinstance(other, type(self)):
            # try to coerce list-likes etc. to our own type before comparing
            try:
                other = type(self)(other)
            except Exception:
                return False
        if not is_dtype_equal(self.dtype, other.dtype):
            # have different timezone
            return False
        # ToDo: Remove this when PeriodDtype is added
        elif isinstance(self, ABCPeriodIndex):
            if not isinstance(other, ABCPeriodIndex):
                return False
            if self.freq != other.freq:
                return False
        return np.array_equal(self.asi8, other.asi8)

    def __iter__(self):
        # lazily box each underlying i8 value into its scalar type
        return (self._box_func(v) for v in self.asi8)

    @staticmethod
    def _join_i8_wrapper(joinf, dtype, with_indexers=True):
        """ create the join wrapper methods """

        @staticmethod
        def wrapper(left, right):
            # joins operate on the raw i8 values
            if isinstance(left, (np.ndarray, ABCIndex, ABCSeries)):
                left = left.view('i8')
            if isinstance(right, (np.ndarray, ABCIndex, ABCSeries)):
                right = right.view('i8')
            results = joinf(left, right)
            if with_indexers:
                join_index, left_indexer, right_indexer = results
                join_index = join_index.view(dtype)
                return join_index, left_indexer, right_indexer
            return results

        return wrapper

    def _evaluate_compare(self, other, op):
        """
        We have been called because of a comparison between
        datetimelike aware arrays. numpy >= 1.11 will
        now warn about NaT comparisons
        """
        # coerce to a similar object
        if not isinstance(other, type(self)):
            if not is_list_like(other):
                # scalar
                other = [other]
            elif is_scalar(lib.item_from_zerodim(other)):
                # ndarray scalar
                other = [other.item()]
            other = type(self)(other)

        # compare
        result = op(self.asi8, other.asi8)

        # technically we could support bool dtyped Index
        # for now just return the indexing array directly
        mask = (self._isnan) | (other._isnan)
        if is_bool_dtype(result):
            # NaT positions always compare False
            result[mask] = False
            return result
        try:
            result[mask] = iNaT
            return Index(result)
        except TypeError:
            return result

    def _ensure_localized(self, result):
        """
        ensure that we are re-localized

        This is for compat as we can then call this on all datetimelike
        indexes generally (ignored for Period/Timedelta)

        Parameters
        ----------
        result : DatetimeIndex / i8 ndarray

        Returns
        -------
        localized DTI
        """
        # reconvert to local tz
        if getattr(self, 'tz', None) is not None:
            if not isinstance(result, ABCIndexClass):
                result = self._simple_new(result)
            result = result.tz_localize(self.tz)
        return result

    @property
    def _box_func(self):
        """
        box function to get object from internal representation
        """
        raise AbstractMethodError(self)

    def _box_values(self, values):
        """
        apply box func to passed values
        """
        return lib.map_infer(values, self._box_func)

    def _format_with_header(self, header, **kwargs):
        # prepend header rows to the formatted native values
        return header + list(self._format_native_types(**kwargs))

    @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
    def __contains__(self, key):
        try:
            res = self.get_loc(key)
            # get_loc may return a scalar, slice or boolean/int array
            return is_scalar(res) or type(res) == slice or np.any(res)
        except (KeyError, TypeError, ValueError):
            return False

    contains = __contains__

    def __getitem__(self, key):
        """
        This getitem defers to the underlying array, which by-definition can
        only handle list-likes, slices, and integer scalars
        """
        is_int = is_integer(key)
        if is_scalar(key) and not is_int:
            raise IndexError("only integers, slices (`:`), ellipsis (`...`), "
                             "numpy.newaxis (`None`) and integer or boolean "
                             "arrays are valid indices")

        getitem = self._data.__getitem__
        if is_int:
            # scalar position: box the single value
            val = getitem(key)
            return self._box_func(val)
        else:
            if com.is_bool_indexer(key):
                # normalize boolean masks to a slice where possible
                key = np.asarray(key)
                if key.all():
                    key = slice(0, None, None)
                else:
                    key = lib.maybe_booleans_to_slice(key.view(np.uint8))

            attribs = self._get_attributes_dict()

            is_period = isinstance(self, ABCPeriodIndex)
            if is_period:
                # PeriodIndex always keeps its freq
                freq = self.freq
            else:
                freq = None
                if isinstance(key, slice):
                    # slicing with a step scales the freq accordingly
                    if self.freq is not None and key.step is not None:
                        freq = key.step * self.freq
                    else:
                        freq = self.freq

            attribs['freq'] = freq

            result = getitem(key)
            if result.ndim > 1:
                # To support MPL which performs slicing with 2 dim
                # even though it only has 1 dim by definition
                if is_period:
                    return self._simple_new(result, **attribs)
                return result

            return self._simple_new(result, **attribs)

    @property
    def freqstr(self):
        """
        Return the frequency object as a string if its set, otherwise None
        """
        if self.freq is None:
            return None
        return self.freq.freqstr

    @cache_readonly
    def inferred_freq(self):
        """
        Tries to return a string representing a frequency guess,
        generated by infer_freq. Returns None if it can't autodetect the
        frequency.
        """
        try:
            return frequencies.infer_freq(self)
        except ValueError:
            return None

    def _nat_new(self, box=True):
        """
        Return Index or ndarray filled with NaT which has the same
        length as the caller.

        Parameters
        ----------
        box : boolean, default True
            - If True returns a Index as the same as caller.
            - If False returns ndarray of np.int64.
        """
        result = np.zeros(len(self), dtype=np.int64)
        result.fill(iNaT)
        if not box:
            return result

        attribs = self._get_attributes_dict()
        if not isinstance(self, ABCPeriodIndex):
            attribs['freq'] = None
        return self._simple_new(result, **attribs)

    # Try to run function on index first, and then on elements of index
    # Especially important for group-by functionality
    def map(self, f):
        try:
            result = f(self)

            # Try to use this result if we can
            if isinstance(result, np.ndarray):
                result = Index(result)

            if not isinstance(result, Index):
                raise TypeError('The map function must return an Index object')
            return result
        except Exception:
            # fall back to element-wise mapping over the boxed values
            return self.asobject.map(f)

    def sort_values(self, return_indexer=False, ascending=True):
        """
        Return sorted copy of Index
        """
        if return_indexer:
            _as = self.argsort()
            if not ascending:
                _as = _as[::-1]
            sorted_index = self.take(_as)
            return sorted_index, _as
        else:
            sorted_values = np.sort(self._values)
            attribs = self._get_attributes_dict()
            freq = attribs['freq']

            # flip the freq sign when the sort direction inverts the order
            if freq is not None and not isinstance(self, ABCPeriodIndex):
                if freq.n > 0 and not ascending:
                    freq = freq * -1
                elif freq.n < 0 and ascending:
                    freq = freq * -1
            attribs['freq'] = freq

            if not ascending:
                sorted_values = sorted_values[::-1]

            return self._simple_new(sorted_values, **attribs)

    @Appender(_index_shared_docs['take'] % _index_doc_kwargs)
    def take(self, indices, axis=0, allow_fill=True,
             fill_value=None, **kwargs):
        nv.validate_take(tuple(), kwargs)
        indices = _ensure_int64(indices)

        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            return self[maybe_slice]

        taken = self._assert_take_fillable(self.asi8, indices,
                                           allow_fill=allow_fill,
                                           fill_value=fill_value,
                                           na_value=iNaT)

        # keep freq in PeriodIndex, reset otherwise
        freq = self.freq if isinstance(self, ABCPeriodIndex) else None
        return self._shallow_copy(taken, freq=freq)

    def get_duplicates(self):
        values = Index.get_duplicates(self)
        return self._simple_new(values)

    # this index family can hold missing values (NaT)
    _can_hold_na = True

    _na_value = NaT
    """The expected NA value to use with this index."""

    @cache_readonly
    def _isnan(self):
        """ return if each value is nan"""
        return (self.asi8 == iNaT)

    @property
    def asobject(self):
        """
        return object Index which contains boxed values

        *this is an internal non-public method*
        """
        from pandas.core.index import Index
        return Index(self._box_values(self.asi8), name=self.name, dtype=object)

    def _convert_tolerance(self, tolerance, target):
        # coerce tolerance to timedelta nanoseconds; broadcastable shapes
        # must match the target index size
        tolerance = np.asarray(to_timedelta(tolerance, box=False))
        if target.size != tolerance.size and tolerance.size > 1:
            raise ValueError('list-like tolerance size must match '
                             'target index size')
        return tolerance

    def _maybe_mask_results(self, result, fill_value=None, convert=None):
        """
        Parameters
        ----------
        result : a ndarray
        convert : string/dtype or None

        Returns
        -------
        result : ndarray with values replace by the fill_value

        mask the result if needed, convert to the provided dtype if its not
        None

        This is an internal routine
        """

        if self.hasnans:
            if convert:
                result = result.astype(convert)

            if fill_value is None:
                fill_value = np.nan

            result[self._isnan] = fill_value
        return result

    def tolist(self):
        """
        return a list of the underlying data
        """
        return list(self.asobject)

    def min(self, axis=None, *args, **kwargs):
        """
        Return the minimum value of the Index or minimum along
        an axis.

        See also
        --------
        numpy.ndarray.min
        """
        nv.validate_min(args, kwargs)
        try:
            i8 = self.asi8

            # quick check
            if len(i8) and self.is_monotonic:
                if i8[0] != iNaT:
                    return self._box_func(i8[0])

            if self.hasnans:
                min_stamp = self[~self._isnan].asi8.min()
            else:
                min_stamp = i8.min()
            return self._box_func(min_stamp)
        except ValueError:
            # e.g. min() of an empty array
            return self._na_value

    def argmin(self, axis=None, *args, **kwargs):
        """
        Returns the indices of the minimum values along an axis.
        See `numpy.ndarray.argmin` for more information on the
        `axis` parameter.

        See also
        --------
        numpy.ndarray.argmin
        """
        nv.validate_argmin(args, kwargs)

        i8 = self.asi8
        if self.hasnans:
            mask = self._isnan
            if mask.all():
                return -1
            # exclude NaT by pushing it to the maximum possible value
            i8 = i8.copy()
            i8[mask] = np.iinfo('int64').max
        return i8.argmin()

    def max(self, axis=None, *args, **kwargs):
        """
        Return the maximum value of the Index or maximum along
        an axis.

        See also
        --------
        numpy.ndarray.max
        """
        nv.validate_max(args, kwargs)
        try:
            i8 = self.asi8

            # quick check
            if len(i8) and self.is_monotonic:
                if i8[-1] != iNaT:
                    return self._box_func(i8[-1])

            if self.hasnans:
                max_stamp = self[~self._isnan].asi8.max()
            else:
                max_stamp = i8.max()
            return self._box_func(max_stamp)
        except ValueError:
            # e.g. max() of an empty array
            return self._na_value

    def argmax(self, axis=None, *args, **kwargs):
        """
        Returns the indices of the maximum values along an axis.
        See `numpy.ndarray.argmax` for more information on the
        `axis` parameter.

        See also
        --------
        numpy.ndarray.argmax
        """
        nv.validate_argmax(args, kwargs)

        i8 = self.asi8
        if self.hasnans:
            mask = self._isnan
            if mask.all():
                return -1
            # exclude NaT by pushing it below all real values
            i8 = i8.copy()
            i8[mask] = 0
        return i8.argmax()

    @property
    def _formatter_func(self):
        raise AbstractMethodError(self)

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value)
        """
        attrs = super(DatetimeIndexOpsMixin, self)._format_attrs()
        for attrib in self._attributes:
            if attrib == 'freq':
                freq = self.freqstr
                if freq is not None:
                    freq = "'%s'" % freq
                attrs.append(('freq', freq))
        return attrs

    @cache_readonly
    def _resolution(self):
        return frequencies.Resolution.get_reso_from_freq(self.freqstr)

    @cache_readonly
    def resolution(self):
        """
        Returns day, hour, minute, second, millisecond or microsecond
        """
        return frequencies.Resolution.get_str(self._resolution)

    def _convert_scalar_indexer(self, key, kind=None):
        """
        we don't allow integer or float indexing on datetime-like when using
        loc

        Parameters
        ----------
        key : label of the slice bound
        kind : {'ix', 'loc', 'getitem', 'iloc'} or None
        """

        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        # we don't allow integer/float indexing for loc
        # we don't allow float indexing for ix/getitem
        if is_scalar(key):
            is_int = is_integer(key)
            is_flt = is_float(key)
            if kind in ['loc'] and (is_int or is_flt):
                self._invalid_indexer('index', key)
            elif kind in ['ix', 'getitem'] and is_flt:
                self._invalid_indexer('index', key)

        return (super(DatetimeIndexOpsMixin, self)
                ._convert_scalar_indexer(key, kind=kind))

    def _add_datelike(self, other):
        raise TypeError("cannot add {0} and {1}"
                        .format(type(self).__name__,
                                type(other).__name__))

    def _sub_datelike(self, other):
        raise AbstractMethodError(self)

    def _sub_period(self, other):
        return NotImplemented

    @classmethod
    def _add_datetimelike_methods(cls):
        """
        add in the datetimelike methods (as we may have to override the
        superclass)
        """

        def __add__(self, other):
            from pandas.core.index import Index
            from pandas.core.indexes.timedeltas import TimedeltaIndex
            from pandas.tseries.offsets import DateOffset
            if isinstance(other, TimedeltaIndex):
                return self._add_delta(other)
            elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
                if hasattr(other, '_add_delta'):
                    return other._add_delta(self)
                raise TypeError("cannot add TimedeltaIndex and {typ}"
                                .format(typ=type(other)))
            elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
                return self._add_delta(other)
            elif is_integer(other):
                # integer addition is interpreted as a freq-based shift
                return self.shift(other)
            elif isinstance(other, (Index, datetime, np.datetime64)):
                return self._add_datelike(other)
            else:  # pragma: no cover
                return NotImplemented
        cls.__add__ = __add__
        cls.__radd__ = __add__

        def __sub__(self, other):
            from pandas.core.index import Index
            from pandas.core.indexes.datetimes import DatetimeIndex
            from pandas.core.indexes.timedeltas import TimedeltaIndex
            from pandas.tseries.offsets import DateOffset
            if isinstance(other, TimedeltaIndex):
                return self._add_delta(-other)
            elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
                if not isinstance(other, TimedeltaIndex):
                    raise TypeError("cannot subtract TimedeltaIndex and {typ}"
                                    .format(typ=type(other).__name__))
                return self._add_delta(-other)
            elif isinstance(other, DatetimeIndex):
                return self._sub_datelike(other)
            elif isinstance(other, Index):
                raise TypeError("cannot subtract {typ1} and {typ2}"
                                .format(typ1=type(self).__name__,
                                        typ2=type(other).__name__))
            elif isinstance(other, (DateOffset, timedelta, np.timedelta64)):
                return self._add_delta(-other)
            elif is_integer(other):
                # integer subtraction is interpreted as a freq-based shift
                return self.shift(-other)
            elif isinstance(other, (datetime, np.datetime64)):
                return self._sub_datelike(other)
            elif isinstance(other, Period):
                return self._sub_period(other)
            else:  # pragma: no cover
                return NotImplemented
        cls.__sub__ = __sub__

        def __rsub__(self, other):
            return -(self - other)
        cls.__rsub__ = __rsub__

        cls.__iadd__ = __add__
        cls.__isub__ = __sub__

    def _add_delta(self, other):
        return NotImplemented

    def _add_delta_td(self, other):
        # add a delta of a timedeltalike
        # return the i8 result view

        inc = delta_to_nanoseconds(other)
        new_values = checked_add_with_arr(self.asi8, inc,
                                          arr_mask=self._isnan).view('i8')
        if self.hasnans:
            new_values[self._isnan] = iNaT
        return new_values.view('i8')

    def _add_delta_tdi(self, other):
        # add a delta of a TimedeltaIndex
        # return the i8 result view

        # delta operation
        if not len(self) == len(other):
            raise ValueError("cannot add indices of unequal length")

        self_i8 = self.asi8
        other_i8 = other.asi8
        new_values = checked_add_with_arr(self_i8, other_i8,
                                          arr_mask=self._isnan,
                                          b_mask=other._isnan)
        if self.hasnans or other.hasnans:
            # NaT in either operand poisons the corresponding result slot
            mask = (self._isnan) | (other._isnan)
            new_values[mask] = iNaT
        return new_values.view(self.dtype)

    def isin(self, values):
        """
        Compute boolean array of whether each index value is found in the
        passed set of values

        Parameters
        ----------
        values : set or sequence of values

        Returns
        -------
        is_contained : ndarray (boolean dtype)
        """
        if not isinstance(values, type(self)):
            try:
                values = type(self)(values)
            except ValueError:
                return self.asobject.isin(values)

        return algorithms.isin(self.asi8, values.asi8)

    def shift(self, n, freq=None):
        """
        Specialized shift which produces a DatetimeIndex

        Parameters
        ----------
        n : int
            Periods to shift by
        freq : DateOffset or timedelta-like, optional

        Returns
        -------
        shifted : DatetimeIndex
        """
        if freq is not None and freq != self.freq:
            if isinstance(freq, compat.string_types):
                freq = frequencies.to_offset(freq)
            offset = n * freq
            result = self + offset

            if hasattr(self, 'tz'):
                result.tz = self.tz

            return result

        if n == 0:
            # immutable so OK
            return self

        if self.freq is None:
            raise ValueError("Cannot shift with no freq")

        # rebuild the index from shifted endpoints, preserving freq
        start = self[0] + n * self.freq
        end = self[-1] + n * self.freq
        attribs = self._get_attributes_dict()
        attribs['start'] = start
        attribs['end'] = end
        return type(self)(**attribs)

    def repeat(self, repeats, *args, **kwargs):
        """
        Analogous to ndarray.repeat
        """
        nv.validate_repeat(args, kwargs)
        if isinstance(self, ABCPeriodIndex):
            freq = self.freq
        else:
            freq = None
        return self._shallow_copy(self.asi8.repeat(repeats),
                                  freq=freq)

    @Appender(_index_shared_docs['where'] % _index_doc_kwargs)
    def where(self, cond, other=None):
        other = _ensure_datetimelike_to_i8(other)
        values = _ensure_datetimelike_to_i8(self)
        result = np.where(cond, values, other).astype('i8')

        result = self._ensure_localized(result)
        return self._shallow_copy(result,
                                  **self._get_attributes_dict())

    def summary(self, name=None):
        """
        return a summarized representation
        """
        formatter = self._formatter_func
        if len(self) > 0:
            index_summary = ', %s to %s' % (formatter(self[0]),
                                            formatter(self[-1]))
        else:
            index_summary = ''

        if name is None:
            name = type(self).__name__
        result = '%s: %s entries%s' % (printing.pprint_thing(name),
                                       len(self), index_summary)
        if self.freq:
            result += '\nFreq: %s' % self.freqstr

        # display as values, not quoted
        result = result.replace("'", "")
        return result

    def _concat_same_dtype(self, to_concat, name):
        """
        Concatenate to_concat which has the same class
        """
        attribs = self._get_attributes_dict()
        attribs['name'] = name

        if not isinstance(self, ABCPeriodIndex):
            # reset freq
            attribs['freq'] = None

        if getattr(self, 'tz', None) is not None:
            return _concat._concat_datetimetz(to_concat, name)
        else:
            new_data = np.concatenate([c.asi8 for c in to_concat])
        return self._simple_new(new_data, **attribs)
def _ensure_datetimelike_to_i8(other):
    """ helper for coercing an input scalar or array to i8 """
    if lib.isscalar(other) and isna(other):
        # NA scalars map to the i8 NaT sentinel
        other = iNaT
    elif isinstance(other, ABCIndexClass):
        # convert tz if needed
        if getattr(other, 'tz', None) is not None:
            other = other.tz_localize(None).asi8
        else:
            other = other.asi8
    else:
        try:
            other = np.array(other, copy=False).view('i8')
        except TypeError:
            # period array cannot be coerced to int
            other = Index(other).asi8
    return other
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shlex
import subprocess
import sys
import logging
from xml.etree import ElementTree as ET
from multiprocessing import Process
# Log format: timestamp, level, source filename, level number, message.
FORMAT = '[%(asctime)-15s] [%(levelname)s] [%(filename)s %(levelno)s line] %(message)s'
logger = logging.getLogger(__file__)
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
# Default port specification handed to masscan: a comma-separated list of
# single ports and inclusive ranges covering commonly used services.
PORTS = "1,3-4,6-7,9,13,17,19-26,30,32-33,37,42-43,49,53,70,79-85,88-90,99-100,106,109-111,113,119,125,135,139,143-144," \
        "146,161,163,179,199,211-212,222,254-256,259,264,280,301,306,311,340,366,389,406-407,416-417,425,427,443-445,458," \
        "464-465,481,497,500,512-515,524,541,543-545,548,554-555,563,587,593,616-617,625,631,636,646,648,666-668,683,687," \
        "691,700,705,711,714,720,722,726,749,765,777,783,787,800-801,808,843,873,880,888,898,900-903,911-912,981,987,990," \
        "992-993,995,999-1002,1007,1009-1011,1021-1100,1102,1104-1108,1110-1114,1117,1119,1121-1124,1126,1130-1132,1137-1138," \
        "1141,1145,1147-1149,1151-1152,1154,1163-1166,1169,1174-1175,1183,1185-1187,1192,1198-1199,1201,1213,1216-1218," \
        "1233-1234,1236,1244,1247-1248,1259,1271-1272,1277,1287,1296,1300-1301,1309-1311,1322,1328,1334,1352,1417," \
        "1433-1434,1443,1455,1461,1494,1500-1501,1503,1521,1524,1533,1556,1580,1583,1594,1600,1641,1658,1666,1687-1688," \
        "1700,1717-1721,1723,1755,1761,1782-1783,1801,1805,1812,1839-1840,1862-1864,1875,1900,1914,1935,1947,1971-1972," \
        "1974,1984,1998-2010,2013,2020-2022,2030,2033-2035,2038,2040-2043,2045-2049,2065,2068,2099-2100,2103,2105-2107," \
        "2111,2119,2121,2126,2135,2144,2160-2161,2170,2179,2190-2191,2196,2200,2222,2251,2260,2288,2301,2323,2366,2381-2383," \
        "2393-2394,2399,2401,2492,2500,2522,2525,2557,2601-2602,2604-2605,2607-2608,2638,2701-2702,2710,2717-2718,2725," \
        "2800,2809,2811,2869,2875,2909-2910,2920,2967-2968,2998,3000-3001,3003,3005-3007,3011,3013,3017,3030-3031,3050," \
        "3052,3071,3077,3128,3168,3211,3221,3260-3261,3268-3269,3283,3300-3301,3306,3322-3325,3333,3351,3367,3369-3372," \
        "3389-3390,3404,3476,3493,3517,3527,3546,3551,3580,3659,3689-3690,3703,3737,3766,3784,3800-3801,3809,3814," \
        "3826-3828,3851,3869,3871,3878,3880,3889,3905,3914,3918,3920,3945,3971,3986,3995,3998,4000-4006,4045,4111," \
        "4125-4126,4129,4224,4242,4279,4321,4343,4443-4446,4449,4550,4567,4662,4848,4899-4900,4998,5000-5004,5009," \
        "5030,5033,5050-5051,5054,5060-5061,5080,5087,5100-5102,5120,5190,5200,5214,5221-5222,5225-5226,5269,5280," \
        "5298,5357,5405,5414,5431-5432,5440,5500,5510,5544,5550,5555,5560,5566,5631,5633,5666,5678-5679,5718,5730," \
        "5800-5802,5810-5811,5815,5822,5825,5850,5859,5862,5877,5900-5904,5906-5907,5910-5911,5915,5922,5925,5950,5952," \
        "5959-5963,5987-5989,5998-6007,6009,6025,6059,6100-6101,6106,6112,6123,6129,6156,6346,6389,6502,6510,6543,6547," \
        "6565-6567,6580,6646,6666-6669,6689,6692,6699,6779,6788-6789,6792,6839,6881,6901,6969,7000-7002,7004,7007,7019," \
        "7025,7070,7100,7103,7106,7200-7201,7402,7435,7443,7496,7512,7625,7627,7676,7741,7777-7778,7800,7911,7920-7921," \
        "7937-7938,7999-8002,8007-8011,8021-8022,8031,8042,8045,8080-8090,8093,8099-8100,8180-8181,8192-8194,8200,8222," \
        "8254,8290-8292,8300,8333,8383,8400,8402,8443,8500,8600,8649,8651-8652,8654,8701,8800,8873,8888,8899,8994," \
        "9000-9003,9009-9011,9040,9050,9071,9080-9081,9090-9091,9099-9103,9110-9111,9200,9207,9220,9290,9415,9418,9485," \
        "9500,9502-9503,9535,9575,9593-9595,9618,9666,9876-9878,9898,9900,9917,9943-9944,9968,9998-10004,10009-10010,10012," \
        "10024-10025,10082,10180,10215,10243,10566,10616-10617,10621,10626,10628-10629,10778,11110-11111,11967,12000,12174," \
        "12265,12345,13456,13722,13782-13783,14000,14238,14441-14442,15000,15002-15004,15660,15742,16000-16001,16012,16016," \
        "16018,16080,16113,16992-16993,17877,17988,18040,18101,18988,19101,19283,19315,19350,19780,19801,19842,20000,20005," \
        "20031,20221-20222,20828,21571,22939,23502,24444,24800,25734-25735,26214,27000,27352-27353,27355-27356,27715,28201," \
        "30000,30718,30951,31038,31337,32768-32785,33354,33899,34571-34573,35500,38292,40193,40911,41511,42510,44176,44442-44443," \
        "44501,45100,48080,49152-49161,49163,49165,49167,49175-49176,49400,49999-50003,50006,50300,50389,50500,50636,50800,51103," \
        "51493,52673,52822,52848,52869,54045,54328,55055-55056,55555,55600,56737-56738,57294,57797,58080,60020,60443,61532,61900,62078,63331,64623,64680,65000,65129,65389"
def __scan_progressive__(self, hosts, ports, arguments, callback, sudo):
    """
    Worker used by PortScannerAsync: run one scan and, when a callback is
    supplied, hand it the (hosts, scan_data) pair. A scan that raises
    PortScannerError reports ``None`` as its result.
    """
    scan_data = None
    try:
        scan_data = self._nm.scan(hosts, ports, arguments, sudo)
    except PortScannerError:
        pass
    if callback is not None:
        callback(hosts, scan_data)
class PortScanner(object):
    """
    PortScanner class allows to use masscan from python
    """

    def __init__(self, masscan_search_path=('masscan', '/usr/bin/masscan', '/usr/local/bin/masscan', '/sw/bin/masscan', '/opt/local/bin/masscan')):
        """
        Initialize PortScanner module

        * detects masscan on the system and masscan version
        * may raise PortScannerError exception if masscan is not found in the path

        :param masscan_search_path: tuple of strings where to search for the masscan
            executable. Change this if you want to use a specific version of masscan.
        :returns: nothing
        """
        self._masscan_path = ''                 # masscan path
        self._scan_result = {}
        self._masscan_version_number = 0        # masscan version number
        self._masscan_subversion_number = 0     # masscan subversion number
        self._masscan_revised_number = 0        # masscan revised number
        self._masscan_last_output = ''          # last full ascii masscan output
        self._args = ''
        self._scaninfo = {}
        is_masscan_found = False                # true if we have found masscan
        self.__process = None

        # regex used to detect masscan (http or https); raw string fixes the
        # invalid '\.' escape of the original literal
        regex = re.compile(
            r'Masscan version [0-9]*\.[0-9]*[^ ]* \( http(|s)://.* \)'
        )
        # launch 'masscan -V', we wait after
        # 'Masscan version 1.0.3 ( https://github.com/robertdavidgraham/masscan )'

        # This is for Mac OSX. When idle3 is launched from the finder, PATH is
        # not set so masscan was not found.
        _ = os.path.split(os.path.realpath(__file__))[0]
        _ = os.path.join(_, "masscan/bin/masscan")
        # BUGFIX: build a fresh list instead of appending to the argument --
        # the original mutated the shared default list on every instantiation,
        # so the search path grew with each PortScanner() call.
        search_path = list(masscan_search_path) + [_]

        for masscan_path in search_path:
            try:
                if sys.platform.startswith('freebsd') \
                        or sys.platform.startswith('linux') \
                        or sys.platform.startswith('darwin'):
                    p = subprocess.Popen([masscan_path, '-V'],
                                         bufsize=10000,
                                         stdout=subprocess.PIPE,
                                         close_fds=True)
                else:
                    # other platforms: no close_fds, keep subprocess defaults
                    p = subprocess.Popen([masscan_path, '-V'],
                                         bufsize=10000,
                                         stdout=subprocess.PIPE)
            except OSError:
                pass
            else:
                self._masscan_path = masscan_path  # save path
                break
        else:
            # for/else: no candidate could be launched
            raise PortScannerError(
                'masscan program was not found in path. PATH is : {0}'.format(os.getenv('PATH'))
            )

        self._masscan_last_output = bytes.decode(p.communicate()[0])  # save stdout
        for line in self._masscan_last_output.split(os.linesep):
            if regex.match(line):
                is_masscan_found = True
                # Search for version number
                regex_version = re.compile(r'(?P<version>\d{1,4})\.(?P<subversion>\d{1,4})\.(?P<revised>\d{1,4})')
                rv = regex_version.search(line)

                if rv:
                    # extract version/subversion/revised
                    self._masscan_version_number = int(rv.group('version'))
                    self._masscan_subversion_number = int(rv.group('subversion'))
                    self._masscan_revised_number = int(rv.group('revised'))
                break

        if not is_masscan_found:
            raise PortScannerError('masscan program was not found in path')

    def __getitem__(self, host):
        """
        returns a host detail
        """
        if sys.version_info[0] == 2:
            assert type(host) in (str, unicode), 'Wrong type for [host], should be a string [was {0}]'.format(type(host))
        else:
            assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))
        return self._scan_result['scan'][host]

    @property
    def get_masscan_last_output(self):
        """
        Returns the last text output of masscan in raw text
        this may be used for debugging purpose

        :returns: string containing the last text output of masscan in raw text
        """
        return self._masscan_last_output

    @property
    def masscan_version(self):
        """
        returns masscan version if detected (int version, int subversion, int revised)
        or (0, 0, 0) if unknown

        :returns: masscan_version_number, masscan_subversion_number, masscan_revised_number
        """
        return self._masscan_version_number, self._masscan_subversion_number, self._masscan_revised_number

    @property
    def all_hosts(self):
        """
        returns a sorted list of all hosts
        """
        if 'scan' not in self._scan_result:
            return []
        listh = list(self._scan_result['scan'].keys())
        listh.sort()
        return listh

    @property
    def command_line(self):
        """
        returns command line used for the scan

        may raise AssertionError exception if called before scanning
        """
        assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'
        assert 'command_line' in self._scan_result['masscan'], 'Do a scan before trying to get result !'
        return self._scan_result['masscan']['command_line']

    @property
    def scan_result(self):
        """
        returns the full scan result structure

        may raise AssertionError exception if called before scanning
        """
        assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'
        return self._scan_result

    @property
    def scaninfo(self):
        """
        returns scaninfo structure
        {'tcp': {'services': '22', 'method': 'connect'}}

        may raise AssertionError exception if called before scanning
        """
        return self._scaninfo

    @property
    def scanstats(self):
        """
        returns scanstats structure
        {'uphosts': '3', 'timestr': 'Thu Jun 3 21:45:07 2010', 'downhosts': '253', 'totalhosts': '256', 'elapsed': '5.79'}

        may raise AssertionError exception if called before scanning
        """
        assert 'masscan' in self._scan_result, 'Do a scan before trying to get result !'
        assert 'scanstats' in self._scan_result['masscan'], 'Do a scan before trying to get result !'
        return self._scan_result['masscan']['scanstats']

    def scan(self, hosts='127.0.0.1', ports=PORTS, arguments='', sudo=False):
        """
        Scan given hosts

        May raise PortScannerError exception if masscan output was not xml

        Test existence of the following key to know
        if something went wrong : ['masscan']['scaninfo']['error']
        If not present, everything was ok.

        :param hosts: string for hosts as masscan use it 'scanme.masscan.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
        :param ports: string for ports as masscan use it '22,53,110,143-4564'
        :param arguments: string of arguments for masscan '-sU -sX -sC'
        :param sudo: launch masscan with sudo if True

        :returns: scan_result as dictionary
        """
        if sys.version_info[0] == 2:
            assert type(hosts) in (str, unicode), 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))  # noqa
            assert type(ports) in (str, unicode, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))  # noqa
            assert type(arguments) in (str, unicode), 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments))  # noqa
        else:
            assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))  # noqa
            assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))  # noqa
            assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments))  # noqa

        h_args = shlex.split(hosts)
        f_args = shlex.split(arguments)

        # Launch scan; '-p PORTS' is only added when ports were given
        # (list-by-bool multiplication yields [] when ports is None).
        args = [self._masscan_path, '-oX', '-'] + h_args + ['-p', ports]*(ports is not None) + f_args
        logger.debug('Scan parameters: "' + ' '.join(args) + '"')
        self._args = ' '.join(args)
        if sudo:
            args = ['sudo'] + args

        p = subprocess.Popen(args,
                             bufsize=100000,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE
                             )

        # wait until finished and get output
        self._masscan_last_output, masscan_err = p.communicate()
        self._masscan_last_output = bytes.decode(self._masscan_last_output)
        masscan_err = bytes.decode(masscan_err)

        # If there was something on stderr, there was a problem so abort... in
        # fact not always. As stated by AlenLPeacock :
        # This actually makes python-masscan mostly unusable on most real-life
        # networks -- a particular subnet might have dozens of scannable hosts,
        # but if a single one is unreachable or unroutable during the scan,
        # masscan.scan() returns nothing. This behavior also diverges significantly
        # from commandline masscan, which simply stderrs individual problems but
        # keeps on trucking.
        masscan_err_keep_trace = []
        masscan_warn_keep_trace = []
        if len(masscan_err) > 0:
            regex_warning = re.compile('^Warning: .*', re.IGNORECASE)
            for line in masscan_err.split(os.linesep):
                if len(line) > 0:
                    rgw = regex_warning.search(line)
                    if rgw is not None:
                        # sys.stderr.write(line+os.linesep)
                        masscan_warn_keep_trace.append(line+os.linesep)
                    else:
                        # BUGFIX: keep only the offending line, mirroring the
                        # warning branch; the original appended the *entire*
                        # stderr blob once per error line, duplicating it.
                        masscan_err_keep_trace.append(line+os.linesep)

        return self.analyse_masscan_xml_scan(
            masscan_xml_output=self._masscan_last_output,
            masscan_err=masscan_err,
            masscan_err_keep_trace=masscan_err_keep_trace,
            masscan_warn_keep_trace=masscan_warn_keep_trace
        )

    def analyse_masscan_xml_scan(self, masscan_xml_output=None, masscan_err='', masscan_err_keep_trace='', masscan_warn_keep_trace=''):
        """
        Analyses masscan xml scan output (nmap-compatible <nmaprun> format)

        May raise PortScannerError exception if masscan output was not xml

        Test existence of the following key to know if something went wrong : ['masscan']['scaninfo']['error']
        If not present, everything was ok.

        :param masscan_xml_output: xml string to analyse
        :param masscan_err: full stderr text from the scan
        :param masscan_err_keep_trace: error lines kept from stderr
        :param masscan_warn_keep_trace: warning lines kept from stderr
        :returns: scan_result as dictionary
        """
        # masscan xml output looks like :
        # <?xml version="1.0"?>
        # <!-- masscan v1.0 scan -->
        # <?xml-stylesheet href="" type="text/xsl"?>
        # <nmaprun scanner="masscan" start="1490242774" version="1.0-BETA" xmloutputversion="1.03">
        #   <scaninfo type="syn" protocol="tcp" />
        #   <host endtime="1490242774">
        #     <address addr="10.0.9.9" addrtype="ipv4"/>
        #     <ports>
        #       <port protocol="tcp" portid="80">
        #         <state state="open" reason="syn-ack" reason_ttl="64"/>
        #       </port>
        #     </ports>
        #   </host>
        #   ... (one <host> element per responding host) ...
        #   <runstats>
        #     <finished time="1490242786" timestr="2017-03-23 12:19:46" elapsed="13" />
        #     <hosts up="10" down="0" total="10" />
        #   </runstats>
        # </nmaprun>
        if masscan_xml_output is not None:
            self._masscan_last_output = masscan_xml_output

        scan_result = {}

        try:
            dom = ET.fromstring(self._masscan_last_output)
        except Exception:
            # Not XML: surface stderr when available, else the raw output.
            if len(masscan_err) > 0:
                raise PortScannerError(masscan_err)
            else:
                raise PortScannerError(self._masscan_last_output)

        # masscan command line
        scan_result['masscan'] = {
            'command_line': self._args,
            'scanstats': {
                'timestr': dom.find("runstats/finished").get('timestr'),
                'elapsed': dom.find("runstats/finished").get('elapsed'),
                'uphosts': dom.find("runstats/hosts").get('up'),
                'downhosts': dom.find("runstats/hosts").get('down'),
                'totalhosts': dom.find("runstats/hosts").get('total')}
        }

        # if there was an error
        if len(masscan_err_keep_trace) > 0:
            self._scaninfo['error'] = masscan_err_keep_trace

        # if there was a warning
        if len(masscan_warn_keep_trace) > 0:
            self._scaninfo['warning'] = masscan_warn_keep_trace

        scan_result['scan'] = {}
        for dhost in dom.findall('host'):
            # host ip, mac and other addresses
            host = None
            address_block = {}
            vendor_block = {}
            for address in dhost.findall('address'):
                addtype = address.get('addrtype')
                address_block[addtype] = address.get('addr')
                if addtype == 'ipv4':
                    host = address_block[addtype]
                elif addtype == 'mac' and address.get('vendor') is not None:
                    vendor_block[address_block[addtype]] = address.get('vendor')

            if host is None:
                # no ipv4 address: fall back to the first address of any type
                host = dhost.find('address').get('addr')
            if host not in scan_result['scan']:
                scan_result['scan'][host] = {}

            for dport in dhost.findall('ports/port'):
                proto = dport.get('protocol')
                port = int(dport.get('portid'))
                state = dport.find('state').get('state')
                reason = dport.find('state').get('reason')
                reason_ttl = dport.find('state').get('reason_ttl')
                if proto not in scan_result['scan'][host]:
                    scan_result['scan'][host][proto] = {}
                scan_result['scan'][host][proto][port] = {
                    'state': state,
                    'reason': reason,
                    'reason_ttl': reason_ttl
                }

        self._scan_result = scan_result  # store for later use
        return scan_result

    def has_host(self, host):
        """
        returns True if host has result, False otherwise
        """
        assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))
        assert 'scan' in self._scan_result, 'Do a scan before trying to get result !'

        return host in self._scan_result['scan']
class PortScannerAsync(object):
    """
    PortScannerAsync allows to use masscan from python asynchronously
    for each host scanned, callback is called with scan result for the host
    """

    def __init__(self):
        """
        Initialize the module

        * detects masscan on the system and masscan version
        * may raise PortScannerError exception if masscan is not found in the path
        """
        self._process = None
        # Synchronous scanner used by the worker process; constructing it
        # also validates that the masscan binary is reachable.
        self._nm = PortScanner()
        return

    def __del__(self):
        """
        Cleanup when deleted
        """
        if self._process is not None:
            try:
                if self._process.is_alive():
                    self._process.terminate()
            except AssertionError:
                # Happens on python3.4
                # when using PortScannerAsync twice in a row
                pass
        self._process = None
        return

    def scan(self, hosts='127.0.0.1', ports=None, arguments='', callback=None, sudo=False):
        """
        Scan given hosts in a separate process and return host by host result using callback function

        PortScannerError exception from standard masscan is catched and you won't know about but get None as scan_data

        :param hosts: string for hosts as masscan use it 'scanme.masscan.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
        :param ports: string for ports as masscan use it '22,53,110,143-4564'
        :param arguments: string of arguments for masscan '-sU -sX -sC'
        :param callback: callback function which takes (host, scan_data) as arguments
        :param sudo: launch masscan with sudo if true
        """
        if sys.version_info[0] == 2:
            assert type(hosts) in (str, unicode), 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))
            assert type(ports) in (str, unicode, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))
            assert type(arguments) in (str, unicode), 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments))
        else:
            assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))
            assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))
            assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments))
        assert callable(callback) or callback is None, 'The [callback] {0} should be callable or None.'.format(str(callback))

        # Run the blocking scan in a daemon process so this call returns
        # immediately; results are delivered through `callback`.
        self._process = Process(
            target=__scan_progressive__,
            args=(self, hosts, ports, arguments, callback, sudo)
        )
        self._process.daemon = True
        self._process.start()

    def stop(self):
        """
        Stop the current scan process
        """
        if self._process is not None:
            self._process.terminate()
        return

    def wait(self, timeout=None):
        """
        Wait for the current scan process to finish, or timeout

        :param timeout: default = None, wait timeout seconds (int or float)
        """
        # Also accept float timeouts: Process.join() takes any real number.
        assert type(timeout) in (int, float, type(None)), 'Wrong type for [timeout], should be an int or None [was {0}]'.format(type(timeout))
        self._process.join(timeout)
        return

    def still_scanning(self):
        """
        :returns: True if a scan is currently running, False otherwise
        """
        try:
            return self._process.is_alive()
        except Exception:
            # No process yet (AttributeError on None) or process unusable.
            # The original bare `except:` also swallowed KeyboardInterrupt.
            return False
class PortScannerYield(PortScannerAsync):
    """
    PortScannerYield allows to use masscan from python with a generator
    for each host scanned, yield is called with scan result for the host
    """

    def __init__(self):
        """
        Initialize the module

        * detects masscan on the system and masscan version
        * may raise PortScannerError exception if masscan is not found in the path
        """
        PortScannerAsync.__init__(self)
        return

    def scan(self, hosts='127.0.0.1', ports=None, arguments='', sudo=False):
        """
        Scan given hosts and yield a (host, scan_data) tuple for each of them

        PortScannerError exception from standard masscan is catched and you won't
        know about it: the failing host is yielded with scan_data set to None.

        :param hosts: string for hosts as masscan use it 'scanme.masscan.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
        :param ports: string for ports as masscan use it '22,53,110,143-4564'
        :param arguments: string of arguments for masscan '-sU -sX -sC'
        :param sudo: launch masscan with sudo if true
        """
        assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))
        assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))
        assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(type(arguments))
        # NOTE(review): PortScanner in this module does not define listscan();
        # confirm that self._nm actually provides it before relying on this.
        for host in self._nm.listscan(hosts):
            try:
                scan_data = self._nm.scan(host, ports, arguments, sudo)
            except PortScannerError:
                # best-effort: report the host with no data instead of failing
                scan_data = None
            yield (host, scan_data)
        return

    def stop(self):
        """No-op: the generator-based scanner runs no background process."""
        pass

    def wait(self, timeout=None):
        """No-op: scanning happens inline while iterating scan()."""
        pass

    def still_scanning(self):
        """No-op; returns None (unlike the parent, which returns a bool)."""
        pass
class PortScannerError(Exception):
    """Exception raised for errors detected by the PortScanner classes."""

    def __init__(self, value):
        # Keep the raw payload (typically masscan's stderr or output text).
        self.value = value

    def __str__(self):
        # Show the payload's repr so bytes/str are unambiguous.
        return repr(self.value)

    def __repr__(self):
        return 'PortScannerError exception {0}'.format(self.value)
| |
import argparse
import io
import logging
import os
import sys
import xml.etree.ElementTree as etree
from diff_cover import DESCRIPTION, VERSION
from diff_cover.config_parser import Tool, get_config
from diff_cover.diff_reporter import GitDiffReporter
from diff_cover.git_diff import GitDiffTool
from diff_cover.git_path import GitPathTool
from diff_cover.report_generator import (
HtmlReportGenerator,
JsonReportGenerator,
MarkdownReportGenerator,
StringReportGenerator,
)
from diff_cover.violationsreporters.violations_reporter import XmlCoverageReporter
# --- argparse help strings for the diff-cover command-line options ---
HTML_REPORT_HELP = "Diff coverage HTML output"
JSON_REPORT_HELP = "Diff coverage JSON output"
MARKDOWN_REPORT_HELP = "Diff coverage Markdown output"
COMPARE_BRANCH_HELP = "Branch to compare"
CSS_FILE_HELP = "Write CSS into an external file"
FAIL_UNDER_HELP = (
    "Returns an error code if coverage or quality score is below this value"
)
IGNORE_STAGED_HELP = "Ignores staged changes"
IGNORE_UNSTAGED_HELP = "Ignores unstaged changes"
IGNORE_WHITESPACE = "When getting a diff ignore any and all whitespace"
EXCLUDE_HELP = "Exclude files, more patterns supported"
SRC_ROOTS_HELP = "List of source directories (only for jacoco coverage reports)"
COVERAGE_XML_HELP = "XML coverage report"
DIFF_RANGE_NOTATION_HELP = (
    "Git diff range notation to use when comparing branches, defaults to '...'"
)
QUIET_HELP = "Only print errors and failures"
SHOW_UNCOVERED = "Show uncovered lines on the console"
INCLUDE_UNTRACKED_HELP = "Include untracked files"
CONFIG_FILE_HELP = "The configuration file to use"

# Module-level logger; the level is configured in main() via basicConfig.
LOGGER = logging.getLogger(__name__)
def parse_coverage_args(argv):
    """
    Parse command line arguments, returning a dict of
    valid options:

        {
            'coverage_xml': COVERAGE_XML,
            'html_report': None | HTML_REPORT,
            'json_report': None | JSON_REPORT,
            'markdown_report': None | MARKDOWN_REPORT,
            'external_css_file': None | CSS_FILE,
            ... (one key per argparse dest below)
        }

    where `COVERAGE_XML`, `HTML_REPORT`, `JSON_REPORT`, and `CSS_FILE` are paths.

    The path strings may or may not exist.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)

    parser.add_argument("coverage_xml", type=str, help=COVERAGE_XML_HELP, nargs="+")

    parser.add_argument(
        "--html-report",
        metavar="FILENAME",
        type=str,
        help=HTML_REPORT_HELP,
    )

    parser.add_argument(
        "--json-report",
        metavar="FILENAME",
        type=str,
        help=JSON_REPORT_HELP,
    )

    parser.add_argument(
        "--markdown-report",
        metavar="FILENAME",
        type=str,
        help=MARKDOWN_REPORT_HELP,
    )

    parser.add_argument(
        "--show-uncovered", action="store_true", default=None, help=SHOW_UNCOVERED
    )

    parser.add_argument(
        "--external-css-file",
        metavar="FILENAME",
        type=str,
        help=CSS_FILE_HELP,
    )

    parser.add_argument(
        "--compare-branch",
        metavar="BRANCH",
        type=str,
        help=COMPARE_BRANCH_HELP,
    )

    parser.add_argument(
        "--fail-under", metavar="SCORE", type=float, default=None, help=FAIL_UNDER_HELP
    )

    parser.add_argument(
        "--ignore-staged", action="store_true", default=None, help=IGNORE_STAGED_HELP
    )

    parser.add_argument(
        "--ignore-unstaged",
        action="store_true",
        default=None,
        help=IGNORE_UNSTAGED_HELP,
    )

    parser.add_argument(
        "--include-untracked",
        action="store_true",
        default=None,
        help=INCLUDE_UNTRACKED_HELP,
    )

    parser.add_argument(
        "--exclude", metavar="EXCLUDE", type=str, nargs="+", help=EXCLUDE_HELP
    )

    parser.add_argument(
        "--src-roots",
        metavar="DIRECTORY",
        type=str,
        nargs="+",
        help=SRC_ROOTS_HELP,
    )

    parser.add_argument(
        "--diff-range-notation",
        metavar="RANGE_NOTATION",
        type=str,
        choices=["...", ".."],
        help=DIFF_RANGE_NOTATION_HELP,
    )

    parser.add_argument("--version", action="version", version=f"diff-cover {VERSION}")

    parser.add_argument(
        "--ignore-whitespace",
        action="store_true",
        default=None,
        help=IGNORE_WHITESPACE,
    )

    parser.add_argument(
        "-q", "--quiet", action="store_true", default=None, help=QUIET_HELP
    )

    parser.add_argument(
        "-c", "--config-file", help=CONFIG_FILE_HELP, metavar="CONFIG_FILE"
    )

    # Fallback values applied by get_config() when neither the command line
    # nor the config file sets an option. Keys must match the argparse dests.
    defaults = {
        "show_uncovered": False,
        "compare_branch": "origin/main",
        "fail_under": 0,
        "ignore_staged": False,
        "ignore_unstaged": False,
        # BUGFIX: the dest for --include-untracked is `include_untracked`;
        # the previous key `ignore_untracked` matched no option, so this
        # default was silently never applied.
        "include_untracked": False,
        "src_roots": ["src/main/java", "src/test/java"],
        "ignore_whitespace": False,
        "diff_range_notation": "...",
        "quiet": False,
    }

    return get_config(parser=parser, argv=argv, defaults=defaults, tool=Tool.DIFF_COVER)
def generate_coverage_report(
    coverage_xml,
    compare_branch,
    html_report=None,
    css_file=None,
    json_report=None,
    markdown_report=None,
    ignore_staged=False,
    ignore_unstaged=False,
    include_untracked=False,
    exclude=None,
    src_roots=None,
    diff_range_notation=None,
    ignore_whitespace=False,
    quiet=False,
    show_uncovered=False,
):
    """
    Build the diff coverage report from one or more coverage XML files.

    Writes the optional HTML/JSON/Markdown reports (plus an external CSS
    file next to the HTML report when requested), prints the plain-text
    report to stdout unless `quiet` is set, and returns the total percent
    covered.
    """
    diff_reporter = GitDiffReporter(
        compare_branch,
        git_diff=GitDiffTool(diff_range_notation, ignore_whitespace),
        ignore_staged=ignore_staged,
        ignore_unstaged=ignore_unstaged,
        include_untracked=include_untracked,
        exclude=exclude,
    )

    parsed_roots = [etree.parse(path) for path in coverage_xml]
    coverage_reporter = XmlCoverageReporter(parsed_roots, src_roots)

    # One generator instance per requested output format.
    if html_report is not None:
        css_url = None
        if css_file is not None:
            # Reference the CSS file relative to the HTML report's directory.
            css_url = os.path.relpath(css_file, os.path.dirname(html_report))
        html_generator = HtmlReportGenerator(coverage_reporter, diff_reporter, css_url=css_url)
        with open(html_report, "wb") as stream:
            html_generator.generate_report(stream)
        if css_file is not None:
            with open(css_file, "wb") as stream:
                html_generator.generate_css(stream)

    if json_report is not None:
        json_generator = JsonReportGenerator(coverage_reporter, diff_reporter)
        with open(json_report, "wb") as stream:
            json_generator.generate_report(stream)

    if markdown_report is not None:
        markdown_generator = MarkdownReportGenerator(coverage_reporter, diff_reporter)
        with open(markdown_report, "wb") as stream:
            markdown_generator.generate_report(stream)

    # The console report is always generated; in quiet mode it is written to
    # an in-memory buffer so nothing reaches stdout.
    console_generator = StringReportGenerator(coverage_reporter, diff_reporter, show_uncovered)
    console_stream = io.BytesIO() if quiet else sys.stdout.buffer
    console_generator.generate_report(console_stream)

    return console_generator.total_percent_covered()
def main(argv=None, directory=None):
    """
    Main entry point for the tool, script installed via pyproject.toml

    Returns a value that can be passed into exit() specifying
    the exit code: 1 is an error, 0 is a successful run.
    """
    cli_args = argv or sys.argv
    options = parse_coverage_args(cli_args[1:])

    quiet_mode = options["quiet"]
    logging.basicConfig(
        format="%(message)s",
        level=logging.ERROR if quiet_mode else logging.WARNING,
    )

    GitPathTool.set_cwd(directory)
    threshold = options.get("fail_under")

    covered = generate_coverage_report(
        options["coverage_xml"],
        options["compare_branch"],
        html_report=options["html_report"],
        json_report=options["json_report"],
        markdown_report=options["markdown_report"],
        css_file=options["external_css_file"],
        ignore_staged=options["ignore_staged"],
        ignore_unstaged=options["ignore_unstaged"],
        include_untracked=options["include_untracked"],
        exclude=options["exclude"],
        src_roots=options["src_roots"],
        diff_range_notation=options["diff_range_notation"],
        ignore_whitespace=options["ignore_whitespace"],
        quiet=quiet_mode,
        show_uncovered=options["show_uncovered"],
    )

    # Fail the run only when coverage drops below the configured threshold.
    if covered < threshold:
        LOGGER.error("Failure. Coverage is below %i%%.", threshold)
        return 1
    return 0
if __name__ == "__main__":
    # Allow running this module directly: exit with the CLI's status code.
    sys.exit(main())
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
# Python 2/3 compatibility aliases. On Python 3 the removed Python 2 builtins
# are aliased to their Python 3 equivalents; on Python 2 the lazy itertools
# variants replace the eager builtins.
# BUGFIX: the original tested `sys.version >= '3'`, a lexicographic string
# comparison of the full version banner -- it breaks for Python 10+.
# sys.version_info compares numerically and is the documented way.
if sys.version_info[0] >= 3:
    basestring = unicode = str
    xrange = range
else:
    from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a toDF() convenience method to every RDD, bound (via closure)
    # to the given SparkSession. Called once per SparkSession.__init__.
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param samplingRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        .. note:: the actual parameter is named ``sampleRatio``; the
           ``samplingRatio`` spelling above predates it -- confirm which
           name downstream docs should use.

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
    class Builder(object):
        """Builder for :class:`SparkSession`.
        """

        # Both attributes are *class* attributes: every Builder instance
        # (including the shared SparkSession.builder) reads and writes the
        # same option dict, guarded by the same lock.
        _lock = RLock()
        _options = {}

        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

            For an existing SparkConf, use `conf` parameter.

            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...

            For a (key, value) pair, you can omit parameter names.

            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...

            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    # single (key, value) pair; the value is stringified
                    self._options[key] = str(value)
                else:
                    # copy every entry from an existing SparkConf
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self

        @since(2.0)
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.

            :param master: a url for spark master
            """
            return self.config("spark.master", master)

        @since(2.0)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.

            If no application name is set, a randomly generated name will be used.

            :param name: an application name
            """
            return self.config("spark.app.name", name)

        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive serdes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")

        @since(2.0)
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                # A cached session whose underlying Java context is gone
                # (stopped) is treated as "no session".
                if session is None or session._sc._jsc is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                # Re-apply this builder's options to the (possibly reused)
                # session, on both the JVM side and the Python-side conf.
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        :param sparkContext: the :class:`SparkContext` backing this session
        :param jsparkSession: optional existing JVM ``SparkSession`` to wrap;
            when ``None``, the JVM default session is reused if still alive,
            otherwise a new JVM session is created

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...     'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            # Reuse the JVM-side default session when it exists and its
            # SparkContext has not been stopped; otherwise create a new one.
            if self._jvm.SparkSession.getDefaultSession().isDefined() \
                    and not self._jvm.SparkSession.getDefaultSession().get() \
                    .sparkContext().isStopped():
                jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
            else:
                jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        # Give RDDs a toDF() shortcut bound to this session and install the
        # Py4J exception translator.
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
            self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`."""
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running.

        :return: version string reported by the JVM-side session.
        """
        return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None, names=None):
        """
        Infer schema from an RDD of Row or tuple.

        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :param names: list of column names to use during inference
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if the RDD's first row is empty, or types remain
            undetermined after scanning the first 100 rows (no-sampling path).
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")

        if samplingRatio is None:
            # No sampling: start from the first row and widen the schema with
            # up to 99 more rows until no NullType columns remain.
            schema = _infer_schema(first, names=names)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row, names=names))
                    if not _has_nulltype(schema):
                        break
                else:
                    # for/else: reached only when the loop never hit `break`.
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sampling path: ratios >= 0.99 effectively scan the whole RDD.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
        return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
    def _convert_from_pandas(self, pdf, schema, timezone):
        """
        Convert a pandas.DataFrame to a list of records that can be used to make a DataFrame.

        :param pdf: the pandas.DataFrame to convert
        :param schema: a StructType (used to locate timestamp columns) or any
            other value (all columns are then checked for timezone conversion)
        :param timezone: session timezone string, or None to skip conversion
        :return: list of records
        """
        if timezone is not None:
            from pyspark.sql.types import _check_series_convert_timestamps_tz_local
            # Copy-on-write: only clone the DataFrame the first time a series
            # actually changes, so the caller's DataFrame is never mutated.
            copied = False
            if isinstance(schema, StructType):
                for field in schema:
                    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                    if isinstance(field.dataType, TimestampType):
                        s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                        if s is not pdf[field.name]:
                            if not copied:
                                # Copy once if the series is modified to prevent the original
                                # Pandas DataFrame from being updated
                                pdf = pdf.copy()
                                copied = True
                            pdf[field.name] = s
            else:
                # No struct schema available: check every column.
                for column, series in pdf.iteritems():
                    s = _check_series_convert_timestamps_tz_local(series, timezone)
                    if s is not series:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[column] = s

        # Convert pandas.DataFrame to list of numpy records
        np_records = pdf.to_records(index=False)

        # Check if any columns need to be fixed for Spark to infer properly
        if len(np_records) > 0:
            record_dtype = self._get_numpy_record_dtype(np_records[0])
            if record_dtype is not None:
                return [r.astype(record_dtype).tolist() for r in np_records]

        # Convert list of numpy records to python lists
        return [r.tolist() for r in np_records]
    def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
        """
        Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
        to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
        data types will be used to coerce the data in Pandas to Arrow conversion.

        :param pdf: the pandas.DataFrame to convert
        :param schema: a StructType, a list/tuple of column names, or None
        :param timezone: session timezone passed through to batch creation
        :raises ValueError: if ``schema`` is a single (non-struct) DataType,
            which the Arrow path does not support
        """
        from pyspark.serializers import ArrowSerializer, _create_batch
        from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
        from pyspark.sql.utils import require_minimum_pandas_version, \
            require_minimum_pyarrow_version
        require_minimum_pandas_version()
        require_minimum_pyarrow_version()
        from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype

        # Determine arrow types to coerce data when creating batches
        if isinstance(schema, StructType):
            arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
        elif isinstance(schema, DataType):
            raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
        else:
            # Any timestamps must be coerced to be compatible with Spark
            arrow_types = [to_arrow_type(TimestampType())
                           if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                           for t in pdf.dtypes]

        # Slice the DataFrame to be batched; one slice per default partition.
        step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
        pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

        # Create Arrow record batches, pairing each column series with its coerced type.
        batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
                                 timezone)
                   for pdf_slice in pdf_slices]

        # Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
        if isinstance(schema, (list, tuple)):
            struct = from_arrow_schema(batches[0].schema)
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
            schema = struct

        # Create the Spark DataFrame directly from the Arrow data and schema
        jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
        jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
            jrdd, schema.json(), self._wrapped._jsqlContext)
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.

        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.

        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.

        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``. The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`

        .. versionchanged:: 2.1
           Added verifySchema.

        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")

        # Normalize the schema argument: parse DDL strings, re-encode name lists.
        if isinstance(schema, basestring):
            schema = _parse_datatype_string(schema)
        elif isinstance(schema, (list, tuple)):
            # Must re-encode any unicode strings to be consistent with StructField names
            schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]

        # pandas is optional; only take the pandas path when it is importable.
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            from pyspark.sql.utils import require_minimum_pandas_version
            require_minimum_pandas_version()

            if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
               == "true":
                timezone = self.conf.get("spark.sql.session.timeZone")
            else:
                timezone = None

            # If no schema supplied by user then get the names of columns only
            if schema is None:
                schema = [str(x) if not isinstance(x, basestring) else
                          (x.encode('utf-8') if not isinstance(x, str) else x)
                          for x in data.columns]

            # Prefer the Arrow fast path when enabled; fall back on any failure.
            if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
                    and len(data) > 0:
                try:
                    return self._create_from_pandas_with_arrow(data, schema, timezone)
                except Exception as e:
                    warnings.warn("Arrow will not be used in createDataFrame: %s" % str(e))
                    # Fallback to create DataFrame without arrow if raise some exception
            data = self._convert_from_pandas(data, schema, timezone)

        # Build the row-preparation function used before conversion to internal rows.
        if isinstance(schema, StructType):
            verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True

            def prepare(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            # Single data type: wrap it as the only field "value" and wrap each
            # record into a 1-tuple so it can become a row.
            dataType = schema
            schema = StructType().add("value", schema)
            verify_func = _make_type_verifier(
                dataType, name="field value") if verifySchema else lambda _: True

            def prepare(obj):
                verify_func(obj)
                return obj,
        else:
            prepare = lambda obj: obj

        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext` and invalidate this session.

        After this call, :attr:`SparkSession._instantiatedSession` is cleared so
        ``Builder.getOrCreate`` will build a fresh session next time.
        """
        self._sc.stop()
        # We should clean the default session up. See SPARK-23228.
        self._jvm.SparkSession.clearDefaultSession()
        SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Returns this session unchanged; cleanup happens in :meth:`__exit__`.
        """
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        Exceptions are not suppressed (implicitly returns None).
        """
        self.stop()
def _test():
    """Run this module's doctests against a locally created SparkSession."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    # Doctests assume the Spark source tree layout; requires SPARK_HOME to be set.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    # Names referenced by the doctest snippets (sc, spark, rdd, df).
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    # Non-zero exit signals doctest failures to the test harness.
    if failure_count:
        exit(-1)
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    _test()
| |
"""CLI Module
This module helps enable the CLI."""
import sys
import logging
import traceback
import time, datetime
from colorama import Fore
from mookfist_lled_controller import scan_bridges
from mookfist_lled_controller import create_bridge
from mookfist_lled_controller import logger
from mookfist_lled_controller.exceptions import UnsupportedVersion
from mookfist_lled_controller.exceptions import InvalidGroup
from mookfist_lled_controller.exceptions import NoBridgeFound
# Map standard logging level names to the colorama foreground color used by
# ColoredFormatter. Keys must match logging's level names exactly, since
# ColoredFormatter.format indexes this dict with record.levelname.
LVL_NAMES = {
    'DEBUG': Fore.CYAN,
    'INFO': Fore.GREEN,
    'WARNING': Fore.YELLOW,
    'ERROR': Fore.RED,
    # Fix: key was misspelled 'CRTICIAL', so formatting a CRITICAL record
    # raised KeyError instead of coloring it magenta.
    'CRITICAL': Fore.MAGENTA
}
class ColoredFormatter(logging.Formatter):
    """Log formatter that prefixes each record with the time elapsed since
    the formatter was created and a color-coded, padded level name."""

    def __init__(self, *args, **kwargs):
        logging.Formatter.__init__(self, *args, **kwargs)
        # Reference point for the elapsed-time prefix.
        self.timer = datetime.datetime.now()

    def format(self, record):
        elapsed = datetime.datetime.now() - self.timer
        level_color = LVL_NAMES[record.levelname]
        padded_level = record.levelname.ljust(8)
        level_tag = '%s[%s%s%s]' % (Fore.WHITE, level_color, padded_level, Fore.WHITE)
        return '%s %s %s%s' % (elapsed, level_tag, Fore.RESET, record.msg)
def configure_logger(debug=False):
    """Attach a color-formatted stdout handler to the root logger.

    :param debug: when True, log at DEBUG level; otherwise INFO.
    """
    # Fix: the formatter class is defined in this module; it was previously
    # looked up as ``logger.ColoredFormatter()`` on the imported ``logger``
    # module, which does not define it.
    formatter = ColoredFormatter()
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    log = logging.getLogger()
    level = logging.DEBUG if debug else logging.INFO
    log.setLevel(level)
    handler.setLevel(level)
    log.addHandler(handler)
class Main(object):
    """The CLI application.

    Interprets the docopt-style ``arguments`` dict, locates (or is told
    about) a LimitlessLED bridge, and routes to the requested light action.
    """

    def __init__(self, arguments):
        """:param arguments: docopt-style dict of parsed CLI arguments.

        :raises UnsupportedVersion: if --bridge-version is given but is not
            one of '4', '5' or '6'.
        """
        if arguments['fade']:
            self.action = 'fade'
        elif arguments['fadec']:
            self.action = 'fadec'
        elif arguments['color']:
            self.action = 'color'
        elif arguments['rgb']:
            self.action = 'rgb'
        elif arguments['brightness']:
            self.action = 'brightness'
        elif arguments['on']:
            # Fix: this was ``['on']`` (a one-element list) while every other
            # action is stored as a plain string.
            self.action = 'on'
        elif arguments['off']:
            self.action = 'off'
        elif arguments['white']:
            self.action = 'white'
        elif arguments['colorcycle']:
            self.action = 'colorcycle'
        # v4 and v5 bridges are treated identically.
        if arguments['--bridge-version'] == '4' or arguments['--bridge-version'] == '5':
            self.bridge_version = 4
        elif arguments['--bridge-version'] == '6':
            self.bridge_version = 6
        elif arguments['--bridge-version'] is not None:
            raise UnsupportedVersion
        else:
            # No version supplied: default to the common v4 bridge.
            self.bridge_version = 4
        self.log = logging.getLogger('lled')
        self.arguments = arguments

    def action_fade(self):
        """Ramp brightness from <start> to <end> percent, one step at a time."""
        start = int(self.arguments['<start>'])
        end = int(self.arguments['<end>'])
        # Step direction depends on whether we are fading up or down.
        steps = -1 if start > end else 1
        self.log.info('Transitioning brightness from %s%% to %s%%' % (start, end))
        self.bridge.on(self.arguments['--group'])
        for brightness in range(start, end, steps):
            self.bridge.brightness(brightness, self.arguments['--group'])

    def action_fadec(self):
        """Ramp the color value from <start> to <end>, one step at a time."""
        start = int(self.arguments['<start>'])
        end = int(self.arguments['<end>'])
        steps = -1 if start > end else 1
        self.log.info('Transitioning color from %s to %s' % (start, end))
        for color in range(start, end, steps):
            self.bridge.color(color, self.arguments['--group'])

    def action_color(self):
        """Set the group to a single color value."""
        color = int(self.arguments['<color>'])
        self.log.info('Setting color to %s' % color)
        self.bridge.color(color, self.arguments['--group'])

    def action_white(self):
        """Switch the group to white mode."""
        self.log.info('Setting color to white')
        self.bridge.white(self.arguments['--group'])

    def action_brightness(self):
        """Set the group brightness percentage."""
        brightness = int(self.arguments['<brightness>'])
        self.log.info('Setting brightness to %s%%' % brightness)
        self.bridge.brightness(brightness, self.arguments['--group'])

    def action_on(self):
        """Turn the group on."""
        self.log.info('Turning lights on')
        self.bridge.on(self.arguments['--group'])

    def action_off(self):
        """Turn the group off."""
        self.log.info('Turning lights off')
        self.bridge.off(self.arguments['--group'])

    def action_rgb(self):
        """Set the group to an RGB color."""
        r = int(self.arguments['<r>'])
        g = int(self.arguments['<g>'])
        b = int(self.arguments['<b>'])
        self.log.info('Setting color to rgb(%s, %s, %s)' % (r, g, b))
        self.bridge.color_rgb(r, g, b, self.arguments['--group'])

    def action_colorcycle(self):
        """Step through all 256 color values once."""
        for x in range(0, 256):
            # Wrap out-of-range values into 0..255 (defensive; range() above
            # already stays in bounds).
            if x < 0:
                x = x + 255
            elif x > 255:
                x = x - 255
            self.log.info('Setting color to %s' % x)
            self.bridge.color(x, self.arguments['--group'])

    def route_action(self):
        """Dispatch to the action_* method selected by the CLI arguments."""
        if self.arguments['fade']:
            self.action_fade()
        elif self.arguments['fadec']:
            self.action_fadec()
        elif self.arguments['color']:
            self.action_color()
        elif self.arguments['brightness']:
            self.action_brightness()
        elif self.arguments['on']:
            self.action_on()
        elif self.arguments['off']:
            self.action_off()
        elif self.arguments['white']:
            self.action_white()
        elif self.arguments['rgb']:
            self.action_rgb()
        elif self.arguments['colorcycle']:
            self.action_colorcycle()

    def run(self):
        """Find a bridge, build the bridge client, and execute the action."""
        try:
            if self.arguments['--bridge-ip']:
                host = self.arguments['--bridge-ip']
            else:
                self.log.info('Scanning for bridge...')
                bridges = scan_bridges(self.bridge_version)
                self.log.info('Found %s bridge(s)' % len(bridges))
                if len(bridges) > 1:
                    self.log.warning('Multiple bridges have been found. I will choose the first one I saw')
                    self.log.warning('If you really don\'t want me to do that, then use the --bridge-ip (and --bridge-port if needed) flags when using this tool')
                    self.log.info('--- Available Bridges')
                    for bridge in bridges:
                        self.log.info('    %s - %s' % (bridge[0], bridge[1]))
                elif len(bridges) == 0:
                    raise NoBridgeFound
                host = bridges[0][0]

            rc = 1
            if self.arguments['--repeat']:
                rc = int(self.arguments['--repeat'])

            pause = 100
            if self.arguments['--pause']:
                pause = int(self.arguments['--pause'])

            version = 4
            if self.arguments['--bridge-version']:
                version = int(self.arguments['--bridge-version'])

            # Fix: an explicit --bridge-port used to be unreachable because
            # the version check (version is always 4, 5 or 6 here) matched
            # first; check the user-supplied port before the defaults.
            if self.arguments['--bridge-port']:
                port = int(self.arguments['--bridge-port'])
            elif version == 6:
                port = 5987
            else:
                port = 8899

            self.log.info('--- Bridge Details')
            self.log.info('Version: %s' % version)
            self.log.info('IP:      %s' % host)
            self.log.info('Port:    %s' % port)
            self.log.debug('--- Settings')
            self.log.debug('Pause: %sms' % pause)
            self.log.debug('Command Repeat: %s' % rc)
            self.bridge = create_bridge(version, host, port, pause, rc)
            self.route_action()
        except UnsupportedVersion:
            self.log.error('The chosen bridge version is unsupported')
        except InvalidGroup as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.log.error('Groups can be numbered 1 through 4 only')
            self.log.error(traceback.format_exception(exc_type, exc_value, exc_traceback))
        except NoBridgeFound:
            self.log.error('Sorry, I was not able to find any bridges. So either give me the IP (and port number) of the bridge you wish to use, or figure out why I can not find any bridges')
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.resources.openstack.neutron import subnet
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_template = '''
heat_template_version: 2015-04-30
description: Template to test subnet Neutron resource
resources:
net:
type: OS::Neutron::Net
properties:
name: the_net
tenant_id: c1210485b2424d48804aad5d39c61b8f
shared: true
dhcp_agent_ids:
- 28c25a04-3f73-45a7-a2b4-59e183943ddc
sub_net:
type: OS::Neutron::Subnet
properties:
network: { get_resource : net}
tenant_id: c1210485b2424d48804aad5d39c61b8f
ip_version: 4
cidr: 10.0.3.0/24
allocation_pools:
- start: 10.0.3.20
end: 10.0.3.150
host_routes:
- destination: 10.0.4.0/24
nexthop: 10.0.3.20
dns_nameservers:
- 8.8.8.8
port:
type: OS::Neutron::Port
properties:
device_id: d6b4d3a5-c700-476f-b609-1493dd9dadc0
name: port1
network: { get_resource : net}
fixed_ips:
- subnet: { get_resource : sub_net }
ip_address: 10.0.3.21
port2:
type: OS::Neutron::Port
properties:
name: port2
network: { get_resource : net}
router:
type: OS::Neutron::Router
properties:
l3_agent_id: 792ff887-6c85-4a56-b518-23f24fa65581
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id: { get_resource : router }
subnet: { get_resource : sub_net }
gateway:
type: OS::Neutron::RouterGateway
properties:
router_id: { get_resource : router }
network: { get_resource : net}
'''
neutron_template_deprecated = neutron_template.replace(
'neutron', 'neutron_id').replace('subnet', 'subnet_id')
class NeutronSubnetTest(common.HeatTestCase):
def setUp(self):
super(NeutronSubnetTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'delete_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'update_subnet')
self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
def create_subnet(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = subnet.Subnet('test_subnet', resource_defns[resource_name],
stack)
return rsrc
    def test_subnet(self):
        """Create, update and (twice) delete a subnet against stubbed neutron."""
        t = self._test_subnet()
        # Name/ID resolution stubs for the two resources referenced by name.
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'None'
        ).AndReturn('None')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'router',
            'None'
        ).AndReturn('None')
        stack = utils.parse_stack(t)
        rsrc = self.create_subnet(t, stack, 'sub_net')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        rsrc.validate()
        ref_id = rsrc.FnGetRefId()
        self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
        # The stubbed show_subnet calls return in sequence: the first raises a
        # 404 (attribute resolves to None), the later ones return the subnet
        # dict — hence None followed by a real value for the same attribute.
        self.assertIsNone(rsrc.FnGetAtt('network_id'))
        self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
                         rsrc.FnGetAtt('network_id'))
        self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
        # assert the dependency (implicit or explicit) between the ports
        # and the subnet
        self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
        self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
        # Exercise handle_update with changed name/nameservers/host_routes.
        props = {
            "name": 'mysubnet',
            "network_id": cfn_funcs.ResourceRef(stack, "get_resource", "net"),
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "ip_version": 4,
            "cidr": "10.0.3.0/24",
            "allocation_pools": [
                {"start": "10.0.3.20", "end": "10.0.3.150"}],
            "dns_nameservers": ["8.8.8.8", "192.168.1.254"],
            "host_routes": [
                {"destination": "192.168.1.0/24", "nexthop": "194.168.1.2"}
            ]
        }
        update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                      props)
        rsrc.handle_update(update_snippet, {}, {})
        # Delete twice: second delete must succeed once the subnet is gone.
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.m.VerifyAll()
    def test_subnet_deprecated(self):
        """Same lifecycle as test_subnet but via the deprecated property names."""
        t = self._test_subnet(resolve_neutron=False)
        stack = utils.parse_stack(t)
        rsrc = self.create_subnet(t, stack, 'sub_net')
        # Name/ID resolution stubs for the two resources referenced by name.
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'None'
        ).AndReturn('None')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'router',
            'None'
        ).AndReturn('None')
        self.m.ReplayAll()
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        rsrc.validate()
        ref_id = rsrc.FnGetRefId()
        self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
        # First stubbed show_subnet raises a 404, so the attribute is None;
        # subsequent calls return the subnet dict (see _test_subnet stubs).
        self.assertIsNone(rsrc.FnGetAtt('network_id'))
        self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
                         rsrc.FnGetAtt('network_id'))
        self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
        # assert the dependency (implicit or explicit) between the ports
        # and the subnet
        self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
        self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
        # Delete twice: the second delete must tolerate the missing subnet.
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
        self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
        self.m.VerifyAll()
def _test_subnet(self, resolve_neutron=True):
    """Record the shared mox expectations for subnet tests.

    Records create/show/delete (and, for the current template format,
    update) calls on the mocked neutron client, then returns the parsed
    template.

    :param resolve_neutron: when True, parse the current-format template
        and also record the ``update_subnet`` expectation; when False,
        parse the deprecated-format template instead.
    :returns: the parsed template dict.
    """
    # Expected create payload, answered with a fully-populated subnet.
    neutronclient.Client.create_subnet({
        'subnet': {
            'name': utils.PhysName('test_stack', 'test_subnet'),
            'network_id': u'None',
            'dns_nameservers': [u'8.8.8.8'],
            'allocation_pools': [
                {'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
            'host_routes': [
                {'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
            'ip_version': 4,
            'cidr': u'10.0.3.0/24',
            'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
            'enable_dhcp': True
        }
    }).AndReturn({
        "subnet": {
            "allocation_pools": [
                {"start": "10.0.3.20", "end": "10.0.3.150"}],
            "cidr": "10.0.3.0/24",
            "dns_nameservers": ["8.8.8.8"],
            "enable_dhcp": True,
            "gateway_ip": "10.0.3.1",
            "host_routes": [
                {"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
            "id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
            "ip_version": 4,
            "name": "name",
            "network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f"
        }
    })
    # First poll 404s; presumably exercises the check-create retry
    # path before the subnet becomes visible.
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1').AndRaise(
        qe.NeutronClientException(status_code=404))
    sn = {
        "subnet": {
            "name": "name",
            "network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            "allocation_pools": [
                {"start": "10.0.3.20", "end": "10.0.3.150"}],
            "gateway_ip": "10.0.3.1",
            'host_routes': [
                {'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
            "ip_version": 4,
            "cidr": "10.0.3.0/24",
            "dns_nameservers": ["8.8.8.8"],
            "id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
            "enable_dhcp": True,
        }
    }
    # Three successful reads: one per attribute/validation access in
    # the calling tests.
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
    # Delete script
    neutronclient.Client.delete_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1'
    ).AndReturn(None)
    # After delete, show 404s (gone); a second delete also 404s, which
    # the resource treats as already-deleted.
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1'
    ).AndRaise(qe.NeutronClientException(status_code=404))
    neutronclient.Client.delete_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1'
    ).AndRaise(qe.NeutronClientException(status_code=404))
    if resolve_neutron:
        t = template_format.parse(neutron_template)
        # Update script
        neutronclient.Client.update_subnet(
            '91e47a57-7508-46fe-afc9-fc454e8580e1',
            {'subnet': {
                'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
                'name': 'mysubnet',
                'enable_dhcp': True,
                'host_routes': [
                    {'destination': '192.168.1.0/24',
                     'nexthop': '194.168.1.2'}
                ]
            }}
        )
    else:
        t = template_format.parse(neutron_template_deprecated)
    return t
def test_subnet_disable_dhcp(self):
    """Create a subnet with enable_dhcp='False' and verify the boolean
    is passed through to neutron and reported back via FnGetAtt."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'None'
    ).AndReturn('None')
    # Create payload must carry enable_dhcp=False (coerced from the
    # template string 'False' set below).
    neutronclient.Client.create_subnet({
        'subnet': {
            'name': utils.PhysName('test_stack', 'test_subnet'),
            'network_id': u'None',
            'dns_nameservers': [u'8.8.8.8'],
            'allocation_pools': [
                {'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
            'host_routes': [
                {'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
            'ip_version': 4,
            'enable_dhcp': False,
            'cidr': u'10.0.3.0/24',
            'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
        }
    }).AndReturn({
        "subnet": {
            "allocation_pools": [
                {"start": "10.0.3.20", "end": "10.0.3.150"}],
            "host_routes": [
                {"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
            "cidr": "10.0.3.0/24",
            "dns_nameservers": ["8.8.8.8"],
            "enable_dhcp": False,
            "gateway_ip": "10.0.3.1",
            "id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
            "ip_version": 4,
            "name": "name",
            "network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f"
        }
    })
    # One read for the FnGetAtt('enable_dhcp') assertion below.
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn({
            "subnet": {
                "name": "name",
                "network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
                "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
                "allocation_pools": [
                    {"start": "10.0.3.20", "end": "10.0.3.150"}],
                "host_routes": [
                    {"destination": "10.0.4.0/24",
                     "nexthop": "10.0.3.20"}],
                "gateway_ip": "10.0.3.1",
                "ip_version": 4,
                "cidr": "10.0.3.0/24",
                "dns_nameservers": ["8.8.8.8"],
                "id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
                "enable_dhcp": False,
            }
        })
    neutronclient.Client.delete_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1'
    ).AndReturn(None)
    neutronclient.Client.show_subnet(
        '91e47a57-7508-46fe-afc9-fc454e8580e1'
    ).AndRaise(qe.NeutronClientException(status_code=404))
    self.m.ReplayAll()
    t = template_format.parse(neutron_template)
    t['resources']['sub_net']['properties']['enable_dhcp'] = 'False'
    stack = utils.parse_stack(t)
    rsrc = self.create_subnet(t, stack, 'sub_net')
    # NOTE(review): ReplayAll() is called a second time here with no
    # expectations recorded in between -- looks like a leftover; the
    # earlier call above should suffice. Confirm before removing.
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    rsrc.validate()
    ref_id = rsrc.FnGetRefId()
    self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
    # assertIs: the attribute must be the boolean False, not a string.
    self.assertIs(False, rsrc.FnGetAtt('enable_dhcp'))
    scheduler.TaskRunner(rsrc.delete)()
    self.m.VerifyAll()
def test_null_gateway_ip(self):
    """_null_gateway_ip mutates its dict in place: only an
    empty-string gateway_ip is normalised to None; everything else
    (missing key, real address, already-None) is left untouched."""
    cases = [
        # No keys at all: nothing to do.
        ({}, {}),
        # No gateway_ip key: other keys are preserved.
        ({'foo': 'bar'}, {'foo': 'bar'}),
        # A concrete address is kept as-is.
        ({'foo': 'bar', 'gateway_ip': '198.51.100.0'},
         {'foo': 'bar', 'gateway_ip': '198.51.100.0'}),
        # Empty string becomes None.
        ({'foo': 'bar', 'gateway_ip': ''},
         {'foo': 'bar', 'gateway_ip': None}),
        # This should not happen as prepare_properties strips out
        # None values, but testing anyway.
        ({'foo': 'bar', 'gateway_ip': None},
         {'foo': 'bar', 'gateway_ip': None}),
    ]
    for given, expected in cases:
        subnet.Subnet._null_gateway_ip(given)
        self.assertEqual(expected, given)
def test_ipv6_subnet(self):
    """Create an IPv6 subnet with slaac address/RA modes and verify the
    create payload sent to neutron."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'None'
    ).AndReturn('None')
    neutronclient.Client.create_subnet({
        'subnet': {
            'name': utils.PhysName('test_stack', 'test_subnet'),
            'network_id': u'None',
            'dns_nameservers': [u'2001:4860:4860::8844'],
            'ip_version': 6,
            'enable_dhcp': True,
            'cidr': u'fdfa:6a50:d22b::/64',
            'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
            'ipv6_address_mode': 'slaac',
            'ipv6_ra_mode': 'slaac'
        }
    }).AndReturn({
        "subnet": {
            "allocation_pools": [
                {"start": "fdfa:6a50:d22b::2",
                 "end": "fdfa:6a50:d22b:0:ffff:ffff:ffff:fffe"}],
            # NOTE(review): this mocked cidr does not match the one
            # requested above (fdfa:6a50:d22b::/64); nothing asserts
            # on it, but it looks like a copy/paste slip -- confirm.
            "cidr": "fd00:1::/64",
            "enable_dhcp": True,
            "gateway_ip": "fdfa:6a50:d22b::1",
            "id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
            "ip_version": 6,
            "name": "name",
            "network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "tenant_id": "c1210485b2424d48804aad5d39c61b8f",
            'ipv6_address_mode': 'slaac',
            'ipv6_ra_mode': 'slaac'
        }
    })
    self.m.ReplayAll()
    # Rework the shared template into an IPv6 subnet definition.
    t = template_format.parse(neutron_template)
    props = t['resources']['sub_net']['properties']
    props.pop('allocation_pools')
    props.pop('host_routes')
    props['ip_version'] = 6
    props['ipv6_address_mode'] = 'slaac'
    props['ipv6_ra_mode'] = 'slaac'
    props['cidr'] = 'fdfa:6a50:d22b::/64'
    props['dns_nameservers'] = ['2001:4860:4860::8844']
    stack = utils.parse_stack(t)
    rsrc = self.create_subnet(t, stack, 'sub_net')
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    rsrc.validate()
    self.m.VerifyAll()
def test_ipv6_validate_ra_mode(self):
    """Validation must fail when ipv6_ra_mode and ipv6_address_mode
    are both set but differ."""
    t = template_format.parse(neutron_template)
    subnet_props = t['resources']['sub_net']['properties']
    subnet_props.update({
        'ipv6_address_mode': 'dhcpv6-stateful',
        'ipv6_ra_mode': 'slaac',
        'ip_version': 6,
    })
    stack = utils.parse_stack(t)
    ex = self.assertRaises(exception.StackValidationFailed,
                           stack['sub_net'].validate)
    self.assertEqual("When both ipv6_ra_mode and ipv6_address_mode are "
                     "set, they must be equal.", six.text_type(ex))
def test_ipv6_validate_ip_version(self):
    """Validation must reject the IPv6 mode properties on an IPv4
    (ip_version=4) subnet."""
    t = template_format.parse(neutron_template)
    subnet_props = t['resources']['sub_net']['properties']
    subnet_props.update({
        'ipv6_address_mode': 'slaac',
        'ipv6_ra_mode': 'slaac',
        'ip_version': 4,
    })
    stack = utils.parse_stack(t)
    ex = self.assertRaises(exception.StackValidationFailed,
                           stack['sub_net'].validate)
    self.assertEqual("ipv6_ra_mode and ipv6_address_mode are not "
                     "supported for ipv4.", six.text_type(ex))
def test_deprecated_network_id(self):
    """The deprecated network_id property must be translated to the
    new 'network' property, leaving network_id itself unset."""
    template = """
heat_template_version: 2015-04-30
resources:
  net:
    type: OS::Neutron::Net
    properties:
      name: test
  subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: net }
      cidr: 10.0.0.0/24
"""
    t = template_format.parse(template)
    stack = utils.parse_stack(t)
    rsrc = stack['subnet']
    stack.create()
    # The deprecated key surfaces via the 'network' property as an
    # unresolved resource reference; network_id reads as None.
    self.assertEqual(cfn_funcs.ResourceRef(stack, 'get_resource', 'net'),
                     rsrc.properties.get('network'))
    self.assertIsNone(rsrc.properties.get('network_id'))
| |
# Generated by Django 2.1.2 on 2018-10-17 18:46
from decimal import Decimal
import django.contrib.postgres.fields.jsonb
import django.core.validators
import django.db.models.deletion
from django.db import migrations, models
import saleor.payment
class Migration(migrations.Migration):
    # Auto-generated initial schema for the payment app (Django 2.1.2).
    # NOTE: applied migrations are historical records -- prefer adding a
    # new migration over editing this one.

    initial = True

    dependencies = [
        ("checkout", "0015_auto_20181017_1346"),
        ("order", "0064_auto_20181016_0819"),
    ]

    operations = [
        migrations.CreateModel(
            name="PaymentMethod",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("gateway", models.CharField(max_length=255)),
                ("is_active", models.BooleanField(default=True)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "charge_status",
                    models.CharField(
                        choices=[
                            ("charged", "Charged"),
                            ("not-charged", "Not charged"),
                            ("fully-refunded", "Fully refunded"),
                        ],
                        default="not-charged",
                        max_length=15,
                    ),
                ),
                # Billing address snapshot, denormalised onto the payment.
                ("billing_first_name", models.CharField(blank=True, max_length=256)),
                ("billing_last_name", models.CharField(blank=True, max_length=256)),
                ("billing_company_name", models.CharField(blank=True, max_length=256)),
                ("billing_address_1", models.CharField(blank=True, max_length=256)),
                ("billing_address_2", models.CharField(blank=True, max_length=256)),
                ("billing_city", models.CharField(blank=True, max_length=256)),
                ("billing_city_area", models.CharField(blank=True, max_length=128)),
                ("billing_postal_code", models.CharField(blank=True, max_length=256)),
                ("billing_country_code", models.CharField(blank=True, max_length=2)),
                ("billing_country_area", models.CharField(blank=True, max_length=256)),
                ("billing_email", models.EmailField(blank=True, max_length=254)),
                (
                    "customer_ip_address",
                    models.GenericIPAddressField(blank=True, null=True),
                ),
                # Non-sensitive card metadata only (brand, expiry,
                # first/last digits) -- no full PAN is stored.
                ("cc_brand", models.CharField(blank=True, default="", max_length=40)),
                (
                    "cc_exp_month",
                    models.PositiveIntegerField(
                        blank=True,
                        null=True,
                        validators=[
                            django.core.validators.MinValueValidator(1),
                            django.core.validators.MaxValueValidator(12),
                        ],
                    ),
                ),
                (
                    "cc_exp_year",
                    models.PositiveIntegerField(
                        blank=True,
                        null=True,
                        validators=[django.core.validators.MinValueValidator(1000)],
                    ),
                ),
                (
                    "cc_first_digits",
                    models.CharField(blank=True, default="", max_length=6),
                ),
                (
                    "cc_last_digits",
                    models.CharField(blank=True, default="", max_length=4),
                ),
                ("extra_data", models.TextField(blank=True, default="")),
                ("token", models.CharField(blank=True, default="", max_length=128)),
                ("currency", models.CharField(max_length=10)),
                (
                    "total",
                    models.DecimalField(
                        decimal_places=2, default=Decimal("0.0"), max_digits=12
                    ),
                ),
                (
                    "captured_amount",
                    models.DecimalField(
                        decimal_places=2, default=Decimal("0.0"), max_digits=12
                    ),
                ),
                (
                    "checkout",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="payment_methods",
                        to="checkout.Cart",
                    ),
                ),
                (
                    "order",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="payment_methods",
                        to="order.Order",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Transaction",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("token", models.CharField(blank=True, default="", max_length=128)),
                (
                    "kind",
                    models.CharField(
                        choices=[
                            ("auth", "Authorization"),
                            ("charge", "Charge"),
                            ("refund", "Refund"),
                            ("capture", "Capture"),
                            ("void", "Void"),
                        ],
                        max_length=10,
                    ),
                ),
                ("is_success", models.BooleanField(default=False)),
                (
                    "error",
                    models.CharField(
                        # Choice values are TransactionError enum members
                        # looked up by value, as serialized by makemigrations.
                        choices=[
                            (
                                saleor.payment.TransactionError("incorrect_number"),
                                "incorrect_number",
                            ),
                            (
                                saleor.payment.TransactionError("invalid_number"),
                                "invalid_number",
                            ),
                            (
                                saleor.payment.TransactionError("incorrect_cvv"),
                                "incorrect_cvv",
                            ),
                            (
                                saleor.payment.TransactionError("invalid_cvv"),
                                "invalid_cvv",
                            ),
                            (
                                saleor.payment.TransactionError("incorrect_zip"),
                                "incorrect_zip",
                            ),
                            (
                                saleor.payment.TransactionError("incorrect_address"),
                                "incorrect_address",
                            ),
                            (
                                saleor.payment.TransactionError("invalid_expiry_date"),
                                "invalid_expiry_date",
                            ),
                            (saleor.payment.TransactionError("expired"), "expired"),
                            (
                                saleor.payment.TransactionError("processing_error"),
                                "processing_error",
                            ),
                            (saleor.payment.TransactionError("declined"), "declined"),
                        ],
                        max_length=256,
                        null=True,
                    ),
                ),
                ("currency", models.CharField(max_length=10)),
                (
                    "amount",
                    models.DecimalField(
                        decimal_places=2, default=Decimal("0.0"), max_digits=12
                    ),
                ),
                ("gateway_response", django.contrib.postgres.fields.jsonb.JSONField()),
                (
                    "payment_method",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="transactions",
                        to="payment.PaymentMethod",
                    ),
                ),
            ],
        ),
    ]
| |
#!/usr/bin/env python3
# IPFIX support for Scapy (RFC7011)
from scapy.all import bind_layers, FieldLenField, IntField, Packet, \
PacketListField, ShortEnumField, ShortField, StrLenField
from scapy.layers.inet import UDP
# IPFIX Information Elements http://www.iana.org/assignments/ipfix/ipfix.xhtml
# Maps IANA-assigned element IDs to their names, used by ShortEnumField
# in FieldSpecifier below. Gaps in the numbering (e.g. 65-69, 97,
# 105-127) are IDs that are unassigned or deliberately omitted here.
information_elements = {
    1: "octetDeltaCount",
    2: "packetDeltaCount",
    3: "deltaFlowCount",
    4: "protocolIdentifier",
    5: "ipClassOfService",
    6: "tcpControlBits",
    7: "sourceTransportPort",
    8: "sourceIPv4Address",
    9: "sourceIPv4PrefixLength",
    10: "ingressInterface",
    11: "destinationTransportPort",
    12: "destinationIPv4Address",
    13: "destinationIPv4PrefixLength",
    14: "egressInterface",
    15: "ipNextHopIPv4Address",
    16: "bgpSourceAsNumber",
    17: "bgpDestinationAsNumber",
    18: "bgpNextHopIPv4Address",
    19: "postMCastPacketDeltaCount",
    20: "postMCastOctetDeltaCount",
    21: "flowEndSysUpTime",
    22: "flowStartSysUpTime",
    23: "postOctetDeltaCount",
    24: "postPacketDeltaCount",
    25: "minimumIpTotalLength",
    26: "maximumIpTotalLength",
    27: "sourceIPv6Address",
    28: "destinationIPv6Address",
    29: "sourceIPv6PrefixLength",
    30: "destinationIPv6PrefixLength",
    31: "flowLabelIPv6",
    32: "icmpTypeCodeIPv4",
    33: "igmpType",
    34: "samplingInterval",
    35: "samplingAlgorithm",
    36: "flowActiveTimeout",
    37: "flowIdleTimeout",
    38: "engineType",
    39: "engineId",
    40: "exportedOctetTotalCount",
    41: "exportedMessageTotalCount",
    42: "exportedFlowRecordTotalCount",
    43: "ipv4RouterSc",
    44: "sourceIPv4Prefix",
    45: "destinationIPv4Prefix",
    46: "mplsTopLabelType",
    47: "mplsTopLabelIPv4Address",
    48: "samplerId",
    49: "samplerMode",
    50: "samplerRandomInterval",
    51: "classId",
    52: "minimumTTL",
    53: "maximumTTL",
    54: "fragmentIdentification",
    55: "postIpClassOfService",
    56: "sourceMacAddress",
    57: "postDestinationMacAddress",
    58: "vlanId",
    59: "postVlanId",
    60: "ipVersion",
    61: "flowDirection",
    62: "ipNextHopIPv6Address",
    63: "bgpNextHopIPv6Address",
    64: "ipv6ExtensionHeaders",
    70: "mplsTopLabelStackSection",
    71: "mplsLabelStackSection2",
    72: "mplsLabelStackSection3",
    73: "mplsLabelStackSection4",
    74: "mplsLabelStackSection5",
    75: "mplsLabelStackSection6",
    76: "mplsLabelStackSection7",
    77: "mplsLabelStackSection8",
    78: "mplsLabelStackSection9",
    79: "mplsLabelStackSection10",
    80: "destinationMacAddress",
    81: "postSourceMacAddress",
    82: "interfaceName",
    83: "interfaceDescription",
    84: "samplerName",
    85: "octetTotalCount",
    86: "packetTotalCount",
    87: "flagsAndSamplerId",
    88: "fragmentOffset",
    89: "forwardingStatus",
    90: "mplsVpnRouteDistinguisher",
    91: "mplsTopLabelPrefixLength",
    92: "srcTrafficIndex",
    93: "dstTrafficIndex",
    94: "applicationDescription",
    95: "applicationId",
    96: "applicationName",
    98: "postIpDiffServCodePoint",
    99: "multicastReplicationFactor",
    100: "className",
    101: "classificationEngineId",
    102: "layer2packetSectionOffset",
    103: "layer2packetSectionSize",
    104: "layer2packetSectionData",
    128: "bgpNextAdjacentAsNumber",
    129: "bgpPrevAdjacentAsNumber",
    130: "exporterIPv4Address",
    131: "exporterIPv6Address",
    132: "droppedOctetDeltaCount",
    133: "droppedPacketDeltaCount",
    134: "droppedOctetTotalCount",
    135: "droppedPacketTotalCount",
    136: "flowEndReason",
    137: "commonPropertiesId",
    138: "observationPointId",
    139: "icmpTypeCodeIPv6",
    140: "mplsTopLabelIPv6Address",
    141: "lineCardId",
    142: "portId",
    143: "meteringProcessId",
    144: "exportingProcessId",
    145: "templateId",
    146: "wlanChannelId",
    147: "wlanSSID",
    148: "flowId",
    149: "observationDomainId",
    150: "flowStartSeconds",
    151: "flowEndSeconds",
    152: "flowStartMilliseconds",
    153: "flowEndMilliseconds",
    154: "flowStartMicroseconds",
    155: "flowEndMicroseconds",
    156: "flowStartNanoseconds",
    157: "flowEndNanoseconds",
    158: "flowStartDeltaMicroseconds",
    159: "flowEndDeltaMicroseconds",
    160: "systemInitTimeMilliseconds",
    161: "flowDurationMilliseconds",
    162: "flowDurationMicroseconds",
    163: "observedFlowTotalCount",
    164: "ignoredPacketTotalCount",
    165: "ignoredOctetTotalCount",
    166: "notSentFlowTotalCount",
    167: "notSentPacketTotalCount",
    168: "notSentOctetTotalCount",
    169: "destinationIPv6Prefix",
    170: "sourceIPv6Prefix",
    171: "postOctetTotalCount",
    172: "postPacketTotalCount",
    173: "flowKeyIndicator",
    174: "postMCastPacketTotalCount",
    175: "postMCastOctetTotalCount",
    176: "icmpTypeIPv4",
    177: "icmpCodeIPv4",
    178: "icmpTypeIPv6",
    179: "icmpCodeIPv6",
    180: "udpSourcePort",
    181: "udpDestinationPort",
    182: "tcpSourcePort",
    183: "tcpDestinationPort",
    184: "tcpSequenceNumber",
    185: "tcpAcknowledgementNumber",
    186: "tcpWindowSize",
    187: "tcpUrgentPointer",
    188: "tcpHeaderLength",
    189: "ipHeaderLength",
    190: "totalLengthIPv4",
    191: "payloadLengthIPv6",
    192: "ipTTL",
    193: "nextHeaderIPv6",
    194: "mplsPayloadLength",
    195: "ipDiffServCodePoint",
    196: "ipPrecedence",
    197: "fragmentFlags",
    198: "octetDeltaSumOfSquares",
    199: "octetTotalSumOfSquares",
    200: "mplsTopLabelTTL",
    201: "mplsLabelStackLength",
    202: "mplsLabelStackDepth",
    203: "mplsTopLabelExp",
    204: "ipPayloadLength",
    205: "udpMessageLength",
    206: "isMulticast",
    207: "ipv4IHL",
    208: "ipv4Options",
    209: "tcpOptions",
    210: "paddingOctets",
    211: "collectorIPv4Address",
    212: "collectorIPv6Address",
    213: "exportInterface",
    214: "exportProtocolVersion",
    215: "exportTransportProtocol",
    216: "collectorTransportPort",
    217: "exporterTransportPort",
    218: "tcpSynTotalCount",
    219: "tcpFinTotalCount",
    220: "tcpRstTotalCount",
    221: "tcpPshTotalCount",
    222: "tcpAckTotalCount",
    223: "tcpUrgTotalCount",
    224: "ipTotalLength",
    225: "postNATSourceIPv4Address",
    226: "postNATDestinationIPv4Address",
    227: "postNAPTSourceTransportPort",
    228: "postNAPTDestinationTransportPort",
    229: "natOriginatingAddressRealm",
    230: "natEvent",
    231: "initiatorOctets",
    232: "responderOctets",
    233: "firewallEvent",
    234: "ingressVRFID",
    235: "egressVRFID",
    236: "VRFname",
    237: "postMplsTopLabelExp",
    238: "tcpWindowScale",
    239: "biflowDirection",
    240: "ethernetHeaderLength",
    241: "ethernetPayloadLength",
    242: "ethernetTotalLength",
    243: "dot1qVlanId",
    244: "dot1qPriority",
    245: "dot1qCustomerVlanId",
    246: "dot1qCustomerPriority",
    247: "metroEvcId",
    248: "metroEvcType",
    249: "pseudoWireId",
    250: "pseudoWireType",
    251: "pseudoWireControlWord",
    252: "ingressPhysicalInterface",
    253: "egressPhysicalInterface",
    254: "postDot1qVlanId",
    255: "postDot1qCustomerVlanId",
    256: "ethernetType",
    257: "postIpPrecedence",
    258: "collectionTimeMilliseconds",
    259: "exportSctpStreamId",
    260: "maxExportSeconds",
    261: "maxFlowEndSeconds",
    262: "messageMD5Checksum",
    263: "messageScope",
    264: "minExportSeconds",
    265: "minFlowStartSeconds",
    266: "opaqueOctets",
    267: "sessionScope",
    268: "maxFlowEndMicroseconds",
    269: "maxFlowEndMilliseconds",
    270: "maxFlowEndNanoseconds",
    271: "minFlowStartMicroseconds",
    272: "minFlowStartMilliseconds",
    273: "minFlowStartNanoseconds",
    274: "collectorCertificate",
    275: "exporterCertificate",
    276: "dataRecordsReliability",
    277: "observationPointType",
    278: "newConnectionDeltaCount",
    279: "connectionSumDurationSeconds",
    280: "connectionTransactionId",
    281: "postNATSourceIPv6Address",
    282: "postNATDestinationIPv6Address",
    283: "natPoolId",
    284: "natPoolName",
    285: "anonymizationFlags",
    286: "anonymizationTechnique",
    287: "informationElementIndex",
    288: "p2pTechnology",
    289: "tunnelTechnology",
    290: "encryptedTechnology",
    291: "basicList",
    292: "subTemplateList",
    293: "subTemplateMultiList",
    294: "bgpValidityState",
    295: "IPSecSPI",
    296: "greKey",
    297: "natType",
    298: "initiatorPackets",
    299: "responderPackets",
    300: "observationDomainName",
    301: "selectionSequenceId",
    302: "selectorId",
    303: "informationElementId",
    304: "selectorAlgorithm",
    305: "samplingPacketInterval",
    306: "samplingPacketSpace",
    307: "samplingTimeInterval",
    308: "samplingTimeSpace",
    309: "samplingSize",
    310: "samplingPopulation",
    311: "samplingProbability",
    312: "dataLinkFrameSize",
    313: "ipHeaderPacketSection",
    314: "ipPayloadPacketSection",
    315: "dataLinkFrameSection",
    316: "mplsLabelStackSection",
    317: "mplsPayloadPacketSection",
    318: "selectorIdTotalPktsObserved",
    319: "selectorIdTotalPktsSelected",
    320: "absoluteError",
    321: "relativeError",
    322: "observationTimeSeconds",
    323: "observationTimeMilliseconds",
    324: "observationTimeMicroseconds",
    325: "observationTimeNanoseconds",
    326: "digestHashValue",
    327: "hashIPPayloadOffset",
    328: "hashIPPayloadSize",
    329: "hashOutputRangeMin",
    330: "hashOutputRangeMax",
    331: "hashSelectedRangeMin",
    332: "hashSelectedRangeMax",
    333: "hashDigestOutput",
    334: "hashInitialiserValue",
    335: "selectorName",
    336: "upperCILimit",
    337: "lowerCILimit",
    338: "confidenceLevel",
    339: "informationElementDataType",
    340: "informationElementDescription",
    341: "informationElementName",
    342: "informationElementRangeBegin",
    343: "informationElementRangeEnd",
    344: "informationElementSemantics",
    345: "informationElementUnits",
    346: "privateEnterpriseNumber",
    347: "virtualStationInterfaceId",
    348: "virtualStationInterfaceName",
    349: "virtualStationUUID",
    350: "virtualStationName",
    351: "layer2SegmentId",
    352: "layer2OctetDeltaCount",
    353: "layer2OctetTotalCount",
    354: "ingressUnicastPacketTotalCount",
    355: "ingressMulticastPacketTotalCount",
    356: "ingressBroadcastPacketTotalCount",
    357: "egressUnicastPacketTotalCount",
    358: "egressBroadcastPacketTotalCount",
    359: "monitoringIntervalStartMilliSeconds",
    360: "monitoringIntervalEndMilliSeconds",
    361: "portRangeStart",
    362: "portRangeEnd",
    363: "portRangeStepSize",
    364: "portRangeNumPorts",
    365: "staMacAddress",
    366: "staIPv4Address",
    367: "wtpMacAddress",
    368: "ingressInterfaceType",
    369: "egressInterfaceType",
    370: "rtpSequenceNumber",
    371: "userName",
    372: "applicationCategoryName",
    373: "applicationSubCategoryName",
    374: "applicationGroupName",
    375: "originalFlowsPresent",
    376: "originalFlowsInitiated",
    377: "originalFlowsCompleted",
    378: "distinctCountOfSourceIPAddress",
    379: "distinctCountOfDestinationIPAddress",
    380: "distinctCountOfSourceIPv4Address",
    381: "distinctCountOfDestinationIPv4Address",
    382: "distinctCountOfSourceIPv6Address",
    383: "distinctCountOfDestinationIPv6Address",
    384: "valueDistributionMethod",
    385: "rfc3550JitterMilliseconds",
    386: "rfc3550JitterMicroseconds",
    387: "rfc3550JitterNanoseconds",
    388: "dot1qDEI",
    389: "dot1qCustomerDEI",
    390: "flowSelectorAlgorithm",
    391: "flowSelectedOctetDeltaCount",
    392: "flowSelectedPacketDeltaCount",
    393: "flowSelectedFlowDeltaCount",
    394: "selectorIDTotalFlowsObserved",
    395: "selectorIDTotalFlowsSelected",
    396: "samplingFlowInterval",
    397: "samplingFlowSpacing",
    398: "flowSamplingTimeInterval",
    399: "flowSamplingTimeSpacing",
    400: "hashFlowDomain",
    401: "transportOctetDeltaCount",
    402: "transportPacketDeltaCount",
    403: "originalExporterIPv4Address",
    404: "originalExporterIPv6Address",
    405: "originalObservationDomainId",
    406: "intermediateProcessId",
    407: "ignoredDataRecordTotalCount",
    408: "dataLinkFrameType",
    409: "sectionOffset",
    410: "sectionExportedOctets",
    411: "dot1qServiceInstanceTag",
    412: "dot1qServiceInstanceId",
    413: "dot1qServiceInstancePriority",
    414: "dot1qCustomerSourceMacAddress",
    415: "dot1qCustomerDestinationMacAddress",
    417: "postLayer2OctetDeltaCount",
    418: "postMCastLayer2OctetDeltaCount",
    420: "postLayer2OctetTotalCount",
    421: "postMCastLayer2OctetTotalCount",
    422: "minimumLayer2TotalLength",
    423: "maximumLayer2TotalLength",
    424: "droppedLayer2OctetDeltaCount",
    425: "droppedLayer2OctetTotalCount",
    426: "ignoredLayer2OctetTotalCount",
    427: "notSentLayer2OctetTotalCount",
    428: "layer2OctetDeltaSumOfSquares",
    429: "layer2OctetTotalSumOfSquares",
    430: "layer2FrameDeltaCount",
    431: "layer2FrameTotalCount",
    432: "pseudoWireDestinationIPv4Address",
    433: "ignoredLayer2FrameTotalCount",
    434: "mibObjectValueInteger",
    435: "mibObjectValueOctetString",
    436: "mibObjectValueOID",
    437: "mibObjectValueBits",
    438: "mibObjectValueIPAddress",
    439: "mibObjectValueCounter",
    440: "mibObjectValueGauge",
    441: "mibObjectValueTimeTicks",
    442: "mibObjectValueUnsigned",
    443: "mibObjectValueTable",
    444: "mibObjectValueRow",
    445: "mibObjectIdentifier",
    446: "mibSubIdentifier",
    447: "mibIndexIndicator",
    448: "mibCaptureTimeSemantics",
    449: "mibContextEngineID",
    450: "mibContextName",
    451: "mibObjectName",
    452: "mibObjectDescription",
    453: "mibObjectSyntax",
    454: "mibModuleName",
    455: "mobileIMSI",
    456: "mobileMSISDN",
    457: "httpStatusCode",
    458: "sourceTransportPortsLimit",
    459: "httpRequestMethod",
    460: "httpRequestHost",
    461: "httpRequestTarget",
    462: "httpMessageVersion",
    466: "natQuotaExceededEvent",
    471: "maxSessionEntries",
    472: "maxBIBEntries",
    473: "maxEntriesPerUser",
    475: "maxFragmentsPendingReassembly"
}
class IPFIX(Packet):
    """IPFIX message header (RFC 7011, section 3.1)."""
    name = "IPFIX"
    # version is fixed at 10 for IPFIX; length/exportTime default to
    # None -- presumably filled in by the sender before export (TODO
    # confirm; scapy does not auto-compute them here).
    fields_desc = [ShortField("version", 10),
                   ShortField("length", None),
                   IntField("exportTime", None),
                   IntField("sequenceNumber", 1),
                   IntField("observationDomainID", 1)]
class FieldSpecifier(Packet):
    """One template field specifier: element ID + field length
    (RFC 7011, section 3.2)."""
    name = "Field Specifier"
    fields_desc = [ShortEnumField(
        "informationElement", None, information_elements),
        ShortField("fieldLength", None)]

    def extract_padding(self, s):
        # Consume nothing beyond this specifier: remaining bytes stay
        # with the parent so the next specifier can be dissected.
        return "", s
class Template(Packet):
    """IPFIX template record (RFC 7011, section 3.4.1)."""
    name = "Template"
    fields_desc = [ShortField("templateID", 256),
                   # count_of must name the PacketListField below
                   # ("templateFields"). It previously said "fields",
                   # which is not a field of this packet, so building a
                   # Template could not derive fieldCount automatically.
                   FieldLenField("fieldCount", None,
                                 count_of="templateFields"),
                   PacketListField("templateFields", [], FieldSpecifier,
                                   count_from=lambda p: p.fieldCount)]
class Data(Packet):
    """Raw record bytes of a data set; the length is derived from the
    enclosing Set header (set length minus the 4-byte set header)."""
    name = "Data"
    fields_desc = [
        StrLenField("data", "", length_from=lambda p: p.underlayer.length - 4)]

    def extract_padding(self, s):
        # Leave trailing bytes to the parent rather than absorbing
        # them into this packet's payload.
        return "", s
class Set(Packet):
    """IPFIX set header (RFC 7011, section 3.3.2)."""
    name = "Set"
    fields_desc = [ShortField("setID", 256),
                   ShortField("length", None)]

    def guess_payload_class(self, payload):
        # Set ID 2 is a template set; IDs above 255 are data sets.
        # NOTE(review): options template sets (set ID 3) fall through
        # to the generic guess -- confirm whether they need handling.
        if self.setID == 2:
            return Template
        elif self.setID > 255:
            return Data
        else:
            return Packet.guess_payload_class(self, payload)
# An IPFIX message carries one or more Sets; IPFIX rides over UDP on
# the IANA-assigned collector port 4739.
bind_layers(IPFIX, Set)
bind_layers(UDP, IPFIX, dport=4739)
class IPFIXDecoder(object):
    """ IPFIX data set decoder """

    def __init__(self):
        # Registered templates: each entry has 'id', 'fields' (list of
        # {'name', 'len'} dicts) and 'rec_len' (total record size).
        self._templates = []

    def add_template(self, template):
        """
        Add IPFIX template

        :param template: IPFIX template
        """
        fields = [{'name': spec.informationElement,
                   'len': spec.fieldLength}
                  for spec in template.templateFields]
        self._templates.append({
            'id': template.templateID,
            'fields': fields,
            'rec_len': sum(f['len'] for f in fields),
        })

    def decode_data_set(self, data_set):
        """
        Decode IPFIX data

        :param data_set: IPFIX data set
        :returns: List of decoded data records.
        """
        records = []
        # Only the first template whose ID matches the set is used.
        template = next((t for t in self._templates
                         if t['id'] == data_set.setID), None)
        if template is not None:
            raw = data_set[Data].data
            pos = 0
            for _ in range(len(raw) // template['rec_len']):
                record = {}
                for field in template['fields']:
                    record[field['name']] = raw[pos:pos + field['len']]
                    pos += field['len']
                records.append(record)
        return records
| |
"""
Default settings for the ``mezzanine.core`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
register_setting(
name="ADMIN_MENU_ORDER",
description=_("Controls the ordering and grouping of the admin menu."),
editable=False,
default=(
(_("Content"), ("pages.Page", "blog.BlogPost",
"generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
(_("Site"), ("sites.Site", "redirects.Redirect", "conf.Setting")),
(_("Users"), ("auth.User", "auth.Group",)),
),
)
register_setting(
name="ADMIN_MENU_COLLAPSED",
label=_("Collapse the Admin menu"),
description=_("Controls whether or not the left-hand admin menu is "
"collapsed by default."),
editable=True,
default=False,
)
register_setting(
name="ADMIN_REMOVAL",
description=_("Unregister these models from the admin."),
editable=False,
default=(),
)
register_setting(
name="ADMIN_THUMB_SIZE",
description=_("Size of thumbnail previews for image fields in the "
"admin interface."),
editable=False,
default="24x24",
)
register_setting(
name="AKISMET_API_KEY",
label=_("Akismet API Key"),
description=_("Key for http://akismet.com spam filtering service. Used "
"for filtering comments and forms."),
editable=True,
default="",
)
register_setting(
name="BITLY_ACCESS_TOKEN",
label=_("bit.ly access token"),
description=_("Access token for http://bit.ly URL shortening service."),
editable=True,
default="",
)
register_setting(
    name="CACHE_SET_DELAY_SECONDS",
    # Typos fixed in the admin-facing help text below:
    # "know as" -> "known as", "store expiry" -> "stored expiry",
    # "re-generated" -> "re-generate".
    description=_("Mezzanine's caching uses a technique known as mint "
        "caching. This is where the requested expiry for a cache entry "
        "is stored with the cache entry in cache, and the real expiry "
        "used has the ``CACHE_SET_DELAY`` added to it. Then on a cache get, "
        "the stored expiry is checked, and if it has passed, the cache entry "
        "is set again, and no entry is returned. This tries to ensure that "
        "cache misses never occur, and if many clients were to get a cache "
        "miss at once, only one would actually need to re-generate the "
        "cache entry."),
    editable=False,
    default=30,
)
# The admin dashboard renders as three columns; each inner tuple lists
# the template tags ("library.tag") shown in that column. The blog's
# quick-post form is only included when mezzanine.blog is installed.
if "mezzanine.blog" in settings.INSTALLED_APPS:
    dashboard_tags = (
        ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
        ("comment_tags.recent_comments",),
        ("mezzanine_tags.recent_actions",),
    )
else:
    dashboard_tags = (
        ("mezzanine_tags.app_list",),
        ("mezzanine_tags.recent_actions",),
        (),  # empty third column keeps the documented three-item shape
    )
register_setting(
    name="DASHBOARD_TAGS",
    description=_("A three item sequence, each containing a sequence of "
        "template tags used to render the admin dashboard."),
    editable=False,
    default=dashboard_tags,
)
register_setting(
name="DEVICE_DEFAULT",
description=_("Device specific template sub-directory to use as the "
"default device."),
editable=False,
default="",
)
register_setting(
name="DEVICE_USER_AGENTS",
description=_("Mapping of device specific template sub-directory names to "
"the sequence of strings that may be found in their user agents."),
editable=False,
default=(
("mobile", ("2.0 MMP", "240x320", "400X240", "AvantGo", "BlackBerry",
"Blazer", "Cellphone", "Danger", "DoCoMo", "Elaine/3.0",
"EudoraWeb", "Googlebot-Mobile", "hiptop", "IEMobile",
"KYOCERA/WX310K", "LG/U990", "MIDP-2.", "MMEF20", "MOT-V",
"NetFront", "Newt", "Nintendo Wii", "Nitro", "Nokia",
"Opera Mini", "Palm", "PlayStation Portable", "portalmmm",
"Proxinet", "ProxiNet", "SHARP-TQ-GX10", "SHG-i900",
"Small", "SonyEricsson", "Symbian OS", "SymbianOS",
"TS21i-10", "UP.Browser", "UP.Link", "webOS", "Windows CE",
"WinWAP", "YahooSeeker/M1A1-R2D2", "iPhone", "iPod", "Android",
"BlackBerry9530", "LG-TU915 Obigo", "LGE VX", "webOS",
"Nokia5800",)),
),
)
register_setting(
name="FORMS_USE_HTML5",
description=_("If ``True``, website forms will use HTML5 features."),
editable=False,
default=False,
)
register_setting(
name="EMAIL_FAIL_SILENTLY",
description=_("If ``True``, failures to send email will happen "
"silently, otherwise an exception is raised. "
"Defaults to ``settings.DEBUG``."),
editable=False,
default=settings.DEBUG,
)
register_setting(
name="EXTRA_MODEL_FIELDS",
description=_("A sequence of fields that will be injected into "
"Mezzanine's (or any library's) models. Each item in the sequence is "
"a four item sequence. The first two items are the dotted path to the "
"model and its field name to be added, and the dotted path to the "
"field class to use for the field. The third and fourth items are a "
"sequence of positional args and a dictionary of keyword args, to use "
"when creating the field instance. When specifying the field class, "
"the path ``django.models.db.`` can be omitted for regular Django "
"model fields."),
editable=False,
default=(),
)
register_setting(
name="GOOGLE_ANALYTICS_ID",
label=_("Google Analytics ID"),
description=_("Google Analytics ID (http://www.google.com/analytics/)"),
editable=True,
default="",
)
register_setting(
name="HOST_THEMES",
description=_("A sequence mapping host names to themes, allowing "
"different templates to be served per HTTP host. "
"Each item in the sequence is a two item sequence, "
"containing a host such as ``othersite.example.com``, and "
"the name of an importable Python package for the theme. "
"If the host is matched for a request, the templates "
"directory inside the theme package will be first searched "
"when loading templates."),
editable=False,
default=(),
)
register_setting(
name="INLINE_EDITING_ENABLED",
description=_("If ``True``, front-end inline editing will be enabled."),
editable=False,
default=True,
)
register_setting(
name="JQUERY_FILENAME",
label=_("Name of the jQuery file."),
description=_("Name of the jQuery file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-1.8.3.min.js",
)
register_setting(
name="JQUERY_UI_FILENAME",
label=_("Name of the jQuery UI file."),
description=_("Name of the jQuery UI file found in "
"mezzanine/core/static/mezzanine/js/"),
editable=False,
default="jquery-ui-1.8.24.min.js",
)
register_setting(
name="MAX_PAGING_LINKS",
label=_("Max paging links"),
description=_("Max number of paging links to display when paginating."),
editable=True,
default=10,
)
register_setting(
name="MEDIA_LIBRARY_PER_SITE",
label=_("Media library per site"),
description=_("If ``True``, each site will use its own directory within "
"the filebrowser media library."),
editable=False,
default=False,
)
register_setting(
name="OWNABLE_MODELS_ALL_EDITABLE",
description=_("Models that subclass ``Ownable`` and use the "
"``OwnableAdmin`` have their admin change-list records filtered "
"down to records owned by the current user. This setting contains a "
"sequence of models in the format ``app_label.object_name``, that "
"when subclassing ``Ownable``, will still show all records in the "
"admin change-list interface, regardless of the current user."),
editable=False,
default=(),
)
register_setting(
name="RICHTEXT_WIDGET_CLASS",
description=_("Dotted package path and class name of the widget to use "
"for the ``RichTextField``."),
editable=False,
default="mezzanine.core.forms.TinyMceWidget",
)
register_setting(
name="RICHTEXT_ALLOWED_TAGS",
description=_("List of HTML tags that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("a", "abbr", "acronym", "address", "area", "article", "aside",
"b", "bdo", "big", "blockquote", "br", "button", "caption", "center",
"cite", "code", "col", "colgroup", "dd", "del", "dfn", "dir", "div",
"dl", "dt", "em", "fieldset", "figure", "font", "footer", "form",
"h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "i", "img",
"input", "ins", "kbd", "label", "legend", "li", "map", "menu",
"nav", "ol", "optgroup", "option", "p", "pre", "q", "s", "samp",
"section", "select", "small", "span", "strike", "strong",
"sub", "sup", "table", "tbody", "td", "textarea",
"tfoot", "th", "thead", "tr", "tt", "u", "ul", "var", "wbr"),
)
register_setting(
name="RICHTEXT_ALLOWED_ATTRIBUTES",
description=_("List of HTML attributes that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("abbr", "accept", "accept-charset", "accesskey", "action",
"align", "alt", "axis", "border", "cellpadding", "cellspacing",
"char", "charoff", "charset", "checked", "cite", "class", "clear",
"cols", "colspan", "color", "compact", "coords", "datetime", "dir",
"disabled", "enctype", "for", "frame", "headers", "height", "href",
"hreflang", "hspace", "id", "ismap", "label", "lang", "longdesc",
"maxlength", "media", "method", "multiple", "name", "nohref",
"noshade", "nowrap", "prompt", "readonly", "rel", "rev", "rows",
"rowspan", "rules", "scope", "selected", "shape", "size", "span",
"src", "start", "style", "summary", "tabindex", "target", "title",
"type", "usemap", "valign", "value", "vspace", "width", "xml:lang"),
)
register_setting(
name="RICHTEXT_ALLOWED_STYLES",
description=_("List of inline CSS styles that won't be stripped from "
"``RichTextField`` instances."),
editable=False,
default=("border", "float", "list-style-type", "margin", "margin-bottom",
"margin-left", "margin-right", "margin-top", "padding-left",
"text-align", "text-decoration", "vertical-align"),
)
register_setting(
name="RICHTEXT_FILTERS",
description=_("List of dotted paths to functions, called in order, on a "
"``RichTextField`` value before it is rendered to the template."),
editable=False,
default=("mezzanine.utils.html.thumbnails",),
)
# Filtering levels for rich-text content; the active level is selected
# by the ``RICHTEXT_FILTER_LEVEL`` setting registered just below.
RICHTEXT_FILTER_LEVEL_HIGH = 1
RICHTEXT_FILTER_LEVEL_LOW = 2
RICHTEXT_FILTER_LEVEL_NONE = 3
# (value, label) choices for the admin form field.
RICHTEXT_FILTER_LEVELS = (
    (RICHTEXT_FILTER_LEVEL_HIGH, _("High")),
    (RICHTEXT_FILTER_LEVEL_LOW, _("Low (allows video, iframe, Flash, etc)")),
    (RICHTEXT_FILTER_LEVEL_NONE, _("No filtering")),
)
register_setting(
name="RICHTEXT_FILTER_LEVEL",
label=_("Rich Text filter level"),
description=_("*Do not change this setting unless you know what you're "
"doing.*\n\nWhen content is saved in a Rich Text (WYSIWYG) field, "
"unsafe HTML tags and attributes are stripped from the content to "
"protect against staff members intentionally adding code that could "
"be used to cause problems, such as changing their account to "
"a super-user with full access to the system.\n\n"
"This setting allows you to change the level of filtering that "
"occurs. Setting it to low will allow certain extra tags to be "
"permitted, such as those required for embedding video. While these "
"tags are not the main candidates for users adding malicious code, "
"they are still considered dangerous and could potentially be "
"mis-used by a particularly technical user, and so are filtered out "
"when the filtering level is set to high.\n\n"
"Setting the filtering level to no filtering, will disable all "
"filtering, and allow any code to be entered by staff members, "
"including script tags."),
editable=True,
choices=RICHTEXT_FILTER_LEVELS,
default=RICHTEXT_FILTER_LEVEL_HIGH,
)
register_setting(
name="SEARCH_MODEL_CHOICES",
description=_("Sequence of models that will be provided by default as "
"choices in the search form. Each model should be in the format "
"``app_label.model_name``. Only models that subclass "
"``mezzanine.core.models.Displayable`` should be used."),
editable=False,
default=("pages.Page", "blog.BlogPost"),
)
register_setting(
name="SEARCH_PER_PAGE",
label=_("Search results per page"),
description=_("Number of results shown in the search results page."),
editable=True,
default=10,
)
register_setting(
name="SITE_PREFIX",
description=_("A URL prefix for mounting all of Mezzanine's urlpatterns "
"under. When using this, you'll also need to manually apply it to "
"your project's root ``urls.py`` module. The root ``urls.py`` module "
"provided by Mezzanine's ``mezzanine-project`` command contains an "
"example of this towards its end."),
editable=False,
default="",
)
register_setting(
name="SITE_TITLE",
label=_("Site Title"),
description=_("Title that will display at the top of the site, and be "
"appended to the content of the HTML title tags on every page."),
editable=True,
default="Mezzanine",
translatable=True,
)
register_setting(
name="SITE_TAGLINE",
label=_("Tagline"),
description=_("A tag line that will appear at the top of all pages."),
editable=True,
default=_("An open source content management platform."),
translatable=True,
)
register_setting(
name="SLUGIFY",
description=_("Dotted Python path to the callable for converting "
"strings into URL slugs. Defaults to "
"``mezzanine.utils.urls.slugify_unicode`` which allows for non-ascii "
"URLs. Change to ``django.template.defaultfilters.slugify`` to use "
"Django's slugify function, or something of your own if required."),
editable=False,
default="mezzanine.utils.urls.slugify_unicode",
)
register_setting(
name="SPAM_FILTERS",
description=_("Sequence of dotted Python paths to callable functions "
"used for checking posted content (such as forms or comments) is "
"spam. Each function should accept three arguments: the request "
"object, the form object, and the URL that was posted from. "
"Defaults to ``mezzanine.utils.views.is_spam_akismet`` which will "
"use the http://akismet.com spam filtering service when the "
"``AKISMET_API_KEY`` setting is configured."),
editable=False,
default=("mezzanine.utils.views.is_spam_akismet",),
)
register_setting(
name="SSL_ENABLED",
label=_("Enable SSL"),
description=_("If ``True``, users will be automatically redirected to "
"HTTPS for the URLs specified by the ``SSL_FORCE_URL_PREFIXES`` "
"setting."),
editable=True,
default=False,
)
register_setting(
name="SSL_FORCE_HOST",
label=_("Force Host"),
description=_("Host name that the site should always be accessed via that "
"matches the SSL certificate."),
editable=True,
default="",
)
register_setting(
name="SSL_FORCE_URL_PREFIXES",
description="Sequence of URL prefixes that will be forced to run over "
"SSL when ``SSL_ENABLED`` is ``True``. i.e. "
"('/admin', '/example') would force all URLs beginning with "
"/admin or /example to run over SSL.",
editable=False,
default=("/admin", "/account"),
)
register_setting(
name="SSL_FORCED_PREFIXES_ONLY",
description=_("If ``True``, only URLs specified by the "
"``SSL_FORCE_URL_PREFIXES`` setting will be accessible over SSL, "
"and all other URLs will be redirected back to HTTP if accessed "
"over HTTPS."),
editable=False,
default=True,
)
register_setting(
name="STOP_WORDS",
description=_("List of words which will be stripped from search queries."),
editable=False,
default=(
"a", "about", "above", "above", "across", "after",
"afterwards", "again", "against", "all", "almost", "alone",
"along", "already", "also", "although", "always", "am",
"among", "amongst", "amoungst", "amount", "an", "and",
"another", "any", "anyhow", "anyone", "anything", "anyway",
"anywhere", "are", "around", "as", "at", "back", "be",
"became", "because", "become", "becomes", "becoming", "been",
"before", "beforehand", "behind", "being", "below", "beside",
"besides", "between", "beyond", "bill", "both", "bottom",
"but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do",
"done", "down", "due", "during", "each", "eg", "eight",
"either", "eleven", "else", "elsewhere", "empty", "enough",
"etc", "even", "ever", "every", "everyone", "everything",
"everywhere", "except", "few", "fifteen", "fifty", "fill",
"find", "fire", "first", "five", "for", "former", "formerly",
"forty", "found", "four", "from", "front", "full", "further",
"get", "give", "go", "had", "has", "hasnt", "have", "he",
"hence", "her", "here", "hereafter", "hereby", "herein",
"hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "ie", "if", "in", "inc",
"indeed", "interest", "into", "is", "it", "its", "itself",
"keep", "last", "latter", "latterly", "least", "less", "ltd",
"made", "many", "may", "me", "meanwhile", "might", "mill",
"mine", "more", "moreover", "most", "mostly", "move", "much",
"must", "my", "myself", "name", "namely", "neither", "never",
"nevertheless", "next", "nine", "no", "nobody", "none",
"noone", "nor", "not", "nothing", "now", "nowhere", "of",
"off", "often", "on", "once", "one", "only", "onto", "or",
"other", "others", "otherwise", "our", "ours", "ourselves",
"out", "over", "own", "part", "per", "perhaps", "please",
"put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should",
"show", "side", "since", "sincere", "six", "sixty", "so",
"some", "somehow", "someone", "something", "sometime",
"sometimes", "somewhere", "still", "such", "system", "take",
"ten", "than", "that", "the", "their", "them", "themselves",
"then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they",
"thickv", "thin", "third", "this", "those", "though",
"three", "through", "throughout", "thru", "thus", "to",
"together", "too", "top", "toward", "towards", "twelve",
"twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever",
"when", "whence", "whenever", "where", "whereafter", "whereas",
"whereby", "wherein", "whereupon", "wherever", "whether",
"which", "while", "whither", "who", "whoever", "whole", "whom",
"whose", "why", "will", "with", "within", "without", "would",
"yet", "you", "your", "yours", "yourself", "yourselves", "the",
),
)
register_setting(
name="TAG_CLOUD_SIZES",
label=_("Tag Cloud Sizes"),
description=_("Number of different sizes for tags when shown as a cloud."),
editable=True,
default=4,
)
register_setting(
name="TEMPLATE_ACCESSIBLE_SETTINGS",
description=_("Sequence of setting names available within templates."),
editable=False,
default=(
"ACCOUNTS_APPROVAL_REQUIRED", "ACCOUNTS_VERIFICATION_REQUIRED",
"ADMIN_MENU_COLLAPSED",
"BITLY_ACCESS_TOKEN", "BLOG_USE_FEATURED_IMAGE",
"COMMENTS_DISQUS_SHORTNAME", "COMMENTS_NUM_LATEST",
"COMMENTS_DISQUS_API_PUBLIC_KEY", "COMMENTS_DISQUS_API_SECRET_KEY",
"COMMENTS_USE_RATINGS", "DEV_SERVER", "FORMS_USE_HTML5",
"GRAPPELLI_INSTALLED", "GOOGLE_ANALYTICS_ID", "JQUERY_FILENAME",
"JQUERY_UI_FILENAME", "LOGIN_URL", "LOGOUT_URL", "SITE_TITLE",
"SITE_TAGLINE", "USE_L10N", "USE_MODELTRANSLATION",
),
)
register_setting(
name="THUMBNAILS_DIR_NAME",
description=_("Directory name to store thumbnails in, that will be "
"created relative to the original image's directory."),
editable=False,
default=".thumbnails",
)
register_setting(
name="TINYMCE_SETUP_JS",
description=_("URL for the JavaScript file (relative to ``STATIC_URL``) "
"that handles configuring TinyMCE when the default "
"``RICHTEXT_WIDGET_CLASS`` is used."),
editable=False,
default="mezzanine/js/tinymce_setup.js",
)
register_setting(
name="UPLOAD_TO_HANDLERS",
description=_("Dict mapping file field names in the format "
"``app_label.model_name.field_name`` to the Python dotted path "
"to function names that will be used for the file field's "
"``upload_to`` argument."),
editable=False,
default={},
)
# The following settings are defined here for documentation purposes
# as this file is used to auto-generate the documentation for all
# available settings. They are Mezzanine specific, but their values
# are *always* overridden by the project's settings or local_settings
# modules, so the default values defined here will never be used.
register_setting(
name="USE_MODELTRANSLATION",
description=_("If ``True``, the django-modeltranslation application will "
"be automatically added to the ``INSTALLED_APPS`` setting."),
editable=False,
default=False,
)
register_setting(
name="NEVERCACHE_KEY",
description=_("Unique random string like ``SECRET_KEY``, but used for "
"two-phased cache responses. Like ``SECRET_KEY``, should be "
"automatically generated by the ``mezzanine-project`` command."),
editable=False,
default="",
)
| |
# Copyright (c) 2019, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
import copy
import ctypes
import errno
import glob
import os
import warnings
import weakref
import numpy as np
from pynq.buffer import PynqBuffer
from .device import Device
from pynq._3rdparty import xrt
from pynq._3rdparty import ert
__author__ = "Peter Ogden"
__copyright__ = "Copyright 2019, Xilinx"
__email__ = "pynq_support@xilinx.com"
# Flag passed to xclAllocBO to request an execution (command) buffer.
DRM_XOCL_BO_EXECBUF = 1 << 31
# Oldest XRT version whose ERT command scheduler this module supports.
REQUIRED_VERSION_ERT = (2, 3, 0)
# munmap is called directly through libc to release mapped buffer
# objects; declare its prototype so ctypes marshals arguments correctly.
libc = ctypes.CDLL('libc.so.6')
libc.munmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
libc.munmap.restype = ctypes.c_int
# Create our own struct that fixes the typos of the real one
class xclDeviceUsage (ctypes.Structure):
    """ctypes mirror of XRT's device-usage struct, with field-name typos
    of the upstream binding corrected (see comment above). Sizes are
    fixed at 8 memory banks and 4 xclbin UUID words.
    """
    _fields_ = [
        ("h2c", ctypes.c_size_t*8),            # host-to-card transfer counts
        ("c2h", ctypes.c_size_t*8),            # card-to-host transfer counts
        ("ddrMemUsed", ctypes.c_size_t*8),     # bytes allocated per bank
        ("ddrBOAllocated", ctypes.c_uint*8),   # BOs allocated per bank
        ("totalContents", ctypes.c_uint),
        ("xclbinId", ctypes.c_ulonglong*4),
        ("dma_channel_cnt", ctypes.c_uint),
        ("mm_channel_cnt", ctypes.c_uint),
        ("memSize", ctypes.c_ulonglong*8)
    ]
# Friendlier hints for errno values commonly returned when loading an
# xclbin; appended to the OS error string by _format_xrt_error.
_xrt_errors = {
    -95: "Shell does not match",
    -16: "Bitstream in use by another program",
    -1: "Possibly buffers still allocated"
}
def _get_xrt_version():
import subprocess
import json
try:
output = subprocess.run(['xbutil', 'dump'], stdout=subprocess.PIPE,
universal_newlines=True)
details = json.loads(output.stdout)
return tuple(
int(s) for s in details['runtime']['build']['version'].split('.'))
except Exception:
return (0, 0, 0)
# Determine the runtime version once at import time; without XRT support
# fall back to the (0, 0, 0) sentinel so version checks simply fail.
if xrt.XRT_SUPPORTED:
    _xrt_version = _get_xrt_version()
else:
    _xrt_version = (0, 0, 0)
def _format_xrt_error(err):
    """Return a human-readable message for a negative XRT error code.

    Combines the symbolic errno name, the numeric code and the OS error
    string, plus a PYNQ-specific hint from ``_xrt_errors`` when known.
    """
    # errno.errorcode does not cover every value; fall back to a
    # placeholder rather than raising KeyError while reporting an error.
    name = errno.errorcode.get(-err, "UNKNOWN")
    errstring = "{} ({}) {}".format(name, -err, os.strerror(-err))
    if err in _xrt_errors:
        errstring += "/" + _xrt_errors[err]
    return errstring
def _xrt_allocate(shape, dtype, device, memidx):
    """Allocate a ``PynqBuffer`` of ``shape``/``dtype`` in bank ``memidx``.

    ``shape`` may be a sequence of dimensions or a single integer.
    The underlying BO is unmapped and freed when the mapped buffer is
    garbage collected.
    """
    try:
        elements = 1
        for dim in shape:
            elements *= dim
    except TypeError:
        # A bare integer was passed instead of a sequence.
        elements = shape
    dtype = np.dtype(dtype)
    nbytes = elements * dtype.itemsize
    bo = device.allocate_bo(nbytes, memidx)
    mapped = device.map_bo(bo)
    ar = PynqBuffer(shape, dtype, bo=bo, device=device, buffer=mapped,
                    device_address=device.get_device_address(bo),
                    coherent=False)
    weakref.finalize(mapped, _free_bo, device, bo,
                     ar.virtual_address, ar.nbytes)
    return ar
def _free_bo(device, bo, ptr, length):
    """Unmap a BO's user-space mapping, then free the BO handle."""
    libc.munmap(ctypes.cast(ptr, ctypes.c_void_p), length)
    xrt.xclFreeBO(device.handle, bo)
class XrtMemory:
    """Class representing a memory bank in a card

    Memory banks can be both external DDR banks and internal buffers.
    XrtMemory instances for the same bank are interchangeable and can
    be compared and used as dictionary keys.

    """
    def __init__(self, device, desc):
        self.idx = desc['idx']
        self.size = desc['size']
        self.desc = desc
        self.device = device

    def allocate(self, shape, dtype):
        """Create a new buffer in the memory bank

        Parameters
        ----------
        shape : tuple(int)
            Shape of the array
        dtype : np.dtype
            Data type of the array

        """
        new_buf = _xrt_allocate(shape, dtype, self.device, self.idx)
        new_buf.memory = self
        return new_buf

    def __hash__(self):
        # Identity is (device, bank index) -- matches __eq__ below.
        return hash((self.device, self.idx))

    def __eq__(self, other):
        if type(other) is not XrtMemory:
            return False
        return self.device == other.device and self.idx == other.idx

    @property
    def mem_used(self):
        """Bytes currently allocated in this bank, queried from XRT."""
        return self.device.get_usage().ddrMemUsed[self.idx]

    @property
    def num_buffers(self):
        """Number of BOs currently allocated in this bank."""
        return self.device.get_usage().ddrBOAllocated[self.idx]
class XrtUUID:
    """Thin wrapper holding the raw bytes of an xclbin UUID."""

    def __init__(self, val):
        self.bytes = val
class ExecBo:
    """Execution Buffer Object

    Wraps an execution buffer used by XRT to schedule the execution of
    accelerators. Usually used in conjunction with the ERT packet format
    exposed in the XRT ``ert_binding`` python module.

    """
    def __init__(self, bo, ptr, device, length):
        self.bo = bo
        self.ptr = ptr
        self.device = device
        self.length = length

    def __del__(self):
        # Hand the mapping and BO handle back when garbage collected.
        _free_bo(self.device, self.bo, self.ptr, self.length)

    def as_packet(self, ptype):
        """Get a packet representation of the buffer object

        Parameters
        ----------
        ptype : ctypes struct
            The type to cast the buffer to

        """
        view = ctypes.cast(self.ptr, ctypes.POINTER(ptype))
        return view[0]
class ErtWaitHandle:
    """WaitHandle specific to ERT-scheduled accelerators

    """
    def __init__(self, bo, future, device):
        self._future = future
        self._bo = bo
        self.device = device

    def _complete(self, state):
        # Resolve the future: any ERT state other than COMPLETED is an
        # error; either way the exec BO reference is dropped.
        if state == ert.ert_cmd_state.ERT_CMD_STATE_COMPLETED:
            self._future.set_result(None)
        else:
            self._future.set_exception(RuntimeError("Execution failed: " +
                                                    str(state)))
        self._bo = None

    @property
    def _has_bo(self):
        return self._bo is not None

    @property
    def done(self):
        """True if the accelerator has finished

        """
        return self._future.done()

    async def wait_async(self):
        """Coroutine to wait for the execution to be completed

        This function requires that ``XrtDevice.set_event_loop`` is called
        before the accelerator execution is started

        """
        await self._future

    def wait(self):
        """Wait for the Execution to be completed

        """
        while not self.done:
            self.device._handle_events(1000)
class XrtStream:
    """XRT Streaming Connection

    Encapsulates the IP connected to a stream. Note that the ``_ip``
    attributes will only be populated if the corresponding device
    driver has been instantiated.

    Attributes
    ----------
    source : str or None
        Source of the streaming connection as ip_name.port, or None
        if no producer for this stream was found in the design.
    sink : str or None
        Sink of the streaming connection as ip_name.port, or None
        if no consumer for this stream was found in the design.
    monitors : [str]
        Monitor connections of the stream as a list of ip_name.port
    source_ip : pynq.overlay.DefaultIP
        Source IP driver instance for the stream
    sink_ip : pynq.overlay.DefaultIP
        Sink IP driver instance for the stream
    monitor_ips : [pynq.overlay.DefaultIP]
        list of driver instances for IP monitoring the stream

    """
    def __init__(self, device, desc):
        """Scan ``device.ip_dict`` for endpoints of stream ``desc['idx']``."""
        # Initialise all endpoints up front so a stream with a missing
        # producer or consumer still has well-defined attributes and a
        # working __repr__ (previously an AttributeError).
        self.source = None
        self.sink = None
        self.source_ip = None
        self.sink_ip = None
        self.monitors = []
        self.monitor_ips = []
        idx = desc['idx']
        for ip_name, ip in device.ip_dict.items():
            for stream_name, stream in ip['streams'].items():
                if stream['stream_id'] != idx:
                    continue
                endpoint = ip_name + "." + stream_name
                if stream['direction'] == 'output':
                    self.source = endpoint
                elif stream['direction'] == 'input':
                    self.sink = endpoint

    def __repr__(self):
        return 'XrtStream(source={}, sink={})'.format(self.source,
                                                      self.sink)
class XrtDevice(Device):
    @classmethod
    def _probe_(cls):
        """Enumerate available XRT devices; empty list when XRT is absent."""
        if not xrt.XRT_SUPPORTED:
            return []
        num = xrt.xclProbe()
        devices = [XrtDevice(i) for i in range(num)]
        return devices
    # Probe ordering priority relative to other Device subclasses.
    _probe_priority_ = 200
    def __init__(self, index):
        """Open XRT device ``index`` and initialise bookkeeping state."""
        super().__init__('xrt{}'.format(index))
        self.capabilities = {
            'REGISTER_RW': True,
            'CALLABLE': True,
        }
        # ERT command scheduling requires a sufficiently recent XRT.
        if _xrt_version >= REQUIRED_VERSION_ERT:
            self.capabilities['ERT'] = True
        self.handle = xrt.xclOpen(index, None, 0)
        self._info = xrt.xclDeviceInfo2()
        xrt.xclGetDeviceInfo2(self.handle, self._info)
        self.contexts = []  # (uuid, cu index) pairs of open CU contexts
        self._find_sysfs()
        self.active_bos = []
        self._bo_cache = []  # recycled execution BOs (see get_exec_bo)
        self._loop = asyncio.get_event_loop()
        self._streams = {}  # stream idx -> XrtStream cache
def _find_sysfs(self):
devices = glob.glob('/sys/bus/pci/drivers/xclmgmt/*:*')
self.sysfs_path = None
for d in devices:
with open(os.path.join(d, 'slot')) as f:
slot = int(f.read())
if slot == self._info.mPciSlot:
self.sysfs_path = os.path.realpath(d)
    @property
    def device_info(self):
        """A freshly queried ``xclDeviceInfo2`` struct for this device."""
        info = xrt.xclDeviceInfo2()
        xrt.xclGetDeviceInfo2(self.handle, info)
        return info
    @property
    def name(self):
        """Device name reported by the XRT driver."""
        return self._info.mName.decode()
@property
def clocks(self):
"""Runtime clocks. This dictionary provides the actual
clock frequencies that the hardware is running at.
Frequencies are expressed in Mega Hertz.
"""
clks = {}
idx = 0
for clk in self._info.mOCLFrequency:
if clk != 0:
clks['clock'+str(idx)] = {'frequency': clk}
idx +=1
return clks
    @property
    def sensors(self):
        """Power-rail sensors exposed through ``pynq.pmbus`` sysfs helpers."""
        from pynq.pmbus import get_xrt_sysfs_rails
        return get_xrt_sysfs_rails(self)
@property
def default_memory(self):
mem_dict = self.mem_dict
active_mems = [m for m in mem_dict.values()
if m['used'] and not m['streaming']]
if len(active_mems) == 0:
raise RuntimeError("No active memories in design")
elif len(active_mems) > 1:
raise RuntimeError("Multiple memories active in design: specify" +
" the memory using the `target` parameters")
return self.get_memory(active_mems[0])
    def flush(self, bo, offset, ptr, size):
        """Sync ``size`` bytes at ``offset`` of ``bo`` from host to device."""
        ret = xrt.xclSyncBO(
            self.handle, bo, xrt.xclBOSyncDirection.XCL_BO_SYNC_BO_TO_DEVICE,
            size, offset)
        # Values in the high unsigned range are treated as error codes.
        if ret >= 0x80000000:
            raise RuntimeError("Flush Failed: " + str(ret))
    def invalidate(self, bo, offset, ptr, size):
        """Sync ``size`` bytes at ``offset`` of ``bo`` from device to host."""
        ret = xrt.xclSyncBO(
            self.handle, bo, xrt.xclBOSyncDirection.XCL_BO_SYNC_BO_FROM_DEVICE,
            size, offset)
        # Values in the high unsigned range are treated as error codes.
        if ret >= 0x80000000:
            raise RuntimeError("Invalidate Failed: " + str(ret))
    def allocate_bo(self, size, idx):
        """Allocate a device-RAM BO of ``size`` bytes in memory bank ``idx``.

        Returns the XRT BO handle; raises RuntimeError on failure.
        """
        bo = xrt.xclAllocBO(self.handle, size,
                            xrt.xclBOKind.XCL_BO_DEVICE_RAM, idx)
        # Handles in the high unsigned range signal allocation failure.
        if bo >= 0x80000000:
            raise RuntimeError("Allocate failed: " + str(bo))
        return bo
def buffer_write(self, bo, bo_offset, buf, buf_offset=0, count=-1):
view = memoryview(buf).cast('B')
if count == -1:
view = view[buf_offset:]
else:
view = view[buf_offset:buf_offset+count]
ptr = (ctypes.c_char * len(view)).from_buffer(view)
status = xrt.xclWriteBO(self.handle, bo, ptr, len(view), bo_offset)
if status != 0:
raise RuntimeError("Buffer Write Failed: " + str(status))
def buffer_read(self, bo, bo_offset, buf, buf_offset=0, count=-1):
view = memoryview(buf).cast('B')
if view.readonly:
raise RuntimeError("Buffer not writable")
if count == -1:
view = view[buf_offset:]
else:
view = view[buf_offset:buf_offset+count]
ptr = (ctypes.c_char * len(view)).from_buffer(view)
status = xrt.xclReadBO(self.handle, bo, ptr, len(view), bo_offset)
if status != 0:
raise RuntimeError("Buffer Write Failed: " + str(status))
    def map_bo(self, bo):
        """Map ``bo`` into user space and return it as a ctypes char array."""
        ptr = xrt.xclMapBO(self.handle, bo, True)
        prop = xrt.xclBOProperties()
        if xrt.xclGetBOProperties(self.handle, bo, prop):
            raise RuntimeError('Failed to get buffer properties')
        size = prop.size
        # Reinterpret the raw pointer as char[size] so the mapping
        # supports buffer-protocol access from Python.
        casted = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_char * size))
        return casted[0]
    def get_device_address(self, bo):
        """Return the physical device address backing BO ``bo``."""
        prop = xrt.xclBOProperties()
        xrt.xclGetBOProperties(self.handle, bo, prop)
        return prop.paddr
    def get_usage(self):
        """Query per-bank usage statistics as an ``xclDeviceUsage`` struct."""
        usage = xclDeviceUsage()
        # NOTE(review): the pointer is cast to POINTER(xclDeviceInfo2)
        # to satisfy xclGetUsageInfo's declared signature while the
        # memory is actually laid out as our local xclDeviceUsage --
        # presumably papering over a binding mismatch; confirm against
        # the xrt binding.
        status = xrt.xclGetUsageInfo(self.handle, ctypes.cast(
            ctypes.pointer(usage),
            ctypes.POINTER(xrt.xclDeviceInfo2)))
        if status != 0:
            raise RuntimeError("Get Usage Failed: " + str(status))
        return usage
    def close(self):
        """Release the XRT device handle; safe to call more than once."""
        if self.handle:
            xrt.xclClose(self.handle)
        self.handle = None
        super().close()
def get_memory(self, desc):
if desc['streaming']:
if desc['idx'] not in self._streams:
self._streams[desc['idx']] = XrtStream(self, desc)
return self._streams[desc['idx']]
else:
return XrtMemory(self, desc)
def get_memory_by_idx(self, idx):
for m in self.mem_dict.values():
if m['idx'] == idx:
return self.get_memory(m)
raise RuntimeError("Could not find memory")
    def read_registers(self, address, length):
        """Read ``length`` bytes from the kernel control address space."""
        data = (ctypes.c_char * length)()
        # NOTE(review): the xclRead return value is not checked, so a
        # failed read silently yields the zero-initialised buffer.
        ret = xrt.xclRead(self.handle,
                          xrt.xclAddressSpace.XCL_ADDR_KERNEL_CTRL,
                          address, data, length)
        return bytes(data)
    def write_registers(self, address, data):
        """Write ``data`` bytes into the kernel control address space."""
        cdata = (ctypes.c_char * len(data)).from_buffer_copy(data)
        xrt.xclWrite(self.handle, xrt.xclAddressSpace.XCL_ADDR_KERNEL_CTRL,
                     address, cdata, len(data))
def free_bitstream(self):
for c in self.contexts:
xrt.xclCloseContext(self.handle, c[0], c[1])
self.contexts = []
    def download(self, bitstream, parser=None):
        """Program the device with *bitstream* and open CU contexts.

        The device is locked for the duration of the load; on failure the
        previously-held contexts are re-acquired so the device is left in
        its prior state. When *parser* is supplied, a context is opened
        for every IP that carries an 'index' entry.
        """
        # Keep copy of old contexts so we can reacquire them if
        # downloading fails
        old_contexts = copy.deepcopy(self.contexts)
        # Close existing contexts
        for c in self.contexts:
            xrt.xclCloseContext(self.handle, c[0], c[1])
        self.contexts = []
        # Download xclbin file
        err = xrt.xclLockDevice(self.handle)
        if err:
            raise RuntimeError(
                "Could not lock device for programming - " + str(err))
        try:
            with open(bitstream.bitfile_name, 'rb') as f:
                data = f.read()
                err = xrt.xclLoadXclBin(self.handle, data)
                if err:
                    # Roll back: reacquire the contexts closed above so the
                    # device is restored to its pre-download state.
                    for c in old_contexts:
                        xrt.xclOpenContext(self.handle, c[0], c[1], True)
                    self.contexts = old_contexts
                    raise RuntimeError("Programming Device failed: " +
                                       _format_xrt_error(err))
        finally:
            # The lock is released on both success and failure paths.
            xrt.xclUnlockDevice(self.handle)
        super().post_download(bitstream, parser)
        # Setup the execution context for the new xclbin
        if parser is not None:
            ip_dict = parser.ip_dict
            cu_used = 0  # NOTE(review): appears unused — candidate for removal
            uuid = None
            for k, v in ip_dict.items():
                if 'index' in v:
                    index = v['adjusted_index']
                    uuid = bytes.fromhex(v['xclbin_uuid'])
                    uuid_ctypes = \
                        XrtUUID((ctypes.c_char * 16).from_buffer_copy(uuid))
                    err = xrt.xclOpenContext(self.handle, uuid_ctypes, index,
                                             True)
                    if err:
                        raise RuntimeError('Could not open CU context - {}, '
                                           '{}'.format(err, index))
                    self.contexts.append((uuid_ctypes, index))
    def get_bitfile_metadata(self, bitfile_name):
        """Parse *bitfile_name* as an xclbin and return its metadata object."""
        # Imported lazily to avoid a hard dependency at module import time.
        from .xclbin_parser import XclBin
        return XclBin(bitfile_name)
    def get_exec_bo(self, size=1024):
        """Return an execution buffer, reusing a cached one when possible.

        Raises RuntimeError when the installed XRT is older than the
        minimum version required for ERT support. Note the cache is only
        consulted, so a cached buffer may have been created with a
        different *size*.
        """
        if len(self._bo_cache):
            return self._bo_cache.pop()
        if _xrt_version < REQUIRED_VERSION_ERT:
            raise RuntimeError("XRT Version too old for PYNQ ERT support")
        new_bo = xrt.xclAllocBO(self.handle, size, 0, DRM_XOCL_BO_EXECBUF)
        new_ptr = xrt.xclMapBO(self.handle, new_bo, 1)
        return ExecBo(new_bo, new_ptr, self, size)
    def return_exec_bo(self, bo):
        """Return execution buffer *bo* to the cache for later reuse."""
        self._bo_cache.append(bo)
def execute_bo(self, bo):
status = xrt.xclExecBuf(self.handle, bo.bo)
if status:
raise RuntimeError('Buffer submit failed: ' + str(status))
wh = ErtWaitHandle(bo, self._loop.create_future(), self)
self.active_bos.append((bo, wh))
return wh
def execute_bo_with_waitlist(self, bo, waitlist):
wait_array = (ctypes.c_uint * len(waitlist))()
for i in range(len(waitlist)):
wait_array[i] = waitlist[i].bo
status = xrt.xclExecBufWithWaitList(
self.handle, bo.bo, len(waitlist), wait_array)
if status:
raise RuntimeError('Buffer submit failed: ' + str(status))
wh = ErtWaitHandle(bo, self._loop.create_future(), self)
self.active_bos.append((bo, wh))
return wh
def set_event_loop(self, loop):
self._loop = loop
for fd in glob.glob('/proc/self/fd/*'):
try:
link_target = os.readlink(fd)
except:
continue
if link_target.startswith('/dev/dri/renderD'):
base_fd = int(os.path.basename(fd))
loop.add_reader(open(base_fd, closefd=False),
self._handle_events)
    def _handle_events(self, timeout=0):
        """Poll XRT for finished commands and resolve their wait handles.

        Completed buffers are returned to the exec-BO cache; still-running
        ones are kept in ``active_bos`` for the next poll.
        """
        xrt.xclExecWait(self.handle, timeout)
        next_bos = []
        for bo, completion in self.active_bos:
            # Low nibble of the command packet header holds the ERT state.
            state = bo.as_packet(ert.ert_cmd_struct).state & 0xF
            if state >= ert.ert_cmd_state.ERT_CMD_STATE_COMPLETED:
                if completion:
                    completion._complete(state)
                self.return_exec_bo(bo)
            else:
                next_bos.append((bo, completion))
        self.active_bos = next_bos
| |
"""
General serializer field tests.
"""
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from uuid import uuid4
from django.core import validators
from django.db import models
from django.test import TestCase
from django.utils.datastructures import SortedDict
from rest_framework import serializers
from rest_framework.tests.models import RESTFrameworkModel
class TimestampedModel(models.Model):
    """Model with auto-managed timestamps, used to test read-only fields."""
    # Both fields are auto-populated by Django, so the serializer should
    # expose them as read_only.
    added = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
class CharPrimaryKeyModel(models.Model):
    """Model with a non-AutoField primary key (should NOT be read-only)."""
    id = models.CharField(max_length=20, primary_key=True)
class TimestampedModelSerializer(serializers.ModelSerializer):
    """ModelSerializer for TimestampedModel with all fields auto-generated."""
    class Meta:
        model = TimestampedModel
class CharPrimaryKeyModelSerializer(serializers.ModelSerializer):
    """ModelSerializer for CharPrimaryKeyModel with auto-generated fields."""
    class Meta:
        model = CharPrimaryKeyModel
class TimeFieldModel(models.Model):
    """Model with a single TimeField, used by the TimeField tests."""
    clock = models.TimeField()
class TimeFieldModelSerializer(serializers.ModelSerializer):
    """ModelSerializer for TimeFieldModel with auto-generated fields."""
    class Meta:
        model = TimeFieldModel
# Shared (value, display name) pairs used by the ChoiceField tests below.
SAMPLE_CHOICES = [
    ('red', 'Red'),
    ('green', 'Green'),
    ('blue', 'Blue'),
]
class ChoiceFieldModel(models.Model):
    """Model with a non-nullable choice field (empty stored as '')."""
    choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, max_length=255)
class ChoiceFieldModelSerializer(serializers.ModelSerializer):
    """ModelSerializer for ChoiceFieldModel with auto-generated fields."""
    class Meta:
        model = ChoiceFieldModel
class ChoiceFieldModelWithNull(models.Model):
    """Model with a nullable choice field (empty stored as NULL)."""
    choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, null=True, max_length=255)
class ChoiceFieldModelWithNullSerializer(serializers.ModelSerializer):
    """ModelSerializer for ChoiceFieldModelWithNull with auto fields."""
    class Meta:
        model = ChoiceFieldModelWithNull
class BasicFieldTests(TestCase):
    """Basic attribute behaviour of auto-generated serializer fields."""
    def test_auto_now_fields_read_only(self):
        """
        auto_now and auto_now_add fields should be read_only by default.
        """
        serializer = TimestampedModelSerializer()
        self.assertEqual(serializer.fields['added'].read_only, True)
    def test_auto_pk_fields_read_only(self):
        """
        AutoField fields should be read_only by default.
        """
        serializer = TimestampedModelSerializer()
        self.assertEqual(serializer.fields['id'].read_only, True)
    def test_non_auto_pk_fields_not_read_only(self):
        """
        PK fields other than AutoField fields should not be read_only by default.
        """
        serializer = CharPrimaryKeyModelSerializer()
        self.assertEqual(serializer.fields['id'].read_only, False)
    def test_dict_field_ordering(self):
        """
        Field should preserve dictionary ordering, if it exists.
        See: https://github.com/tomchristie/django-rest-framework/issues/832
        """
        ret = SortedDict()
        ret['c'] = 1
        ret['b'] = 1
        ret['a'] = 1
        ret['z'] = 1
        field = serializers.Field()
        keys = list(field.to_native(ret).keys())
        # Insertion order must survive serialization untouched.
        self.assertEqual(keys, ['c', 'b', 'a', 'z'])
class DateFieldTest(TestCase):
    """
    Tests for the DateField from_native() and to_native() behavior
    """
    def test_from_native_string(self):
        """
        Make sure from_native() accepts default iso input formats.
        """
        f = serializers.DateField()
        result_1 = f.from_native('1984-07-31')
        self.assertEqual(datetime.date(1984, 7, 31), result_1)
    def test_from_native_datetime_date(self):
        """
        Make sure from_native() accepts a datetime.date instance.
        """
        f = serializers.DateField()
        result_1 = f.from_native(datetime.date(1984, 7, 31))
        self.assertEqual(result_1, datetime.date(1984, 7, 31))
    def test_from_native_custom_format(self):
        """
        Make sure from_native() accepts custom input formats.
        """
        f = serializers.DateField(input_formats=['%Y -- %d'])
        result = f.from_native('1984 -- 31')
        # Components absent from the format default to 1 (month=January).
        self.assertEqual(datetime.date(1984, 1, 31), result)
    def test_from_native_invalid_default_on_custom_format(self):
        """
        Make sure from_native() don't accept default formats if custom format is preset
        """
        f = serializers.DateField(input_formats=['%Y -- %d'])
        try:
            f.from_native('1984-07-31')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY -- DD"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_empty(self):
        """
        Make sure from_native() returns None on empty param.
        """
        f = serializers.DateField()
        result = f.from_native('')
        self.assertEqual(result, None)
    def test_from_native_none(self):
        """
        Make sure from_native() returns None on None param.
        """
        f = serializers.DateField()
        result = f.from_native(None)
        self.assertEqual(result, None)
    def test_from_native_invalid_date(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid date.
        """
        f = serializers.DateField()
        try:
            f.from_native('1984-13-31')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_invalid_format(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid format.
        """
        f = serializers.DateField()
        try:
            f.from_native('1984 -- 31')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_to_native(self):
        """
        Make sure to_native() returns a date object by default.
        """
        f = serializers.DateField()
        result_1 = f.to_native(datetime.date(1984, 7, 31))
        self.assertEqual(datetime.date(1984, 7, 31), result_1)
    def test_to_native_iso(self):
        """
        Make sure to_native() with 'iso-8601' returns an iso formatted date.
        """
        f = serializers.DateField(format='iso-8601')
        result_1 = f.to_native(datetime.date(1984, 7, 31))
        self.assertEqual('1984-07-31', result_1)
    def test_to_native_custom_format(self):
        """
        Make sure to_native() returns correct custom format.
        """
        f = serializers.DateField(format="%Y - %m.%d")
        result_1 = f.to_native(datetime.date(1984, 7, 31))
        self.assertEqual('1984 - 07.31', result_1)
    def test_to_native_none(self):
        """
        Make sure to_native() returns None on None param.
        """
        f = serializers.DateField(required=False)
        self.assertEqual(None, f.to_native(None))
class DateTimeFieldTest(TestCase):
    """
    Tests for the DateTimeField from_native() and to_native() behavior
    """
    def test_from_native_string(self):
        """
        Make sure from_native() accepts default iso input formats.
        """
        f = serializers.DateTimeField()
        result_1 = f.from_native('1984-07-31 04:31')
        result_2 = f.from_native('1984-07-31 04:31:59')
        result_3 = f.from_native('1984-07-31 04:31:59.000200')
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31), result_1)
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59), result_2)
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59, 200), result_3)
    def test_from_native_datetime_datetime(self):
        """
        Make sure from_native() accepts a datetime.datetime instance.
        """
        f = serializers.DateTimeField()
        result_1 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31))
        result_2 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
        result_3 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
        self.assertEqual(result_1, datetime.datetime(1984, 7, 31, 4, 31))
        self.assertEqual(result_2, datetime.datetime(1984, 7, 31, 4, 31, 59))
        self.assertEqual(result_3, datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
    def test_from_native_custom_format(self):
        """
        Make sure from_native() accepts custom input formats.
        """
        f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
        result = f.from_native('1984 -- 04:59')
        # Components absent from the format default to their minimums.
        self.assertEqual(datetime.datetime(1984, 1, 1, 4, 59), result)
    def test_from_native_invalid_default_on_custom_format(self):
        """
        Make sure from_native() don't accept default formats if custom format is preset
        """
        f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
        try:
            f.from_native('1984-07-31 04:31:59')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: YYYY -- hh:mm"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_empty(self):
        """
        Make sure from_native() returns None on empty param.
        """
        f = serializers.DateTimeField()
        result = f.from_native('')
        self.assertEqual(result, None)
    def test_from_native_none(self):
        """
        Make sure from_native() returns None on None param.
        """
        f = serializers.DateTimeField()
        result = f.from_native(None)
        self.assertEqual(result, None)
    def test_from_native_invalid_datetime(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid datetime.
        """
        f = serializers.DateTimeField()
        try:
            f.from_native('04:61:59')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: "
                                          "YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_invalid_format(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid format.
        """
        f = serializers.DateTimeField()
        try:
            f.from_native('04 -- 31')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: "
                                          "YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_to_native(self):
        """
        Make sure to_native() returns a datetime object by default.
        """
        f = serializers.DateTimeField()
        result_1 = f.to_native(datetime.datetime(1984, 7, 31))
        result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
        result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
        result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
        self.assertEqual(datetime.datetime(1984, 7, 31), result_1)
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31), result_2)
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59), result_3)
        self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59, 200), result_4)
    def test_to_native_iso(self):
        """
        Make sure to_native() with format=iso-8601 returns iso formatted datetime.
        """
        f = serializers.DateTimeField(format='iso-8601')
        result_1 = f.to_native(datetime.datetime(1984, 7, 31))
        result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
        result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
        result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
        self.assertEqual('1984-07-31T00:00:00', result_1)
        self.assertEqual('1984-07-31T04:31:00', result_2)
        self.assertEqual('1984-07-31T04:31:59', result_3)
        self.assertEqual('1984-07-31T04:31:59.000200', result_4)
    def test_to_native_custom_format(self):
        """
        Make sure to_native() returns correct custom format.
        """
        f = serializers.DateTimeField(format="%Y - %H:%M")
        result_1 = f.to_native(datetime.datetime(1984, 7, 31))
        result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
        result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
        result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
        self.assertEqual('1984 - 00:00', result_1)
        self.assertEqual('1984 - 04:31', result_2)
        self.assertEqual('1984 - 04:31', result_3)
        self.assertEqual('1984 - 04:31', result_4)
    def test_to_native_none(self):
        """
        Make sure to_native() returns None on None param.
        """
        f = serializers.DateTimeField(required=False)
        self.assertEqual(None, f.to_native(None))
class TimeFieldTest(TestCase):
    """
    Tests for the TimeField from_native() and to_native() behavior
    """
    def test_from_native_string(self):
        """
        Make sure from_native() accepts default iso input formats.
        """
        f = serializers.TimeField()
        result_1 = f.from_native('04:31')
        result_2 = f.from_native('04:31:59')
        result_3 = f.from_native('04:31:59.000200')
        self.assertEqual(datetime.time(4, 31), result_1)
        self.assertEqual(datetime.time(4, 31, 59), result_2)
        self.assertEqual(datetime.time(4, 31, 59, 200), result_3)
    def test_from_native_datetime_time(self):
        """
        Make sure from_native() accepts a datetime.time instance.
        """
        f = serializers.TimeField()
        result_1 = f.from_native(datetime.time(4, 31))
        result_2 = f.from_native(datetime.time(4, 31, 59))
        result_3 = f.from_native(datetime.time(4, 31, 59, 200))
        self.assertEqual(result_1, datetime.time(4, 31))
        self.assertEqual(result_2, datetime.time(4, 31, 59))
        self.assertEqual(result_3, datetime.time(4, 31, 59, 200))
    def test_from_native_custom_format(self):
        """
        Make sure from_native() accepts custom input formats.
        """
        f = serializers.TimeField(input_formats=['%H -- %M'])
        result = f.from_native('04 -- 31')
        self.assertEqual(datetime.time(4, 31), result)
    def test_from_native_invalid_default_on_custom_format(self):
        """
        Make sure from_native() don't accept default formats if custom format is preset
        """
        f = serializers.TimeField(input_formats=['%H -- %M'])
        try:
            f.from_native('04:31:59')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: hh -- mm"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_empty(self):
        """
        Make sure from_native() returns None on empty param.
        """
        f = serializers.TimeField()
        result = f.from_native('')
        self.assertEqual(result, None)
    def test_from_native_none(self):
        """
        Make sure from_native() returns None on None param.
        """
        f = serializers.TimeField()
        result = f.from_native(None)
        self.assertEqual(result, None)
    def test_from_native_invalid_time(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid time.
        """
        f = serializers.TimeField()
        try:
            f.from_native('04:61:59')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: "
                                          "hh:mm[:ss[.uuuuuu]]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_invalid_format(self):
        """
        Make sure from_native() raises a ValidationError on passing an invalid format.
        """
        f = serializers.TimeField()
        try:
            f.from_native('04 -- 31')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: "
                                          "hh:mm[:ss[.uuuuuu]]"])
        else:
            self.fail("ValidationError was not properly raised")
    def test_to_native(self):
        """
        Make sure to_native() returns time object as default.
        """
        f = serializers.TimeField()
        result_1 = f.to_native(datetime.time(4, 31))
        result_2 = f.to_native(datetime.time(4, 31, 59))
        result_3 = f.to_native(datetime.time(4, 31, 59, 200))
        self.assertEqual(datetime.time(4, 31), result_1)
        self.assertEqual(datetime.time(4, 31, 59), result_2)
        self.assertEqual(datetime.time(4, 31, 59, 200), result_3)
    def test_to_native_iso(self):
        """
        Make sure to_native() with format='iso-8601' returns iso formatted time.
        """
        f = serializers.TimeField(format='iso-8601')
        result_1 = f.to_native(datetime.time(4, 31))
        result_2 = f.to_native(datetime.time(4, 31, 59))
        result_3 = f.to_native(datetime.time(4, 31, 59, 200))
        self.assertEqual('04:31:00', result_1)
        self.assertEqual('04:31:59', result_2)
        self.assertEqual('04:31:59.000200', result_3)
    def test_to_native_custom_format(self):
        """
        Make sure to_native() returns correct custom format.
        """
        # %f renders microseconds zero-padded to six digits.
        f = serializers.TimeField(format="%H - %S [%f]")
        result_1 = f.to_native(datetime.time(4, 31))
        result_2 = f.to_native(datetime.time(4, 31, 59))
        result_3 = f.to_native(datetime.time(4, 31, 59, 200))
        self.assertEqual('04 - 00 [000000]', result_1)
        self.assertEqual('04 - 59 [000000]', result_2)
        self.assertEqual('04 - 59 [000200]', result_3)
class DecimalFieldTest(TestCase):
    """
    Tests for the DecimalField from_native() and to_native() behavior
    """
    def test_from_native_string(self):
        """
        Make sure from_native() accepts string values
        """
        f = serializers.DecimalField()
        result_1 = f.from_native('9000')
        result_2 = f.from_native('1.00000001')
        self.assertEqual(Decimal('9000'), result_1)
        self.assertEqual(Decimal('1.00000001'), result_2)
    def test_from_native_invalid_string(self):
        """
        Make sure from_native() raises ValidationError on passing invalid string
        """
        f = serializers.DecimalField()
        try:
            f.from_native('123.45.6')
        except validators.ValidationError as e:
            self.assertEqual(e.messages, ["Enter a number."])
        else:
            self.fail("ValidationError was not properly raised")
    def test_from_native_integer(self):
        """
        Make sure from_native() accepts integer values
        """
        f = serializers.DecimalField()
        result = f.from_native(9000)
        self.assertEqual(Decimal('9000'), result)
    def test_from_native_float(self):
        """
        Make sure from_native() accepts float values
        """
        f = serializers.DecimalField()
        result = f.from_native(1.00000001)
        self.assertEqual(Decimal('1.00000001'), result)
    def test_from_native_empty(self):
        """
        Make sure from_native() returns None on empty param.
        """
        f = serializers.DecimalField()
        result = f.from_native('')
        self.assertEqual(result, None)
    def test_from_native_none(self):
        """
        Make sure from_native() returns None on None param.
        """
        f = serializers.DecimalField()
        result = f.from_native(None)
        self.assertEqual(result, None)
    def test_to_native(self):
        """
        Make sure to_native() returns the Decimal value unchanged.
        """
        f = serializers.DecimalField()
        result_1 = f.to_native(Decimal('9000'))
        result_2 = f.to_native(Decimal('1.00000001'))
        self.assertEqual(Decimal('9000'), result_1)
        self.assertEqual(Decimal('1.00000001'), result_2)
    def test_to_native_none(self):
        """
        Make sure to_native() returns None on None param.
        """
        f = serializers.DecimalField(required=False)
        self.assertEqual(None, f.to_native(None))
    def test_valid_serialization(self):
        """
        Make sure the serializer works correctly
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(max_value=9010,
                                                     min_value=9000,
                                                     max_digits=6,
                                                     decimal_places=2)
        self.assertTrue(DecimalSerializer(data={'decimal_field': '9001'}).is_valid())
        self.assertTrue(DecimalSerializer(data={'decimal_field': '9001.2'}).is_valid())
        self.assertTrue(DecimalSerializer(data={'decimal_field': '9001.23'}).is_valid())
        self.assertFalse(DecimalSerializer(data={'decimal_field': '8000'}).is_valid())
        self.assertFalse(DecimalSerializer(data={'decimal_field': '9900'}).is_valid())
        self.assertFalse(DecimalSerializer(data={'decimal_field': '9001.234'}).is_valid())
    def test_raise_max_value(self):
        """
        Make sure max_value violations raises ValidationError
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(max_value=100)
        s = DecimalSerializer(data={'decimal_field': '123'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'decimal_field': ['Ensure this value is less than or equal to 100.']})
    def test_raise_min_value(self):
        """
        Make sure min_value violations raises ValidationError
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(min_value=100)
        s = DecimalSerializer(data={'decimal_field': '99'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'decimal_field': ['Ensure this value is greater than or equal to 100.']})
    def test_raise_max_digits(self):
        """
        Make sure max_digits violations raises ValidationError
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(max_digits=5)
        s = DecimalSerializer(data={'decimal_field': '123.456'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 5 digits in total.']})
    def test_raise_max_decimal_places(self):
        """
        Make sure max_decimal_places violations raises ValidationError
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(decimal_places=3)
        s = DecimalSerializer(data={'decimal_field': '123.4567'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 3 decimal places.']})
    def test_raise_max_whole_digits(self):
        """
        Make sure max_whole_digits violations raises ValidationError
        """
        class DecimalSerializer(serializers.Serializer):
            decimal_field = serializers.DecimalField(max_digits=4, decimal_places=3)
        s = DecimalSerializer(data={'decimal_field': '12345.6'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 4 digits in total.']})
class ChoiceFieldTests(TestCase):
    """
    Tests for the ChoiceField options generator
    """
    def test_choices_required(self):
        """
        Make sure proper choices are rendered if field is required
        """
        f = serializers.ChoiceField(required=True, choices=SAMPLE_CHOICES)
        self.assertEqual(f.choices, SAMPLE_CHOICES)
    def test_choices_not_required(self):
        """
        Make sure proper choices (plus blank) are rendered if the field isn't required
        """
        f = serializers.ChoiceField(required=False, choices=SAMPLE_CHOICES)
        # Optional fields get Django's blank dash choice prepended.
        self.assertEqual(f.choices, models.fields.BLANK_CHOICE_DASH + SAMPLE_CHOICES)
    def test_invalid_choice_model(self):
        """An out-of-set value is rejected and the stored value stays empty."""
        s = ChoiceFieldModelSerializer(data={'choice': 'wrong_value'})
        self.assertFalse(s.is_valid())
        self.assertEqual(s.errors, {'choice': ['Select a valid choice. wrong_value is not one of the available choices.']})
        self.assertEqual(s.data['choice'], '')
    def test_empty_choice_model(self):
        """
        Test that the 'empty' value is correctly passed and used depending on
        the 'null' property on the model field.
        """
        s = ChoiceFieldModelSerializer(data={'choice': ''})
        self.assertTrue(s.is_valid())
        self.assertEqual(s.data['choice'], '')
        s = ChoiceFieldModelWithNullSerializer(data={'choice': ''})
        self.assertTrue(s.is_valid())
        self.assertEqual(s.data['choice'], None)
    def test_from_native_empty(self):
        """
        Make sure from_native() returns an empty string on empty param by default.
        """
        f = serializers.ChoiceField(choices=SAMPLE_CHOICES)
        self.assertEqual(f.from_native(''), '')
        self.assertEqual(f.from_native(None), '')
    def test_from_native_empty_override(self):
        """
        Make sure you can override from_native() behavior regarding empty values.
        """
        f = serializers.ChoiceField(choices=SAMPLE_CHOICES, empty=None)
        self.assertEqual(f.from_native(''), None)
        self.assertEqual(f.from_native(None), None)
    def test_metadata_choices(self):
        """
        Make sure proper choices are included in the field's metadata.
        """
        choices = [{'value': v, 'display_name': n} for v, n in SAMPLE_CHOICES]
        f = serializers.ChoiceField(choices=SAMPLE_CHOICES)
        self.assertEqual(f.metadata()['choices'], choices)
    def test_metadata_choices_not_required(self):
        """
        Make sure proper choices are included in the field's metadata.
        """
        choices = [{'value': v, 'display_name': n}
                   for v, n in models.fields.BLANK_CHOICE_DASH + SAMPLE_CHOICES]
        f = serializers.ChoiceField(required=False, choices=SAMPLE_CHOICES)
        self.assertEqual(f.metadata()['choices'], choices)
class EmailFieldTests(TestCase):
    """
    Tests for EmailField attribute values
    """
    class EmailFieldModel(RESTFrameworkModel):
        email_field = models.EmailField(blank=True)
    class EmailFieldWithGivenMaxLengthModel(RESTFrameworkModel):
        email_field = models.EmailField(max_length=150, blank=True)
    def test_default_model_value(self):
        """The default model max_length (75 here) is mirrored on the field."""
        class EmailFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.EmailFieldModel
        serializer = EmailFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 75)
    def test_given_model_value(self):
        """An explicit model max_length is mirrored on the serializer field."""
        class EmailFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.EmailFieldWithGivenMaxLengthModel
        serializer = EmailFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 150)
    def test_given_serializer_value(self):
        """A serializer-level field declaration overrides the model's length."""
        class EmailFieldSerializer(serializers.ModelSerializer):
            email_field = serializers.EmailField(source='email_field', max_length=20, required=False)
            class Meta:
                model = self.EmailFieldModel
        serializer = EmailFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 20)
class SlugFieldTests(TestCase):
    """
    Tests for SlugField attribute values
    """
    class SlugFieldModel(RESTFrameworkModel):
        slug_field = models.SlugField(blank=True)
    class SlugFieldWithGivenMaxLengthModel(RESTFrameworkModel):
        slug_field = models.SlugField(max_length=84, blank=True)
    def test_default_model_value(self):
        """The default model max_length (50 here) is mirrored on the field."""
        class SlugFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.SlugFieldModel
        serializer = SlugFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['slug_field'], 'max_length'), 50)
    def test_given_model_value(self):
        """An explicit model max_length is mirrored on the serializer field."""
        class SlugFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.SlugFieldWithGivenMaxLengthModel
        serializer = SlugFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['slug_field'], 'max_length'), 84)
    def test_given_serializer_value(self):
        """A serializer-level field declaration overrides the model's length."""
        class SlugFieldSerializer(serializers.ModelSerializer):
            slug_field = serializers.SlugField(source='slug_field',
                                               max_length=20, required=False)
            class Meta:
                model = self.SlugFieldModel
        serializer = SlugFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['slug_field'],
                                 'max_length'), 20)
    def test_invalid_slug(self):
        """
        Make sure an invalid slug raises ValidationError
        """
        class SlugFieldSerializer(serializers.ModelSerializer):
            slug_field = serializers.SlugField(source='slug_field', max_length=20, required=True)
            class Meta:
                model = self.SlugFieldModel
        s = SlugFieldSerializer(data={'slug_field': 'a b'})
        self.assertEqual(s.is_valid(), False)
        self.assertEqual(s.errors, {'slug_field': ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]})
class URLFieldTests(TestCase):
    """
    Tests for URLField attribute values.
    (Includes test for #1210, checking that validators can be overridden.)
    """
    class URLFieldModel(RESTFrameworkModel):
        url_field = models.URLField(blank=True)
    class URLFieldWithGivenMaxLengthModel(RESTFrameworkModel):
        url_field = models.URLField(max_length=128, blank=True)
    def test_default_model_value(self):
        """The default model max_length (200 here) is mirrored on the field."""
        class URLFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.URLFieldModel
        serializer = URLFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['url_field'],
                                 'max_length'), 200)
    def test_given_model_value(self):
        """An explicit model max_length is mirrored on the serializer field."""
        class URLFieldSerializer(serializers.ModelSerializer):
            class Meta:
                model = self.URLFieldWithGivenMaxLengthModel
        serializer = URLFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['url_field'],
                                 'max_length'), 128)
    def test_given_serializer_value(self):
        """A serializer-level field declaration overrides the model's length."""
        class URLFieldSerializer(serializers.ModelSerializer):
            url_field = serializers.URLField(source='url_field',
                                             max_length=20, required=False)
            class Meta:
                model = self.URLFieldWithGivenMaxLengthModel
        serializer = URLFieldSerializer(data={})
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(getattr(serializer.fields['url_field'],
                                 'max_length'), 20)
    def test_validators_can_be_overridden(self):
        url_field = serializers.URLField(validators=[])
        validators = url_field.validators
        self.assertEqual([], validators, 'Passing `validators` kwarg should have overridden default validators')
class FieldMetadata(TestCase):
    """Checks the metadata() dict of required and optional fields."""
    def setUp(self):
        self.required_field = self._make_field(required=True)
        self.optional_field = self._make_field(required=False)
    def _make_field(self, required):
        # Bare Field with a random label and the given requiredness.
        field = serializers.Field()
        field.label = uuid4().hex
        field.required = required
        return field
    def test_required(self):
        self.assertEqual(self.required_field.metadata()['required'], True)
    def test_optional(self):
        self.assertEqual(self.optional_field.metadata()['required'], False)
    def test_label(self):
        for field in (self.required_field, self.optional_field):
            self.assertEqual(field.metadata()['label'], field.label)
class FieldCallableDefault(TestCase):
    """Checks that a field 'default' may be a zero-argument callable."""
    def setUp(self):
        def _make_default():
            return 'foo bar'
        self.simple_callable = _make_default
    def test_default_can_be_simple_callable(self):
        """
        Ensure that the 'default' argument can also be a simple callable.
        """
        field = serializers.WritableField(default=self.simple_callable)
        target = {}
        field.field_from_native({}, {}, 'field', target)
        self.assertEqual(target, {'field': 'foo bar'})
class CustomIntegerField(TestCase):
    """
    Test that custom fields apply min_value and max_value constraints
    """
    def test_custom_fields_can_be_validated_for_value(self):
        class MoneyField(models.PositiveIntegerField):
            pass
        class EntryModel(models.Model):
            bank = MoneyField(validators=[validators.MaxValueValidator(100)])
        class EntrySerializer(serializers.ModelSerializer):
            class Meta:
                model = EntryModel
        entry = EntryModel(bank=1)
        serializer = EntrySerializer(entry, data={"bank": 11})
        self.assertTrue(serializer.is_valid())
        # -1 should fail the PositiveIntegerField's non-negative check.
        serializer = EntrySerializer(entry, data={"bank": -1})
        self.assertFalse(serializer.is_valid())
        # 101 should fail the explicit MaxValueValidator(100).
        serializer = EntrySerializer(entry, data={"bank": 101})
        self.assertFalse(serializer.is_valid())
class BooleanField(TestCase):
    """
    Tests for BooleanField
    """
    def test_boolean_required(self):
        """A required BooleanField rejects input that omits the key."""
        class BooleanRequiredSerializer(serializers.Serializer):
            bool_field = serializers.BooleanField(required=True)
        serializer = BooleanRequiredSerializer(data={})
        self.assertEqual(serializer.is_valid(), False)
| |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, main as test_main
import mock
import sys
import os
class TestStormCli(TestCase):
    """Tests for the ``storm`` command-line client.

    The shell-facing interfaces are patched out: ``os.execvp`` (used by the
    commands that replace the current process with a ``java`` invocation) and
    ``subprocess.Popen`` (used by the commands that read the JVM's output).
    Each test then asserts that invoking the CLI assembled the expected
    ``java`` command line.
    """

    def setUp(self):
        # Patchers are activated with start() here and deactivated globally
        # in tearDown via mock.patch.stopall().
        self.mock_path_exists = mock.patch("os.path.exists").start()
        self.mock_popen = mock.patch("subprocess.Popen").start()
        self.mock_execvp = mock.patch("os.execvp").start()
        self.mock_path_exists.return_value = True
        self.mock_popen.return_value.returncode = 0
        self.mock_popen.return_value.communicate = \
            mock.MagicMock(return_value=("{}", ""))
        # The storm distribution root: three levels above the working dir.
        self.storm_dir = os.path.abspath(
            os.path.join(os.getcwd(), os.pardir, os.pardir, os.pardir)
        )
        from storm import main as cli_main
        self.cli_main = cli_main
        # NOTE(review): assumes JAVA_HOME is set in the environment;
        # os.path.join(None, ...) would raise TypeError otherwise.
        self.java_cmd = os.path.join(
            os.getenv('JAVA_HOME', None), 'bin', 'java'
        )

    def base_test(self, command_invocation, mock_shell_interface, expected_output):
        """Run the CLI with ``command_invocation`` as argv and assert that
        ``expected_output`` is among the calls recorded on
        ``mock_shell_interface``."""
        print(command_invocation)
        with mock.patch.object(sys, "argv", command_invocation):
            self.cli_main()
        if expected_output not in mock_shell_interface.call_args_list:
            # Print a readable comparison before the assertion fires.
            print("Expected:" + str(expected_output))
            print("Got:" + str(mock_shell_interface.call_args_list[-1]))
        assert expected_output in mock_shell_interface.call_args_list

    def test_jar_command(self):
        self.base_test([
            'storm', 'jar',
            'example/storm-starter/storm-starter-topologies-*.jar',
            'org.apache.storm.starter.RollingTopWords', 'blobstore-remote2',
            'remote', '-c topology.blobstore.map=\'{"key1":{"localname":"blob_file", "uncompress":false},"key2":{}}\'', '--jars',
            './external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"', '--artifacts', '"redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api"', '--artifactRepositories', '"jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=+topology.blobstore.map%3D%27%7B%22key1%22%3A%7B%22localname%22%3A%22blob_file%22%2C+%22uncompress%22%3Afalse%7D%2C%22key2%22%3A%7B%7D%7D%27',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib-worker:' + self.storm_dir
                + '/extlib:example/storm-starter/storm-starter-topologies-*.jar:' + self.storm_dir + '/conf:'
                + self.storm_dir + '/bin:./external/storm-redis/storm-redis-1.1.0.jar:./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"', '-Dstorm.jar=example/storm-starter/storm-starter-topologies-*.jar', '-Dstorm.dependency.jars=./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"', '-Dstorm.dependency.artifacts={}',
                'org.apache.storm.starter.RollingTopWords', 'blobstore-remote2', 'remote'
            ])
        )
        self.mock_execvp.reset_mock()
        self.base_test([
            'storm', 'jar', '/path/to/jar.jar', 'some.Topology.Class',
            '-name', 'run-topology', 'randomArgument', '-randomFlag', 'randomFlagValue', '-rotateSize', '0.0001',
            '--hdfsConf', 'someOtherHdfsConf', 'dfs.namenode.kerberos.principal.pattern=hdfs/*.EV..COM'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib-worker:' + self.storm_dir
                + '/extlib:/path/to/jar.jar:' + self.storm_dir + '/conf:' + self.storm_dir + '/bin:',
                '-Dstorm.jar=/path/to/jar.jar', '-Dstorm.dependency.jars=', '-Dstorm.dependency.artifacts={}',
                'some.Topology.Class', '-name', 'run-topology', 'randomArgument', '-randomFlag', 'randomFlagValue',
                '-rotateSize', '0.0001', '--hdfsConf', 'someOtherHdfsConf',
                'dfs.namenode.kerberos.principal.pattern=hdfs/*.EV..COM'
            ])
        )

    def test_localconfvalue_command(self):
        self.base_test(
            ["storm", "localconfvalue", "conf_name"], self.mock_popen, mock.call([
                self.java_cmd, '-client', '-Dstorm.options=',
                '-Dstorm.conf.file=', '-cp', self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf',
                'org.apache.storm.command.ConfigValue', 'conf_name'
            ], stdout=-1
            )
        )

    def test_remoteconfvalue_command(self):
        self.base_test(
            ["storm", "remoteconfvalue", "conf_name"], self.mock_popen, mock.call([
                self.java_cmd, '-client', '-Dstorm.options=',
                '-Dstorm.conf.file=', '-cp', self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf',
                'org.apache.storm.command.ConfigValue', 'conf_name'
            ], stdout=-1
            )
        )

    def test_local_command(self):
        self.base_test([
            'storm', 'local',
            'example/storm-starter/storm-starter-topologies-*.jar',
            'org.apache.storm.starter.RollingTopWords', 'blobstore-remote2',
            'remote', '--jars',
            './external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"',
            '--artifacts', '"redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api"',
            '--artifactRepositories',
            '"jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:example/storm-starter/storm-starter-topologies-*.jar:' + self.storm_dir +
                '/conf:' + self.storm_dir +
                '/bin:./external/storm-redis/storm-redis-1.1.0.jar:./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"',
                '-Dstorm.local.sleeptime=20', '-Dstorm.jar=example/storm-starter/storm-starter-topologies-*.jar',
                '-Dstorm.dependency.jars=./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar"',
                '-Dstorm.dependency.artifacts={}', 'org.apache.storm.LocalCluster',
                'org.apache.storm.starter.RollingTopWords',
                'blobstore-remote2', 'remote'
            ])
        )

    def test_sql_command(self):
        self.base_test(
            ['storm', 'sql', 'apache_log_error_filtering.sql', 'apache_log_error_filtering', '--artifacts',
             '"org.apache.storm:storm-sql-kafka:2.0.0-SNAPSHOT,org.apache.storm:storm-kafka:2.0.0-SNAPSHOT,org.apache.kafka:kafka_2.10:0.8.2.2^org.slf4j:slf4j-log4j12,org.apache.kafka:kafka-clients:0.8.2.2"'
             ], self.mock_execvp, mock.call(
                self.java_cmd,
                [self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                 '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                 '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                 self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' +
                 self.storm_dir +
                 '/conf:' + self.storm_dir + '/bin:' + self.storm_dir + '/lib-tools/sql/core',
                 '-Dstorm.dependency.jars=', '-Dstorm.dependency.artifacts={}',
                 'org.apache.storm.sql.StormSqlRunner', '--file', 'apache_log_error_filtering.sql',
                 '--topology', 'apache_log_error_filtering']
            )
        )

    def test_kill_command(self):
        self.base_test([
            'storm', 'kill', 'doomed_topology'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir + '/bin', 'org.apache.storm.command.KillTopology', 'doomed_topology'
            ])
        )

    def test_upload_credentials_command(self):
        self.base_test([
            'storm', 'upload-credentials', '--config', '/some/other/storm.yaml', '-c', 'test=test', 'my-topology-name', 'appids', 'role.name1,role.name2'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=test%3Dtest',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=/some/other/storm.yaml',
                '-cp', self.storm_dir + '/*:' + self.storm_dir + '/lib:' +
                self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' +
                self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.UploadCredentials',
                'my-topology-name', 'appids', 'role.name1,role.name2'])
        )

    def test_blobstore_command(self):
        self.base_test([
            'storm', 'blobstore', 'create', 'mytopo:data.tgz', '-f', 'data.tgz', '-a', 'u:alice:rwa,u:bob:rw,o::r'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.Blobstore', 'create',
                'mytopo:data.tgz', '-f', 'data.tgz', '-a', 'u:alice:rwa,u:bob:rw,o::r'])
        )
        self.mock_execvp.reset_mock()
        self.base_test([
            'storm', 'blobstore', 'list'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir,
                '-Dstorm.log.dir=' + self.storm_dir + "/logs", '-Djava.library.path=',
                '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.Blobstore', 'list'])
        )
        self.mock_execvp.reset_mock()
        self.base_test([
            'storm', 'blobstore', 'list', 'wordstotrack'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir,
                '-Dstorm.log.dir=' + self.storm_dir + "/logs", '-Djava.library.path=',
                '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.Blobstore', 'list', 'wordstotrack'])
        )
        self.mock_execvp.reset_mock()
        self.base_test([
            'storm', 'blobstore', 'update', '-f', '/wordsToTrack.list', 'wordstotrack'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.Blobstore', 'update', '-f',
                '/wordsToTrack.list', 'wordstotrack'])
        )
        self.mock_execvp.reset_mock()
        self.base_test([
            'storm', 'blobstore', 'cat', 'wordstotrack'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.Blobstore', 'cat', 'wordstotrack'])
        )

    def test_activate_command(self):
        self.base_test([
            'storm', 'activate', 'doomed_topology'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir + '/bin',
                'org.apache.storm.command.Activate', 'doomed_topology'
            ])
        )

    def test_deactivate_command(self):
        self.base_test([
            'storm', 'deactivate', 'doomed_topology'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.Deactivate', 'doomed_topology'
            ])
        )

    def test_rebalance_command(self):
        self.base_test([
            'storm', 'rebalance', 'doomed_topology'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.Rebalance', 'doomed_topology'
            ])
        )

    def test_list_command(self):
        self.base_test([
            'storm', 'list'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.ListTopologies'
            ])
        )

    def test_nimbus_command(self):
        self.base_test([
            'storm', 'nimbus'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=nimbus', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=nimbus.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.daemon.nimbus.Nimbus'
            ])
        )

    def test_supervisor_command(self):
        self.base_test([
            'storm', 'supervisor'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=supervisor', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=supervisor.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.daemon.supervisor.Supervisor'
            ])
        )

    def test_pacemaker_command(self):
        self.base_test([
            'storm', 'pacemaker'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=pacemaker', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=pacemaker.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.pacemaker.Pacemaker'
            ])
        )

    def test_ui_command(self):
        self.base_test([
            'storm', 'ui'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=ui', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir +
                '/lib-webapp:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=ui.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.daemon.ui.UIServer'
            ])
        )

    def test_logviewer_command(self):
        self.base_test([
            'storm', 'logviewer'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=logviewer', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir +
                '/lib-webapp:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=logviewer.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.daemon.logviewer.LogviewerServer'
            ])
        )

    def test_drpc_command(self):
        self.base_test([
            'storm', 'drpc'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-server', '-Ddaemon.name=drpc', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir +
                '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir +
                '/lib-webapp:' + self.storm_dir + '/conf',
                '-Djava.deserialization.disabled=true', '-Dlogfile.name=drpc.log',
                '-Dlog4j.configurationFile=' + self.storm_dir + '/log4j2/cluster.xml',
                'org.apache.storm.daemon.drpc.DRPCServer'
            ])
        )

    def test_drpc_client_command(self):
        self.base_test([
            'storm', 'drpc-client', 'exclaim', 'a', 'exclaim', 'b', 'test', 'bar'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.BasicDrpcClient', 'exclaim', 'a', 'exclaim', 'b', 'test', 'bar'
            ])
        )
        self.base_test([
            'storm', 'drpc-client', '-f', 'exclaim', 'a', 'b'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs",
                '-Djava.library.path=', '-Dstorm.conf.file=', '-cp',
                self.storm_dir + '/*:' + self.storm_dir + '/lib:' + self.storm_dir + '/extlib:' + self.storm_dir +
                '/extlib-daemon:' + self.storm_dir + '/conf:' + self.storm_dir +
                '/bin', 'org.apache.storm.command.BasicDrpcClient', '-f', 'exclaim', 'a', 'b'
            ])
        )

    def test_healthcheck_command(self):
        self.base_test([
            'storm', 'node-health-check'
        ], self.mock_execvp, mock.call(
            self.java_cmd, [
                self.java_cmd, '-client', '-Ddaemon.name=', '-Dstorm.options=',
                '-Dstorm.home=' + self.storm_dir, '-Dstorm.log.dir=' + self.storm_dir + "/logs", '-Djava.library.path=',
                '-Dstorm.conf.file=', '-cp', self.storm_dir + '/*:' + self.storm_dir + '/lib:' +
                self.storm_dir + '/extlib:' + self.storm_dir + '/extlib-daemon:' + self.storm_dir + '/conf:' +
                self.storm_dir + '/bin', 'org.apache.storm.command.HealthCheck'
            ])
        )

    def tearDown(self):
        # BUG FIX: the previous code called .stop() on the *mocks* returned
        # by mock.patch(...).start().  That is just a recorded MagicMock
        # attribute call -- it never deactivated the patchers (and the
        # os.execvp patcher was never stopped at all), so the patches leaked
        # beyond this test class.  patch.stopall() stops every patcher that
        # was activated via start().
        mock.patch.stopall()
# Allow running this test module directly; delegates to unittest's CLI runner.
if __name__ == '__main__':
    test_main()
| |
from collections import Counter
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth import login as login_user
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import slugify
from django.views.generic import TemplateView
from chat.models import ChatMessage
from userprofile.models import UserProfile
from notifications.sms import format_numbers
from userprofile.forms import UserForm
from utils.forms import SetNewPasswordForm
from utils.user_utils import username_hash, get_client_ip
from .forms import ContactForm
from .models import Course, OfficeHours
class AboutView(TemplateView):
    """Render the static 'about' page."""
    template_name = 'officehours/about.html'
class HelpView(TemplateView):
    """Render the static 'help' page."""
    template_name = 'officehours/help.html'
class ContactView(TemplateView):
    """Render the static 'contact' page."""
    template_name = 'officehours/contact.html'
def index(request):
    """Landing page: route users based on auth state and existing data."""
    if not request.user.is_authenticated():
        # Anonymous visitors get the google auth login.
        return redirect("officehours:login")
    user = request.user
    # Users who have already added a course or any other data go straight
    # to their schedule.
    has_data = (
        user.officehours_set.exists()
        or user.teaching.exists()
        or user.course_set.exists()
    )
    if has_data:
        return redirect("officehours:schedule")
    # Otherwise show the index, which is a getting-started page.
    return render(request, 'officehours/index.html', {})
def login(request):
    """Log a user in from an ajax-POSTed email/token pair."""
    if request.user.is_authenticated():
        return redirect("officehours:schedule")
    if request.method == "POST" and request.is_ajax():
        user = authenticate(
            email=request.POST.get('email'),
            token=request.POST.get('token'),
        )
        if user is not None:
            login_user(request, user)
            return JsonResponse({'user_id': user.id}, status=200)
        return JsonResponse({'error': "User not found"}, status=400)
    return render(request, 'officehours/login.html', {})
@login_required
def add_code(request):
    """Let a student join a course by submitting its share code."""
    if request.method != "POST":
        context = {
            'next_url': request.GET.get('next', '')
        }
        return render(request, 'officehours/add_code.html', context)
    next_url = request.POST.get('next') or 'officehours:schedule'
    # Codes are stored upper-cased; normalize whatever the user typed.
    code = "{}".format(request.POST['code']).strip().upper()
    try:
        course = Course.objects.get(code=code)
    except Course.DoesNotExist:
        messages.error(request, "Could not find that course.")
        return redirect('officehours:add-code')
    course.students.add(request.user)
    messages.success(request, "Course added to your schedule.")
    return redirect(next_url)
@login_required
def phone_number(request):
    """Collect and save the user's phone number."""
    error = None
    phone = request.user.userprofile.phone
    if request.method == "POST" and 'phone' in request.POST:
        phone = request.POST['phone']
        try:
            if format_numbers([phone]):
                profile = request.user.userprofile
                profile.phone = phone
                profile.save()
                messages.success(request, "Phone number updated.")
                return redirect('officehours:schedule')
            # format_numbers() rejected the submitted value.
            error = "Sorry, that doesn't look like a phone number."
        except (UserProfile.DoesNotExist, IndexError):
            messages.error(request, "Could not save your information.")
            return redirect('officehours:index')
    context = {
        'phone': phone,
        'error': error,
    }
    return render(request, 'officehours/phone_number.html', context)
@login_required
def contact_info(request):
    """Collect the user's contact info (name, email, phone)."""
    if request.method == "POST":
        form = ContactForm(request.POST)
        if form.is_valid():
            request.user.userprofile.phone = form.cleaned_data['phone']
            request.user.userprofile.save()
            request.user.email = form.cleaned_data['email']
            # Split "First [Middle ...] Last": everything but the final token
            # is the first name.  BUG FIX: the old code used `parts[1]` for
            # the last name, which raised IndexError for single-word names
            # and picked the *second* token (not the last) for 3+-word names.
            parts = form.cleaned_data['your_name'].split()
            if parts:
                request.user.first_name = " ".join(parts[:-1])
                request.user.last_name = parts[-1]
            request.user.save()
            messages.success(request, "Contact info saved.")
            return redirect("officehours:add-hours")
        else:
            messages.error(request, "Unable to save your info.")
    else:
        initial = {
            'your_name': request.user.get_full_name(),
            'email': request.user.email,
            'phone': request.user.userprofile.phone,
        }
        form = ContactForm(initial=initial)
    context = {'form': form}
    return render(request, 'officehours/contact_info.html', context)
@login_required
def add_hours(request):
    """
    Allow a user to add office hours.
    NOTE: This doesn't use any Forms because I wasted a whole day trying to
    get this form UI working with MDL; so, I just gave up and hard-coded the
    html form.
    """
    selected_days = []
    results = None
    if request.method == "POST":
        # Day checkboxes arrive as fields whose posted value is 'on'.
        posted = request.POST.dict()
        selected_days = [name for name, value in posted.items() if value == 'on']
        # Build: { DAY: [{from: time, to: time}, ... ] }
        results = {}
        for day in selected_days:
            starts = request.POST.getlist("from_time-{}".format(day.lower()))
            ends = request.POST.getlist("to_time-{}".format(day.lower()))
            # zip yields tuples; we need JSON-serializable dicts in a list.
            results[day] = [
                {"from": start, "to": end}
                for start, end in zip(starts, ends)
            ]
        if results:
            OfficeHours.objects.create(user=request.user, schedule=results)
            messages.success(request, "Office hours saved.")
            if request.POST.get('another') == "true":
                return redirect("officehours:add-hours")
            return redirect("officehours:add-course")
        else:
            messages.error(request, "Unable to save your office hours.")
    context = {
        'results': results,
        'day_choices': [
            'Sunday', 'Monday', 'Tuesday', 'Wednesday',
            'Thursday', 'Friday', 'Saturday',
        ]
    }
    return render(request, 'officehours/add_hours.html', context)
@login_required
def add_course(request):
    """
    Allow a user to add a course (name, location, start time, meeting days).
    NOTE: This doesn't use any Forms because I wasted a whole day trying to
    get this form UI working with MDL; so, I just gave up and hard-coded the
    html form.
    """
    coursetime = ''
    coursetime_error = None
    coursename = ''
    coursename_error = ''
    location = ''
    location_error = ''
    selected_days = []
    if request.method == "POST":
        # .get() instead of [] so a missing field yields a validation error
        # rather than a 500.
        coursename = request.POST.get('coursename', '')
        if not coursename:
            coursename_error = "Course name is required"
        # BUG FIX: this previously read request.POST['coursename'], so the
        # saved location silently duplicated the course name.
        location = request.POST.get('location', '')
        if not location:
            location_error = "Location is required"
        try:
            coursetime = request.POST.get('coursetime', '')
            coursetime = datetime.strptime(coursetime, '%I:%M %p').time()
        except ValueError:
            coursetime = ''
            coursetime_error = 'Enter a valid time'
        # Day checkboxes post the value 'on' for each selected day.
        for key, val in request.POST.items():
            if val == "on":
                selected_days.append(key)
        if all([coursename, coursetime, location, selected_days]):
            course = Course.objects.create(
                user=request.user,
                name=coursename,
                start_time=coursetime,
                location=location,
                days=selected_days
            )
            messages.success(request, "Course info saved.")
            if request.POST.get('another') == "true":
                return redirect("officehours:add-course")
            return redirect(course.get_share_url())
        else:
            messages.error(request, "Unable to save your course.")
    context = {
        'coursename': coursename,
        'coursename_error': coursename_error,
        'coursetime': coursetime,
        'coursetime_error': coursetime_error,
        'location': location,
        'location_error': location_error,
        'selected_days': selected_days,
        'day_choices': [
            'Sunday', 'Monday', 'Tuesday', 'Wednesday',
            'Thursday', 'Friday', 'Saturday',
        ]
    }
    return render(request, 'officehours/add_course.html', context)
@login_required
def delete_course(request, pk):
    """
    Allow an INSTRUCTOR (course owner) to delete a course (with confirmation)
    """
    # Ownership check: only the course's creator may delete it.
    course = get_object_or_404(Course, pk=pk, user=request.user)
    confirmed = request.method == "POST" and bool(request.POST.get('confirm', False))
    if confirmed:
        name = course.name
        course.delete()
        messages.success(request, "{} was deleted.".format(name))
        return redirect("officehours:schedule")
    return render(request, 'officehours/delete_course.html', {'course': course})
@login_required
def share_course(request, pk):
    """Display the course's share code"""
    course = get_object_or_404(Course, pk=pk)
    return render(request, 'officehours/share_course.html', {'course': course})
@login_required
def course_details(request, pk):
    """Display course details"""
    course = get_object_or_404(Course, pk=pk)
    return render(request, 'officehours/course_details.html', {'course': course})
@login_required
def officehours_details(request, pk):
    """Display OfficeHours details"""
    hours = get_object_or_404(OfficeHours, pk=pk)
    return render(request, 'officehours/officehours_details.html', {'hours': hours})
@login_required
def delete_officehours(request, pk):
    """
    Allow an INSTRUCTOR (officehours owner) to delete a officehours (with confirmation)
    """
    # Ownership check: only the creator may delete these hours.
    officehours = get_object_or_404(OfficeHours, pk=pk, user=request.user)
    confirmed = request.method == "POST" and bool(request.POST.get('confirm', False))
    if confirmed:
        officehours.delete()
        messages.success(request, "Office Hours were deleted.")
        return redirect("officehours:schedule")
    return render(request, 'officehours/delete_officehours.html',
                  {'hours': officehours})
@login_required
def schedule(request):
    """List courses in which the user is a student / teacher."""
    # Get unread messages for the user, then count the number from each
    # sender: {(sender_id, first_name, last_name): count}.
    chat_data = ChatMessage.objects.to_user(request.user).filter(read=False)
    _fields = ('user__id', 'user__first_name', 'user__last_name')
    chat_data = dict(Counter(chat_data.values_list(*_fields)))
    student_schedule = request.user.course_set.all()
    teaching_schedule = request.user.teaching.all()
    office_hours = OfficeHours.objects.current().filter(user=request.user)
    context = {
        'student_schedule': student_schedule,
        # Reuse the queryset built above instead of re-querying
        # request.user.teaching.all() a second time.
        'teaching_schedule': teaching_schedule,
        'office_hours': office_hours,
        'is_student': student_schedule.exists(),
        'is_teacher': teaching_schedule.exists() or office_hours.exists(),
        'chat_data': chat_data,
    }
    return render(request, 'officehours/schedule.html', context)
def create_account(request):
    """Yet another way to create an account."""
    if request.method == "POST":
        form = UserForm(request.POST)
        password_form = SetNewPasswordForm(request.POST, prefix="pw")
        if form.is_valid() and password_form.is_valid():
            User = get_user_model()
            email = form.cleaned_data['email'].strip().lower()
            # Ensure the email isn't already tied to an account.  (exists()
            # avoids fetching a user object we never used.)
            if User.objects.filter(email__iexact=email).exists():
                messages.info(request, "It looks like you already have an "
                                       "account! Log in to continue.")
                return redirect("officehours:login")
            # Create & activate the account
            # XXX This is a hack to keep these users from getting the
            # XXX `selected_by_default` content from the `goals` app.
            # XXX We *must* set this before we create the user, hence the
            # XXX use of the email in the key.
            _key = "omit-default-selections-{}".format(slugify(email))
            cache.set(_key, True, 30)
            user = form.save(commit=False)
            user.is_active = True
            user.username = username_hash(email)
            user.set_password(password_form.cleaned_data['password'])
            user.save()
            # Set their IP address.
            user.userprofile.ip_address = get_client_ip(request)
            user.userprofile.save()
            user = authenticate(
                email=email,
                password=password_form.cleaned_data['password']
            )
            login_user(request, user)
            return redirect("officehours:index")
        else:
            messages.error(request, "We could not process your request. "
                           "Please see the details, below.")
    else:
        password_form = SetNewPasswordForm(prefix='pw')
        form = UserForm()
    context = {
        'form': form,
        'password_form': password_form,
    }
    return render(request, "officehours/create_account.html", context)
| |
""" Functionality for managing child processes. """
# Don't import signal from this package
from __future__ import absolute_import
import os.path, struct, cPickle, sys, signal, traceback, subprocess, errno, \
stat, time
from srllib import threading, util
from srllib._common import *
from srllib.error import BusyError, SrlError
from srllib.signal import Signal
class ChildError(SrlError):
    """ Exception detected in child process.

    If the original exception derives from L{PickleableException}, it is
    preserved, along with the traceback.
    @ivar orig_exception: The original exception, possibly C{None}.
    @ivar orig_traceback: Traceback of original exception, possibly C{None}.
    """
    def __init__(self, process_error):
        SrlError.__init__(self, "Error in child process")
        exc_cls = process_error.exc_class
        if exc_cls is None:
            self.orig_exception = None
        else:
            # A pickleable exception carries enough state to be rebuilt.
            assert process_error.exc_message is not None
            assert process_error.exc_traceback is not None
            assert process_error.exc_arguments is not None
            self.orig_exception = exc_cls(*process_error.exc_arguments)
        self.orig_traceback = process_error.exc_traceback
class PickleableException(SrlError):
    """ Base class for exceptions that survive transport across processes.

    All positional constructor arguments (message first) are recorded in
    C{self.arguments} so the exception can later be reconstructed via
    C{cls(*arguments)}.
    """
    def __init__(self, msg, *args):
        SrlError.__init__(self, msg)
        self.arguments = (msg,) + tuple(args)
class _ProcessError(object):
    """ Encapsulation of a child process error.

    Exceptions don't pickle in the standard fashion, so we do it like this.
    @ivar message: Error message.
    @ivar exc_message: Original exception message.
    @ivar exc_class: Class of original exception.
    @ivar exc_arguments: Arguments of original exception.
    @ivar exc_traceback: Traceback of original exception.
    """
    def __init__(self, msg, original_exc=None, original_tb=None):
        self.message = msg
        # Stringify the exception (if any) for transport.
        self.exc_message = None if original_exc is None else str(original_exc)
        if isinstance(original_exc, PickleableException):
            # Only pickle-friendly exceptions keep their class + arguments.
            self.exc_class = original_exc.__class__
            self.exc_arguments = original_exc.arguments
        else:
            self.exc_class = self.exc_arguments = None
        # Traceback objects themselves don't pickle; keep the formatted lines.
        self.exc_traceback = (None if original_tb is None
                              else traceback.format_tb(original_tb))
class PickleError(SrlError):
    """ Raised when the child callable or its arguments fail to pickle,
    e.g. a nested function definition. """
    pass
class _MthdProxy(object):
    """ Pickleable stand-in for a bound method.

    Bound methods can't be pickled directly, so we capture the instance,
    the class and the method name, and re-resolve the function at call time.
    Note: im_self/im_class/im_func/func_name are Python 2 bound-method
    attributes.
    """
    def __init__(self, mthd):
        self.__obj, self.__cls, self.__name = (mthd.im_self, mthd.im_class,
            mthd.im_func.func_name)

    def __call__(self, *args, **kwds):
        name = self.__name
        if name.startswith("__"):
            # Mangle
            # Double-underscore names are stored under the class-mangled name.
            name = "_%s%s" % (self.__cls.__name__, name)
        func = getattr(self.__cls, name)
        # NOTE(review): the callee's return value is discarded here —
        # presumably intentional since results travel back via pickling,
        # but confirm.
        func(self.__obj, *args, **kwds)
class ChildDied(SrlError):
    """ Child died unexpectedly.

    @ivar exitcode: Child's exit code.
    @ivar stderr: Child's stderr.
    """
    def __init__(self, exitcode, stderr):
        SrlError.__init__(self, "Child died unexpectedly (exit code %d)" %
            exitcode)
        # Bug fix: the stderr text used to be stored only under the
        # misspelled attribute name 'stderrr', so the documented .stderr
        # attribute never existed.  The old name is kept as an alias for
        # backward compatibility.
        self.exitcode, self.stderr = exitcode, stderr
        self.stderrr = stderr
def terminate(process):
    """ Terminate a process of either the L{Process} type or the standard
    subprocess.Popen type.

    This method will block until it is determined that the process has in fact
    terminated.
    @note: On Windows, pywin32 is required.
    @return: The process's exit status.
    """
    r = process.poll()
    if r is not None:
        # Already exited; nothing to terminate.
        return r
    if get_os_name() == Os_Windows:
        import win32process, win32api
        # Emulate POSIX behaviour, where the exit code will be the negative
        # value of the signal that terminated the process
        # Open with rights to terminate and synchronize
        # (0x1 = PROCESS_TERMINATE, 0x100000 = SYNCHRONIZE).
        handle = win32api.OpenProcess(0x1 | 0x100000, False, process.pid)
        # NOTE(review): the process handle is never closed here — presumably
        # relying on pywin32's handle finalizer; confirm whether an explicit
        # CloseHandle is required.
        win32process.TerminateProcess(handle, -signal.SIGTERM)
    else:
        try:
            # Ask politely first, then give the child a moment to exit.
            os.kill(process.pid, signal.SIGTERM)
            time.sleep(.05)
        except OSError, err:
            if err.errno == errno.ECHILD:
                # Presumably, the child is dead already?
                pass
            else:
                raise
        if process.poll() is None:
            # Still alive after SIGTERM: force-kill.
            os.kill(process.pid, signal.SIGKILL)
    return process.wait()
class Process(object):
    """ Invoke a callable in a child process.

    Instantiating an object of this class will spawn a child process, I{in which a
    provided callable is invoked}. Pipes are provided for the standard streams
    and a separate pipe for communication between parent and child. stdout and
    stderr are made non-blocking so they can easily be drained of data.
    @ivar stdout: Child's stdout file.
    @ivar stderr: Child's stderr file.
    """
    def __init__(self, child_func, child_args=[], child_kwds={}):
        """
        @param child_func: Function to be called in child process.
        @param child_args: Optional arguments for the child function.
        @param child_kwds: Optional keywords for the child function.
        @raise ChildDied: Child died unexpectedly.
        """
        self.__exit_rslt = None
        # We execute in the child a script which first unpickles the function
        # and its parameters, and then invokes it. This is the best cross-
        # platform approach that we've found (since Windows does not support
        # fork)
        script_fname = self.__script_fname = util.create_tempfile(content=
r"""import cPickle, sys, struct
pipe_path = sys.argv[1]
lnth = struct.unpack("@I", sys.stdin.read(4))[0]
sys.path = cPickle.loads(sys.stdin.read(lnth))
lnth = struct.unpack("@I", sys.stdin.read(4))[0]
func, args, kwds = cPickle.loads(sys.stdin.read(lnth))
try: func(*args, **kwds)
except Exception, err:
    from srllib.process import _ProcessError
    pickle = cPickle.dumps(_ProcessError("Error in child", err, sys.exc_info()[2]))
    f = file(pipe_path, "wb")
    try: f.write(pickle)
    finally: f.close()
""")
        # XXX: Some day we might want to use a real (named) pipe ...
        # The child reports failures by writing a pickled _ProcessError into
        # this temp file; poll() reads it back.
        pipe_path = self.__errpipe_path = util.create_tempfile()
        prcs = self.__prcs = subprocess.Popen(["python", script_fname, pipe_path],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, universal_newlines=
            True, bufsize=-1)
        import types
        if isinstance(child_func, types.MethodType):
            # Can't pickle instance methods
            child_func = _MthdProxy(child_func)
        # Pickle the path, to ensure proper unpickling
        path_data = cPickle.dumps(sys.path)
        try: func_data = cPickle.dumps((child_func, child_args, child_kwds))
        except TypeError, err:
            print err
            raise PickleError("Failed to pickle %r, is this e.g. a nested definition?" % \
                child_func)
        try:
            # Length-prefixed frames on the child's stdin: sys.path first,
            # then the callable with its arguments.
            prcs.stdin.write(struct.pack("@I", len(path_data)))
            prcs.stdin.write(path_data)
            prcs.stdin.write(struct.pack("@I", len(func_data)))
            prcs.stdin.write(func_data)
            # Important: Flush what we've written so child isn't stuck waiting for data
            prcs.stdin.flush()
        except EnvironmentError, err:
            if err.errno == errno.EPIPE:
                # Child exited before consuming its input.
                exitcode = self.wait()
                raise ChildDied(exitcode, prcs.stderr.read())
            raise
        self._pid = prcs.pid

    def __str__(self):
        # NOTE(review): self.name is never assigned anywhere in this class —
        # presumably this raises AttributeError; confirm the intended source
        # of the name.
        return self.name

    @property
    def pid(self):
        # The child's process ID.
        return self._pid

    @property
    def stdout(self):
        # The child's stdout file object.
        return self.__prcs.stdout

    @property
    def stderr(self):
        # The child's stderr file object.
        return self.__prcs.stderr

    def close(self):
        """ Release resources.

        If the process is still alive, it is waited for.
        """
        if self.__exit_rslt is None:
            self.wait()
        os.remove(self.__errpipe_path)
        os.remove(self.__script_fname)

    def poll(self):
        """ Check if child has exited.
        @return: If child has exited, its exit code, else C{None}.
        @raise ChildError: Exception detected in child.
        """
        rslt = self.__exit_rslt
        if rslt is not None:
            # A previous poll/wait already resolved the outcome; replay it.
            if isinstance(rslt, ChildError):
                raise rslt
            assert isinstance(rslt, int)
            return rslt
        r = self.__prcs.poll()
        # Check the error pipe for a pickled _ProcessError from the child.
        f = file(self.__errpipe_path, "rb")
        try: data = f.read()
        finally: f.close()
        if data:
            # Exception from child process
            err = cPickle.loads(data)
            self.__exit_rslt = ChildError(err)
            raise self.__exit_rslt
        return r

    def wait(self):
        """ Wait for child to finish.
        @return: Child's exit code.
        @raise ChildError: Exception detected in child.
        """
        self.__prcs.wait()
        return self.poll()

    def terminate(self):
        """ Kill child process.

        Implemented using L{terminate}.
        """
        return terminate(self)

    def write_message(self, message, wait=True):
        """ Write message to other process.

        If this is the child process, message will be available for parent
        process and vice versa. This method may wait for the other process to
        "pick up the phone". A broken connection will result in EofError.
        @param message: An arbitrary object.
        @param wait: Wait for acknowledgement.
        """
        # NOTE(review): self.pipe_out/self.pipe_in are never initialized in
        # this class — presumably left over from an earlier pipe-based
        # design; confirm before relying on the messaging methods.
        msg = cPickle.dumps(message)
        # Length-prefixed frame, then the pickled payload.
        self.pipe_out.write(struct.pack("i", len(msg)))
        self.pipe_out.write(msg)
        if not wait:
            return
        # Wait for ack
        a = self.pipe_in.read(1)
        if a == "":
            raise EofError

    def read_message(self):
        """ Read message from other process.

        If this is the child process, message will be read from parent process
        and vice versa. This method will wait until a message is actually
        received.
        @return: An arbitrary object written by the other process
        @raise EofError: Broken connection.
        """
        def read_data(lnth):
            # Read exactly lnth bytes; a short read means the peer went away.
            data = self.pipe_in.read(lnth)
            if len(data) < lnth:
                raise EofError
            return data

        data = read_data(struct.calcsize("i"))
        msgLnth = struct.unpack("i", data)[0]
        data = read_data(msgLnth)
        # Ack
        try: self.pipe_out.write('a')
        except IOError: pass
        import cPickle
        obj = cPickle.loads(data)
        return obj

    def _poll(self, wait=False):
        # POSIX-only poll variant based on os.waitpid.
        # NOTE(review): self._childRet is never initialized elsewhere in this
        # class — confirm whether this method is still in use.
        if self._childRet is not None:
            return self._childRet
        if wait:
            flag = 0
        else:
            flag = os.WNOHANG
        while True:
            try: pid, status = os.waitpid(self._pid, flag)
            except OSError, err:
                # Ignore interrupts
                if err.errno == errno.EINTR:
                    continue
                raise
            break
        if pid != self._pid:
            # No state change yet (WNOHANG path).
            return None
        if os.WIFSIGNALED(status):
            # Mirror the subprocess convention: negative signal number.
            self._childRet = -os.WTERMSIG(status)
        else:
            self._childRet = os.WEXITSTATUS(status)
        if self._childRet != 0:
            # A non-zero exit may carry a pickled error message; surface it.
            try: obj = self.read_message()
            except EofError:
                pass
            else:
                if isinstance(obj, _ProcessError):
                    raise ChildError(obj)
        return self._childRet
class EofError(IOError):
    """ Broken connection: the other end of the message pipe closed. """
    pass
class ThreadedProcessMonitor(object):
    """ Monitor a child process in a background thread.

    @group Signals: sig*
    @ivar process: The L{child process<Process>}
    @ivar sig_stdout: Triggered to deliver stdout output from the child process.
    @ivar sig_stderr: Triggered to deliver stderr output from the child process.
    @ivar sig_finished: Signal that monitor has finished, from background thread.
    Parameters: None.
    @ivar sig_failed: Signal that monitored process failed, from background
    thread. Parameters: The caught exception.
    """
    def __init__(self, daemon=False, use_pty=False, pass_process=True):
        """
        @param daemon: Start background threads in daemon mode
        @param use_pty: Open pseudo-terminal for child process.
        @param pass_process: When executing functions in child processes,
        should the L{Process} object be passed as a parameter?
        """
        # NOTE(review): use_pty and pass_process are neither stored nor used
        # anywhere in this class — confirm whether they are dead parameters.
        self.sig_stdout, self.sig_stderr, self.sig_finished, self.sig_failed = (
            Signal(), Signal(), Signal(), Signal())
        self.__process = None
        # Unbuffered pipe pair used to wake the monitoring thread for events.
        i, o = os.pipe()
        self._event_pipe_in, self._event_pipe_out = (os.fdopen(i, "r", 0),
            os.fdopen(o, "w", 0))
        self._daemon = daemon
        self._thrd = None

    @property
    def process(self):
        """ The monitored process. """
        return self.__process

    def __call__(self, child_func, child_args=[], child_kwds={}):
        """ Execute function in child process, monitored in background thread.
        @param child_func: Function to execute
        @param child_args: Arguments for child function
        @param child_kwds: Keywords for child function
        @raise BusyError: Already busy with a child process.
        """
        if self.__process is not None:
            raise BusyError("Another process is already being monitored")
        self.__exit_code = None
        self.__process = Process(child_func, child_args=child_args, child_kwds=
            child_kwds)
        thrd = self._thrd = threading.Thread(target=self._thrdfunc, daemon=
            self._daemon)
        thrd.start()

    def monitor_command(self, arguments, cwd=None, env=None):
        """ Monitor a command.
        @return: The associated L{process<subprocess.Popen>}.
        """
        if self.__process is not None:
            raise BusyError("Another process is already being monitored")
        prcs = self.__process = subprocess.Popen(arguments, cwd=cwd, env=env,
            stdout=subprocess.PIPE, stderr=
            subprocess.PIPE)
        thrd = self._thrd = threading.Thread(target=self._thrdfunc, daemon=
            self._daemon)
        thrd.start()
        return prcs

    def wait(self):
        """ Wait for monitoring thread to finish.
        @return: The child process exit code. If the child raised a L{ChildError},
        this will be None.
        """
        if self._thrd is not None:
            self._thrd.join()
        return self.__exit_code

    def _thrdfunc(self):
        # Background thread: wait for the child, then fire the appropriate
        # signal and release resources.
        prcs = self.__process
        try: self.__exit_code = prcs.wait()
        except ChildError, err:
            self.sig_failed(err)
        else:
            self.sig_finished()
        if hasattr(prcs, "close"):
            # subprocess.Popen has no close(); only our Process type does.
            prcs.close()
        self.__process = None
'''
# POSIX
def _thrdfunc(self):
import select
process = self.__process
stdout, stderr = process.stdout, process.stderr
pollIn, pollOut, pollEx = [stdout, stderr, self._event_pipe_in], [], []
while pollIn:
# Keep in mind that closed files will be seen as ready by select and
# cause it to wake up
rd, wr, ex = select.select(pollIn, pollOut, pollEx, 0.01)
if stdout in rd:
try: o = stdout.read()
except IOError:
# Presumably EOF
o = ""
if o:
self.sig_stdout(o)
else:
pollIn.remove(stdout)
if stderr in rd:
e = stderr.read()
if e:
self.sig_stderr(e)
else:
pollIn.remove(stderr)
if self._event_pipe_in in rd:
event = self._event_pipe_in.read(1)
assert event in ("t",)
if event == "t":
# Terminate
process.terminate()
break
if stdout not in pollIn and stderr not in pollIn:
# Child exited
break
try: self.__exit_code = self.__process.wait()
except ChildError, err:
self.sig_failed(err)
else:
self.sig_finished()
self.__process.close()
self.__process = None
'''
| |
import atexit
import glob
import logging
import os
import random
import re
import signal
import subprocess
import sys
import time
import irc.client
# Interpreter used to launch the helper web server below; overridable via
# the PYTHON environment variable.
PYTHON = os.getenv('PYTHON', "python3")
class Client(irc.client.SimpleIRCClient):
    """IRC client driving the end-to-end test harness.

    Tracks the bot's progress through ``self.flags``:
    'queued'/'finished' (booleans flipped by channel messages) and
    'ident' (job identifier parsed from a '!status <ident>' line).
    """

    def __init__(self):
        irc.client.SimpleIRCClient.__init__(self)
        self.flags = {
            'queued': False,
            'finished': False,
            'ident': None,
        }

    def on_nicknameinuse(self, connection, event):
        # Retry with a random numeric suffix when the nickname is taken.
        connection.nick('{}{}'.format(connection.get_nickname(),
                                      random.randint(0, 99))
                        )

    def on_welcome(self, connection, event):
        connection.join('#atbot-test')

    def on_join(self, connection, event):
        channel = event.target
        nickname = event.source.nick
        if nickname == 'atbot':
            # Kick off a job; the random query string defeats caching.
            connection.privmsg(
                channel,
                '{}?{}'.format('!ao http://localhost:8866',
                               random.randint(0, 1000))
            )

    def on_part(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_quit(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_kick(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_mode(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_pubmsg(self, connection, event):
        channel = event.target
        if not irc.client.is_channel(channel):
            return
        text = event.arguments[0]
        if 'Queued' in text:
            self.flags['queued'] = True
        elif 'finished' in text:
            self.flags['finished'] = True
        elif '!status' in text:
            match = re.search(r'!status ([a-z0-9]+)', text)
            # Bug fix: a '!status' line that doesn't match the expected
            # pattern used to raise AttributeError on ``None.group(1)``.
            if match is not None:
                self.flags['ident'] = match.group(1)

    def on_pubnotice(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_topic(self, connection, event):
        """Ignored; present only to document the handled event set."""

    def on_nick(self, connection, event):
        """Ignored; present only to document the handled event set."""

    @classmethod
    def get_nick_if_possible(cls, source):
        """Return ``source.nick`` when available, else the raw source."""
        try:
            return source.nick
        except AttributeError:
            return source
def main():
    """Run the end-to-end smoke test.

    Starts every service (bot, firehose, dashboard, pipeline, cogs and a
    local web server), drives a job over IRC, waits up to five minutes for
    all flags to be set, then checks that the expected WARC and rsync
    output files exist.  Exits with status 42 on failure.
    """
    logging.basicConfig(level=logging.INFO)

    script_dir = os.path.dirname(__file__)
    bot_script = os.path.join(script_dir, 'run_bot.sh')
    firehose_script = os.path.join(script_dir, 'run_firehose.sh')
    dashboard_script = os.path.join(script_dir, 'run_dashboard.sh')
    pipeline_script = os.path.join(script_dir, 'run_pipeline.sh')
    cogs_script = os.path.join(script_dir, 'run_cogs.sh')

    irc_client = Client()
    irc_client.connect('127.0.0.1', 6667, 'obsessive')

    print('Wait to avoid reconnect flooding')
    for dummy in range(100):
        irc_client.reactor.process_once(timeout=0.1)
        time.sleep(0.1)
        print('.', end='')
        sys.stdout.flush()
    print()

    # Each helper runs in its own process group so cleanup() can kill the
    # whole tree with killpg.
    bot_proc = subprocess.Popen([bot_script], preexec_fn=os.setpgrp)
    firehose_proc = subprocess.Popen([firehose_script], preexec_fn=os.setpgrp)
    dashboard_proc = subprocess.Popen([dashboard_script], preexec_fn=os.setpgrp)
    pipeline_proc = subprocess.Popen([pipeline_script], preexec_fn=os.setpgrp)
    cogs_proc = subprocess.Popen([cogs_script], preexec_fn=os.setpgrp)
    web_proc = subprocess.Popen(
        [PYTHON, '-m', 'huhhttp', '--port', '8866'],
        preexec_fn=os.setpgrp
    )
    all_procs = [bot_proc, firehose_proc, dashboard_proc, pipeline_proc,
                 cogs_proc, web_proc]

    @atexit.register
    def cleanup():
        # Best effort: SIGTERM every process group, wait, then SIGKILL.
        for proc in all_procs:
            print('Terminate', proc)
            try:
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            except OSError as error:
                print(error)
        time.sleep(1)
        for proc in all_procs:
            print('Kill', proc)
            try:
                os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
            except OSError as error:
                print(error)

    def check_alive():
        # Bug fix: firehose_proc was never polled, so its returncode stayed
        # None forever and the assertion on it could never fire.  Poll every
        # process before asserting.
        for proc in all_procs:
            proc.poll()
        assert bot_proc.returncode is None, bot_proc.returncode
        assert firehose_proc.returncode is None, firehose_proc.returncode
        assert dashboard_proc.returncode is None, dashboard_proc.returncode
        assert pipeline_proc.returncode is None, pipeline_proc.returncode
        assert web_proc.returncode is None, web_proc.returncode
        assert cogs_proc.returncode is None, cogs_proc.returncode

    time.sleep(2)
    check_alive()

    start_time = time.time()
    while True:
        irc_client.reactor.process_once(timeout=0.2)
        time_now = time.time()
        if time_now - start_time > 5 * 60:
            # Give up after five minutes.
            break
        if all(irc_client.flags.values()):
            break

    flags = irc_client.flags
    ident = flags['ident']
    if ident:
        short_ident = ident[:5]
        flags['warc_dir'] = tuple(
            glob.glob('/tmp/warc/*{}*.gz'.format(short_ident))
        )
        flags['rsync_dir'] = tuple(
            glob.glob('/tmp/rsync/*{}*.json'.format(short_ident))
        )
    else:
        # Robustness fix: no '!status <ident>' message was ever seen.
        # Record empty results so the failure path below reports cleanly
        # instead of crashing on ``None[:5]``.
        flags['warc_dir'] = ()
        flags['rsync_dir'] = ()

    print('---FIN---')
    print(flags)

    if not all(flags.values()):
        print('FAIL!')
        sys.exit(42)

    check_alive()
# Script entry point: run the harness only when executed directly.
if __name__ == '__main__':
    main()
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.rte_component_registry."""
import inspect
import os
import pkgutil
import re
import string
import struct
from core.domain import obj_services
from core.domain import rte_component_registry
from core.tests import test_utils
import feconf
import schema_utils
import schema_utils_test
import utils
# File names ending in any of these suffixes will be ignored when checking for
# RTE component validity.
IGNORED_FILE_SUFFIXES = ['.pyc', '.DS_Store']

# Expected pixel dimensions of every RTE component thumbnail icon.
RTE_THUMBNAIL_HEIGHT_PX = 16
RTE_THUMBNAIL_WIDTH_PX = 16

# (key, expected type) pairs for the top-level keys every RTE component's
# config must define.  `basestring` implies Python 2.
_COMPONENT_CONFIG_SCHEMA = [
    ('backend_id', basestring), ('category', basestring),
    ('description', basestring), ('frontend_id', basestring),
    ('tooltip', basestring), ('icon_data_url', basestring),
    ('requires_fs', bool), ('is_block_element', bool),
    ('customization_arg_specs', list)]
class RteComponentUnitTests(test_utils.GenericTestBase):
    """Tests that all the default RTE components are valid."""

    def _is_camel_cased(self, name):
        """Check whether a name is in CamelCase."""
        # Note: only verifies non-empty + uppercase first letter; it does
        # not reject underscores or later-position non-letters.
        return name and (name[0] in string.ascii_uppercase)

    def _is_alphanumeric_string(self, input_string):
        """Check whether a string is alphanumeric (underscores allowed)."""
        return bool(re.compile('^[a-zA-Z0-9_]+$').match(input_string))

    def _validate_customization_arg_specs(self, customization_arg_specs):
        """Validates the given customization arg specs."""
        for ca_spec in customization_arg_specs:
            # Every spec must carry exactly these four keys.
            self.assertEqual(set(ca_spec.keys()), set([
                'name', 'description', 'schema', 'default_value']))
            self.assertTrue(isinstance(ca_spec['name'], basestring))
            self.assertTrue(self._is_alphanumeric_string(ca_spec['name']))
            self.assertTrue(isinstance(ca_spec['description'], basestring))
            self.assertGreater(len(ca_spec['description']), 0)

            # The default value might not pass validation checks (e.g. the
            # Image component has a required field whose default value is
            # empty). Thus, when checking the default value schema, we don't
            # apply the custom validators.
            schema_utils_test.validate_schema(ca_spec['schema'])
            self.assertEqual(
                ca_spec['default_value'],
                schema_utils.normalize_against_schema(
                    ca_spec['default_value'], ca_spec['schema'],
                    apply_custom_validators=False))

            if ca_spec['schema']['type'] == 'custom':
                # Default value of SanitizedUrl obj_type may be empty. The empty
                # string is not considered valid for this object, so we don't
                # attempt to normalize it.
                if ca_spec['schema']['obj_type'] == 'SanitizedUrl':
                    self.assertEqual(ca_spec['default_value'], '')
                else:
                    obj_class = obj_services.Registry.get_object_class_by_type(
                        ca_spec['schema']['obj_type'])
                    self.assertEqual(
                        ca_spec['default_value'],
                        obj_class.normalize(ca_spec['default_value']))

    def _listdir_omit_ignored(self, directory):
        """List all files and directories within 'directory', omitting the ones
        whose name ends in one of the IGNORED_FILE_SUFFIXES.
        """
        names = os.listdir(directory)
        for suffix in IGNORED_FILE_SUFFIXES:
            names = [name for name in names if not name.endswith(suffix)]
        return names

    def test_image_thumbnails_for_rte_components(self):
        """Test the thumbnails for the RTE component icons."""
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())

        for (component_name, component_specs) in rte_components.iteritems():
            generated_image_filepath = os.path.join(
                os.getcwd(), feconf.RTE_EXTENSIONS_DIR,
                component_name, '%s.png' % component_name)
            relative_icon_data_url = component_specs['icon_data_url'][1:]
            defined_image_filepath = os.path.join(
                os.getcwd(), feconf.EXTENSIONS_DIR_PREFIX,
                'extensions', relative_icon_data_url)
            self.assertEqual(generated_image_filepath, defined_image_filepath)

            with open(generated_image_filepath, 'rb') as f:
                img_data = f.read()
                # PNG IHDR layout: width and height are big-endian 32-bit
                # ints at byte offsets 16 and 20.
                width, height = struct.unpack('>LL', img_data[16:24])
                self.assertEqual(int(width), RTE_THUMBNAIL_WIDTH_PX)
                self.assertEqual(int(height), RTE_THUMBNAIL_HEIGHT_PX)

    def test_rte_components_are_valid(self):
        """Test that the default RTE components are valid."""
        rte_components = (
            rte_component_registry.Registry.get_all_rte_components())

        for (component_id, component_specs) in rte_components.iteritems():
            # Check that the component id is valid.
            self.assertTrue(self._is_camel_cased(component_id))

            # Check that the component directory exists.
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            self.assertTrue(os.path.isdir(component_dir))

            # In this directory there should be a /directives directory, an
            # icon .png file and a protractor.js file, and an optional
            # preview .png file.
            # In /directives directory should be HTML file, a JS file,
            # there could be multiple JS and HTML files.
            dir_contents = self._listdir_omit_ignored(component_dir)
            self.assertLessEqual(len(dir_contents), 4)

            directives_dir = os.path.join(component_dir, 'directives')
            png_file = os.path.join(component_dir, '%s.png' % component_id)
            protractor_file = os.path.join(component_dir, 'protractor.js')
            self.assertTrue(os.path.isdir(directives_dir))
            self.assertTrue(os.path.isfile(png_file))
            self.assertTrue(os.path.isfile(protractor_file))

            main_ts_file = os.path.join(
                directives_dir, 'OppiaNoninteractive%sDirective.ts'
                % component_id)
            main_html_file = os.path.join(
                directives_dir, '%s_directive.html' % component_id.lower())
            self.assertTrue(os.path.isfile(main_ts_file))
            self.assertTrue(os.path.isfile(main_html_file))

            ts_file_content = utils.get_file_contents(main_ts_file)
            self.assertIn(
                'oppiaNoninteractive%s' % component_id, ts_file_content)
            # Inline <script> tags are not allowed in directive files.
            self.assertNotIn('<script>', ts_file_content)
            self.assertNotIn('</script>', ts_file_content)

            # Check that the configuration file contains the correct
            # top-level keys, and that these keys have the correct types.
            for item, item_type in _COMPONENT_CONFIG_SCHEMA:
                self.assertTrue(isinstance(
                    component_specs[item], item_type))
                # The string attributes should be non-empty.
                if item_type == basestring:
                    self.assertTrue(component_specs[item])

            self._validate_customization_arg_specs(
                component_specs['customization_arg_specs'])  # pylint: disable=protected-access

    def test_require_file_contains_all_imports(self):
        """Test that the rich_text_components.html file contains script-imports
        for all directives of all RTE components.
        """
        rtc_ts_filenames = []
        for component_id in feconf.ALLOWED_RTE_EXTENSIONS:
            component_dir = os.path.join(
                feconf.RTE_EXTENSIONS_DIR, component_id)
            directives_dir = os.path.join(component_dir, 'directives')
            directive_filenames = os.listdir(directives_dir)
            rtc_ts_filenames.extend(
                filename for filename
                in directive_filenames if filename.endswith('.ts'))

        rtc_ts_file = os.path.join(
            feconf.RTE_EXTENSIONS_DIR, 'richTextComponentsRequires.ts')
        with open(rtc_ts_file, 'r') as f:
            rtc_require_file_contents = f.read()

        # Every directive .ts file must be mentioned in the requires file.
        for rtc_ts_filename in rtc_ts_filenames:
            self.assertIn(rtc_ts_filename, rtc_require_file_contents)
class RteComponentRegistryUnitTests(test_utils.GenericTestBase):
    """Tests the methods in RteComponentRegistry."""

    def test_get_all_rte_components(self):
        """Test get_all_rte_components method."""
        obtained_components = (
            rte_component_registry.Registry.get_all_rte_components().keys())
        # Ground truth: every subdirectory under the extensions directory.
        actual_components = [name for name in os.listdir(
            './extensions/rich_text_components') if os.path.isdir(os.path.join(
                './extensions/rich_text_components', name))]
        self.assertEqual(set(obtained_components), set(actual_components))

    def test_get_tag_list_with_attrs(self):
        """Test get_tag_list_with_attrs method."""
        obtained_tag_list_with_attrs = (
            rte_component_registry.Registry.get_tag_list_with_attrs())
        actual_tag_list_with_attrs = {}
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        # Rebuild the expected tag -> attribute-name mapping by hand.
        for component_spec in component_specs.values():
            tag_name = 'oppia-noninteractive-%s' % component_spec['frontend_id']
            attr_names = [
                '%s-with-value' % attr['name'] for attr in component_spec[
                    'customization_arg_specs']]
            actual_tag_list_with_attrs[tag_name] = attr_names
        self.assertEqual(
            set(obtained_tag_list_with_attrs.keys()),
            set(actual_tag_list_with_attrs.keys()))
        for key in obtained_tag_list_with_attrs:
            self.assertEqual(
                set(obtained_tag_list_with_attrs[key]),
                set(actual_tag_list_with_attrs[key]))

    def test_get_component_types_to_component_classes(self):
        """Test get_component_types_to_component_classes method."""
        component_types_to_component_classes = rte_component_registry.Registry.get_component_types_to_component_classes()  # pylint: disable=line-too-long
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())

        obtained_component_tags = component_types_to_component_classes.keys()
        actual_component_tags = [
            'oppia-noninteractive-%s' % component_spec['frontend_id']
            for component_spec in component_specs.values()]
        self.assertEqual(
            set(obtained_component_tags), set(actual_component_tags))

        obtained_component_class_names = [
            component_class.__name__
            for component_class in component_types_to_component_classes.values()
        ]
        actual_component_class_names = []

        # Import the 'components' module from the extensions dir and collect
        # every class it defines (other than the base class).
        rte_path = [feconf.RTE_EXTENSIONS_DIR]
        for loader, name, _ in pkgutil.iter_modules(path=rte_path):
            if name == 'components':
                module = loader.find_module(name).load_module(name)
                break
        for name, obj in inspect.getmembers(module):
            if inspect.isclass(obj) and name != 'BaseRteComponent':
                actual_component_class_names.append(name)
        self.assertEqual(
            set(obtained_component_class_names),
            set(actual_component_class_names))

    def test_get_component_tag_names(self):
        """Test get_component_tag_names method."""
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        # Exercise both boolean filters against both possible values.
        keys = ['is_block_element', 'is_complex']
        expected_values = [True, False]
        for key in keys:
            for expected_value in expected_values:
                actual_component_tag_names = [
                    'oppia-noninteractive-%s' % component_spec['frontend_id']
                    for component_spec in component_specs.values()
                    if component_spec[key] == expected_value]
                obtained_component_tag_names = (
                    rte_component_registry.Registry.get_component_tag_names(
                        key, expected_value))
                self.assertEqual(
                    set(actual_component_tag_names),
                    set(obtained_component_tag_names))

    def test_get_inline_component_tag_names(self):
        """Test get_inline_component_tag_names method."""
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        obtained_inline_component_tag_names = (
            rte_component_registry.Registry.get_inline_component_tag_names())
        # Inline components are those NOT marked as block elements.
        actual_inline_component_tag_names = [
            'oppia-noninteractive-%s' % component_spec['frontend_id']
            for component_spec in component_specs.values()
            if not component_spec['is_block_element']]
        self.assertEqual(
            set(actual_inline_component_tag_names),
            set(obtained_inline_component_tag_names))

    def test_get_block_component_tag_names(self):
        """Test get_block_component_tag_names method."""
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        obtained_block_component_tag_names = (
            rte_component_registry.Registry.get_block_component_tag_names())
        actual_block_component_tag_names = [
            'oppia-noninteractive-%s' % component_spec['frontend_id']
            for component_spec in component_specs.values()
            if component_spec['is_block_element']]
        self.assertEqual(
            set(actual_block_component_tag_names),
            set(obtained_block_component_tag_names))

    def test_get_simple_component_tag_names(self):
        """Test get_simple_component_tag_names method."""
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        obtained_simple_component_tag_names = (
            rte_component_registry.Registry.get_simple_component_tag_names())
        # Simple components are those NOT marked as complex.
        actual_simple_component_tag_names = [
            'oppia-noninteractive-%s' % component_spec['frontend_id']
            for component_spec in component_specs.values()
            if not component_spec['is_complex']]
        self.assertEqual(
            set(actual_simple_component_tag_names),
            set(obtained_simple_component_tag_names))

    def test_get_complex_component_tag_names(self):
        """Test get_complex_component_tag_names method."""
        component_specs = (
            rte_component_registry.Registry.get_all_rte_components())
        obtained_complex_component_tag_names = (
            rte_component_registry.Registry.get_complex_component_tag_names())
        actual_complex_component_tag_names = [
            'oppia-noninteractive-%s' % component_spec['frontend_id']
            for component_spec in component_specs.values()
            if component_spec['is_complex']]
        self.assertEqual(
            set(actual_complex_component_tag_names),
            set(obtained_complex_component_tag_names))
| |
# These are tests for Zulip's database migrations. System documented at:
# https://zulip.readthedocs.io/en/latest/subsystems/schema-migrations.html
#
# You can also read
# https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
# to get a tutorial on the framework that inspired this feature.
from typing import Optional
from unittest import skip
import orjson
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.test_classes import MigrationsTestCase
from zerver.lib.test_helpers import use_db_models
from zerver.models import get_stream
# Important note: These tests are very expensive, and details of
# Django's database transaction model mean it does not super work to
# have a lot of migrations tested in this file at once; so we usually
# delete the old migration tests when adding a new one, so this file
# always has a single migration test in it as an example.
#
# The error you get with multiple similar tests doing migrations on
# the same table is this (table name may vary):
#
# django.db.utils.OperationalError: cannot ALTER TABLE
# "zerver_subscription" because it has pending trigger events
#
# As a result, we generally mark these tests as skipped once they have
# been tested for a migration being merged.
@skip("Will not pass once newer migrations are merged.") # nocoverage # skipped
class MessageEditHistoryLegacyFormats(MigrationsTestCase):
migrate_from = "0376_set_realmemoji_author_and_reupload_realmemoji"
migrate_to = "0377_message_edit_history_format"
msg_id: Optional[int] = None
    @use_db_models
    def setUpBeforeMigration(self, apps: StateApps) -> None:
        """Create a message whose edit_history uses the pre-0377 legacy
        formats: "prev_subject" for topic edits and "prev_stream" for stream
        edits (the migration under test rewrites these keys).
        """
        Recipient = apps.get_model("zerver", "Recipient")
        Message = apps.get_model("zerver", "Message")
        iago = self.example_user("iago")
        stream_name = "Denmark"
        denmark = get_stream(stream_name, iago.realm)
        # type=2 — presumably Recipient.STREAM; confirm against zerver.models.
        denmark_recipient = Recipient.objects.get(type=2, type_id=denmark.id)
        self.msg_id = Message.objects.create(
            recipient_id=denmark_recipient.id,
            subject="topic 4",
            sender_id=iago.id,
            sending_client_id=1,
            content="current message text",
            date_sent=timezone_now(),
        ).id
        # topic edits contain only "prev_subject" field.
        # stream edits contain only "prev_stream" field.
        msg = Message.objects.filter(id=self.msg_id).first()
        # Entries are newest-first (timestamps descend).
        msg.edit_history = orjson.dumps(
            [
                {
                    "user_id": 11,
                    "timestamp": 1644405050,
                    "prev_stream": 3,
                    "prev_subject": "topic 3",
                },
                {"user_id": 11, "timestamp": 1644405040, "prev_stream": 2},
                {
                    "user_id": 11,
                    "timestamp": 1644405030,
                    "prev_content": "test content and topic edit",
                    "prev_rendered_content": "<p>test content and topic edit</p>",
                    "prev_rendered_content_version": 1,
                    "prev_subject": "topic 2",
                },
                {"user_id": 11, "timestamp": 1644405020, "prev_subject": "topic 1"},
                {
                    "user_id": 11,
                    "timestamp": 1644405010,
                    "prev_content": "test content only edit",
                    "prev_rendered_content": "<p>test content only edit</p>",
                    "prev_rendered_content_version": 1,
                },
            ]
        ).decode()
        msg.save(update_fields=["edit_history"])
def test_message_legacy_edit_history_format(self) -> None:
Message = self.apps.get_model("zerver", "Message")
Recipient = self.apps.get_model("zerver", "Recipient")
iago = self.example_user("iago")
stream_name = "Denmark"
denmark = get_stream(stream_name, iago.realm)
msg = Message.objects.filter(id=self.msg_id).first()
msg_stream_id = Recipient.objects.get(id=msg.recipient_id).type_id
new_edit_history = orjson.loads(msg.edit_history)
self.assert_length(new_edit_history, 5)
# stream and topic edit entry
self.assertFalse("prev_subject" in new_edit_history[0])
self.assertEqual(new_edit_history[0]["prev_topic"], "topic 3")
self.assertEqual(new_edit_history[0]["topic"], msg.subject)
self.assertEqual(new_edit_history[0]["prev_stream"], 3)
self.assertEqual(new_edit_history[0]["stream"], msg_stream_id)
self.assertEqual(new_edit_history[0]["stream"], denmark.id)
self.assertEqual(
set(new_edit_history[0].keys()),
{"timestamp", "prev_topic", "topic", "prev_stream", "stream", "user_id"},
)
# stream only edit entry
self.assertEqual(new_edit_history[1]["prev_stream"], 2)
self.assertEqual(new_edit_history[1]["stream"], 3)
self.assertEqual(
set(new_edit_history[1].keys()), {"timestamp", "prev_stream", "stream", "user_id"}
)
# topic and content edit entry
self.assertFalse("prev_subject" in new_edit_history[2])
self.assertEqual(new_edit_history[2]["prev_topic"], "topic 2")
self.assertEqual(new_edit_history[2]["topic"], "topic 3")
self.assertEqual(new_edit_history[2]["prev_content"], "test content and topic edit")
self.assertEqual(
new_edit_history[2]["prev_rendered_content"], "<p>test content and topic edit</p>"
)
self.assertEqual(new_edit_history[2]["prev_rendered_content_version"], 1)
self.assertEqual(
set(new_edit_history[2].keys()),
{
"timestamp",
"prev_topic",
"topic",
"prev_content",
"prev_rendered_content",
"prev_rendered_content_version",
"user_id",
},
)
# topic only edit entry
self.assertFalse("prev_subject" in new_edit_history[3])
self.assertEqual(new_edit_history[3]["prev_topic"], "topic 1")
self.assertEqual(new_edit_history[3]["topic"], "topic 2")
self.assertEqual(
set(new_edit_history[3].keys()), {"timestamp", "prev_topic", "topic", "user_id"}
)
# content only edit entry - not retested because never changes
self.assertEqual(new_edit_history[4]["prev_content"], "test content only edit")
self.assertEqual(
new_edit_history[4]["prev_rendered_content"], "<p>test content only edit</p>"
)
self.assertEqual(new_edit_history[4]["prev_rendered_content_version"], 1)
self.assertEqual(
set(new_edit_history[4].keys()),
{
"timestamp",
"prev_content",
"prev_rendered_content",
"prev_rendered_content_version",
"user_id",
},
)
@skip("Will not pass once newer migrations are merged.") # nocoverage # skipped
class MessageEditHistoryModernFormats(MigrationsTestCase):
    """Verify migration 0377 leaves already-modern edit_history entries
    (which carry "topic"/"prev_topic" and "stream"/"prev_stream") intact."""

    migrate_from = "0376_set_realmemoji_author_and_reupload_realmemoji"
    migrate_to = "0377_message_edit_history_format"
    # Filled in by setUpBeforeMigration; the test re-fetches this message
    # after the migration has run.
    msg_id: Optional[int] = None
    @use_db_models
    def setUpBeforeMigration(self, apps: StateApps) -> None:
        Recipient = apps.get_model("zerver", "Recipient")
        Message = apps.get_model("zerver", "Message")
        iago = self.example_user("iago")
        stream_name = "Denmark"
        denmark = get_stream(stream_name, iago.realm)
        # type=2 is the stream recipient type.
        denmark_recipient = Recipient.objects.get(type=2, type_id=denmark.id)
        self.msg_id = Message.objects.create(
            recipient_id=denmark_recipient.id,
            subject="topic 4",
            sender_id=iago.id,
            sending_client_id=1,
            content="current message text",
            date_sent=timezone_now(),
        ).id
        msg = Message.objects.filter(id=self.msg_id).first()
        msg_stream_id = Recipient.objects.get(id=msg.recipient_id).type_id
        # topic edits contain "topic" and "prev_topic" fields.
        # stream edits contain "stream" and "prev_stream" fields.
        msg.edit_history = orjson.dumps(
            [
                {
                    "user_id": 11,
                    "timestamp": 1644405050,
                    "stream": msg_stream_id,
                    "prev_stream": 3,
                    "topic": msg.subject,
                    "prev_topic": "topic 3",
                },
                {"user_id": 11, "timestamp": 1644405040, "prev_stream": 2, "stream": 3},
                {
                    "user_id": 11,
                    "timestamp": 1644405030,
                    "prev_content": "test content and topic edit",
                    "prev_rendered_content": "<p>test content and topic edit</p>",
                    "prev_rendered_content_version": 1,
                    "prev_topic": "topic 2",
                    "topic": "topic 3",
                },
                {
                    "user_id": 11,
                    "timestamp": 1644405020,
                    "prev_topic": "topic 1",
                    "topic": "topic 2",
                },
            ]
        ).decode()
        msg.save(update_fields=["edit_history"])
    def test_message_modern_edit_history_format(self) -> None:
        # self.apps holds the post-migration model states; all entries
        # should match exactly what setUpBeforeMigration stored.
        Message = self.apps.get_model("zerver", "Message")
        Recipient = self.apps.get_model("zerver", "Recipient")
        iago = self.example_user("iago")
        stream_name = "Denmark"
        denmark = get_stream(stream_name, iago.realm)
        msg = Message.objects.filter(id=self.msg_id).first()
        msg_stream_id = Recipient.objects.get(id=msg.recipient_id).type_id
        new_edit_history = orjson.loads(msg.edit_history)
        self.assert_length(new_edit_history, 4)
        # stream and topic edit entry
        self.assertEqual(new_edit_history[0]["prev_topic"], "topic 3")
        self.assertEqual(new_edit_history[0]["topic"], msg.subject)
        self.assertEqual(new_edit_history[0]["prev_stream"], 3)
        self.assertEqual(new_edit_history[0]["stream"], msg_stream_id)
        self.assertEqual(new_edit_history[0]["stream"], denmark.id)
        self.assertEqual(
            set(new_edit_history[0].keys()),
            {"timestamp", "prev_topic", "topic", "prev_stream", "stream", "user_id"},
        )
        # stream only edit entry
        self.assertEqual(new_edit_history[1]["prev_stream"], 2)
        self.assertEqual(new_edit_history[1]["stream"], 3)
        self.assertEqual(
            set(new_edit_history[1].keys()), {"timestamp", "prev_stream", "stream", "user_id"}
        )
        # topic and content edit entry
        self.assertEqual(new_edit_history[2]["prev_topic"], "topic 2")
        self.assertEqual(new_edit_history[2]["topic"], "topic 3")
        self.assertEqual(new_edit_history[2]["prev_content"], "test content and topic edit")
        self.assertEqual(
            new_edit_history[2]["prev_rendered_content"], "<p>test content and topic edit</p>"
        )
        self.assertEqual(new_edit_history[2]["prev_rendered_content_version"], 1)
        self.assertEqual(
            set(new_edit_history[2].keys()),
            {
                "timestamp",
                "prev_topic",
                "topic",
                "prev_content",
                "prev_rendered_content",
                "prev_rendered_content_version",
                "user_id",
            },
        )
        # topic only edit entry
        self.assertEqual(new_edit_history[3]["prev_topic"], "topic 1")
        self.assertEqual(new_edit_history[3]["topic"], "topic 2")
        self.assertEqual(
            set(new_edit_history[3].keys()), {"timestamp", "prev_topic", "topic", "user_id"}
        )
@skip("Will not pass once newer migrations are merged.") # nocoverage # skipped
class MessageEditHistoryIntermediateFormats(MigrationsTestCase):
    """Verify migration 0377 handles intermediate-format edit_history
    entries that carry both the legacy "prev_subject" field and the modern
    "prev_topic"/"topic" fields: the redundant "prev_subject" is dropped."""

    migrate_from = "0376_set_realmemoji_author_and_reupload_realmemoji"
    migrate_to = "0377_message_edit_history_format"
    # Filled in by setUpBeforeMigration; the test re-fetches this message
    # after the migration has run.
    msg_id: Optional[int] = None
    @use_db_models
    def setUpBeforeMigration(self, apps: StateApps) -> None:
        Recipient = apps.get_model("zerver", "Recipient")
        Message = apps.get_model("zerver", "Message")
        iago = self.example_user("iago")
        stream_name = "Denmark"
        denmark = get_stream(stream_name, iago.realm)
        # type=2 is the stream recipient type.
        denmark_recipient = Recipient.objects.get(type=2, type_id=denmark.id)
        self.msg_id = Message.objects.create(
            recipient_id=denmark_recipient.id,
            subject="topic 4",
            sender_id=iago.id,
            sending_client_id=1,
            content="current message text",
            date_sent=timezone_now(),
        ).id
        msg = Message.objects.filter(id=self.msg_id).first()
        msg_stream_id = Recipient.objects.get(id=msg.recipient_id).type_id
        # topic edits contain "prev_subject", "topic" and "prev_topic" fields.
        # stream edits contain "stream" and "prev_stream" fields.
        msg.edit_history = orjson.dumps(
            [
                {
                    "user_id": 11,
                    "timestamp": 1644405050,
                    "stream": msg_stream_id,
                    "prev_stream": 3,
                    "topic": msg.subject,
                    "prev_topic": "topic 3",
                    "prev_subject": "topic 3",
                },
                {"user_id": 11, "timestamp": 1644405040, "prev_stream": 2, "stream": 3},
                {
                    "user_id": 11,
                    "timestamp": 1644405030,
                    "prev_content": "test content and topic edit",
                    "prev_rendered_content": "<p>test content and topic edit</p>",
                    "prev_rendered_content_version": 1,
                    "prev_topic": "topic 2",
                    "prev_subject": "topic 2",
                    "topic": "topic 3",
                },
                {
                    "user_id": 11,
                    "timestamp": 1644405020,
                    "prev_topic": "topic 1",
                    "prev_subject": "topic 1",
                    "topic": "topic 2",
                },
            ]
        ).decode()
        msg.save(update_fields=["edit_history"])
    def test_message_temporary_edit_history_format(self) -> None:
        # self.apps holds the post-migration model states; entries should
        # match the modern format with "prev_subject" removed.
        Message = self.apps.get_model("zerver", "Message")
        Recipient = self.apps.get_model("zerver", "Recipient")
        iago = self.example_user("iago")
        stream_name = "Denmark"
        denmark = get_stream(stream_name, iago.realm)
        msg = Message.objects.filter(id=self.msg_id).first()
        msg_stream_id = Recipient.objects.get(id=msg.recipient_id).type_id
        new_edit_history = orjson.loads(msg.edit_history)
        self.assert_length(new_edit_history, 4)
        # stream and topic edit entry
        self.assertFalse("prev_subject" in new_edit_history[0])
        self.assertEqual(new_edit_history[0]["prev_topic"], "topic 3")
        self.assertEqual(new_edit_history[0]["topic"], msg.subject)
        self.assertEqual(new_edit_history[0]["prev_stream"], 3)
        self.assertEqual(new_edit_history[0]["stream"], msg_stream_id)
        self.assertEqual(new_edit_history[0]["stream"], denmark.id)
        self.assertEqual(
            set(new_edit_history[0].keys()),
            {"timestamp", "prev_topic", "topic", "prev_stream", "stream", "user_id"},
        )
        # stream only edit entry
        self.assertEqual(new_edit_history[1]["prev_stream"], 2)
        self.assertEqual(new_edit_history[1]["stream"], 3)
        self.assertEqual(
            set(new_edit_history[1].keys()), {"timestamp", "prev_stream", "stream", "user_id"}
        )
        # topic and content edit entry
        self.assertFalse("prev_subject" in new_edit_history[2])
        self.assertEqual(new_edit_history[2]["prev_topic"], "topic 2")
        self.assertEqual(new_edit_history[2]["topic"], "topic 3")
        self.assertEqual(new_edit_history[2]["prev_content"], "test content and topic edit")
        self.assertEqual(
            new_edit_history[2]["prev_rendered_content"], "<p>test content and topic edit</p>"
        )
        self.assertEqual(new_edit_history[2]["prev_rendered_content_version"], 1)
        self.assertEqual(
            set(new_edit_history[2].keys()),
            {
                "timestamp",
                "prev_topic",
                "topic",
                "prev_content",
                "prev_rendered_content",
                "prev_rendered_content_version",
                "user_id",
            },
        )
        # topic only edit entry
        self.assertFalse("prev_subject" in new_edit_history[3])
        self.assertEqual(new_edit_history[3]["prev_topic"], "topic 1")
        self.assertEqual(new_edit_history[3]["topic"], "topic 2")
        self.assertEqual(
            set(new_edit_history[3].keys()), {"timestamp", "prev_topic", "topic", "user_id"}
        )
| |
# Copyright 2015 Cloudera Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.expr.datatypes import validate_type
import ibis.expr.datatypes as _dt
import ibis.expr.operations as _ops
import ibis.expr.rules as rules
import ibis.impala.compiler as comp
import ibis.common as com
import ibis.util as util
# Public API of this module: helpers to wrap and register Impala UDFs/UDAs.
__all__ = ['add_operation', 'scalar_function', 'aggregate_function',
           'wrap_udf', 'wrap_uda']
class Function(object):
    """Base wrapper for a user-defined function.

    Subclasses provide ``_get_class_name`` and ``_type_signature``.  An
    instance is callable: calling it builds a node of a dynamically created
    operation class and returns the corresponding expression.
    """

    def __init__(self, inputs, output, name):
        self.inputs = inputs
        self.output = output
        self.name = name
        signature = self._type_signature(inputs, output)
        self.input_type, self.output_type = signature
        self._klass = self._create_operation(name)

    def _create_operation(self, name):
        # Derive a fresh operation class carrying this function's signature.
        return _create_operation_class(
            self._get_class_name(name), self.input_type, self.output_type)

    def __repr__(self):
        return '{0}({1}, {2!r}, {3!r})'.format(
            type(self).__name__, self.name, self.inputs, self.output)

    def __call__(self, *args):
        # Wrap the operation node in an expression for further composition.
        return self._klass(*args).to_expr()

    def register(self, name, database):
        """
        Registers the given operation within the Ibis SQL translation
        toolchain. Can also use add_operation API

        Parameters
        ----------
        name: used in issuing statements to SQL engine
        database: database the relevant operator is registered to
        """
        add_operation(self._klass, name, database)
class ScalarFunction(Function):
    """A scalar (per-row) user-defined function."""

    def _get_class_name(self, name):
        # Anonymous functions get a generated unique suffix.
        suffix = name if name is not None else util.guid()
        return 'UDF_{0}'.format(suffix)

    def _type_signature(self, inputs, output):
        # Scalar UDF output mirrors the shape of its (flattened) arguments.
        validated_output = validate_type(output)
        return (_to_input_sig(inputs),
                rules.shape_like_flatargs(validated_output))
class AggregateFunction(Function):
    """An aggregate (reduction) user-defined function."""

    def _create_operation(self, name):
        # Same dynamic class as the base, but flagged as a reduction so the
        # expression system treats it as an aggregation.
        op_class = Function._create_operation(self, name)
        op_class._reduction = True
        return op_class

    def _get_class_name(self, name):
        # Anonymous functions get a generated unique suffix.
        suffix = name if name is not None else util.guid()
        return 'UDA_{0}'.format(suffix)

    def _type_signature(self, inputs, output):
        # Aggregates always collapse to a scalar result.
        return (_to_input_sig(inputs),
                rules.scalar_output(validate_type(output)))
class ImpalaFunction(object):
    """Mixin tracking the shared-library location of an Impala function."""

    def __init__(self, name=None, lib_path=None):
        self.lib_path = lib_path
        # Fall back to a generated unique name when none was supplied.
        self.name = name or util.guid()
        if lib_path is not None:
            self._check_library()

    def _check_library(self):
        # Impala accepts native (.so) and LLVM IR (.ll) libraries.
        if self.lib_path[-3:] not in ('.so', '.ll'):
            raise ValueError('Invalid file type. Must be .so or .ll ')

    def hash(self):
        raise NotImplementedError
class ImpalaUDF(ScalarFunction, ImpalaFunction):
    """A scalar UDF backed by a symbol in an Impala shared library."""

    def __init__(self, inputs, output, so_symbol=None, lib_path=None,
                 name=None):
        # C++ symbol name of the UDF entry point inside the library.
        self.so_symbol = so_symbol
        # Initialize library bookkeeping first so self.name is settled
        # before the operation class is created.
        ImpalaFunction.__init__(self, name=name, lib_path=lib_path)
        ScalarFunction.__init__(self, inputs, output, name=self.name)

    def hash(self):
        # TODO: hashing of the UDF identity (symbol plus input type names)
        # is not implemented yet; deliberately a no-op for now.
        pass
class ImpalaUDA(AggregateFunction, ImpalaFunction):
    """An aggregate UDA backed by symbols in an Impala shared library."""

    def __init__(self, inputs, output, update_fn=None, init_fn=None,
                 merge_fn=None, finalize_fn=None, serialize_fn=None,
                 lib_path=None, name=None):
        # Library symbol names for each stage of the UDA lifecycle.
        self.init_fn = init_fn
        self.update_fn = update_fn
        self.merge_fn = merge_fn
        self.finalize_fn = finalize_fn
        self.serialize_fn = serialize_fn
        ImpalaFunction.__init__(self, name=name, lib_path=lib_path)
        AggregateFunction.__init__(self, inputs, output, name=self.name)

    def _check_library(self):
        # UDAs are stricter than UDFs: only native .so libraries work.
        suffix = self.lib_path[-3:]
        if suffix == '.ll':
            raise com.IbisInputError('LLVM IR UDAs are not yet supported')
        if suffix != '.so':
            raise ValueError('Invalid file type. Must be .so')
def wrap_uda(hdfs_file, inputs, output, update_fn, init_fn=None,
             merge_fn=None, finalize_fn=None, serialize_fn=None,
             close_fn=None, name=None):
    """
    Creates a callable aggregation function object. Must be created in Impala
    to be used

    Parameters
    ----------
    hdfs_file: .so file that contains relevant UDA
    inputs: list of strings denoting ibis datatypes
    output: string denoting ibis datatype
    update_fn: string
      Library symbol name for update function
    init_fn: string, optional
      Library symbol name for initialization function
    merge_fn: string, optional
      Library symbol name for merge function
    finalize_fn: string, optional
      Library symbol name for finalize function
    serialize_fn : string, optional
      Library symbol name for serialize UDA API function. Not required for all
      UDAs; see documentation for more.
    close_fn : string, optional
    name: string, optional
      Used internally to track function

    Returns
    -------
    container : UDA object
    """
    # NOTE: close_fn is accepted for API completeness but not forwarded.
    return ImpalaUDA(inputs, output, update_fn, init_fn, merge_fn,
                     finalize_fn, serialize_fn=serialize_fn,
                     name=name, lib_path=hdfs_file)
def wrap_udf(hdfs_file, inputs, output, so_symbol, name=None):
    """
    Creates a callable scalar function object. Must be created in Impala to be
    used

    Parameters
    ----------
    hdfs_file: .so file that contains relevant UDF
    inputs: list of strings or TypeSignature
      Input types to UDF
    output: string
      Ibis data type
    so_symbol: string, C++ function name for relevant UDF
    name: string (optional). Used internally to track function

    Returns
    -------
    container : UDF object
    """
    return ImpalaUDF(inputs, output, so_symbol,
                     name=name, lib_path=hdfs_file)
def scalar_function(inputs, output, name=None):
    """
    Creates an operator class that can be passed to add_operation()

    Parameters:
    inputs: list of strings
      Ibis data type names
    output: string
      Ibis data type
    name: string, optional
      Used internally to track function

    Returns
    -------
    klass, user_api : class, function
    """
    return ScalarFunction(inputs, output, name=name)
def aggregate_function(inputs, output, name=None):
    """
    Creates an operator class that can be passed to add_operation()

    Parameters:
    inputs: list of strings
      Ibis data type names
    output: string
      Ibis data type
    name: string, optional
      Used internally to track function

    Returns
    -------
    klass, user_api : class, function
    """
    return AggregateFunction(inputs, output, name=name)
def _to_input_sig(inputs):
    # Pass prebuilt signatures through untouched; otherwise validate each
    # type name and wrap the result in a TypeSignature.
    if isinstance(inputs, rules.TypeSignature):
        return inputs
    validated = [validate_type(t) for t in inputs]
    return rules.TypeSignature([rules.value_typed_as(t) for t in validated])
def _create_operation_class(name, input_type, output_type):
    # Dynamically derive a ValueOp subclass carrying the given signature.
    return type(name, (_ops.ValueOp,), {
        'input_type': input_type,
        'output_type': output_type,
    })
def add_operation(op, func_name, db):
    """
    Registers the given operation within the Ibis SQL translation toolchain

    Parameters
    ----------
    op: operator class
    func_name: used in issuing statements to SQL engine
    db: database the relevant operator is registered to
    """
    # Qualify the function name with its database for SQL generation.
    full_name = '{0}.{1}'.format(db, func_name)
    # Variadic signatures need a variadic translator; otherwise one is
    # generated for the fixed arity of the signature.
    if isinstance(op.input_type, rules.VarArgs):
        translator = comp.varargs(full_name)
    else:
        arity = len(op.input_type.types)
        translator = comp.fixed_arity(full_name, arity)
    comp._operation_registry[op] = translator
def _impala_type_to_ibis(tval):
    """Map an Impala type name to its Ibis equivalent.

    Names without a mapping are passed through unchanged.  Uses a single
    dict lookup instead of the original membership-test-then-index pair.
    """
    return _impala_to_ibis_type.get(tval, tval)
def _ibis_string_to_impala(tval):
    # Map an Ibis type name to its Impala SQL spelling; falls back to
    # parsing decimal type strings.  Implicitly returns None when the name
    # is neither a known type nor a parseable decimal — callers must handle
    # that case.
    from ibis.impala.compiler import _sql_type_names
    if tval in _sql_type_names:
        return _sql_type_names[tval]
    result = _dt._parse_decimal(tval)
    if result:
        return repr(result)
# Translation table from Impala type names to Ibis type names; names not
# listed here are passed through unchanged (see _impala_type_to_ibis).
_impala_to_ibis_type = {
    'boolean': 'boolean',
    'tinyint': 'int8',
    'smallint': 'int16',
    'int': 'int32',
    'bigint': 'int64',
    'float': 'float',
    'double': 'double',
    'string': 'string',
    'varchar': 'string',
    'char': 'string',
    'timestamp': 'timestamp',
    'decimal': 'decimal'
}
| |
# Copyright (c) 2014 Hoang Do, Phuc Vo, P. Michiardi, D. Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara import conductor as c
from sahara.plugins import provisioning as p
from sahara.plugins import utils
from sahara.swift import swift_helper as swift
from sahara.topology import topology_helper as topology
from sahara.utils import files as f
from sahara.utils import types as types
from sahara.utils import xmlutils as x
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Default Hadoop configuration values shipped with the plugin, plus the
# swift integration defaults loaded from the swift helper.
CORE_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/spark/resources/core-default.xml')
HDFS_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/spark/resources/hdfs-default.xml')
SWIFT_DEFAULTS = swift.read_default_swift_configs()
# Services whose configuration is rendered into Hadoop XML files.
XML_CONFS = {
    "HDFS": [CORE_DEFAULT, HDFS_DEFAULT, SWIFT_DEFAULTS]
}
# Jars required for swift access from Spark executors.
_default_executor_classpath = ":".join(
    ['/usr/lib/hadoop/lib/jackson-core-asl-1.8.8.jar',
     '/usr/lib/hadoop/hadoop-swift.jar'])
# Spark-specific user-tunable options; rendered as cluster-scoped Config
# objects by _initialise_configs().  All defaults are strings.
SPARK_CONFS = {
    'Spark': {
        "OPTIONS": [
            {
                'name': 'Executor extra classpath',
                'description': 'Value for spark.executor.extraClassPath'
                ' in spark-defaults.conf'
                ' (default: %s)' % _default_executor_classpath,
                'default': '%s' % _default_executor_classpath,
                'priority': 2,
            },
            {
                'name': 'Master port',
                'description': 'Start the master on a different port'
                ' (default: 7077)',
                'default': '7077',
                'priority': 2,
            },
            {
                'name': 'Worker port',
                'description': 'Start the Spark worker on a specific port'
                ' (default: random)',
                'default': 'random',
                'priority': 2,
            },
            {
                'name': 'Master webui port',
                'description': 'Port for the master web UI (default: 8080)',
                'default': '8080',
                'priority': 1,
            },
            {
                'name': 'Worker webui port',
                'description': 'Port for the worker web UI (default: 8081)',
                'default': '8081',
                'priority': 1,
            },
            {
                'name': 'Worker cores',
                'description': 'Total number of cores to allow Spark'
                ' applications to use on the machine'
                ' (default: all available cores)',
                'default': 'all',
                'priority': 2,
            },
            {
                'name': 'Worker memory',
                'description': 'Total amount of memory to allow Spark'
                ' applications to use on the machine, e.g. 1000m,'
                ' 2g (default: total memory minus 1 GB)',
                'default': 'all',
                'priority': 1,
            },
            {
                'name': 'Worker instances',
                'description': 'Number of worker instances to run on each'
                ' machine (default: 1)',
                'default': '1',
                'priority': 2,
            },
            {
                'name': 'Spark home',
                'description': 'The location of the spark installation'
                ' (default: /opt/spark)',
                'default': '/opt/spark',
                'priority': 2,
            },
            {
                'name': 'Minimum cleanup seconds',
                'description': 'Job data will never be purged before this'
                ' amount of time elapses (default: 86400 = 1 day)',
                'default': '86400',
                'priority': 2,
            },
            {
                'name': 'Maximum cleanup seconds',
                'description': 'Job data will always be purged after this'
                ' amount of time elapses (default: 1209600 = 14 days)',
                'default': '1209600',
                'priority': 2,
            },
            {
                'name': 'Minimum cleanup megabytes',
                'description': 'No job data will be purged unless the total'
                ' job data exceeds this size (default: 4096 = 4GB)',
                'default': '4096',
                'priority': 2,
            },
        ]
    }
}
HADOOP_CONF_DIR = "/etc/hadoop/conf"
# Environment-variable style configs rendered into hadoop-env.sh; each
# format string receives the configured heap size (in MB).
ENV_CONFS = {
    "HDFS": {
        'Name Node Heap Size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"',
        'Data Node Heap Size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"'
    }
}
ENABLE_DATA_LOCALITY = p.Config('Enable Data Locality', 'general', 'cluster',
                                config_type="bool", priority=1,
                                default_value=True, is_optional=True)
ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
                        config_type="bool", priority=1,
                        default_value=True, is_optional=False)
# Default set to 1 day, which is the default Keystone token
# expiration time. After the token is expired we can't continue
# scaling anyway.
DECOMMISSIONING_TIMEOUT = p.Config('Decommissioning Timeout', 'general',
                                   'cluster', config_type='int', priority=1,
                                   default_value=86400, is_optional=True,
                                   description='Timeout for datanode'
                                               ' decommissioning operation'
                                               ' during scaling, in seconds')
# Configs managed by the plugin itself and hidden from users.
HIDDEN_CONFS = ['fs.defaultFS', 'dfs.namenode.name.dir',
                'dfs.datanode.data.dir']
# Configs that must be identical across the whole cluster.
CLUSTER_WIDE_CONFS = ['dfs.block.size', 'dfs.permissions', 'dfs.replication',
                      'dfs.replication.min', 'dfs.replication.max',
                      'io.file.buffer.size']
PRIORITY_1_CONFS = ['dfs.datanode.du.reserved',
                    'dfs.datanode.failed.volumes.tolerated',
                    'dfs.datanode.max.xcievers', 'dfs.datanode.handler.count',
                    'dfs.namenode.handler.count']
# for now we have not so many cluster-wide configs
# lets consider all of them having high priority
PRIORITY_1_CONFS += CLUSTER_WIDE_CONFS
def _initialise_configs():
    """Build the full list of plugin Config objects from the XML defaults,
    environment configs and Spark options declared above."""
    configs = []
    for service, config_lists in six.iteritems(XML_CONFS):
        for config_list in config_lists:
            for config in config_list:
                if config['name'] not in HIDDEN_CONFS:
                    # NOTE(review): this local name shadows the module-level
                    # oslo_config alias 'cfg' imported above.
                    cfg = p.Config(config['name'], service, "node",
                                   is_optional=True, config_type="string",
                                   default_value=str(config['value']),
                                   description=config['description'])
                    # Infer a more specific config type from the default's
                    # string form ("true"/"false" -> bool, digits -> int).
                    if cfg.default_value in ["true", "false"]:
                        cfg.config_type = "bool"
                        cfg.default_value = (cfg.default_value == 'true')
                    elif types.is_int(cfg.default_value):
                        cfg.config_type = "int"
                        cfg.default_value = int(cfg.default_value)
                    if config['name'] in CLUSTER_WIDE_CONFS:
                        cfg.scope = 'cluster'
                    if config['name'] in PRIORITY_1_CONFS:
                        cfg.priority = 1
                    configs.append(cfg)
    for service, config_items in six.iteritems(ENV_CONFS):
        for name, param_format_str in six.iteritems(config_items):
            # Heap sizes default to 1024 MB; the format string itself is
            # applied later when hadoop-env.sh is generated.
            configs.append(p.Config(name, service, "node",
                                    default_value=1024, priority=1,
                                    config_type="int"))
    for service, config_items in six.iteritems(SPARK_CONFS):
        for item in config_items['OPTIONS']:
            cfg = p.Config(name=item["name"],
                           description=item["description"],
                           default_value=item["default"],
                           applicable_target=service,
                           scope="cluster", is_optional=True,
                           priority=item["priority"])
            configs.append(cfg)
    configs.append(DECOMMISSIONING_TIMEOUT)
    configs.append(ENABLE_SWIFT)
    # Data locality is only offered when enabled service-wide.
    if CONF.enable_data_locality:
        configs.append(ENABLE_DATA_LOCALITY)
    return configs
# Initialise plugin Hadoop configurations
PLUGIN_CONFIGS = _initialise_configs()
def get_plugin_configs():
    # Accessor for the module-level config list built at import time.
    return PLUGIN_CONFIGS
def generate_cfg_from_general(cfg, configs, general_config,
                              rest_excluded=False):
    """Fold the 'general' section of user configs into cfg.

    Without a 'general' section, every entry of general_config is applied.
    Otherwise missing options are filled with their declared defaults
    (unless rest_excluded), and each truthy option is applied in turn.
    """
    if 'general' not in configs:
        return _set_config(cfg, general_config)
    if not rest_excluded:
        for nm in general_config:
            if nm not in configs['general']:
                configs['general'][nm] = general_config[nm]['default_value']
    for name, value in configs['general'].items():
        if value:
            cfg = _set_config(cfg, general_config, name)
            LOG.debug("Applying config: {name}".format(name=name))
    return cfg
def _get_hostname(service):
return service.hostname() if service else None
def generate_xml_configs(configs, storage_path, nn_hostname, hadoop_port):
    """Render the core-site.xml and hdfs-site.xml contents for a cluster.

    configs: user-supplied configuration, keyed by service
    storage_path: list of storage base directories on the instances
    nn_hostname: namenode hostname
    hadoop_port: namenode port (defaults to 8020 when None)
    """
    if hadoop_port is None:
        hadoop_port = 8020
    # Plugin-managed settings that users cannot override directly.
    cfg = {
        'fs.defaultFS': 'hdfs://%s:%s' % (nn_hostname, str(hadoop_port)),
        'dfs.namenode.name.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/nn'),
        'dfs.datanode.data.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/dn'),
        'hadoop.tmp.dir': extract_hadoop_path(storage_path,
                                              '/dfs'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl'
    }
    # inserting user-defined configs
    for key, value in extract_hadoop_xml_confs(configs):
        cfg[key] = value
    # Add the swift defaults if they have not been set by the user
    swft_def = []
    if is_swift_enabled(configs):
        swft_def = SWIFT_DEFAULTS
        swift_configs = extract_name_values(swift.get_swift_configs())
        for key, value in six.iteritems(swift_configs):
            if key not in cfg:
                cfg[key] = value
    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swft_def
    if CONF.enable_data_locality:
        cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
    xml_configs = {
        'core-site': x.create_hadoop_xml(cfg, core_all),
        'hdfs-site': x.create_hadoop_xml(cfg, HDFS_DEFAULT)
    }
    return xml_configs
def _get_spark_opt_default(opt_name):
    # Linear scan of the declared Spark options; None when unknown.
    return next(
        (opt["default"] for opt in SPARK_CONFS["Spark"]["OPTIONS"]
         if opt["name"] == opt_name),
        None,
    )
def generate_spark_env_configs(cluster):
    """Render the contents of spark-env.sh for the given cluster.

    Only options that were explicitly changed away from their declared
    plugin defaults are emitted, so Spark's own built-in defaults apply
    otherwise.  Returns the file contents as a single string.
    """
    configs = []
    # master configuration
    sp_master = utils.get_instance(cluster, "master")
    configs.append('SPARK_MASTER_IP=' + sp_master.hostname())
    # point to the hadoop conf dir so that Spark can read things
    # like the swift configuration without having to copy core-site
    # to /opt/spark/conf
    configs.append('HADOOP_CONF_DIR=' + HADOOP_CONF_DIR)

    def _append_non_default(option_name, env_var):
        # Emit env_var only when the user overrode the plugin default.
        value = utils.get_config_value_or_default("Spark", option_name,
                                                  cluster)
        if value and value != _get_spark_opt_default(option_name):
            configs.append(env_var + '=' + str(value))

    _append_non_default("Master port", 'SPARK_MASTER_PORT')
    _append_non_default("Master webui port", 'SPARK_MASTER_WEBUI_PORT')
    # configuration for workers
    _append_non_default("Worker cores", 'SPARK_WORKER_CORES')
    _append_non_default("Worker memory", 'SPARK_WORKER_MEMORY')
    _append_non_default("Worker port", 'SPARK_WORKER_PORT')
    _append_non_default("Worker webui port", 'SPARK_WORKER_WEBUI_PORT')
    _append_non_default("Worker instances", 'SPARK_WORKER_INSTANCES')
    return '\n'.join(configs)
# workernames need to be a list of worker names
def generate_spark_slaves_configs(workernames):
    # The slaves file is simply one worker hostname per line.
    return '\n'.join(workernames)
def generate_spark_executor_classpath(cluster):
    # Emit the spark-defaults.conf line only when a classpath is set;
    # otherwise return a bare newline so the file content stays valid.
    cp = utils.get_config_value_or_default("Spark",
                                           "Executor extra classpath",
                                           cluster)
    return "spark.executor.extraClassPath " + cp if cp else "\n"
def extract_hadoop_environment_confs(configs):
    """Returns environment specific Hadoop configurations.

    :returns list of Hadoop parameters which should be passed via environment
    """
    lst = []
    for service, srv_confs in configs.items():
        fmt_by_name = ENV_CONFS.get(service)
        if not fmt_by_name:
            continue
        for param_name, param_value in srv_confs.items():
            fmt = fmt_by_name.get(param_name)
            if fmt is not None and param_value is not None:
                lst.append(fmt % param_value)
    return lst
def extract_hadoop_xml_confs(configs):
    """Returns xml specific Hadoop configurations.

    :returns list of Hadoop parameters which should be passed into general
    configs like core-site.xml
    """
    lst = []
    for service, srv_confs in configs.items():
        cfg_lists = XML_CONFS.get(service)
        if not cfg_lists:
            continue
        for param_name, param_value in srv_confs.items():
            if param_value is None:
                continue
            # Deliberately appended once per default file that declares the
            # name, matching the historical behavior.
            for cfg_list in cfg_lists:
                if any(c['name'] == param_name for c in cfg_list):
                    lst.append((param_name, param_value))
    return lst
def generate_hadoop_setup_script(storage_paths, env_configs):
    """Build the shell script that installs hadoop-env.sh settings and
    fixes ownership/permissions of the storage directories."""
    lines = ["#!/bin/bash -x", "echo -n > /tmp/hadoop-env.sh"]
    # Only HADOOP_* environment settings belong in hadoop-env.sh.
    lines.extend('echo "%s" >> /tmp/hadoop-env.sh' % line
                 for line in env_configs if 'HADOOP' in line)
    lines.append("cat /etc/hadoop/hadoop-env.sh >> /tmp/hadoop-env.sh")
    lines.append("cp /tmp/hadoop-env.sh /etc/hadoop/hadoop-env.sh")
    user_log_dir = storage_paths[0] + "/log/hadoop/\$USER/"
    lines.append('sed -i "s,export HADOOP_LOG_DIR=.*,'
                 'export HADOOP_LOG_DIR=%s," /etc/hadoop/hadoop-env.sh'
                 % user_log_dir)
    secure_log_dir = storage_paths[0] + "/log/hadoop/hdfs"
    lines.append('sed -i "s,export HADOOP_SECURE_DN_LOG_DIR=.*,'
                 'export HADOOP_SECURE_DN_LOG_DIR=%s," '
                 '/etc/hadoop/hadoop-env.sh' % secure_log_dir)
    for path in storage_paths:
        lines.append("chown -R hadoop:hadoop %s" % path)
        lines.append("chmod -R 755 %s" % path)
    return "\n".join(lines)
def generate_job_cleanup_config(cluster):
    """Build the periodic job-dir cleanup configuration for Spark.

    :param cluster: cluster object used to read the cleanup config values
    :returns: dict with a 'valid' flag and, when valid, 'cron' (cron file
        text) and 'script' (rendered cleanup shell script) entries
    """
    args = {
        'minimum_cleanup_megabytes': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup megabytes", cluster),
        'minimum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup seconds", cluster),
        'maximum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Maximum cleanup seconds", cluster)
    }
    # Cleanup is only enabled when all three thresholds are positive.
    job_conf = {'valid': (args['maximum_cleanup_seconds'] > 0 and
                          (args['minimum_cleanup_megabytes'] > 0
                           and args['minimum_cleanup_seconds'] > 0))}
    if job_conf['valid']:
        # Bugfix: a trailing comma here previously turned job_conf['cron']
        # into a 1-tuple instead of the cron file's text.
        job_conf['cron'] = f.get_file_text(
            'plugins/spark/resources/spark-cleanup.cron')
        job_cleanup_script = f.get_file_text(
            'plugins/spark/resources/tmp-cleanup.sh.template')
        job_conf['script'] = job_cleanup_script.format(**args)
    return job_conf
def extract_name_values(configs):
    """Map each config dict's 'name' field to its 'value' field."""
    return dict((cfg['name'], cfg['value']) for cfg in configs)
def make_hadoop_path(base_dirs, suffix):
    """Append *suffix* to every base directory and return the new list."""
    paths = []
    for base_dir in base_dirs:
        paths.append(base_dir + suffix)
    return paths
def extract_hadoop_path(lst, hadoop_dir):
    """Join each base dir with *hadoop_dir* into a comma-separated string.

    Returns None for an empty/falsy *lst* (same as the previous implicit
    fall-through return).
    """
    if not lst:
        return None
    return ",".join(make_hadoop_path(lst, hadoop_dir))
def _set_config(cfg, gen_cfg, name=None):
if name in gen_cfg:
cfg.update(gen_cfg[name]['conf'])
if name is None:
for name in gen_cfg:
cfg.update(gen_cfg[name]['conf'])
return cfg
def _get_general_config_value(conf, option):
if 'general' in conf and option.name in conf['general']:
return conf['general'][option.name]
return option.default_value
def _get_general_cluster_config_value(cluster, option):
    # Thin wrapper: read *option* from the cluster's stored configuration.
    return _get_general_config_value(cluster.cluster_configs, option)
def is_data_locality_enabled(cluster):
    """Whether data locality is enabled globally AND for this cluster."""
    if CONF.enable_data_locality:
        # Global switch is on; defer to the per-cluster option/default.
        return _get_general_cluster_config_value(cluster, ENABLE_DATA_LOCALITY)
    return False
def is_swift_enabled(configs):
    # Read the general 'swift enabled' option (falls back to its default).
    return _get_general_config_value(configs, ENABLE_SWIFT)
def get_decommissioning_timeout(cluster):
    # Per-cluster override (or default) for the decommissioning timeout.
    return _get_general_cluster_config_value(cluster, DECOMMISSIONING_TIMEOUT)
def get_port_from_config(service, name, cluster=None):
    """Return the port number parsed from a configured address value."""
    address = utils.get_config_value_or_default(service, name, cluster)
    return utils.get_port_from_address(address)
| |
from datetime import datetime
import importlib
from io import StringIO
import os
import sys
from types import ModuleType
from typing import Union, Optional, Set, Tuple, Callable
from hwt.doc_markers import internal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.enum import HEnum
from hwt.hdl.value import HValue
from hwt.serializer.serializer_filter import SerializerFilterDoNotExclude
from hwt.serializer.simModel import SimModelSerializer
from hwt.serializer.store_manager import SaveToStream, SaveToFilesFlat
from hwt.synthesizer.dummyPlatform import DummyPlatform
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.synthesizer.unit import Unit
from hwt.synthesizer.utils import to_rtl
from pyDigitalWaveTools.vcd.common import VCD_SIG_TYPE
from pyDigitalWaveTools.vcd.value_format import VcdBitsFormatter, \
VcdEnumFormatter
from pyDigitalWaveTools.vcd.writer import VcdVarWritingScope, \
VarAlreadyRegistered
from pyMathBitPrecise.bits3t import Bits3t
from pyMathBitPrecise.enum3t import Enum3t
from hwtSimApi.basic_hdl_simulator.model import BasicRtlSimModel
from hwtSimApi.basic_hdl_simulator.proxy import BasicRtlSimProxy
from hwtSimApi.basic_hdl_simulator.rtlSimulator import BasicRtlSimulator
from hwtSimApi.basic_hdl_simulator.sim_utils import ValueUpdater, \
ArrayValueUpdater
class BasicRtlSimulatorWithSignalRegisterMethods(BasicRtlSimulator):
    """Simulator wrapper adding signal collection/registration hooks.

    The base implementation creates no wave writer; subclasses override
    :meth:`create_wave_writer` and :attr:`supported_type_classes` to
    actually trace signals (e.g. into a VCD file).
    """
    # HDL type classes this simulator knows how to trace; empty in the base.
    supported_type_classes = tuple()

    def __init__(self, model_cls, synthesised_unit):
        """
        Only store variables for later construction
        """
        self.model_cls = model_cls
        self.synthesised_unit = synthesised_unit
        self.wave_writer = None
        # Maps Interface/Unit instances to their wave-writer scopes.
        self._obj2scope = {}
        self._traced_signals = set()

    def __call__(self) -> "BasicRtlSimulatorVcd":
        """
        Create and initialize the BasicRtlSimulatorWithVCD object
        """
        sim = self.__class__(self.model_cls, self.synthesised_unit)
        # Run the full BasicRtlSimulator initialization on the new instance
        # (this object's own __init__ only stored the construction args).
        super(BasicRtlSimulatorWithSignalRegisterMethods, sim).__init__()
        model = self.model_cls(sim)
        model._init_body()
        sim.bound_model(model)
        return sim

    def _init_listeners(self):
        # Verbose event logging is off by default.
        self.logPropagation = False
        self.logApplyingValues = False

    @classmethod
    def build(cls,
              unit: Unit,
              unique_name: str,
              build_dir: Optional[str],
              target_platform=DummyPlatform(),
              do_compile=True) -> "BasicRtlSimulatorVcd":
        """
        Create a hwtSimApi.basic_hdl_simulator based simulation model
        for specified unit and load it to python

        :param unit: interface level unit which you want to prepare for simulation
        :param unique_name: unique name for build directory and python module with simulator
        :param target_platform: target platform for this synthesis
        :param build_dir: directory to store sim model build files,
            if None sim model will be constructed only in memory
        """
        if unique_name is None:
            unique_name = unit._getDefaultName()
        _filter = SerializerFilterDoNotExclude()
        if build_dir is None or not do_compile:
            # Serialize the model only into an in-memory buffer.
            buff = StringIO()
            store_man = SaveToStream(SimModelSerializer, buff, _filter=_filter)
        else:
            if not os.path.isabs(build_dir):
                build_dir = os.path.join(os.getcwd(), build_dir)
            build_private_dir = os.path.join(build_dir, unique_name)
            store_man = SaveToFilesFlat(SimModelSerializer,
                                        build_private_dir,
                                        _filter=_filter)
            store_man.module_path_prefix = unique_name
        to_rtl(unit,
               name=unique_name,
               target_platform=target_platform,
               store_manager=store_man)
        if build_dir is not None:
            d = build_dir
            dInPath = d in sys.path
            if not dInPath:
                sys.path.insert(0, d)
            if unique_name in sys.modules:
                # Drop a possibly stale cached module so it is reimported.
                del sys.modules[unique_name]
            simModule = importlib.import_module(
                unique_name + "." + unique_name,
                package='simModule_' + unique_name)
            if not dInPath:
                sys.path.pop(0)
        else:
            simModule = ModuleType('simModule_' + unique_name)
            # python supports only ~100 opened brackets
            # if exceeded it throws MemoryError: s_push: parser stack overflow
            exec(buff.getvalue(),
                 simModule.__dict__)
        model_cls = simModule.__dict__[unit._name]
        # can not use just function as it would get bounded to class
        return cls(model_cls, unit)

    @internal
    def get_trace_formatter(self, t)\
            -> Tuple[str, int, Callable[[RtlSignalBase, HValue], str]]:
        """
        :return: (vcd type name, vcd width, formatter fn)
        """
        if isinstance(t, (Bits3t, Bits)):
            return (VCD_SIG_TYPE.WIRE, t.bit_length(), VcdBitsFormatter())
        elif isinstance(t, (Enum3t, HEnum)):
            return (VCD_SIG_TYPE.REAL, 1, VcdEnumFormatter())
        else:
            raise ValueError(t)

    def set_trace_file(self, file_name, trace_depth):
        """Open the wave writer and register all traceable signals."""
        self.create_wave_writer(file_name)
        ww = self.wave_writer
        if ww is not None:
            ww.date(datetime.now())
            ww.timescale(1)
            # Signals reachable through interfaces; everything else is
            # registered per-unit by wave_register_remaining_signals.
            interface_signals = set()
            self._collect_interface_signals(self.synthesised_unit, self.model, interface_signals)
            self.wave_register_signals(self.synthesised_unit, self.model, None, interface_signals)
            ww.enddefinitions()

    def create_wave_writer(self, file_name):
        # Base implementation traces nothing; subclasses build a real writer.
        self.wave_writer = None

    def finalize(self):
        # Nothing to clean up in the base implementation.
        pass

    def _collect_interface_signals(self,
                                   obj: Union[Interface, Unit],
                                   model: BasicRtlSimModel, res: Set[BasicRtlSimProxy]):
        """Recursively collect into *res* the signal proxies of all interfaces."""
        intfs = getattr(obj, "_interfaces", None)
        if intfs:
            for chIntf in intfs:
                self._collect_interface_signals(chIntf, model, res)
            if isinstance(obj, Unit):
                for u in obj._units:
                    m = getattr(model, u._name + "_inst")
                    if u._shared_component_with is not None:
                        # Traverse the shared component definition instead.
                        u, _, _ = u._shared_component_with
                    self._collect_interface_signals(u, m, res)
        else:
            # Leaf interface: resolve its signal proxy on the model's io.
            sig_name = obj._sigInside.name
            s = getattr(model.io, sig_name)
            res.add(s)

    def wave_register_signals(self,
                              obj: Union[Interface, Unit],
                              model: BasicRtlSimModel,
                              parent: Optional[VcdVarWritingScope],
                              interface_signals: Set[BasicRtlSimProxy]):
        """
        Register signals from interfaces for Interface or :class:`hwt.synthesizer.unit.Unit` instances
        """
        if obj._interfaces:
            if isinstance(obj, Unit):
                name = model._name
            else:
                name = obj._name
            parent_ = self.wave_writer if parent is None else parent
            subScope = parent_.varScope(name)
            self._obj2scope[obj] = subScope
            with subScope:
                # register all subinterfaces
                for chIntf in obj._interfaces:
                    self.wave_register_signals(chIntf, model, subScope, interface_signals)
                if isinstance(obj, Unit):
                    self.wave_register_remaining_signals(subScope, model, interface_signals)
                    # register interfaces from all subunits
                    for u in obj._units:
                        m = getattr(model, u._name + "_inst")
                        if u._shared_component_with is not None:
                            u, _, _ = u._shared_component_with
                        self.wave_register_signals(u, m, subScope, interface_signals)
            return subScope
        else:
            t = obj._dtype
            if isinstance(t, self.supported_type_classes):
                tName, width, formatter = self.get_trace_formatter(t)
                sig_name = obj._sigInside.name
                s = getattr(model.io, sig_name)
                try:
                    parent.addVar(s, sig_name,
                                  tName, width, formatter)
                except VarAlreadyRegistered:
                    # Signal already traced through another path; ignore.
                    pass

    def wave_register_remaining_signals(self, unitScope,
                                        model: BasicRtlSimModel,
                                        interface_signals: Set[BasicRtlSimProxy]):
        """Register *model*'s signals not already covered by interfaces."""
        for s in model._interfaces:
            if s not in interface_signals and s not in self.wave_writer._idScope:
                t = s._dtype
                if isinstance(t, self.supported_type_classes):
                    tName, width, formatter = self.get_trace_formatter(t)
                    try:
                        unitScope.addVar(s, s._name, tName, width, formatter)
                    except VarAlreadyRegistered:
                        pass

    def logChange(self, nowTime: int,
                  sig: BasicRtlSimProxy,
                  nextVal: HValue,
                  valueUpdater: Union[ValueUpdater, ArrayValueUpdater]):
        """
        This method is called for every value change of any signal.
        """
        pass
| |
#!/usr/bin/env python
"""push.py - Send a notification using Pushover"""
__version__ = "0.1"
__author__ = "Brian Connelly"
__copyright__ = "Copyright (c) 2014 Brian Connelly"
__credits__ = ["Brian Connelly"]
__license__ = "MIT"
__maintainer__ = "Brian Connelly"
__email__ = "bdc@bconnelly.net"
__status__ = "Beta"
import argparse
import ast
import datetime
import json
import os
import re
import socket
import sys
import urllib
try:
from httplib import HTTPSConnection
except ImportError:
from http.client import HTTPSConnection
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def valid_app_token(token):
    """Check if the given app token is in the right format (30 alphanumerics)."""
    # `is not None` instead of `!= None`: identity comparison is the
    # idiomatic (PEP 8) way to test for None.
    return re.match(r'^[a-zA-Z0-9]{30}$', token) is not None
def valid_user_key(key):
    """Check if the given user key is in the right format (30 alphanumerics)."""
    # Identity comparison with None per PEP 8 (was `!= None`).
    return re.match(r'^[a-zA-Z0-9]{30}$', key) is not None
def valid_message_receipt(receipt):
    """Check if the given message receipt is in the right format (30 alphanumerics)."""
    # Identity comparison with None per PEP 8 (was `!= None`).
    return re.match(r'^[a-zA-Z0-9]{30}$', receipt) is not None
def valid_group_key(key):
    """Check if the given group key is in the right format"""
    # Group keys share the exact format of user keys.
    return valid_user_key(key)
def valid_device_name(device):
    """Check if the given device name is in the right format (1-25 chars of [A-Za-z0-9_-])."""
    # Identity comparison with None per PEP 8 (was `!= None`).
    return re.match(r'^[A-Za-z0-9_-]{1,25}$', device) is not None
def request(method, route, data=None):
    """Send a request to the Pushover API and return (status, parsed JSON).

    :param method: HTTP method, e.g. 'GET' or 'POST'
    :param route: list of path components appended under /1/
    :param data: dict of form parameters (optional)
    :raises Exception: with message "problem" on any connection/parse error
    """
    if data is None:
        data = {}
    sroute = '/'.join(['/1'] + route)
    # Bugfix: the header dict used to be passed as urlencode()'s second
    # (doseq) positional argument; it belongs in conn.request() instead.
    content = urlencode(data)
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    try:
        conn = HTTPSConnection("api.pushover.net")
        conn.request(method, sroute, content, headers)
        response = conn.getresponse()
        parsed = json.loads(response.read().decode())
        return (response.status, parsed)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; callers only rely on Exception.
        raise Exception("problem")
def parse_arguments():
    """Parse command line arguments"""
    # Notification sounds accepted by the Pushover API; also shown in --help.
    sound_choices = ['bike', 'bugle', 'cashregister', 'classical', 'cosmic',
                     'falling', 'gamelan', 'incoming', 'intermission',
                     'magic', 'mechanical', 'pianobar', 'siren', 'spacealarm',
                     'tugboat', 'alien', 'climb', 'persistent', 'echo',
                     'updown', 'pushover', 'none']
    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='Send a notification message with Pushover',
                                     epilog='Available Sounds: {s}'.format(s=', '.join(sound_choices)))
    parser.add_argument('--version', action='version', version=__version__)
    mgroup = parser.add_argument_group(title='message arguments')
    mgroup.add_argument('-d', '--device', metavar='D', help='send message to' \
                        ' specified device')
    mgroup.add_argument('-t', '--title', metavar='t', help='message title')
    mgroup.add_argument('-T', '--timestamp', metavar='T', help='send message ' \
                        'specified UNIX timestamp', type=float)
    mgroup.add_argument('-u', '--url', metavar='u',
                        help='supplementary URL for message')
    mgroup.add_argument('-U', '--urltitle', metavar='U', help='title for '\
                        'supplementary url')
    mgroup.add_argument('-s', '--sound', metavar='S', choices=sound_choices,
                        default='pushover', help='play specified sound (see below)')
    mgroup.add_argument('--request', action='store_true', default=False,
                        help='print request token on success')
    mgroup.add_argument('--validate', action='store_true', default=False,
                        help='validate the given user token (and device)')
    apigroup = parser.add_argument_group(title='Pushover API arguments (optional)',
                                         description='Specify user or API token')
    apigroup.add_argument('--user', help='Pushover user or group key')
    apigroup.add_argument('--token', help='Application token')
    # All priority flags share dest='priority'; if none is given the value
    # stays None (main() then sends it through unchanged).
    pgroup = parser.add_argument_group(title='message priority (optional)',
                                       description='By default, messages send'\
                                       ' with normal priority.')
    pgroup.add_argument('--silent', dest='priority', action='store_const',
                        const=-2, help='send as lowest priority (-2)')
    pgroup.add_argument('--quiet', dest='priority', action='store_const',
                        const=-1, help='send as low priority (-1)')
    pgroup.add_argument('--normal', dest='priority', action='store_const',
                        const=0, help='send as normal priority (0)')
    pgroup.add_argument('--high', dest='priority', action='store_const',
                        const=1, help='send as high priority (1)')
    pgroup.add_argument('--emergency', dest='priority', action='store_const',
                        const=2, help='send as emergency priority, requiring '\
                        'user confirmation (2)')
    pgroup.add_argument('-r', '--retry', dest='retry', type=int,
                        default=30, help='Retry interval (seconds) for '\
                        'emergency messages (default: 30)')
    pgroup.add_argument('-e', '--expire', dest='expire', type=int,
                        default=3600, help='Expiration time (seconds) for '\
                        'emergency messages (default: 3600)')
    pgroup.add_argument('--callback', dest='callback', help='Callback URL for'\
                        ' emergency messages')
    egroup = parser.add_argument_group(title='emergency message receipts ' \
                                       '(optional)')
    egroup.add_argument('--receipt', metavar='R', help='check status of ' \
                        'emergency message with receipt R')
    egroup.add_argument('--cancel', metavar='R', help='cancel emergency '\
                        'message with receipt R')
    # Positional message is optional so the body can also come from stdin.
    mgroup.add_argument('message', nargs='?', help='Message to send')
    args = parser.parse_args()
    return args
def main():
    """Entry point: parse arguments and dispatch the requested Pushover call.

    Exits with 0 on success; non-zero exit codes identify the failure
    (e.g. 11 missing credential, 21 connection problem, 41 bad receipt).
    """
    args = parse_arguments()
    # Resolve the application token: CLI flag first, then environment.
    token = args.token
    if token is None:
        token = os.environ.get('PUSHPY_TOKEN')
        if token is None:
            print("Error: Must provide application token.")
            sys.exit(11)
    if not valid_app_token(token):
        print("Error: Invalid application token")
        sys.exit(1)
    # Resolve the user/group key the same way.
    user = args.user
    if user is None:
        user = os.environ.get('PUSHPY_USER')
        if user is None:
            # Bugfix: this message previously (incorrectly) referred to the
            # application token.
            print("Error: Must provide user/group key.")
            sys.exit(11)
    if not valid_user_key(user):
        print("Error: Invalid user/group key")
        sys.exit(2)
    urlargs = {"user": user, "token": token}
    # Check the status of an emergency message
    if args.receipt is not None:
        if not valid_message_receipt(args.receipt):
            print("Error: Invalid message receipt")
            sys.exit(41)
        try:
            st_query = "{r}.json?token={t}".format(r=args.receipt, t=token)
            (rstatus, rdata) = request("GET", ["receipts", st_query],
                                       data=None)
        except Exception:
            print("Error: Could not connect to service")
            sys.exit(21)
        if rstatus == 200 and rdata['status'] == 1:
            print("Last Delivered At: {}".format(datetime.datetime.fromtimestamp(rdata["last_delivered_at"]).strftime('%Y-%m-%d %H:%M:%S %Z')))
            if rdata["acknowledged"] == 1:
                print("Acknowledged At: {}".format(datetime.datetime.fromtimestamp(rdata["acknowledged_at"]).strftime('%Y-%m-%d %H:%M:%S %Z')))
                print("Acknowledged By: {}".format(rdata["acknowledged_by"]))
            else:
                print("Not Acknowledged")
            if rdata["expired"]:
                print("Expired At: {}".format(datetime.datetime.fromtimestamp(rdata["expires_at"]).strftime('%Y-%m-%d %H:%M:%S %Z')))
            else:
                print("Expires At: {}".format(datetime.datetime.fromtimestamp(rdata["expires_at"]).strftime('%Y-%m-%d %H:%M:%S %Z')))
            if rdata["called_back"] == 1:
                print("Called Back At: {}".format(datetime.datetime.fromtimestamp(rdata["called_back_at"]).strftime('%Y-%m-%d %H:%M:%S %Z')))
            sys.exit(0)
        else:
            for error in rdata['errors']:
                print("Error: {e}".format(e=error))
            sys.exit(rdata['status'])
    # Cancel a message
    if args.cancel is not None:
        if not valid_message_receipt(args.cancel):
            print("Error: Invalid message receipt")
            sys.exit(41)
        try:
            (rstatus, rdata) = request("POST",
                                       ["receipts", args.cancel, "cancel.json"],
                                       data=urlargs)
        except Exception:
            print("Error: Could not connect to service")
            sys.exit(21)
        if rstatus == 200 and rdata["status"] == 1:
            sys.exit(0)
        else:
            for error in rdata['errors']:
                print("Error: {e}".format(e=error))
            sys.exit(rdata["status"])
    if args.device:
        if not valid_device_name(args.device):
            print("Error: Invalid device name")
            sys.exit(5)
        urlargs['device'] = args.device
    # Validate the user/group key and device (optional)
    if args.validate:
        try:
            (rstatus, rdata) = request("POST",
                                       ["users", "validate.json"],
                                       data=urlargs)
        except Exception:
            print("Error: Could not connect to service")
            sys.exit(21)
        if rstatus == 200 and rdata["status"] == 1:
            if args.device:
                print("Valid user/group and device")
            else:
                print("Valid user/group")
            sys.exit(0)
        else:
            for error in rdata['errors']:
                print("Error: {e}".format(e=error))
            sys.exit(rdata["status"])
    # No positional message: read the body from stdin instead.
    if args.message is None:
        message = sys.stdin.read().rstrip()
    else:
        message = args.message
    # Pushover limits title + message to 512 characters combined.
    if args.title:
        if len(args.title) + len(message) > 512:
            print("Error: Exceeded maximum length for title and message")
            sys.exit(3)
        urlargs['title'] = args.title
    else:
        if len(message) > 512:
            print("Error: Exceeded maximum length for title and message")
            sys.exit(4)
    urlargs['message'] = message
    # NOTE(review): when no priority flag was given args.priority is None and
    # 'priority=None' is sent verbatim — confirm the API tolerates this.
    urlargs['priority'] = args.priority
    if args.priority == 2:
        # Emergency messages require retry/expire within API bounds.
        if args.retry < 30:
            print("Error: Retry must be at least 30 seconds")
            sys.exit(5)
        if args.expire > 86400:
            print("Error: Expire can not be larger than 86400 seconds")
            sys.exit(6)
        urlargs['retry'] = args.retry
        urlargs['expire'] = args.expire
    if args.callback is not None and args.priority == 2:
        urlargs['callback'] = args.callback
    urlargs['sound'] = args.sound
    if args.url:
        urlargs['url'] = args.url
    if args.urltitle:
        urlargs['urltitle'] = args.urltitle
    if args.timestamp:
        urlargs['timestamp'] = args.timestamp
    # Send a message
    try:
        (response_status, response_data) = request('POST', ['messages.json'],
                                                   data=urlargs)
    except Exception:
        print("Error: Could not connect to service")
        sys.exit(21)
    if response_status == 200 and response_data['status'] == 1:
        if args.priority == 2:
            print(response_data['receipt'])
        if args.request:
            print(response_data['request'])
    elif response_status == 500:
        print("Error: Unable to connect to service")
        sys.exit(500)
    elif response_status == 429:
        print("Error: Message limit reached")
        sys.exit(429)
    else:
        for error in response_data['errors']:
            print("Error: {e}".format(e=error))
        sys.exit(response_data['status'])
# Script entry point.
if __name__ == "__main__":
    main()
| |
import os
import sys
import errno
import uuid
from atomicwrites import atomic_write
__version__ = '0.1.0'
PY2 = sys.version_info[0] == 2
class cached_property(object):
    '''A read-only @property that is only evaluated once. Only usable on class
    instances' methods.
    '''

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__

    def __get__(self, obj, cls):
        if obj is None:  # pragma: no cover
            # Accessed on the class itself: expose the descriptor.
            return self
        value = self.fget(obj)
        # Shadow the descriptor with the computed value on the instance;
        # later attribute access bypasses __get__ entirely.
        obj.__dict__[self.__name__] = value
        return value
def to_unicode(x, encoding='ascii'):
    """Decode *x* to the text type unless it already is text."""
    if isinstance(x, text_type):
        return x
    return x.decode(encoding)
def to_bytes(x, encoding='ascii'):
    """Encode *x* to bytes unless it already is bytes."""
    return x if isinstance(x, bytes) else x.encode(encoding)
# Compatibility shims: pick the text type and the "native string" converter
# for the running interpreter (Python 2 vs Python 3).
if PY2:
    text_type = unicode  # noqa
    to_native = to_bytes
else:
    text_type = str  # noqa
    to_native = to_unicode
# Characters allowed in hrefs (used as file names on disk).
SAFE_UID_CHARS = ('abcdefghijklmnopqrstuvwxyz'
                  'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                  '0123456789_.-+')
def _href_safe(uid, safe=SAFE_UID_CHARS):
    """True when *uid* contains only characters from *safe*."""
    return set(uid) <= set(safe)
def _generate_href(uid=None, safe=SAFE_UID_CHARS):
    """Return *uid* when it is usable as an href, else a random hex href."""
    if uid and _href_safe(uid, safe):
        return uid
    return to_unicode(uuid.uuid4().hex)
def get_etag_from_file(f):
    """Return an mtime-based etag for a file path or file-like object.

    File objects are flushed (and on Windows fsynced) first so the
    reported mtime reflects all pending writes.
    """
    if hasattr(f, 'read'):
        f.flush()  # Only this is necessary on Linux
        if sys.platform == 'win32':
            os.fsync(f.fileno())  # Apparently necessary on Windows
        st = os.fstat(f.fileno())
    else:
        st = os.stat(f)
    # Prefer nanosecond resolution where the platform provides it.
    mtime = getattr(st, 'st_mtime_ns', None)
    if mtime is None:
        mtime = st.st_mtime
    return '{:.9f}'.format(mtime)
class VdirError(IOError):
    """Base error for vdir operations.

    Keyword arguments are accepted only for attributes that a subclass
    pre-declares with a ``None`` default (e.g.
    ``AlreadyExistingError.existing_href``); anything else is a
    programming error.
    """

    def __init__(self, *args, **kwargs):
        sentinel = object()
        for key, value in kwargs.items():
            if getattr(self, key, sentinel) is not None:  # pragma: no cover
                raise TypeError('Invalid argument: {}'.format(key))
            setattr(self, key, value)
        super(VdirError, self).__init__(*args)
class NotFoundError(VdirError):
    """Raised when the requested href/item does not exist."""
    pass
class CollectionNotFoundError(VdirError):
    """Raised when the collection directory does not exist."""
    pass
class WrongEtagError(VdirError):
    """Raised when the given etag does not match the file's current etag."""
    pass
class AlreadyExistingError(VdirError):
    """Raised when uploading an item whose href already exists."""
    # Populated via VdirError.__init__'s keyword-argument mechanism.
    existing_href = None
class Item(object):
    """A single vdir item wrapping its raw text content."""

    def __init__(self, raw):
        # raw must be text (unicode), never bytes.
        assert isinstance(raw, text_type)
        self.raw = raw

    @cached_property
    def uid(self):
        """Parse and return the UID property value, or None when absent.

        Continuation lines beginning with a single space are treated as
        folded parts of the UID line and appended without the leading
        space.
        """
        uid = u''
        lines = iter(self.raw.splitlines())
        for line in lines:
            if line.startswith('UID:'):
                uid += line[4:].strip()
                break
        # The same iterator resumes just after the UID line, collecting
        # folded continuation lines until a non-continuation line appears.
        for line in lines:
            if not line.startswith(u' '):
                break
            uid += line[1:]
        return uid or None
def _normalize_meta_value(value):
    """Coerce falsy values to '' and strip whitespace (native str out)."""
    if not value:
        value = u''
    return to_native(value).strip()
class VdirBase(object):
    """Filesystem-backed collection of items, one item per file."""
    item_class = Item
    # Mode for directories created by create().
    default_mode = 0o750

    def __init__(self, path, fileext, encoding='utf-8'):
        """Open an existing collection directory.

        :raises CollectionNotFoundError: when *path* is not a directory.
        """
        if not os.path.isdir(path):
            raise CollectionNotFoundError(path)
        self.path = path
        self.encoding = encoding
        self.fileext = fileext

    @classmethod
    def discover(cls, path, **kwargs):
        """Yield one collection instance per subdirectory of *path*.

        Silently yields nothing when *path* itself does not exist.
        """
        try:
            collections = os.listdir(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            return
        for collection in collections:
            collection_path = os.path.join(path, collection)
            if os.path.isdir(collection_path):
                yield cls(path=collection_path, **kwargs)

    @classmethod
    def create(cls, collection_name, **kwargs):
        """Create the collection directory and return updated kwargs.

        Note: returns the kwargs dict (with 'path' pointing at the new
        directory), not an instance.
        """
        kwargs = dict(kwargs)
        path = kwargs['path']
        path = os.path.join(path, collection_name)
        if not os.path.exists(path):
            os.makedirs(path, mode=cls.default_mode)
        elif not os.path.isdir(path):
            raise IOError('{} is not a directory.'.format(repr(path)))
        kwargs['path'] = path
        return kwargs

    def _get_filepath(self, href):
        # Absolute path of the file backing *href*.
        return os.path.join(self.path, href)

    def _get_href(self, uid):
        # Derive an href from the uid (or a random one) plus the extension.
        return _generate_href(uid) + self.fileext

    def list(self):
        """Yield (href, etag) for every item file in the collection."""
        for fname in os.listdir(self.path):
            fpath = os.path.join(self.path, fname)
            if os.path.isfile(fpath) and fname.endswith(self.fileext):
                yield fname, get_etag_from_file(fpath)

    def get(self, href):
        """Return (Item, etag) for *href*.

        :raises NotFoundError: when the file does not exist.
        """
        fpath = self._get_filepath(href)
        try:
            with open(fpath, 'rb') as f:
                return (Item(f.read().decode(self.encoding)),
                        get_etag_from_file(fpath))
        except IOError as e:
            if e.errno == errno.ENOENT:
                raise NotFoundError(href)
            else:
                raise

    def upload(self, item):
        """Store a new item; return (href, etag).

        Falls back to a random href when the UID-based file name is not
        usable on this filesystem.
        """
        if not isinstance(item.raw, text_type):
            raise TypeError('item.raw must be a unicode string.')
        try:
            href = self._get_href(item.uid)
            fpath, etag = self._upload_impl(item, href)
        except OSError as e:
            if e.errno in (
                errno.ENAMETOOLONG,  # Unix
                errno.ENOENT  # Windows
            ):
                # random href instead of UID-based
                href = self._get_href(None)
                fpath, etag = self._upload_impl(item, href)
            else:
                raise
        return href, etag

    def _upload_impl(self, item, href):
        """Atomically write the item; refuse to overwrite an existing file."""
        fpath = self._get_filepath(href)
        try:
            with atomic_write(fpath, mode='wb', overwrite=False) as f:
                f.write(item.raw.encode(self.encoding))
                return fpath, get_etag_from_file(f)
        except OSError as e:
            if e.errno == errno.EEXIST:
                raise AlreadyExistingError(existing_href=href)
            else:
                raise

    def update(self, href, item, etag):
        """Replace an existing item if *etag* still matches; return new etag.

        :raises NotFoundError: when the file does not exist.
        :raises WrongEtagError: when the stored etag differs from *etag*.
        """
        fpath = self._get_filepath(href)
        if not os.path.exists(fpath):
            raise NotFoundError(item.uid)
        actual_etag = get_etag_from_file(fpath)
        if etag != actual_etag:
            raise WrongEtagError(etag, actual_etag)
        if not isinstance(item.raw, text_type):
            raise TypeError('item.raw must be a unicode string.')
        with atomic_write(fpath, mode='wb', overwrite=True) as f:
            f.write(item.raw.encode(self.encoding))
            etag = get_etag_from_file(f)
        return etag

    def delete(self, href, etag):
        """Delete *href* if *etag* still matches.

        :raises NotFoundError: when the file does not exist.
        :raises WrongEtagError: when the stored etag differs from *etag*.
        """
        fpath = self._get_filepath(href)
        if not os.path.isfile(fpath):
            raise NotFoundError(href)
        actual_etag = get_etag_from_file(fpath)
        if etag != actual_etag:
            raise WrongEtagError(etag, actual_etag)
        os.remove(fpath)

    def get_meta(self, key):
        """Return the metadata file *key*'s text, or None when absent/empty."""
        fpath = os.path.join(self.path, key)
        try:
            with open(fpath, 'rb') as f:
                return f.read().decode(self.encoding) or None
        except IOError as e:
            if e.errno == errno.ENOENT:
                return None
            else:
                raise

    def set_meta(self, key, value):
        """Atomically write text *value* (None becomes '') to metadata *key*."""
        value = value or u''
        assert isinstance(value, text_type)
        fpath = os.path.join(self.path, key)
        with atomic_write(fpath, mode='wb', overwrite=True) as f:
            f.write(value.encode(self.encoding))
# NOTE(review): this class is an exact duplicate of the cached_property
# defined earlier in this module; this later definition shadows the earlier
# one. Consider removing one of the two.
class cached_property(object):
    '''A read-only @property that is only evaluated once. Only usable on class
    instances' methods.
    '''
    def __init__(self, fget, doc=None):
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__
        self.__doc__ = doc or fget.__doc__
        self.fget = fget
    def __get__(self, obj, cls):
        if obj is None:  # pragma: no cover
            return self
        # Cache the computed value on the instance, shadowing the descriptor.
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result
class Color(object):
    """A '#RRGGBB' color value with validation and parsed RGB access."""

    def __init__(self, x):
        """Validate and store *x* upper-cased.

        :param x: color string of the exact form ``#RRGGBB``
        :raises ValueError: when *x* is falsy, lacks the leading ``#``,
            or is not exactly 7 characters long.
        """
        if not x:
            raise ValueError('Color is false-ish.')
        if not x.startswith('#'):
            raise ValueError('Color must start with a #.')
        if len(x) != 7:
            raise ValueError('Color must not have shortcuts. '
                             '#ffffff instead of #fff')
        self.raw = x.upper()

    @cached_property
    def rgb(self):
        """Return the color as an ``(r, g, b)`` tuple of ints.

        :raises ValueError: when a component is not two hex digits.
        """
        x = self.raw
        r = x[1:3]
        g = x[3:5]
        # Bugfix: blue component is x[5:7]; the previous x[5:8] slice
        # only worked by accident because len(x) == 7.
        b = x[5:7]
        if len(r) == len(g) == len(b) == 2:
            return int(r, 16), int(g, 16), int(b, 16)
        else:
            # Bugfix: previously referenced self.value, which does not
            # exist and raised AttributeError instead of ValueError.
            raise ValueError('Unable to parse color value: {}'
                             .format(x))
class ColorMixin(object):
    """Mixin adding color helpers on top of get_meta/set_meta."""

    color_type = Color

    def get_color(self):
        """Return the stored color as color_type, or None if unset/invalid."""
        make_color = self.color_type
        try:
            return make_color(self.get_meta('color'))
        except ValueError:
            return None

    def set_color(self, value):
        """Validate *value* via color_type and persist its canonical form."""
        self.set_meta('color', self.color_type(value).raw)
class DisplayNameMixin(object):
    """Mixin adding display-name helpers on top of get_meta/set_meta."""
    def get_displayname(self):
        # Stored under the 'displayname' metadata key.
        return self.get_meta('displayname')
    def set_displayname(self, value):
        self.set_meta('displayname', value)
class Vdir(VdirBase, ColorMixin, DisplayNameMixin):
    """Full-featured vdir collection: storage plus color/displayname meta."""
    pass
| |
# -*- coding: utf-8 -*-
"""
core
~~~~
Core functionality shared between the extension and the decorator.
:copyright: (c) 2016 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
import re
import logging
try:
# on python 3
from collections.abc import Iterable
except ImportError:
# on python 2.7 and pypy
from collections import Iterable
from datetime import timedelta
from six import string_types
from flask import request, current_app
from werkzeug.datastructures import Headers, MultiDict
LOG = logging.getLogger(__name__)
# Response Headers
ACL_ORIGIN = 'Access-Control-Allow-Origin'
ACL_METHODS = 'Access-Control-Allow-Methods'
ACL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ACL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'
ACL_CREDENTIALS = 'Access-Control-Allow-Credentials'
ACL_MAX_AGE = 'Access-Control-Max-Age'
# Request Header
ACL_REQUEST_METHOD = 'Access-Control-Request-Method'
ACL_REQUEST_HEADERS = 'Access-Control-Request-Headers'
# Every HTTP method CORS may allow by default.
ALL_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']
# Flask app.config keys recognized by the extension.
CONFIG_OPTIONS = ['CORS_ORIGINS', 'CORS_METHODS', 'CORS_ALLOW_HEADERS',
                  'CORS_EXPOSE_HEADERS', 'CORS_SUPPORTS_CREDENTIALS',
                  'CORS_MAX_AGE', 'CORS_SEND_WILDCARD',
                  'CORS_AUTOMATIC_OPTIONS', 'CORS_VARY_HEADER',
                  'CORS_RESOURCES', 'CORS_INTERCEPT_EXCEPTIONS',
                  'CORS_ALWAYS_SEND']
# Attribute added to request object by decorator to indicate that CORS
# was evaluated, in case the decorator and extension are both applied
# to a view.
FLASK_CORS_EVALUATED = '_FLASK_CORS_EVALUATED'
# Strange, but this gets the type of a compiled regex, which is otherwise not
# exposed in a public API.
RegexObject = type(re.compile(''))
# Fallback values for every per-resource CORS option.
DEFAULT_OPTIONS = dict(origins='*',
                       methods=ALL_METHODS,
                       allow_headers='*',
                       expose_headers=None,
                       supports_credentials=False,
                       max_age=None,
                       send_wildcard=False,
                       automatic_options=True,
                       vary_header=True,
                       resources=r'/*',
                       intercept_exceptions=True,
                       always_send=True)
def parse_resources(resources):
    """Normalize the *resources* option into a list of (pattern, options).

    Accepts a dict mapping patterns to per-resource option dicts, a single
    string, an iterable of patterns, or a compiled regex.
    """
    if isinstance(resources, dict):
        # To make the API more consistent with the decorator, allow a
        # resource of '*', which is not actually a valid regexp.
        pairs = [(re_fix(pattern), opts)
                 for pattern, opts in resources.items()]

        # Sort by regex length to provide consistency of matching and
        # to provide a proxy for specificity of match. E.G. longer
        # regular expressions are tried first.
        def pattern_length(pair):
            return len(get_regexp_pattern(pair[0]))

        return sorted(pairs, key=pattern_length, reverse=True)
    if isinstance(resources, string_types):
        return [(re_fix(resources), {})]
    if isinstance(resources, Iterable):
        return [(re_fix(item), {}) for item in resources]
    # Type of compiled regex is not part of the public API. Test for this
    # at runtime.
    if isinstance(resources, RegexObject):
        return [(re_fix(resources), {})]
    raise ValueError("Unexpected value for resources argument.")
def get_regexp_pattern(regexp):
    """
    Helper that returns regexp pattern from given value.

    :param regexp: regular expression to stringify
    :type regexp: _sre.SRE_Pattern or str
    :returns: string representation of given regexp pattern
    :rtype: str
    """
    pattern = getattr(regexp, 'pattern', None)
    return pattern if pattern is not None else str(regexp)
def get_cors_origins(options, request_origin):
    """Return the list of allowed-origin values to send for this request.

    :param options: resolved CORS options for the matched resource
    :param request_origin: value of the request's Origin header, or None
    :returns: list of origin strings to emit, or None when no CORS headers
        should be applied at all.
    """
    origins = options.get('origins')
    wildcard = r'.*' in origins
    # If the Origin header is not present terminate this set of steps.
    # The request is outside the scope of this specification.-- W3Spec
    if request_origin:
        LOG.debug("CORS request received with 'Origin' %s", request_origin)
        # If the allowed origins is an asterisk or 'wildcard', always match
        if wildcard and options.get('send_wildcard'):
            LOG.debug("Allowed origins are set to '*'. Sending wildcard CORS header.")
            return ['*']
        # If the value of the Origin header is a case-sensitive match
        # for any of the values in list of origins
        elif try_match_any(request_origin, origins):
            LOG.debug("The request's Origin header matches. Sending CORS headers.", )
            # Add a single Access-Control-Allow-Origin header, with either
            # the value of the Origin header or the string "*" as value.
            # -- W3Spec
            return [request_origin]
        else:
            LOG.debug("The request's Origin header does not match any of allowed origins.")
            return None
    elif options.get('always_send'):
        if wildcard:
            # If wildcard is in the origins, even if 'send_wildcard' is False,
            # simply send the wildcard. Unless supports_credentials is True,
            # since that is forbidden by the spec.
            # It is the most-likely to be correct thing to do (the only other
            # option is to return nothing, which is almost certainly not what
            # the developer wants if the '*' origin was specified.
            if options.get('supports_credentials'):
                return None
            else:
                return ['*']
        else:
            # Return all origins that are not regexes.
            return sorted([o for o in origins if not probably_regex(o)])
    # Terminate these steps, return the original request untouched.
    else:
        LOG.debug("The request did not contain an 'Origin' header. This means the browser or client did not request CORS, ensure the Origin Header is set.")
        return None
def get_allow_headers(options, acl_request_headers):
    """Echo back the subset of requested headers that are allowed.

    :param options: resolved CORS options (uses 'allow_headers')
    :param acl_request_headers: raw Access-Control-Request-Headers value
    :returns: sorted, comma-separated header names, or None when the
        request did not ask for any headers.
    """
    if not acl_request_headers:
        return None
    requested = (h.strip() for h in acl_request_headers.split(','))
    allowed = options.get('allow_headers')
    matching = [h for h in requested if try_match_any(h, allowed)]
    return ', '.join(sorted(matching))
def get_cors_headers(options, request_headers, request_method):
    """Compute the CORS headers to attach to a response for one request.

    :param options: serialized CORS options (see ``serialize_options``)
    :param request_headers: mapping of the incoming request's headers
    :param request_method: HTTP method of the incoming request
    :return: a ``MultiDict`` of response headers; empty when CORS does not
        apply (no matching origin for this route).
    """
    origins_to_set = get_cors_origins(options, request_headers.get('Origin'))
    headers = MultiDict()
    if not origins_to_set:  # CORS is not enabled for this route
        return headers
    # One Access-Control-Allow-Origin header entry per matched origin.
    for origin in origins_to_set:
        headers.add(ACL_ORIGIN, origin)
    headers[ACL_EXPOSE_HEADERS] = options.get('expose_headers')
    if options.get('supports_credentials'):
        headers[ACL_CREDENTIALS] = 'true'  # value is case sensitive
    # This is a preflight request
    # http://www.w3.org/TR/cors/#resource-preflight-requests
    if request_method == 'OPTIONS':
        acl_request_method = request_headers.get(ACL_REQUEST_METHOD, '').upper()
        # If there is no Access-Control-Request-Method header or if parsing
        # failed, do not set any additional headers
        if acl_request_method and acl_request_method in options.get('methods'):
            # If method is not a case-sensitive match for any of the values in
            # list of methods do not set any additional headers and terminate
            # this set of steps.
            headers[ACL_ALLOW_HEADERS] = get_allow_headers(options, request_headers.get(ACL_REQUEST_HEADERS))
            headers[ACL_MAX_AGE] = options.get('max_age')
            headers[ACL_METHODS] = options.get('methods')
        else:
            LOG.info("The request's Access-Control-Request-Method header does not match allowed methods. CORS headers will not be applied.")
    # http://www.w3.org/TR/cors/#resource-implementation
    if options.get('vary_header'):
        # Only set header if the origin returned will vary dynamically,
        # i.e. if we are not returning an asterisk, and there are multiple
        # origins that can be matched.
        if headers[ACL_ORIGIN] == '*':
            pass
        elif (len(options.get('origins')) > 1 or
                len(origins_to_set) > 1 or
                any(map(probably_regex, options.get('origins')))):
            headers.add('Vary', 'Origin')
    # Strip headers whose computed value is falsy (e.g. expose_headers=None).
    return MultiDict((k, v) for k, v in headers.items() if v)
def set_cors_headers(resp, options):
    """
    Performs the actual evaluation of Flask-CORS options and actually
    modifies the response object.

    This function is used both in the decorator and the after_request
    callback.

    :param resp: the Flask response object to modify
    :param options: serialized CORS options for the matched resource
    :return: the same response object, with CORS headers added
    """
    # If CORS has already been evaluated via the decorator, skip
    if hasattr(resp, FLASK_CORS_EVALUATED):
        LOG.debug('CORS have been already evaluated, skipping')
        return resp
    # Some libraries, like OAuthlib, set resp.headers to non Multidict
    # objects (Werkzeug Headers work as well). This is a problem because
    # headers allow repeated values.
    if (not isinstance(resp.headers, Headers)
            and not isinstance(resp.headers, MultiDict)):
        resp.headers = MultiDict(resp.headers)
    headers_to_set = get_cors_headers(options, request.headers, request.method)
    LOG.debug('Settings CORS headers: %s', str(headers_to_set))
    # .add() (not item assignment) so repeated header names are preserved.
    for k, v in headers_to_set.items():
        resp.headers.add(k, v)
    return resp
def probably_regex(maybe_regex):
    """Heuristically decide whether *maybe_regex* is a regular expression.

    Compiled patterns are regexes by definition; plain strings are judged
    by whether they contain common regex metacharacters.

    :param maybe_regex: a compiled pattern or a string
    :return: True if the value looks like a regex, False otherwise
    """
    if isinstance(maybe_regex, RegexObject):
        return True
    # Use common characters used in regular expressions as a proxy
    # for if this string is in fact a regex. (Fixed: ']' was listed twice.)
    common_regex_chars = ('*', '\\', ']', '?', '$', '^', '[', '(', ')')
    return any(c in maybe_regex for c in common_regex_chars)
def re_fix(reg):
    """
    Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to
    enable the CORS app extension to have a more user friendly api.
    """
    if reg == r'*':
        return r'.*'
    return reg
def try_match_any(inst, patterns):
    """Return True if *inst* matches at least one pattern in *patterns*."""
    for pattern in patterns:
        if try_match(inst, pattern):
            return True
    return False
def try_match(request_origin, maybe_regex):
    """Safely attempts to match a pattern or string to a request origin."""
    # Compiled patterns must be checked first: re.match() rejects the
    # flags argument when given an already-compiled pattern.
    if isinstance(maybe_regex, RegexObject):
        return re.match(maybe_regex, request_origin)
    if probably_regex(maybe_regex):
        return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)
    # Plain string: compare case-insensitively, falling back to a direct
    # equality check for non-string values.
    try:
        return request_origin.lower() == maybe_regex.lower()
    except AttributeError:
        return request_origin == maybe_regex
def get_cors_options(appInstance, *dicts):
    """
    Compute CORS options for an application by combining the DEFAULT_OPTIONS,
    the app's configuration-specified options and any dictionaries passed. The
    last specified option wins.
    """
    merged = DEFAULT_OPTIONS.copy()
    merged.update(get_app_kwarg_dict(appInstance))
    # Later dictionaries override earlier ones.
    for overrides in dicts:
        merged.update(overrides)
    return serialize_options(merged)
def get_app_kwarg_dict(appInstance=None):
    """Returns the dictionary of CORS specific app configurations."""
    app = appInstance or current_app
    # In order to support blueprints which do not have a config attribute
    app_config = getattr(app, 'config', {})
    result = {}
    for option in CONFIG_OPTIONS:
        value = app_config.get(option)
        if value is not None:
            result[option.lower().replace('cors_', '')] = value
    return result
def flexible_str(obj):
    """
    A more flexible str function which intelligently handles stringifying
    strings, lists and other iterables. The results are lexographically sorted
    to ensure generated responses are consistent when iterables such as Set
    are used.
    """
    if obj is None:
        return None
    if isinstance(obj, Iterable) and not isinstance(obj, string_types):
        return ', '.join(str(item) for item in sorted(obj))
    return str(obj)
def serialize_option(options_dict, key, upper=False):
    """Normalize ``options_dict[key]`` in place to its string form.

    Missing keys are left untouched; when *upper* is true the resulting
    string is upper-cased.
    """
    if key not in options_dict:
        return
    value = flexible_str(options_dict[key])
    options_dict[key] = value.upper() if upper else value
def ensure_iterable(inst):
    """
    Wraps scalars or string types as a list, or returns the iterable instance.
    """
    # Strings are iterable but must be treated as scalars here.
    if isinstance(inst, string_types) or not isinstance(inst, Iterable):
        return [inst]
    return inst
def sanitize_regex_param(param):
    """Coerce *param* to a list and normalize the '*' shorthand per entry."""
    sanitized = []
    for pattern in ensure_iterable(param):
        sanitized.append(re_fix(pattern))
    return sanitized
def serialize_options(opts):
    """
    A helper method to serialize and processes the options dictionary.

    Warns on unknown option keys, normalizes 'origins' and 'allow_headers'
    into lists of patterns, rejects the forbidden wildcard-with-credentials
    combination, and stringifies the header/method options.

    :param opts: raw options dictionary (may be None or empty)
    :return: a new, normalized options dictionary
    :raises ValueError: when a '*' origin is combined with both
        supports_credentials and send_wildcard
    """
    options = (opts or {}).copy()
    # Iterate the copied dict so a None *opts* does not crash here.
    for key in options:
        if key not in DEFAULT_OPTIONS:
            LOG.warning("Unknown option passed to Flask-CORS: %s", key)
    # Ensure origins is a list of allowed origins with at least one entry.
    options['origins'] = sanitize_regex_param(options.get('origins'))
    options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))
    # This is expressly forbidden by the spec. Raise a value error so people
    # don't get burned in production.
    if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
        # Fixed: the original message was missing a space between the
        # concatenated literals ("...with" + "an origin...").
        raise ValueError("Cannot use supports_credentials in conjunction "
                         "with an origin string of '*'. See: "
                         "http://www.w3.org/TR/cors/#resource-requests")
    serialize_option(options, 'expose_headers')
    serialize_option(options, 'methods', upper=True)
    # Accept a timedelta max_age and express it in whole seconds.
    if isinstance(options.get('max_age'), timedelta):
        options['max_age'] = str(int(options['max_age'].total_seconds()))
    return options
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
from st2common.util.date import get_datetime_utc_now
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
class DatastoreService(object):
    """
    Class provides public methods for accessing datastore items.
    """

    DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR

    def __init__(self, logger, pack_name, class_name, api_username):
        self._api_username = api_username
        self._pack_name = pack_name
        self._class_name = class_name
        self._logger = logger

        self._client = None
        # Start with an expired token so the first _get_api_client() call
        # always creates a client with a fresh auth token.
        self._token_expire = get_datetime_utc_now()

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """
        Retrieve all the datastores items.

        :param local: List values from a namespace local to this pack/class. Defaults to True.
        :type local: ``bool``

        :param prefix: Optional key name prefix / startswith filter.
        :type prefix: ``str``

        :rtype: ``list`` of :class:`KeyValuePair`
        """
        client = self._get_api_client()
        self._logger.audit('Retrieving all the value from the datastore')

        key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
        kvps = client.keys.get_all(prefix=key_prefix)
        return kvps

    def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
        """
        Retrieve a value from the datastore for the provided key.

        By default, value is retrieved from the namespace local to the pack/class. If you want to
        retrieve a global value from a datastore, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``

        :param scope: Scope under which item is saved. Defaults to system scope.
        :type scope: ``str``

        :param decrypt: Return the decrypted value. Defaults to False.
        :type decrypt: ``bool``

        :rtype: ``str`` or ``None``
        """
        if scope != SYSTEM_SCOPE:
            raise ValueError('Scope %s is unsupported.' % scope)

        name = self._get_full_key_name(name=name, local=local)

        client = self._get_api_client()
        self._logger.audit('Retrieving value from the datastore (name=%s)', name)

        try:
            params = {'decrypt': str(decrypt).lower(), 'scope': scope}
            kvp = client.keys.get_by_id(id=name, params=params)
        except Exception as e:
            # Best-effort read: log the failure and report a missing key.
            self._logger.exception(
                'Exception retrieving value from datastore (name=%s): %s',
                name,
                e
            )
            return None

        if kvp:
            return kvp.value

        return None

    def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
        """
        Set a value for the provided key.

        By default, value is set in a namespace local to the pack/class. If you want to
        set a global value, pass local=False to this method.

        :param name: Key name.
        :type name: ``str``

        :param value: Key value.
        :type value: ``str``

        :param ttl: Optional TTL (in seconds).
        :type ttl: ``int``

        :param local: Set value in a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``

        :param scope: Scope under which to place the item. Defaults to system scope.
        :type scope: ``str``

        :param encrypt: Encrypt the value when saving. Defaults to False.
        :type encrypt: ``bool``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        if scope != SYSTEM_SCOPE:
            # Fixed: the message is now %-formatted instead of passing a
            # (message, scope) tuple to ValueError.
            raise ValueError('Scope %s is unsupported.' % scope)

        name = self._get_full_key_name(name=name, local=local)

        value = str(value)
        client = self._get_api_client()

        self._logger.audit('Setting value in the datastore (name=%s)', name)

        instance = KeyValuePair()
        instance.id = name
        instance.name = name
        instance.value = value
        instance.scope = scope

        if encrypt:
            instance.secret = True

        if ttl:
            instance.ttl = ttl

        client.keys.update(instance=instance)
        return True

    def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
        """
        Delete the provided key.

        By default, value is deleted from a namespace local to the pack/class. If you want to
        delete a global value, pass local=False to this method.

        :param name: Name of the key to delete.
        :type name: ``str``

        :param local: Delete a value in a namespace local to the pack/class. Defaults to True.
        :type local: ``bool``

        :param scope: Scope under which item is saved. Defaults to system scope.
        :type scope: ``str``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        if scope != SYSTEM_SCOPE:
            # Fixed: the message is now %-formatted instead of passing a
            # (message, scope) tuple to ValueError.
            raise ValueError('Scope %s is unsupported.' % scope)

        name = self._get_full_key_name(name=name, local=local)

        client = self._get_api_client()

        instance = KeyValuePair()
        instance.id = name
        instance.name = name

        self._logger.audit('Deleting value from the datastore (name=%s)', name)

        try:
            params = {'scope': scope}
            client.keys.delete(instance=instance, params=params)
        except Exception as e:
            # Best-effort delete: log the failure and report it to the caller.
            self._logger.exception(
                'Exception deleting value from datastore (name=%s): %s',
                name,
                e
            )
            return False

        return True

    def _get_api_client(self):
        """
        Retrieve API client instance.

        A new client (with a fresh 24h auth token) is created lazily the
        first time, and re-created whenever the cached token has expired.
        """
        token_expire = self._token_expire <= get_datetime_utc_now()

        if not self._client or token_expire:
            self._logger.audit('Creating new Client object.')
            ttl = (24 * 60 * 60)
            self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)
            temporary_token = create_token(username=self._api_username, ttl=ttl)
            api_url = get_full_public_api_url()
            self._client = Client(api_url=api_url, token=temporary_token.token)
        return self._client

    def _get_full_key_name(self, name, local):
        """
        Retrieve a full key name.

        :rtype: ``str``
        """
        if local:
            name = self._get_key_name_with_prefix(name=name)

        return name

    def _get_full_key_prefix(self, local, prefix=None):
        # For local lookups the pack/class prefix is prepended to any
        # user-supplied prefix; otherwise the user prefix is used as-is.
        if local:
            key_prefix = self._get_local_key_name_prefix()

            if prefix:
                key_prefix += prefix
        else:
            key_prefix = prefix

        return key_prefix

    def _get_local_key_name_prefix(self):
        """
        Retrieve key prefix which is local to this pack/class.
        """
        key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
        return key_prefix

    def _get_key_name_with_prefix(self, name):
        """
        Retrieve a full key name which is local to the current pack/class.

        :param name: Base datastore key name.
        :type name: ``str``

        :rtype: ``str``
        """
        prefix = self._get_datastore_key_prefix()
        full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
        return full_name

    def _get_datastore_key_prefix(self):
        # "<pack>.<class>" namespace component for locally-scoped keys.
        prefix = '%s.%s' % (self._pack_name, self._class_name)
        return prefix
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.