text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from __future__ import unicode_literals
from .pyjsparserdata import *
from .std_nodes import *
from pprint import pprint
# Characters with special meaning inside a regexp that must remain escaped
# when a JS regexp literal is translated (see _interpret_regexp).
# BUG FIX: the original listed '{' twice and omitted '}', so an escaped
# closing brace ('\}') lost its backslash in the translated pattern.
REGEXP_SPECIAL_SINGLE = ['\\', '^', '$', '*', '+', '?', '.', '[', ']', '(', ')', '{', '}', '|', '-']
import six
if six.PY3:
    # Python 3 compatibility aliases: the (originally Python 2) scanner code
    # below keeps using the Python 2 builtin names.
    basestring = str
    long = int
    xrange = range
    unicode = str
ESPRIMA_VERSION = '2.2.0'  # version of Esprima this parser was ported from
DEBUG = False  # when True, log_err_case prints context around error positions
# Small naming convention changes
# len -> leng
# id -> d
# type -> typ
# str -> st
# JS-style aliases used throughout the parser body below.
true = True
false = False
null = None
class PyJsParser:
""" Usage:
parser = PyJsParser()
parser.parse('var JavaScriptCode = 5.1')
"""
def __init__(self):
self.clean()
    def test(self, code):
        # Debug helper: parse *code* and pretty-print the resulting AST dict.
        # NOTE(review): relies on self.parse, defined later in this class.
        pprint(self.parse(code))
def clean(self):
self.strict = None
self.sourceType = None
self.index = 0
self.lineNumber = 1
self.lineStart = 0
self.hasLineTerminator = None
self.lastIndex = None
self.lastLineNumber = None
self.lastLineStart = None
self.startIndex = None
self.startLineNumber = None
self.startLineStart = None
self.scanning = None
self.lookahead = None
self.state = None
self.extra = None
self.isBindingElement = None
self.isAssignmentTarget = None
self.firstCoverInitializedNameError = None
# 7.4 Comments
    def skipSingleLineComment(self, offset):
        # Consume characters up to and including the line terminator that ends
        # a '//', '-->' or '<!--' comment; *offset* is the length of the
        # already-consumed comment opener.
        start = self.index - offset;  # unused; retained from the Esprima original
        while self.index < self.length:
            ch = self.source[self.index];
            self.index += 1
            # NOTE(review): `ch` is a 1-char string here, while
            # skipMultiLineComment passes an ord() int to isLineTerminator --
            # confirm the predicate accepts both representations.
            if isLineTerminator(ch):
                if (ord(ch) == 13 and ord(self.source[self.index]) == 10):
                    # \r\n counts as a single line terminator.
                    self.index += 1
                self.lineNumber += 1
                self.hasLineTerminator = True
                self.lineStart = self.index
                return
    def skipMultiLineComment(self):
        # Consume characters up to and including the closing '*/', tracking
        # line numbers.  Falls through to tolerateUnexpectedToken when the
        # comment never ends.
        while self.index < self.length:
            ch = ord(self.source[self.index])
            if isLineTerminator(ch):
                if (ch == 0x0D and ord(self.source[self.index+1]) == 0x0A):
                    # \r\n counts as a single line terminator.
                    self.index += 1
                self.lineNumber += 1
                self.index += 1
                self.hasLineTerminator = True
                self.lineStart = self.index
            elif ch == 0x2A:
                # Block comment ends with '*/'.
                if ord(self.source[self.index+1]) == 0x2F:
                    self.index += 2
                    return
                self.index += 1
            else:
                self.index += 1
        # Ran off the end of the source: unterminated block comment.
        self.tolerateUnexpectedToken()
    def skipComment(self):
        # Skip whitespace and comments between tokens, tracking line numbers
        # and whether a line terminator was crossed (needed for automatic
        # semicolon insertion).  Also handles the HTML-like comment markers
        # '<!--' and '-->' (Annex B); '-->' only counts at the start of a line.
        self.hasLineTerminator = False
        start = (self.index==0)  # True while we are at the start of a line
        while self.index < self.length:
            ch = ord(self.source[self.index])
            if isWhiteSpace(ch):
                self.index += 1
            elif isLineTerminator(ch):
                self.hasLineTerminator = True
                self.index += 1
                # NOTE(review): if '\r' is the very last character this reads
                # one char past the end -- confirm inputs are terminated.
                if (ch == 0x0D and ord(self.source[self.index]) == 0x0A):
                    self.index += 1
                self.lineNumber += 1
                self.lineStart = self.index
                start = True
            elif (ch == 0x2F): # U+002F is '/'
                ch = ord(self.source[self.index+1])
                if (ch == 0x2F):
                    self.index += 2
                    self.skipSingleLineComment(2)
                    start = True
                elif (ch == 0x2A):  # U+002A is '*'
                    self.index += 2
                    self.skipMultiLineComment()
                else:
                    break
            elif (start and ch == 0x2D): # U+002D is '-'
                # U+003E is '>'
                if (ord(self.source[self.index+1]) == 0x2D) and (ord(self.source[self.index+2]) == 0x3E):
                    # '-->' is a single-line comment
                    self.index += 3
                    self.skipSingleLineComment(3)
                else:
                    break
            elif (ch == 0x3C): # U+003C is '<'
                if self.source[self.index+1: self.index+4]=='!--':
                    # <!--
                    self.index += 4
                    self.skipSingleLineComment(4)
                else:
                    break
            else:
                break
    def scanHexEscape(self, prefix):
        # Scan \uXXXX (prefix 'u') or \xXX (prefix 'x') after the backslash
        # and prefix have been consumed.  Returns '' (falsy) when fewer than
        # the required number of hex digits follow; the caller decides how to
        # recover.  NOTE: on the failure path self.index has already advanced
        # past any digits that were consumed.
        code = 0
        leng = 4 if (prefix == 'u') else 2
        for i in xrange(leng):
            if self.index < self.length and isHexDigit(self.source[self.index]):
                ch = self.source[self.index]
                self.index += 1
                code = code * 16 + HEX_CONV[ch]
            else:
                return ''
        return unichr(code)
    def scanUnicodeCodePointEscape(self):
        # Scan the ES6 \u{XXXXXX} form; called with self.index just past '{'.
        ch = self.source[self.index]
        code = 0
        # At least, one hex digit is required.
        if ch == '}':
            self.throwUnexpectedToken()
        while (self.index < self.length):
            ch = self.source[self.index]
            self.index += 1
            if not isHexDigit(ch):
                break
            code = code * 16 + HEX_CONV[ch]
        # The loop must have stopped on the closing '}' and the code point
        # must lie inside the Unicode range.
        if code > 0x10FFFF or ch != '}':
            self.throwUnexpectedToken()
        # UTF-16 Encoding
        if (code <= 0xFFFF):
            return unichr(code)
        # Encode astral code points as a surrogate pair.
        cu1 = ((code - 0x10000) >> 10) + 0xD800;
        cu2 = ((code - 0x10000) & 1023) + 0xDC00;
        return unichr(cu1)+unichr(cu2)
def ccode(self, offset=0):
return ord(self.source[self.index+offset])
    def log_err_case(self):
        # Debug aid: when DEBUG is enabled, show where in the source the
        # scanner was when an error object is being created.
        if not DEBUG:
            return
        print('INDEX', self.index)
        print(self.source[self.index-10:self.index+10])
        print('')
def at(self, loc):
return None if loc>=self.length else self.source[loc]
def substr(self, le, offset=0):
return self.source[self.index+offset:self.index+offset+le]
    def getEscapedIdentifier(self):
        # Slow path of identifier scanning: handles \uXXXX escapes in both
        # the first character and the remainder of the identifier.
        d = self.source[self.index]
        ch = ord(d)
        self.index += 1
        # '\u' (U+005C, U+0075) denotes an escaped character.
        if (ch == 0x5C):
            if (ord(self.source[self.index]) != 0x75):
                self.throwUnexpectedToken()
            self.index += 1
            ch = self.scanHexEscape('u')
            # The decoded character must itself be able to start an identifier.
            if not ch or ch == '\\' or not isIdentifierStart(ch[0]):
                self.throwUnexpectedToken()
            d = ch
        while (self.index < self.length):
            ch = self.ccode()
            if not isIdentifierPart(ch):
                break
            self.index += 1
            d += unichr(ch)
            # '\u' (U+005C, U+0075) denotes an escaped character.
            if (ch == 0x5C):
                # Drop the backslash just appended and decode the escape instead.
                d = d[0: len(d)-1]
                if (self.ccode() != 0x75):
                    self.throwUnexpectedToken()
                self.index += 1
                ch = self.scanHexEscape('u');
                if (not ch or ch == '\\' or not isIdentifierPart(ch[0])):
                    self.throwUnexpectedToken()
                d += ch
        return d
    def getIdentifier(self):
        # Fast path: scan an identifier that contains no escape sequences.
        # Restarts via getEscapedIdentifier as soon as a backslash is seen.
        start = self.index
        self.index += 1
        while (self.index < self.length):
            ch = self.ccode()
            if (ch == 0x5C):
                # Blackslash (U+005C) marks Unicode escape sequence.
                self.index = start
                return self.getEscapedIdentifier()
            if (isIdentifierPart(ch)):
                self.index += 1
            else:
                break
        return self.source[start: self.index]
def scanIdentifier(self):
start = self.index
# Backslash (U+005C) starts an escaped character.
d = self.getEscapedIdentifier() if (self.ccode() == 0x5C) else self.getIdentifier()
# There is no keyword or literal with only one character.
# Thus, it must be an identifier.
if (len(d)==1):
type = Token.Identifier
elif (isKeyword(d)):
type = Token.Keyword
elif (d == 'null'):
type = Token.NullLiteral
elif (i == 'true' or d == 'false'):
type = Token.BooleanLiteral
else:
type = Token.Identifier;
return {
'type': type,
'value': d,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index
}
# 7.7 Punctuators
    def scanPunctuator(self):
        # Scan a punctuator, trying the longest operators first (4-char, then
        # 3, 2, 1).  Curly braces are also pushed/popped on state['curlyStack']
        # so scanTemplate can tell a plain '}' apart from a template
        # continuation.
        token = {
            'type': Token.Punctuator,
            'value': '',
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': self.index,
            'end': self.index
        }
        # Check for most common single-character punctuators.
        st = self.source[self.index]
        if st == '{':
            self.state['curlyStack'].append('{')
            self.index += 1
        elif st == '}':
            self.index += 1
            self.state['curlyStack'].pop()
        elif st in ['.', '(', ')', ';', ',', '[', ']', ':', '?', '~']:
            self.index += 1
        else:
            # 4-character punctuator.
            st = self.substr(4)
            if (st == '>>>='):
                self.index += 4
            else:
                # 3-character punctuators.
                st = st[0:3]
                if st in ['===', '!==', '>>>', '<<=', '>>=']:
                    self.index += 3
                else:
                    # 2-character punctuators.
                    st = st[0:2]
                    if st in ['&&','||','==','!=','+=','-=','*=' ,'/=' ,'++' , '--' , '<<', '>>', '&=', '|=', '^=', '%=', '<=', '>=', '=>']:
                        self.index += 2
                    else:
                        # 1-character punctuators.
                        st = self.source[self.index]
                        if st in ['<', '>', '=', '!', '+', '-', '*', '%', '&', '|', '^', '/']:
                            self.index += 1
        # No progress means the character is not a valid punctuator.
        if self.index == token['start']:
            self.throwUnexpectedToken()
        token['end'] = self.index;
        token['value'] = st
        return token
# 7.8.3 Numeric Literals
    def scanHexLiteral(self, start):
        # Scan the digits of a hex literal; the '0x'/'0X' prefix has already
        # been consumed and *start* points at its '0'.
        number = ''
        while (self.index < self.length):
            if (not isHexDigit(self.source[self.index])):
                break
            number += self.source[self.index]
            self.index += 1
        if not number:
            # '0x' with no digits after it.
            self.throwUnexpectedToken()
        # An identifier may not directly follow a numeric literal.
        if isIdentifierStart(self.ccode()):
            self.throwUnexpectedToken()
        return {
            'type': Token.NumericLiteral,
            'value': int(number, 16),
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}
    def scanBinaryLiteral(self, start):
        # Scan the digits of a binary literal; the '0b'/'0B' prefix has
        # already been consumed and *start* points at its '0'.
        number = ''
        while (self.index < self.length):
            ch = self.source[self.index]
            if (ch != '0' and ch != '1'):
                break
            number += self.source[self.index]
            self.index += 1
        if not number:
            # only 0b or 0B
            self.throwUnexpectedToken()
        # Neither an identifier nor another digit may follow the literal.
        if (self.index < self.length):
            ch = self.source[self.index]
            # istanbul ignore else
            if (isIdentifierStart(ch) or isDecimalDigit(ch)):
                self.throwUnexpectedToken();
        return {
            'type': Token.NumericLiteral,
            'value': int(number, 2),
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}
    def scanOctalLiteral(self, prefix, start):
        # Scan an octal literal.  *prefix* is either an octal digit (legacy
        # '0NN' form) or 'o'/'O' (ES6 '0o' form); in both cases self.index is
        # on the character after '0'.
        if isOctalDigit(prefix):
            octal = True
            number = '0' + self.source[self.index]
            self.index += 1
        else:
            octal = False
            self.index += 1  # skip the 'o'/'O' prefix letter
            number = ''
        while (self.index < self.length):
            if (not isOctalDigit(self.source[self.index])):
                break
            number += self.source[self.index]
            self.index += 1
        if (not octal and not number):
            # only 0o or 0O
            self.throwUnexpectedToken()
        # Neither an identifier nor another digit may follow the literal.
        if (isIdentifierStart(self.ccode()) or isDecimalDigit(self.ccode())):
            self.throwUnexpectedToken()
        return {
            'type': Token.NumericLiteral,
            'value': int(number, 8),
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}
    def octalToDecimal(self, ch):
        # Decode a legacy octal escape inside a string literal; *ch* is the
        # first (already consumed) digit.  Returns the code point plus a flag
        # saying whether the escape counts as octal for strict-mode checks.
        # \0 is not octal escape sequence
        octal = (ch != '0')
        code = int(ch, 8)
        if (self.index < self.length and isOctalDigit(self.source[self.index])):
            octal = True
            code = code * 8 + int(self.source[self.index], 8)
            self.index += 1
            # 3 digits are only allowed when string starts
            # with 0, 1, 2, 3
            if (ch in '0123' and self.index < self.length and isOctalDigit(self.source[self.index])):
                code = code * 8 + int((self.source[self.index]), 8)
                self.index += 1
        return {
            'code': code,
            'octal': octal}
def isImplicitOctalLiteral(self):
# Implicit octal, unless there is a non-octal digit.
# (Annex B.1.1 on Numeric Literals)
for i in xrange(self.index + 1, self.length):
ch = self.source[i];
if (ch == '8' or ch == '9'):
return False;
if (not isOctalDigit(ch)):
return True
return True
    def scanNumericLiteral(self):
        # Scan any numeric literal: decimal (with optional fraction and
        # exponent), hex 0x, binary 0b, ES6 octal 0o, or legacy implicit
        # octal 0NN.
        # NOTE(review): several places below read self.source[self.index]
        # without a bounds check -- presumably the caller guarantees the
        # source does not end mid-literal; confirm.
        ch = self.source[self.index]
        assert isDecimalDigit(ch) or (ch == '.'), 'Numeric literal must start with a decimal digit or a decimal point'
        start = self.index
        number = ''
        if ch != '.':
            number = self.source[self.index]
            self.index += 1
            ch = self.source[self.index]
            # Hex number starts with '0x'.
            # Octal number starts with '0'.
            # Octal number in ES6 starts with '0o'.
            # Binary number in ES6 starts with '0b'.
            if (number == '0'):
                if (ch == 'x' or ch == 'X'):
                    self.index += 1
                    return self.scanHexLiteral(start);
                if (ch == 'b' or ch == 'B'):
                    self.index += 1
                    return self.scanBinaryLiteral(start)
                if (ch == 'o' or ch == 'O'):
                    return self.scanOctalLiteral(ch, start)
                if (isOctalDigit(ch)):
                    if (self.isImplicitOctalLiteral()):
                        return self.scanOctalLiteral(ch, start);
            # Integer part.
            while (isDecimalDigit(self.ccode())):
                number += self.source[self.index]
                self.index += 1
            ch = self.source[self.index];
        # Optional fraction.
        if (ch == '.'):
            number += self.source[self.index]
            self.index += 1
            while (isDecimalDigit(self.source[self.index])):
                number += self.source[self.index]
                self.index += 1
            ch = self.source[self.index]
        # Optional exponent.
        if (ch == 'e' or ch == 'E'):
            number += self.source[self.index]
            self.index += 1
            ch = self.source[self.index]
            if (ch == '+' or ch == '-'):
                number += self.source[self.index]
                self.index += 1
            if (isDecimalDigit(self.source[self.index])):
                while (isDecimalDigit(self.source[self.index])):
                    number += self.source[self.index]
                    self.index += 1
            else:
                # 'e' must be followed by at least one digit.
                self.throwUnexpectedToken()
        # An identifier may not directly follow a numeric literal.
        if (isIdentifierStart(self.source[self.index])):
            self.throwUnexpectedToken();
        return {
            'type': Token.NumericLiteral,
            'value': float(number),
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}
# 7.8.4 String Literals
    def _interpret_regexp(self, string, flags):
        '''Perform string escape - for regexp literals.

        Translates a JS regexp body into a (mostly) equivalent Python one.
        NOTE(review): reuses the scanner position fields (index/length/
        source), so it must not be called while a scan is in progress.
        '''
        self.index = 0
        self.length = len(string)
        self.source = string
        self.lineNumber = 0
        self.lineStart = 0
        octal = False
        st = ''
        inside_square = 0
        while (self.index < self.length):
            # Outside a character class, multi-char translations are wrapped
            # in [...] so they act as a single atom; inside one, emitted bare.
            template = '[%s]' if not inside_square else '%s'
            ch = self.source[self.index]
            self.index += 1
            if ch == '\\':
                ch = self.source[self.index]
                self.index += 1
                if (not isLineTerminator(ch)):
                    if ch=='u':
                        digs = self.source[self.index:self.index+4]
                        if len(digs)==4 and all(isHexDigit(d) for d in digs):
                            st += template%unichr(int(digs, 16))
                            self.index += 4
                        else:
                            # Malformed \uXXXX: JS treats it as a literal 'u'.
                            st += 'u'
                    elif ch=='x':
                        digs = self.source[self.index:self.index+2]
                        if len(digs)==2 and all(isHexDigit(d) for d in digs):
                            st += template%unichr(int(digs, 16))
                            self.index += 2
                        else:
                            st += 'x'
                    # special meaning - single char.
                    elif ch=='0':
                        st += '\\0'
                    elif ch=='n':
                        st += '\\n'
                    elif ch=='r':
                        st += '\\r'
                    elif ch=='t':
                        st += '\\t'
                    elif ch=='f':
                        st += '\\f'
                    elif ch=='v':
                        st += '\\v'
                    # unescape special single characters like . so that they are interpreted literally
                    elif ch in REGEXP_SPECIAL_SINGLE:
                        st += '\\' + ch
                    # character groups
                    elif ch=='b':
                        st += '\\b'
                    elif ch=='B':
                        st += '\\B'
                    elif ch=='w':
                        st += '\\w'
                    elif ch=='W':
                        st += '\\W'
                    elif ch=='d':
                        st += '\\d'
                    elif ch=='D':
                        st += '\\D'
                    elif ch=='s':
                        # JS \s expanded to its explicit whitespace set.
                        st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
                    elif ch=='S':
                        # Complement of the \s set above.
                        st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
                    else:
                        if isDecimalDigit(ch):
                            # Backreference: keep the full digit run escaped.
                            num = ch
                            while self.index<self.length and isDecimalDigit(self.source[self.index]):
                                num += self.source[self.index]
                                self.index += 1
                            st += '\\' + num
                        else:
                            st += ch # DONT ESCAPE!!!
                else:
                    # Escaped line terminator: swallowed, but line count kept.
                    self.lineNumber += 1
                    if (ch == '\r' and self.source[self.index] == '\n'):
                        self.index += 1
                    self.lineStart = self.index
            else:
                if ch=='[':
                    inside_square = True
                elif ch==']':
                    inside_square = False
                st += ch
        #print string, 'was transformed to', st
        return st
    def scanStringLiteral(self):
        # Scan a single- or double-quoted string literal, decoding escape
        # sequences into *st*.  'octal' records whether a legacy octal escape
        # was seen (forbidden in strict mode; checked by callers).
        st = ''
        octal = False
        quote = self.source[self.index]
        assert quote == '\''or quote == '"', 'String literal must starts with a quote'
        start = self.index;
        self.index += 1
        while (self.index < self.length):
            ch = self.source[self.index]
            self.index += 1
            if (ch == quote):
                # Closing quote found; cleared so the termination check passes.
                quote = ''
                break
            elif (ch == '\\'):
                ch = self.source[self.index]
                self.index += 1
                if (not isLineTerminator(ch)):
                    if ch in 'ux':
                        if (self.source[self.index] == '{'):
                            # ES6 code point escape \u{...}
                            self.index += 1
                            st += self.scanUnicodeCodePointEscape()
                        else:
                            unescaped = self.scanHexEscape(ch)
                            if (not unescaped):
                                self.throwUnexpectedToken() # with throw I don't know whats the difference
                            st += unescaped
                    elif ch=='n':
                        st += '\n';
                    elif ch=='r':
                        st += '\r';
                    elif ch=='t':
                        st += '\t';
                    elif ch=='b':
                        st += '\b';
                    elif ch=='f':
                        st += '\f';
                    elif ch=='v':
                        st += '\x0B'
                    #elif ch in '89':
                    #    self.throwUnexpectedToken() # again with throw....
                    else:
                        if isOctalDigit(ch):
                            # Legacy octal escape (\NNN).
                            octToDec = self.octalToDecimal(ch)
                            octal = octToDec.get('octal') or octal
                            st += unichr(octToDec['code'])
                        else:
                            # Unknown escape: the character stands for itself.
                            st += ch
                else:
                    # Line continuation: escaped newline produces nothing.
                    self.lineNumber += 1
                    if (ch == '\r' and self.source[self.index] == '\n'):
                        self.index += 1
                    self.lineStart = self.index
            elif isLineTerminator(ch):
                # Unescaped newline: leave quote set so the check below raises.
                break
            else:
                st += ch;
        if (quote != ''):
            # Never saw the closing quote.
            self.throwUnexpectedToken()
        return {
            'type': Token.StringLiteral,
            'value': st,
            'octal': octal,
            'lineNumber': self.lineNumber,
            # NOTE(review): other scanners report self.lineStart here;
            # startLineStart may be stale at this point -- confirm intent.
            'lineStart': self.startLineStart,
            'start': start,
            'end': self.index}
    def scanTemplate(self):
        # Scan one template-literal segment: from ` or } up to the next '${'
        # (head/middle) or closing ` (tail).  state['curlyStack'] tracks
        # nesting so advance() can tell a template-continuation '}' from a
        # block '}'.
        cooked = ''
        terminated = False
        tail = False
        start = self.index
        head = (self.source[self.index]=='`')
        rawOffset = 2
        self.index += 1
        while (self.index < self.length):
            ch = self.source[self.index]
            self.index += 1
            if (ch == '`'):
                # End of the whole template literal.
                rawOffset = 1;
                tail = True
                terminated = True
                break
            elif (ch == '$'):
                if (self.source[self.index] == '{'):
                    # '${' starts a substitution expression.
                    self.state['curlyStack'].append('${')
                    self.index += 1
                    terminated = True
                    break;
                cooked += ch
            elif (ch == '\\'):
                ch = self.source[self.index]
                self.index += 1
                if (not isLineTerminator(ch)):
                    if ch=='n':
                        cooked += '\n'
                    elif ch=='r':
                        cooked += '\r'
                    elif ch=='t':
                        cooked += '\t'
                    elif ch in 'ux':
                        if (self.source[self.index] == '{'):
                            self.index += 1
                            cooked += self.scanUnicodeCodePointEscape()
                        else:
                            restore = self.index
                            unescaped = self.scanHexEscape(ch)
                            if (unescaped):
                                cooked += unescaped
                            else:
                                # Invalid hex escape: rewind, keep raw char.
                                self.index = restore
                                cooked += ch
                    elif ch=='b':
                        cooked += '\b'
                    elif ch=='f':
                        cooked += '\f'
                    elif ch=='v':
                        cooked += '\v'
                    else:
                        if (ch == '0'):
                            if isDecimalDigit(self.ccode()):
                                # Illegal: \01 \02 and so on
                                self.throwError(Messages.TemplateOctalLiteral)
                            cooked += '\0'
                        elif (isOctalDigit(ch)):
                            # Illegal: \1 \2
                            self.throwError(Messages.TemplateOctalLiteral)
                        else:
                            cooked += ch
                else:
                    # Escaped line terminator: line continuation.
                    self.lineNumber += 1
                    if (ch == '\r' and self.source[self.index] == '\n'):
                        self.index += 1
                    self.lineStart = self.index
            elif (isLineTerminator(ch)):
                # Raw newlines are normalized to '\n' in the cooked value.
                self.lineNumber += 1
                if (ch == '\r' and self.source[self.index] =='\n'):
                    self.index += 1
                self.lineStart = self.index
                cooked += '\n'
            else:
                cooked += ch;
        if (not terminated):
            self.throwUnexpectedToken()
        if (not head):
            # A middle/tail segment closes the '${' opened before it.
            self.state['curlyStack'].pop();
        return {
            'type': Token.Template,
            'value': {
                'cooked': cooked,
                'raw': self.source[start + 1:self.index - rawOffset]},
            'head': head,
            'tail': tail,
            'lineNumber': self.lineNumber,
            'lineStart': self.lineStart,
            'start': start,
            'end': self.index}
def testRegExp(self, pattern, flags):
#todo: you should return python regexp object
return (pattern, flags)
    def scanRegExpBody(self):
        # Scan the /pattern/ part of a regexp literal (without flags).
        # classMarker tracks whether we are inside [...] where '/' is literal.
        ch = self.source[self.index]
        assert ch == '/', 'Regular expression literal must start with a slash'
        st = ch
        self.index += 1
        classMarker = False
        terminated = False
        while (self.index < self.length):
            ch = self.source[self.index]
            self.index += 1
            st += ch
            if (ch == '\\'):
                # Escaped character: consume the next char unconditionally.
                ch = self.source[self.index]
                self.index += 1
                # ECMA-262 7.8.5
                if (isLineTerminator(ch)):
                    self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
                st += ch
            elif (isLineTerminator(ch)):
                self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
            elif (classMarker):
                if (ch == ']'):
                    classMarker = False
            else:
                if (ch == '/'):
                    terminated = True
                    break
                elif (ch == '['):
                    classMarker = True;
        if (not terminated):
            self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
        # Exclude leading and trailing slash.
        body = st[1:-1]
        return {
            'value': body,
            'literal': st}
    def scanRegExpFlags(self):
        # Scan regexp flags; \uXXXX escapes are decoded into *flags* but kept
        # verbatim in *st* (the literal text).
        st = ''
        flags = ''
        while (self.index < self.length):
            ch = self.source[self.index]
            if (not isIdentifierPart(ch)):
                break
            self.index += 1
            if (ch == '\\' and self.index < self.length):
                ch = self.source[self.index]
                if (ch == 'u'):
                    self.index += 1
                    restore = self.index
                    ch = self.scanHexEscape('u')
                    if (ch):
                        flags += ch
                        st += '\\u'
                        # Copy the raw digits scanHexEscape consumed.
                        while restore < self.index:
                            st += self.source[restore]
                            restore += 1
                    else:
                        # Bad escape: rewind and treat it as a literal 'u'.
                        self.index = restore
                        flags += 'u'
                        st += '\\u'
                        self.tolerateUnexpectedToken()
                else:
                    st += '\\'
                    self.tolerateUnexpectedToken()
            else:
                flags += ch
                st += ch
        return {
            'value': flags,
            'literal': st}
def scanRegExp(self):
self.scanning = True
self.lookahead = None
self.skipComment()
start = self.index
body = self.scanRegExpBody()
flags = self.scanRegExpFlags()
value = self.testRegExp(body['value'], flags['value'])
scanning = False
return {
'literal': body['literal'] + flags['literal'],
'value': value,
'regex': {
'pattern': body['value'],
'flags': flags['value']
},
'start': start,
'end': self.index}
def collectRegex(self):
self.skipComment();
return self.scanRegExp()
def isIdentifierName(self, token):
return token['type'] in [1,3,4,5]
#def advanceSlash(self): ???
    def advance(self):
        # Produce the next token from the current scan position, dispatching
        # on the first character.  skipComment must already have been run.
        if (self.index >= self.length):
            return {
                'type': Token.EOF,
                'lineNumber': self.lineNumber,
                'lineStart': self.lineStart,
                'start': self.index,
                'end': self.index}
        ch = self.ccode()
        if isIdentifierStart(ch):
            token = self.scanIdentifier()
            # In strict mode, reserved words become keywords.
            if (self.strict and isStrictModeReservedWord(token['value'])):
                token['type'] = Token.Keyword
            return token
        # Very common: ( and ) and ;
        if (ch == 0x28 or ch == 0x29 or ch == 0x3B):
            return self.scanPunctuator()
        # String literal starts with single quote (U+0027) or double quote (U+0022).
        if (ch == 0x27 or ch == 0x22):
            return self.scanStringLiteral()
        # Dot (.) U+002E can also start a floating-point number, hence the need
        # to check the next character.
        if (ch == 0x2E):
            if (isDecimalDigit(self.ccode(1))):
                return self.scanNumericLiteral()
            return self.scanPunctuator();
        if (isDecimalDigit(ch)):
            return self.scanNumericLiteral()
        # Slash (/) U+002F can also start a regex.
        #if (extra.tokenize && ch == 0x2F):
        #    return advanceSlash();
        # Template literals start with ` (U+0060) for template head
        # or } (U+007D) for template middle or template tail.
        if (ch == 0x60 or (ch == 0x7D and self.state['curlyStack'][len(self.state['curlyStack']) - 1] == '${')):
            return self.scanTemplate()
        return self.scanPunctuator();
#def collectToken(self):
# loc = {
# 'start': {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}}
#
# token = self.advance()
#
# loc['end'] = {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}
# if (token['type'] != Token.EOF):
# value = self.source[token['start']: token['end']]
# entry = {
# 'type': TokenName[token['type']],
# 'value': value,
# 'range': [token['start'], token['end']],
# 'loc': loc}
# if (token.get('regex')):
# entry['regex'] = {
# 'pattern': token['regex']['pattern'],
# 'flags': token['regex']['flags']}
# self.extra['tokens'].append(entry)
# return token;
def lex(self):
self.scanning = True
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.skipComment()
token = self.lookahead
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advance()
self.scanning = False
return token
def peek(self):
self.scanning = True
self.skipComment()
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advance()
self.scanning = False
    def createError(self, line, pos, description):
        # Build (but do not raise) a JS SyntaxError exception object carrying
        # position info.  The import is local to avoid a circular import with
        # the base module.
        self.log_err_case()
        from universalscrapers.modules.js2py.base import ERRORS, Js, JsToPyException
        error = ERRORS['SyntaxError']('Line ' + unicode(line) + ': ' + unicode(description))
        error.put('index', Js(pos))
        error.put('lineNumber', Js(line))
        # Column is computed relative to the start of the reported line; which
        # line-start is used depends on whether we are mid-scan.
        error.put('column', Js(pos - (self.lineStart if self.scanning else self.lastLineStart) + 1))
        error.put('description', Js(description))
        return JsToPyException(error)
# Throw an exception
def throwError(self, messageFormat, *args):
msg = messageFormat % tuple(unicode(e) for e in args)
raise self.createError(self.lastLineNumber, self.lastIndex, msg);
    def tolerateError(self, messageFormat, *args):
        # No tolerant mode is implemented, so "tolerate" simply raises.
        return self.throwError(messageFormat, *args)
# Throw an exception because of the token.
    def unexpectedTokenError(self, token={}, message=''):
        # Build (without raising) the error for an unexpected token.  *token*
        # may be empty when the scanner itself hit an illegal character.
        # NOTE: the mutable default dict is never modified, so sharing is safe.
        msg = message or Messages.UnexpectedToken
        if (token):
            typ = token['type']
            if (not message):
                # Pick a message template matching the token category.
                if typ == Token.EOF: msg = Messages.UnexpectedEOS
                elif (typ == Token.Identifier): msg = Messages.UnexpectedIdentifier
                elif (typ == Token.NumericLiteral): msg = Messages.UnexpectedNumber
                elif (typ == Token.StringLiteral): msg = Messages.UnexpectedString
                elif (typ == Token.Template): msg = Messages.UnexpectedTemplate
                else: msg = Messages.UnexpectedToken;
                if (typ == Token.Keyword):
                    if (isFutureReservedWord(token['value'])):
                        msg = Messages.UnexpectedReserved
                    elif (self.strict and isStrictModeReservedWord(token['value'])):
                        msg = Messages.StrictReservedWord
            value = token['value']['raw'] if (typ == Token.Template) else token.get('value')
        else:
            value = 'ILLEGAL'
        # Substitute the offending value into the message template.
        msg = msg.replace('%s', unicode(value))
        return (self.createError(token['lineNumber'], token['start'], msg) if (token and token.get('lineNumber')) else
                self.createError(self.lineNumber if self.scanning else self.lastLineNumber, self.index if self.scanning else self.lastIndex, msg))
    def throwUnexpectedToken(self, token={}, message=''):
        # Raise the error built by unexpectedTokenError.
        # NOTE: the shared mutable default is never modified, so it is safe.
        raise self.unexpectedTokenError(token, message)
    def tolerateUnexpectedToken(self, token={}, message=''):
        # No tolerant mode: delegates straight to the throwing variant.
        self.throwUnexpectedToken(token, message)
# Expect the next token to match the specified punctuator.
# If not, an exception will be thrown.
def expect(self, value):
token = self.lex()
if (token['type'] != Token.Punctuator or token['value'] != value):
self.throwUnexpectedToken(token)
#/**
# * @name expectCommaSeparator
# * @description Quietly expect a comma when in tolerant mode, otherwise delegates
# * to <code>expect(value)</code>
# * @since 2.0
# */
    def expectCommaSeparator(self):
        # Esprima quietly accepts a missing comma in tolerant mode; this port
        # has no tolerant mode, so a comma is simply required.
        self.expect(',')
# Expect the next token to match the specified keyword.
# If not, an exception will be thrown.
def expectKeyword(self, keyword):
token = self.lex();
if (token['type'] != Token.Keyword or token['value'] != keyword):
self.throwUnexpectedToken(token)
# Return true if the next token matches the specified punctuator.
    def match(self, value):
        # True if the lookahead token is the punctuator *value*.
        return self.lookahead['type'] == Token.Punctuator and self.lookahead['value'] == value
# Return true if the next token matches the specified keyword
    def matchKeyword(self, keyword):
        # True if the lookahead token is the keyword *keyword*.
        return self.lookahead['type'] == Token.Keyword and self.lookahead['value'] == keyword
# Return true if the next token matches the specified contextual keyword
# (where an identifier is sometimes a keyword depending on the context)
    def matchContextualKeyword(self, keyword):
        # True if the lookahead is an identifier spelling *keyword* (words
        # that are only keywords in certain contexts, e.g. 'of', 'let').
        return self.lookahead['type'] == Token.Identifier and self.lookahead['value'] == keyword
# Return true if the next token is an assignment operator
def matchAssign(self):
if (self.lookahead['type'] != Token.Punctuator):
return False;
op = self.lookahead['value']
return op in ['=','*=', '/=','%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=' , '^=' , '|=']
    def consumeSemicolon(self):
        # Consume a statement-ending semicolon, applying automatic semicolon
        # insertion (ASI) when a line terminator, '}' or EOF follows instead.
        # Catch the very common case first: immediately a semicolon (U+003B).
        if (self.at(self.startIndex) == ';' or self.match(';')):
            self.lex()
            return
        if (self.hasLineTerminator):
            # ASI: a newline before the next token ends the statement.
            return
        # TODO: FIXME(ikarienator): this is seemingly an issue in the previous location info convention.
        self.lastIndex = self.startIndex
        self.lastLineNumber = self.startLineNumber
        self.lastLineStart = self.startLineStart
        if (self.lookahead['type'] != Token.EOF and not self.match('}')):
            self.throwUnexpectedToken(self.lookahead)
# // Cover grammar support.
# //
# // When an assignment expression position starts with an left parenthesis, the determination of the type
# // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
# // or the first comma. This situation also defers the determination of all the expressions nested in the pair.
# //
# // There are three productions that can be parsed in a parentheses pair that needs to be determined
# // after the outermost pair is closed. They are:
# //
# // 1. AssignmentExpression
# // 2. BindingElements
# // 3. AssignmentTargets
# //
# // In order to avoid exponential backtracking, we use two flags to denote if the production can be
# // binding element or assignment target.
# //
# // The three productions have the relationship:
# //
# // BindingElements <= AssignmentTargets <= AssignmentExpression
# //
# // with a single exception that CoverInitializedName when used directly in an Expression, generates
# // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
# // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair.
# //
# // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not
# // effect the current flags. This means the production the parser parses is only used as an expression. Therefore
# // the CoverInitializedName check is conducted.
# //
# // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
# // the flags outside of the parser. This means the production the parser parses is used as a part of a potential
# // pattern. The CoverInitializedName check is deferred.
def isolateCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
if (self.firstCoverInitializedNameError != null):
self.throwUnexpectedToken(self.firstCoverInitializedNameError)
self.isBindingElement = oldIsBindingElement
self.isAssignmentTarget = oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError
return result
def inheritCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
self.isBindingElement = self.isBindingElement and oldIsBindingElement
self.isAssignmentTarget = self.isAssignmentTarget and oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError or self.firstCoverInitializedNameError
return result
    def parseArrayPattern(self):
        # Parse a destructuring array pattern: [a, , b, ...rest]
        node = Node()
        elements = []
        self.expect('[');
        while (not self.match(']')):
            if (self.match(',')):
                # Elision: a hole in the pattern.
                self.lex()
                elements.append(null)
            else:
                if (self.match('...')):
                    # A rest element must be the last element.
                    restNode = Node()
                    self.lex()
                    rest = self.parseVariableIdentifier()
                    elements.append(restNode.finishRestElement(rest))
                    break
                else:
                    elements.append(self.parsePatternWithDefault())
                if (not self.match(']')):
                    self.expect(',')
        self.expect(']')
        return node.finishArrayPattern(elements)
    def parsePropertyPattern(self):
        # Parse one property inside an object pattern: shorthand {x},
        # shorthand with default {x = e}, or the full form {key: pattern}.
        node = Node()
        computed = self.match('[')
        if (self.lookahead['type'] == Token.Identifier):
            key = self.parseVariableIdentifier()
            if (self.match('=')):
                # Shorthand with default: {x = expr}
                self.lex();
                init = self.parseAssignmentExpression()
                return node.finishProperty(
                    'init', key, false, WrappingNode(key).finishAssignmentPattern(key, init), false, false)
            elif (not self.match(':')):
                # Plain shorthand: {x}
                return node.finishProperty('init', key, false, key, false, true)
        else:
            # Computed or literal key.
            key = self.parseObjectPropertyKey()
        self.expect(':')
        init = self.parsePatternWithDefault()
        return node.finishProperty('init', key, computed, init, false, false)
    def parseObjectPattern(self):
        # Parse a destructuring object pattern: {a, b: c, d = 1}
        node = Node()
        properties = []
        self.expect('{')
        while (not self.match('}')):
            properties.append(self.parsePropertyPattern())
            if (not self.match('}')):
                self.expect(',')
        # Consume the closing '}'.
        self.lex()
        return node.finishObjectPattern(properties)
    def parsePattern(self):
        # Parse a binding pattern: identifier, array pattern or object pattern.
        if (self.lookahead['type'] == Token.Identifier):
            return self.parseVariableIdentifier()
        elif (self.match('[')):
            return self.parseArrayPattern()
        elif (self.match('{')):
            return self.parseObjectPattern()
        self.throwUnexpectedToken(self.lookahead)
    def parsePatternWithDefault(self):
        # Parse a pattern optionally followed by a default initializer
        # (pattern = expression).
        startToken = self.lookahead
        pattern = self.parsePattern()
        if (self.match('=')):
            self.lex()
            right = self.isolateCoverGrammar(self.parseAssignmentExpression)
            pattern = WrappingNode(startToken).finishAssignmentPattern(pattern, right)
        return pattern
# 11.1.4 Array Initialiser
    def parseArrayInitialiser(self):
        """Parse an array literal ``[ ... ]`` into an ArrayExpression node.

        Holes (elisions) become null elements.  A spread element is allowed
        anywhere, but a non-final spread disqualifies the whole array from
        being reinterpreted as a binding/assignment pattern.
        """
        elements = []
        node = Node()
        self.expect('[')
        while (not self.match(']')):
            if (self.match(',')):
                # Elision: bare comma produces a hole (null element).
                self.lex()
                elements.append(null)
            elif (self.match('...')):
                restSpread = Node()
                self.lex()
                restSpread.finishSpreadElement(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    # Spread is not last: cannot be a pattern any more.
                    self.isAssignmentTarget = self.isBindingElement = false
                    self.expect(',')
                elements.append(restSpread)
            else:
                elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
                if (not self.match(']')):
                    self.expect(',')
        self.lex();  # consume the closing ']'
        return node.finishArrayExpression(elements)
# 11.1.5 Object Initialiser
    def parsePropertyFunction(self, node, paramInfo):
        """Parse a function body for a method/accessor given pre-parsed params.

        Saves/restores the outer strict-mode flag; if the body turned out to
        be strict, re-reports any restricted or duplicate parameter names
        recorded in *paramInfo* while parsing the parameter list.
        """
        self.isAssignmentTarget = self.isBindingElement = false;
        previousStrict = self.strict;
        body = self.isolateCoverGrammar(self.parseFunctionSourceElements);
        if (self.strict and paramInfo['firstRestricted']):
            self.tolerateUnexpectedToken(paramInfo['firstRestricted'], paramInfo.get('message'))
        if (self.strict and paramInfo['stricted']):
            self.tolerateUnexpectedToken(paramInfo['stricted'], paramInfo.get('message'));
        self.strict = previousStrict;
        return node.finishFunctionExpression(null, paramInfo['params'], paramInfo['defaults'], body)
def parsePropertyMethodFunction(self):
node = Node();
params = self.parseParams();
method = self.parsePropertyFunction(node, params);
return method;
    def parseObjectPropertyKey(self):
        """Parse a property key: string/number literal, identifier-like word,
        or a computed ``[expression]`` key (ES6)."""
        node = Node()
        token = self.lex();
        # // Note: This function is called only from parseObjectProperty(), where
        # // EOF and Punctuator tokens are already filtered out.
        typ = token['type']
        if typ in [Token.StringLiteral, Token.NumericLiteral]:
            if self.strict and token.get('octal'):
                # Octal literals are forbidden as keys in strict mode.
                self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral);
            return node.finishLiteral(token);
        elif typ in [Token.Identifier, Token.BooleanLiteral, Token.NullLiteral, Token.Keyword]:
            return node.finishIdentifier(token['value']);
        elif typ==Token.Punctuator:
            if (token['value'] == '['):
                # Computed property name: [expression]
                expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
                self.expect(']')
                return expr
        self.throwUnexpectedToken(token)
def lookaheadPropertyName(self):
typ = self.lookahead['type']
if typ in [Token.Identifier, Token.StringLiteral, Token.BooleanLiteral, Token.NullLiteral, Token.NumericLiteral, Token.Keyword]:
return true
if typ == Token.Punctuator:
return self.lookahead['value'] == '['
return false
# // This function is to try to parse a MethodDefinition as defined in 14.3. But in the case of object literals,
# // it might be called at a position where there is in fact a short hand identifier pattern or a data property.
# // This can only be determined after we consumed up to the left parentheses.
# //
# // In order to avoid back tracking, it returns `null` if the position is not a MethodDefinition and the caller
# // is responsible to visit other options.
    def tryParseMethodDefinition(self, token, key, computed, node):
        """Try to parse a MethodDefinition (ES6 14.3): a get/set accessor or a
        shorthand method.

        Returns null when the position is actually a plain data property or a
        shorthand identifier, leaving the caller to handle those cases (this
        avoids back-tracking; see the block comment above).
        """
        if (token['type'] == Token.Identifier):
            # check for `get` and `set`;
            if (token['value'] == 'get' and self.lookaheadPropertyName()):
                computed = self.match('[');
                key = self.parseObjectPropertyKey()
                methodNode = Node()
                # A getter takes no parameters.
                self.expect('(')
                self.expect(')')
                value = self.parsePropertyFunction(methodNode, {
                    'params': [],
                    'defaults': [],
                    'stricted': null,
                    'firstRestricted': null,
                    'message': null
                })
                return node.finishProperty('get', key, computed, value, false, false)
            elif (token['value'] == 'set' and self.lookaheadPropertyName()):
                computed = self.match('[')
                key = self.parseObjectPropertyKey()
                methodNode = Node()
                self.expect('(')
                options = {
                    'params': [],
                    'defaultCount': 0,
                    'defaults': [],
                    'firstRestricted': null,
                    'paramSet': {}
                }
                if (self.match(')')):
                    # A setter requires exactly one parameter.
                    self.tolerateUnexpectedToken(self.lookahead);
                else:
                    self.parseParam(options);
                    if (options['defaultCount'] == 0):
                        options['defaults'] = []
                self.expect(')')
                value = self.parsePropertyFunction(methodNode, options);
                return node.finishProperty('set', key, computed, value, false, false);
        if (self.match('(')):
            # Shorthand method: key(params) { ... }
            value = self.parsePropertyMethodFunction();
            return node.finishProperty('init', key, computed, value, true, false)
        # Not a method definition; caller handles data/shorthand properties.
        return null;
def checkProto(self, key, computed, hasProto):
if (computed == false and (key['type'] == Syntax.Identifier and key.name == '__proto__' or
key['type'] == Syntax.Literal and key.value == '__proto__')):
if (hasProto.value):
self.tolerateError(Messages.DuplicateProtoProperty);
else:
hasProto.value = true;
    def parseObjectProperty(self, hasProto):
        """Parse one property of an object literal: a method/accessor, a
        ``key: value`` pair, a shorthand, or the cover-grammar shorthand with
        default (``{x = 1}``, only valid when later reinterpreted as a pattern).
        """
        token = self.lookahead
        node = Node()
        computed = self.match('[');
        key = self.parseObjectPropertyKey();
        maybeMethod = self.tryParseMethodDefinition(token, key, computed, node)
        if (maybeMethod):
            self.checkProto(maybeMethod.key, maybeMethod.computed, hasProto);
            return maybeMethod;
        #// init property or short hand property.
        self.checkProto(key, computed, hasProto);
        if (self.match(':')):
            self.lex();
            value = self.inheritCoverGrammar(self.parseAssignmentExpression)
            return node.finishProperty('init', key, computed, value, false, false)
        if (token['type'] == Token.Identifier):
            if (self.match('=')):
                # Cover grammar: remember this token so an error can be raised
                # later if the expression is never reinterpreted as a pattern.
                self.firstCoverInitializedNameError = self.lookahead;
                self.lex();
                value = self.isolateCoverGrammar(self.parseAssignmentExpression);
                return node.finishProperty('init', key, computed,
                                           WrappingNode(token).finishAssignmentPattern(key, value), false, true)
            return node.finishProperty('init', key, computed, key, false, true)
        self.throwUnexpectedToken(self.lookahead)
def parseObjectInitialiser(self):
properties = []
hasProto = {'value': false}
node = Node();
self.expect('{');
while (not self.match('}')):
properties.append(self.parseObjectProperty(hasProto));
if (not self.match('}')):
self.expectCommaSeparator()
self.expect('}');
return node.finishObjectExpression(properties)
def reinterpretExpressionAsPattern(self, expr):
typ = (expr['type'])
if typ == Syntax.Identifier:
pass
elif typ == Syntax.SpreadElement:
expr['type'] = Syntax.RestElement
self.reinterpretExpressionAsPattern(expr.argument)
elif typ == Syntax.ArrayExpression:
expr['type'] = Syntax.ArrayPattern
for i in xrange(len(expr['elements'])):
if (expr['elements'][i] != null):
self.reinterpretExpressionAsPattern(expr['elements'][i])
elif typ == Syntax.ObjectExpression:
expr['type'] = Syntax.ObjectPattern
for i in xrange(len(expr['properties'])):
self.reinterpretExpressionAsPattern(expr['properties'][i]['value']);
elif Syntax.AssignmentExpression:
expr['type'] = Syntax.AssignmentPattern;
self.reinterpretExpressionAsPattern(expr['left'])
else:
#// Allow other node type for tolerant parsing.
return
def parseTemplateElement(self, option):
if (self.lookahead['type'] != Token.Template or (option['head'] and not self.lookahead['head'])):
self.throwUnexpectedToken()
node = Node();
token = self.lex();
return node.finishTemplateElement({ 'raw': token['value']['raw'], 'cooked': token['value']['cooked'] }, token['tail'])
def parseTemplateLiteral(self):
node = Node()
quasi = self.parseTemplateElement({ 'head': true })
quasis = [quasi]
expressions = []
while (not quasi['tail']):
expressions.append(self.parseExpression());
quasi = self.parseTemplateElement({ 'head': false });
quasis.append(quasi)
return node.finishTemplateLiteral(quasis, expressions)
# 11.1.6 The Grouping Operator
    def parseGroupExpression(self):
        """Parse a parenthesised expression, which may actually be an arrow
        function parameter list.

        When it is, an ArrowParameterPlaceHolder dict is returned instead of
        a node, and parseAssignmentExpression picks it up to build the arrow
        function (cover grammar resolution without back-tracking).
        """
        self.expect('(');
        if (self.match(')')):
            # '()' is only legal as an empty arrow-function parameter list.
            self.lex();
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': []}
        startToken = self.lookahead
        if (self.match('...')):
            # '(...rest)' must also be an arrow parameter list.
            expr = self.parseRestElement();
            self.expect(')');
            if (not self.match('=>')):
                self.expect('=>')
            return {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': [expr]}
        self.isBindingElement = true;
        expr = self.inheritCoverGrammar(self.parseAssignmentExpression);
        if (self.match(',')):
            self.isAssignmentTarget = false;
            expressions = [expr]
            while (self.startIndex < self.length):
                if (not self.match(',')):
                    break
                self.lex();
                if (self.match('...')):
                    # A rest element mid-sequence forces arrow interpretation:
                    # reinterpret everything collected so far as patterns.
                    if (not self.isBindingElement):
                        self.throwUnexpectedToken(self.lookahead)
                    expressions.append(self.parseRestElement())
                    self.expect(')');
                    if (not self.match('=>')):
                        self.expect('=>');
                    self.isBindingElement = false
                    for i in xrange(len(expressions)):
                        self.reinterpretExpressionAsPattern(expressions[i])
                    return {
                        'type': PlaceHolders.ArrowParameterPlaceHolder,
                        'params': expressions}
                expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
            expr = WrappingNode(startToken).finishSequenceExpression(expressions);
        self.expect(')')
        if (self.match('=>')):
            if (not self.isBindingElement):
                self.throwUnexpectedToken(self.lookahead);
            # NOTE(review): this mixes expr.expressions and expr['expressions'];
            # the node type appears to support both access styles - confirm.
            if (expr['type'] == Syntax.SequenceExpression):
                for i in xrange(len(expr.expressions)):
                    self.reinterpretExpressionAsPattern(expr['expressions'][i])
            else:
                self.reinterpretExpressionAsPattern(expr);
            expr = {
                'type': PlaceHolders.ArrowParameterPlaceHolder,
                'params': expr['expressions'] if expr['type'] == Syntax.SequenceExpression else [expr]}
        self.isBindingElement = false
        return expr
# 11.1 Primary Expressions
def parsePrimaryExpression(self):
if (self.match('(')):
self.isBindingElement = false;
return self.inheritCoverGrammar(self.parseGroupExpression)
if (self.match('[')):
return self.inheritCoverGrammar(self.parseArrayInitialiser)
if (self.match('{')):
return self.inheritCoverGrammar(self.parseObjectInitialiser)
typ = self.lookahead['type']
node = Node();
if (typ == Token.Identifier):
expr = node.finishIdentifier(self.lex()['value']);
elif (typ == Token.StringLiteral or typ == Token.NumericLiteral):
self.isAssignmentTarget = self.isBindingElement = false
if (self.strict and self.lookahead.get('octal')):
self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
expr = node.finishLiteral(self.lex())
elif (typ == Token.Keyword):
self.isAssignmentTarget = self.isBindingElement = false
if (self.matchKeyword('function')):
return self.parseFunctionExpression()
if (self.matchKeyword('this')):
self.lex()
return node.finishThisExpression()
if (self.matchKeyword('class')):
return self.parseClassExpression()
self.throwUnexpectedToken(self.lex())
elif (typ == Token.BooleanLiteral):
isAssignmentTarget = self.isBindingElement = false
token = self.lex();
token['value'] = (token['value'] == 'true')
expr = node.finishLiteral(token)
elif (typ == Token.NullLiteral):
self.isAssignmentTarget = self.isBindingElement = false
token = self.lex()
token['value'] = null;
expr = node.finishLiteral(token)
elif (self.match('/') or self.match('/=')):
self.isAssignmentTarget = self.isBindingElement = false;
self.index = self.startIndex;
token = self.scanRegExp(); # hehe, here you are!
self.lex();
expr = node.finishLiteral(token);
elif (typ == Token.Template):
expr = self.parseTemplateLiteral()
else:
self.throwUnexpectedToken(self.lex());
return expr;
# 11.2 Left-Hand-Side Expressions
def parseArguments(self):
args = [];
self.expect('(');
if (not self.match(')')):
while (self.startIndex < self.length):
args.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
if (self.match(')')):
break
self.expectCommaSeparator()
self.expect(')')
return args;
def parseNonComputedProperty(self):
node = Node()
token = self.lex();
if (not self.isIdentifierName(token)):
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseNonComputedMember(self):
self.expect('.')
return self.parseNonComputedProperty();
def parseComputedMember(self):
self.expect('[')
expr = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
return expr
def parseNewExpression(self):
node = Node()
self.expectKeyword('new')
callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
args = self.parseArguments() if self.match('(') else []
self.isAssignmentTarget = self.isBindingElement = false
return node.finishNewExpression(callee, args)
def parseLeftHandSideExpressionAllowCall(self):
previousAllowIn = self.state['allowIn']
startToken = self.lookahead;
self.state['allowIn'] = true;
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper()
if (not self.match('(') and not self.match('.') and not self.match('[')):
self.throwUnexpectedToken(self.lookahead);
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
while True:
if (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property)
elif (self.match('(')):
self.isBindingElement = false;
self.isAssignmentTarget = false;
args = self.parseArguments();
expr = WrappingNode(startToken).finishCallExpression(expr, args)
elif (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral()
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
self.state['allowIn'] = previousAllowIn
return expr
def parseLeftHandSideExpression(self):
assert self.state['allowIn'], 'callee of new expression always allow in keyword.'
startToken = self.lookahead
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper();
if (not self.match('[') and not self.match('.')):
self.throwUnexpectedToken(self.lookahead)
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression);
while True:
if (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property);
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral();
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
return expr
# 11.3 Postfix Expressions
    def parsePostfixExpression(self):
        """Parse a left-hand-side expression optionally followed by postfix
        ++/-- (no line terminator is permitted before the operator)."""
        startToken = self.lookahead
        expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
        if (not self.hasLineTerminator and self.lookahead['type'] == Token.Punctuator):
            if (self.match('++') or self.match('--')):
                # 11.3.1, 11.3.2
                if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                    self.tolerateError(Messages.StrictLHSPostfix)
                if (not self.isAssignmentTarget):
                    self.tolerateError(Messages.InvalidLHSInAssignment);
                self.isAssignmentTarget = self.isBindingElement = false;
                token = self.lex();
                expr = WrappingNode(startToken).finishPostfixExpression(token['value'], expr);
        return expr;
# 11.4 Unary Operators
    def parseUnaryExpression(self):
        """Parse prefix ++/--, the unary operators + - ~ !, and the keyword
        operators delete/void/typeof; anything else falls through to postfix
        parsing."""
        if (self.lookahead['type'] != Token.Punctuator and self.lookahead['type'] != Token.Keyword):
            expr = self.parsePostfixExpression();
        elif (self.match('++') or self.match('--')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            # 11.4.4, 11.4.5
            if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                self.tolerateError(Messages.StrictLHSPrefix)
            if (not self.isAssignmentTarget):
                self.tolerateError(Messages.InvalidLHSInAssignment)
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false
        elif (self.match('+') or self.match('-') or self.match('~') or self.match('!')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
            self.isAssignmentTarget = self.isBindingElement = false;
        elif (self.matchKeyword('delete') or self.matchKeyword('void') or self.matchKeyword('typeof')):
            startToken = self.lookahead;
            token = self.lex();
            expr = self.inheritCoverGrammar(self.parseUnaryExpression);
            expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr);
            # 11.4.1: 'delete identifier' is an error in strict mode.
            if (self.strict and expr.operator == 'delete' and expr.argument.type == Syntax.Identifier):
                self.tolerateError(Messages.StrictDelete)
            self.isAssignmentTarget = self.isBindingElement = false;
        else:
            expr = self.parsePostfixExpression()
        return expr
def binaryPrecedence(self, token, allowIn):
prec = 0;
typ = token['type']
if (typ != Token.Punctuator and typ != Token.Keyword):
return 0;
val = token['value']
if val == 'in' and not allowIn:
return 0
return PRECEDENCE.get(val, 0)
# 11.5 Multiplicative Operators
# 11.6 Additive Operators
# 11.7 Bitwise Shift Operators
# 11.8 Relational Operators
# 11.9 Equality Operators
# 11.10 Binary Bitwise Operators
# 11.11 Binary Logical Operators
    def parseBinaryExpression(self):
        """Parse binary operator chains with an explicit operator-precedence
        (shift/reduce) stack instead of one recursive level per precedence.

        ``stack`` alternates operand and operator-token entries; ``markers``
        tracks start tokens for position-carrying WrappingNodes.
        """
        marker = self.lookahead;
        left = self.inheritCoverGrammar(self.parseUnaryExpression);
        token = self.lookahead;
        prec = self.binaryPrecedence(token, self.state['allowIn']);
        if (prec == 0):
            # Not followed by a binary operator: just the unary expression.
            return left
        self.isAssignmentTarget = self.isBindingElement = false;
        token['prec'] = prec
        self.lex()
        markers = [marker, self.lookahead];
        right = self.isolateCoverGrammar(self.parseUnaryExpression);
        stack = [left, token, right];
        while True:
            prec = self.binaryPrecedence(self.lookahead, self.state['allowIn'])
            if not prec > 0:
                break
            # Reduce: make a binary expression from the three topmost entries.
            while ((len(stack) > 2) and (prec <= stack[len(stack) - 2]['prec'])):
                right = stack.pop();
                operator = stack.pop()['value']
                left = stack.pop()
                markers.pop()
                expr = WrappingNode(markers[len(markers) - 1]).finishBinaryExpression(operator, left, right)
                stack.append(expr)
            # Shift
            token = self.lex();
            token['prec'] = prec;
            stack.append(token);
            markers.append(self.lookahead);
            expr = self.isolateCoverGrammar(self.parseUnaryExpression);
            stack.append(expr);
        # Final reduce to clean-up the stack.
        i = len(stack) - 1;
        expr = stack[i]
        markers.pop()
        while (i > 1):
            expr = WrappingNode(markers.pop()).finishBinaryExpression(stack[i - 1]['value'], stack[i - 2], expr);
            i -= 2
        return expr
# 11.12 Conditional Operator
def parseConditionalExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseBinaryExpression);
if (self.match('?')):
self.lex()
previousAllowIn = self.state['allowIn']
self.state['allowIn'] = true;
consequent = self.isolateCoverGrammar(self.parseAssignmentExpression);
self.state['allowIn'] = previousAllowIn;
self.expect(':');
alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = WrappingNode(startToken).finishConditionalExpression(expr, consequent, alternate);
self.isAssignmentTarget = self.isBindingElement = false;
return expr
# [ES6] 14.2 Arrow Function
def parseConciseBody(self):
if (self.match('{')):
return self.parseFunctionSourceElements()
return self.isolateCoverGrammar(self.parseAssignmentExpression)
def checkPatternParam(self, options, param):
typ = param.type
if typ == Syntax.Identifier:
self.validateParam(options, param, param.name);
elif typ == Syntax.RestElement:
self.checkPatternParam(options, param.argument)
elif typ == Syntax.AssignmentPattern:
self.checkPatternParam(options, param.left)
elif typ == Syntax.ArrayPattern:
for i in xrange(len(param.elements)):
if (param.elements[i] != null):
self.checkPatternParam(options, param.elements[i]);
else:
assert typ == Syntax.ObjectPattern, 'Invalid type'
for i in xrange(len(param.properties)):
self.checkPatternParam(options, param.properties[i].value);
def reinterpretAsCoverFormalsList(self, expr):
defaults = [];
defaultCount = 0;
params = [expr];
typ = expr.type
if typ == Syntax.Identifier:
pass
elif typ == PlaceHolders.ArrowParameterPlaceHolder:
params = expr.params
else:
return null
options = {
'paramSet': {}}
le = len(params)
for i in xrange(le):
param = params[i]
if param.type == Syntax.AssignmentPattern:
params[i] = param.left;
defaults.append(param.right);
defaultCount += 1
self.checkPatternParam(options, param.left);
else:
self.checkPatternParam(options, param);
params[i] = param;
defaults.append(null);
if (options.get('message') == Messages.StrictParamDupe):
token = options['stricted'] if self.strict else options['firstRestricted']
self.throwUnexpectedToken(token, options.get('message'));
if (defaultCount == 0):
defaults = []
return {
'params': params,
'defaults': defaults,
'stricted': options['stricted'],
'firstRestricted': options['firstRestricted'],
'message': options.get('message')}
def parseArrowFunctionExpression(self, options, node):
if (self.hasLineTerminator):
self.tolerateUnexpectedToken(self.lookahead)
self.expect('=>')
previousStrict = self.strict;
body = self.parseConciseBody();
if (self.strict and options['firstRestricted']):
self.throwUnexpectedToken(options['firstRestricted'], options.get('message'));
if (self.strict and options['stricted']):
self.tolerateUnexpectedToken(options['stricted'], options['message']);
self.strict = previousStrict
return node.finishArrowFunctionExpression(options['params'], options['defaults'], body, body.type != Syntax.BlockStatement)
# 11.13 Assignment Operators
    def parseAssignmentExpression(self):
        """Parse an assignment expression; also the point where arrow
        functions are detected via the cover-grammar placeholder."""
        startToken = self.lookahead;
        token = self.lookahead;
        expr = self.parseConditionalExpression();
        if (expr.type == PlaceHolders.ArrowParameterPlaceHolder or self.match('=>')):
            self.isAssignmentTarget = self.isBindingElement = false;
            lis = self.reinterpretAsCoverFormalsList(expr)
            if (lis):
                self.firstCoverInitializedNameError = null;
                return self.parseArrowFunctionExpression(lis, WrappingNode(startToken))
            return expr
        if (self.matchAssign()):
            if (not self.isAssignmentTarget):
                self.tolerateError(Messages.InvalidLHSInAssignment)
            # 11.13.1
            if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
                self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment);
            if (not self.match('=')):
                self.isAssignmentTarget = self.isBindingElement = false;
            else:
                # Plain '=' may assign to a destructuring pattern.
                self.reinterpretExpressionAsPattern(expr)
            token = self.lex();
            right = self.isolateCoverGrammar(self.parseAssignmentExpression)
            expr = WrappingNode(startToken).finishAssignmentExpression(token['value'], expr, right);
            self.firstCoverInitializedNameError = null
        return expr
# 11.14 Comma Operator
def parseExpression(self):
startToken = self.lookahead
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
if (self.match(',')):
expressions = [expr];
while (self.startIndex < self.length):
if (not self.match(',')):
break
self.lex();
expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
expr = WrappingNode(startToken).finishSequenceExpression(expressions);
return expr
# 12.1 Block
    def parseStatementListItem(self):
        """Dispatch a statement-list item: declaration keywords
        (export/import/const/let/function/class, plus the js2py extension
        'pyimport') or a plain statement."""
        if (self.lookahead['type'] == Token.Keyword):
            val = (self.lookahead['value'])
            if val=='export':
                # export/import are only legal in module source.
                if (self.sourceType != 'module'):
                    self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
                return self.parseExportDeclaration();
            elif val == 'import':
                if (self.sourceType != 'module'):
                    self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration);
                return self.parseImportDeclaration();
            elif val == 'const' or val == 'let':
                return self.parseLexicalDeclaration({'inFor': false});
            elif val == 'function':
                return self.parseFunctionDeclaration(Node());
            elif val == 'class':
                return self.parseClassDeclaration();
            elif val == 'pyimport': # <<<<< MODIFIED HERE
                return self.parsePyimportStatement()
        return self.parseStatement();
def parsePyimportStatement(self):
n = Node()
self.lex()
n.finishPyimport(self.parseVariableIdentifier())
self.consumeSemicolon()
return n
def parseStatementList(self):
list = [];
while (self.startIndex < self.length):
if (self.match('}')):
break
list.append(self.parseStatementListItem())
return list
def parseBlock(self):
node = Node();
self.expect('{');
block = self.parseStatementList()
self.expect('}');
return node.finishBlockStatement(block);
# 12.2 Variable Statement
def parseVariableIdentifier(self):
node = Node()
token = self.lex()
if (token['type'] != Token.Identifier):
if (self.strict and token['type'] == Token.Keyword and isStrictModeReservedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord);
else:
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseVariableDeclaration(self):
init = null
node = Node();
d = self.parsePattern();
# 12.2.1
if (self.strict and isRestrictedWord(d.name)):
self.tolerateError(Messages.StrictVarName);
if (self.match('=')):
self.lex();
init = self.isolateCoverGrammar(self.parseAssignmentExpression);
elif (d.type != Syntax.Identifier):
self.expect('=')
return node.finishVariableDeclarator(d, init)
def parseVariableDeclarationList(self):
lis = []
while True:
lis.append(self.parseVariableDeclaration())
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return lis;
def parseVariableStatement(self, node):
self.expectKeyword('var')
declarations = self.parseVariableDeclarationList()
self.consumeSemicolon()
return node.finishVariableDeclaration(declarations)
    def parseLexicalBinding(self, kind, options):
        """Parse a single let/const binding.

        const requires an initialiser (except in a for-in head); outside a
        for head, destructuring bindings require one as well.
        """
        init = null
        node = Node()
        d = self.parsePattern();
        # 12.2.1: eval/arguments may not be bound in strict mode.
        if (self.strict and d.type == Syntax.Identifier and isRestrictedWord(d.name)):
            self.tolerateError(Messages.StrictVarName);
        if (kind == 'const'):
            if (not self.matchKeyword('in')):
                self.expect('=')
                init = self.isolateCoverGrammar(self.parseAssignmentExpression)
        elif ((not options['inFor'] and d.type != Syntax.Identifier) or self.match('=')):
            self.expect('=');
            init = self.isolateCoverGrammar(self.parseAssignmentExpression);
        return node.finishVariableDeclarator(d, init)
def parseBindingList(self, kind, options):
list = [];
while True:
list.append(self.parseLexicalBinding(kind, options));
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return list;
def parseLexicalDeclaration(self, options):
node = Node();
kind = self.lex()['value']
assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
declarations = self.parseBindingList(kind, options);
self.consumeSemicolon();
return node.finishLexicalDeclaration(declarations, kind);
def parseRestElement(self):
node = Node();
self.lex();
if (self.match('{')):
self.throwError(Messages.ObjectPatternAsRestParameter)
param = self.parseVariableIdentifier();
if (self.match('=')):
self.throwError(Messages.DefaultRestParameter);
if (not self.match(')')):
self.throwError(Messages.ParameterAfterRestParameter);
return node.finishRestElement(param);
# 12.3 Empty Statement
def parseEmptyStatement(self, node):
self.expect(';');
return node.finishEmptyStatement()
# 12.4 Expression Statement
def parseExpressionStatement(self, node):
expr = self.parseExpression();
self.consumeSemicolon();
return node.finishExpressionStatement(expr);
# 12.5 If statement
def parseIfStatement(self, node):
self.expectKeyword('if');
self.expect('(');
test = self.parseExpression();
self.expect(')');
consequent = self.parseStatement();
if (self.matchKeyword('else')):
self.lex();
alternate = self.parseStatement();
else:
alternate = null;
return node.finishIfStatement(test, consequent, alternate)
# 12.6 Iteration Statements
def parseDoWhileStatement(self, node):
self.expectKeyword('do')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement();
self.state['inIteration'] = oldInIteration;
self.expectKeyword('while');
self.expect('(');
test = self.parseExpression();
self.expect(')')
if (self.match(';')):
self.lex()
return node.finishDoWhileStatement(body, test)
def parseWhileStatement(self, node):
self.expectKeyword('while')
self.expect('(')
test = self.parseExpression()
self.expect(')')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement()
self.state['inIteration'] = oldInIteration
return node.finishWhileStatement(test, body)
    def parseForStatement(self, node):
        """Parse both for(init;test;update) and for-in statements.

        for-in is detected mid-parse; the existence of the local name 'left'
        (tested with ``'left' not in locals()``) distinguishes the two forms
        at the end of the method.
        """
        previousAllowIn = self.state['allowIn']
        init = test = update = null
        self.expectKeyword('for')
        self.expect('(')
        if (self.match(';')):
            # Empty initialiser.
            self.lex()
        else:
            if (self.matchKeyword('var')):
                init = Node()
                self.lex()
                # 'in' must not act as an operator inside the init clause.
                self.state['allowIn'] = false;
                init = init.finishVariableDeclaration(self.parseVariableDeclarationList())
                self.state['allowIn'] = previousAllowIn
                if (len(init.declarations) == 1 and self.matchKeyword('in')):
                    # for (var x in obj)
                    self.lex()
                    left = init
                    right = self.parseExpression()
                    init = null
                else:
                    self.expect(';')
            elif (self.matchKeyword('const') or self.matchKeyword('let')):
                init = Node()
                kind = self.lex()['value']
                self.state['allowIn'] = false
                declarations = self.parseBindingList(kind, {'inFor': true})
                self.state['allowIn'] = previousAllowIn
                if (len(declarations) == 1 and declarations[0].init == null and self.matchKeyword('in')):
                    # for (let/const x in obj)
                    init = init.finishLexicalDeclaration(declarations, kind);
                    self.lex();
                    left = init;
                    right = self.parseExpression();
                    init = null;
                else:
                    self.consumeSemicolon();
                    init = init.finishLexicalDeclaration(declarations, kind);
            else:
                initStartToken = self.lookahead
                self.state['allowIn'] = false
                init = self.inheritCoverGrammar(self.parseAssignmentExpression);
                self.state['allowIn'] = previousAllowIn;
                if (self.matchKeyword('in')):
                    # for (expr in obj): the init must be an assignment target.
                    if (not self.isAssignmentTarget):
                        self.tolerateError(Messages.InvalidLHSInForIn)
                    self.lex();
                    self.reinterpretExpressionAsPattern(init);
                    left = init;
                    right = self.parseExpression();
                    init = null;
                else:
                    if (self.match(',')):
                        # Comma operator in the init clause.
                        initSeq = [init];
                        while (self.match(',')):
                            self.lex();
                            initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
                        init = WrappingNode(initStartToken).finishSequenceExpression(initSeq)
                    self.expect(';');
        if ('left' not in locals()):
            # Ordinary for(;;): parse the optional test and update clauses.
            if (not self.match(';')):
                test = self.parseExpression();
            self.expect(';');
            if (not self.match(')')):
                update = self.parseExpression();
        self.expect(')');
        oldInIteration = self.state['inIteration']
        self.state['inIteration'] = true;
        body = self.isolateCoverGrammar(self.parseStatement)
        self.state['inIteration'] = oldInIteration;
        return node.finishForStatement(init, test, update, body) if ('left' not in locals()) else node.finishForInStatement(left, right, body);
# 12.7 The continue statement
    def parseContinueStatement(self, node):
        """Parse ``continue [label];`` enforcing iteration context and that
        any label is currently defined."""
        label = null
        self.expectKeyword('continue');
        # Optimize the most common form: 'continue;'.
        if ord(self.source[self.startIndex]) == 0x3B:
            self.lex();
            if (not self.state['inIteration']):
                self.throwError(Messages.IllegalContinue)
            return node.finishContinueStatement(null)
        if (self.hasLineTerminator):
            # ASI: a newline after 'continue' terminates the statement.
            if (not self.state['inIteration']):
                self.throwError(Messages.IllegalContinue);
            return node.finishContinueStatement(null);
        if (self.lookahead['type'] == Token.Identifier):
            label = self.parseVariableIdentifier();
            # Labels are stored in labelSet prefixed with '$'.
            key = '$' + label.name;
            if not key in self.state['labelSet']: # todo make sure its correct!
                self.throwError(Messages.UnknownLabel, label.name);
        self.consumeSemicolon()
        if (label == null and not self.state['inIteration']):
            self.throwError(Messages.IllegalContinue)
        return node.finishContinueStatement(label)
# 12.8 The break statement
    def parseBreakStatement(self, node):
        """Parse ``break [label];`` enforcing loop/switch context and that
        any label is currently defined."""
        label = null
        self.expectKeyword('break');
        # Catch the very common case first: immediately a semicolon (U+003B).
        if (ord(self.source[self.lastIndex]) == 0x3B):
            self.lex();
            if (not (self.state['inIteration'] or self.state['inSwitch'])):
                self.throwError(Messages.IllegalBreak)
            return node.finishBreakStatement(null)
        if (self.hasLineTerminator):
            # ASI: a newline after 'break' terminates the statement.
            if (not (self.state['inIteration'] or self.state['inSwitch'])):
                self.throwError(Messages.IllegalBreak);
            return node.finishBreakStatement(null);
        if (self.lookahead['type'] == Token.Identifier):
            label = self.parseVariableIdentifier();
            # Labels are stored in labelSet prefixed with '$'.
            key = '$' + label.name;
            if not (key in self.state['labelSet']):
                self.throwError(Messages.UnknownLabel, label.name);
        self.consumeSemicolon();
        if (label == null and not (self.state['inIteration'] or self.state['inSwitch'])):
            self.throwError(Messages.IllegalBreak)
        return node.finishBreakStatement(label);
# 12.9 The return statement
    def parseReturnStatement(self, node):
        """Parse a 'return' statement (ECMA-262 12.9).

        A 'return' outside of a function body is reported but tolerated.
        """
        argument = null;
        self.expectKeyword('return');
        if (not self.state['inFunctionBody']):
            self.tolerateError(Messages.IllegalReturn);
        # 'return' followed by a space and an identifier is very common.
        if (ord(self.source[self.lastIndex]) == 0x20):  # 0x20 == ' '
            # Peeking one char ahead is safe: parse() pads the source tail.
            if (isIdentifierStart(self.source[self.lastIndex + 1])):
                argument = self.parseExpression();
                self.consumeSemicolon();
                return node.finishReturnStatement(argument)
        if (self.hasLineTerminator):
            # HACK
            # ASI: a newline right after 'return' yields 'return;'.
            return node.finishReturnStatement(null)
        if (not self.match(';')):
            if (not self.match('}') and self.lookahead['type'] != Token.EOF):
                argument = self.parseExpression();
        self.consumeSemicolon();
        return node.finishReturnStatement(argument);
# 12.10 The with statement
def parseWithStatement(self, node):
if (self.strict):
self.tolerateError(Messages.StrictModeWith)
self.expectKeyword('with');
self.expect('(');
obj = self.parseExpression();
self.expect(')');
body = self.parseStatement();
return node.finishWithStatement(obj, body);
# 12.10 The swith statement
def parseSwitchCase(self):
consequent = []
node = Node();
if (self.matchKeyword('default')):
self.lex();
test = null;
else:
self.expectKeyword('case');
test = self.parseExpression();
self.expect(':');
while (self.startIndex < self.length):
if (self.match('}') or self.matchKeyword('default') or self.matchKeyword('case')):
break
statement = self.parseStatementListItem()
consequent.append(statement)
return node.finishSwitchCase(test, consequent)
def parseSwitchStatement(self, node):
self.expectKeyword('switch');
self.expect('(');
discriminant = self.parseExpression();
self.expect(')');
self.expect('{');
cases = [];
if (self.match('}')):
self.lex();
return node.finishSwitchStatement(discriminant, cases);
oldInSwitch = self.state['inSwitch'];
self.state['inSwitch'] = true;
defaultFound = false;
while (self.startIndex < self.length):
if (self.match('}')):
break;
clause = self.parseSwitchCase();
if (clause.test == null):
if (defaultFound):
self.throwError(Messages.MultipleDefaultsInSwitch);
defaultFound = true;
cases.append(clause);
self.state['inSwitch'] = oldInSwitch;
self.expect('}');
return node.finishSwitchStatement(discriminant, cases);
# 12.13 The throw statement
def parseThrowStatement(self, node):
self.expectKeyword('throw');
if (self.hasLineTerminator):
self.throwError(Messages.NewlineAfterThrow);
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishThrowStatement(argument);
# 12.14 The try statement
def parseCatchClause(self):
node = Node();
self.expectKeyword('catch');
self.expect('(');
if (self.match(')')):
self.throwUnexpectedToken(self.lookahead);
param = self.parsePattern();
# 12.14.1
if (self.strict and isRestrictedWord(param.name)):
self.tolerateError(Messages.StrictCatchVariable);
self.expect(')');
body = self.parseBlock();
return node.finishCatchClause(param, body);
def parseTryStatement(self, node):
handler = null
finalizer = null;
self.expectKeyword('try');
block = self.parseBlock();
if (self.matchKeyword('catch')):
handler = self.parseCatchClause()
if (self.matchKeyword('finally')):
self.lex();
finalizer = self.parseBlock();
if (not handler and not finalizer):
self.throwError(Messages.NoCatchOrFinally)
return node.finishTryStatement(block, handler, finalizer)
# 12.15 The debugger statement
def parseDebuggerStatement(self, node):
self.expectKeyword('debugger');
self.consumeSemicolon();
return node.finishDebuggerStatement();
# 12 Statements
def parseStatement(self):
typ = self.lookahead['type']
if (typ == Token.EOF):
self.throwUnexpectedToken(self.lookahead)
if (typ == Token.Punctuator and self.lookahead['value'] == '{'):
return self.parseBlock()
self.isAssignmentTarget = self.isBindingElement = true;
node = Node();
val = self.lookahead['value']
if (typ == Token.Punctuator):
if val == ';':
return self.parseEmptyStatement(node);
elif val == '(':
return self.parseExpressionStatement(node);
elif (typ == Token.Keyword):
if val == 'break':
return self.parseBreakStatement(node);
elif val == 'continue':
return self.parseContinueStatement(node);
elif val == 'debugger':
return self.parseDebuggerStatement(node);
elif val == 'do':
return self.parseDoWhileStatement(node);
elif val == 'for':
return self.parseForStatement(node);
elif val == 'function':
return self.parseFunctionDeclaration(node);
elif val == 'if':
return self.parseIfStatement(node);
elif val == 'return':
return self.parseReturnStatement(node);
elif val == 'switch':
return self.parseSwitchStatement(node);
elif val == 'throw':
return self.parseThrowStatement(node);
elif val == 'try':
return self.parseTryStatement(node);
elif val == 'var':
return self.parseVariableStatement(node);
elif val == 'while':
return self.parseWhileStatement(node);
elif val == 'with':
return self.parseWithStatement(node);
expr = self.parseExpression();
# 12.12 Labelled Statements
if ((expr.type == Syntax.Identifier) and self.match(':')):
self.lex();
key = '$' + expr.name
if key in self.state['labelSet']:
self.throwError(Messages.Redeclaration, 'Label', expr.name);
self.state['labelSet'][key] = true
labeledBody = self.parseStatement()
del self.state['labelSet'][key]
return node.finishLabeledStatement(expr, labeledBody)
self.consumeSemicolon();
return node.finishExpressionStatement(expr)
# 13 Function Definition
    def parseFunctionSourceElements(self):
        """Parse a function body '{ ... }' and return it as a BlockStatement.

        The leading directive prologue (string-literal statements such as
        'use strict') is consumed first; iteration/switch/label state is
        saved and reset while the body is parsed, then restored.
        """
        body = []
        node = Node()
        firstRestricted = None
        self.expect('{')
        # Directive prologue: leading string-literal expression statements.
        while (self.startIndex < self.length):
            if (self.lookahead['type'] != Token.StringLiteral):
                break
            token = self.lookahead;
            statement = self.parseStatementListItem()
            body.append(statement)
            if (statement.expression.type != Syntax.Literal):
                # this is not directive
                break
            # Strip the surrounding quotes to get the directive text.
            directive = self.source[token['start']+1 : token['end']-1]
            if (directive == 'use strict'):
                self.strict = true;
                # An octal literal seen before 'use strict' becomes illegal.
                if (firstRestricted):
                    self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral);
            else:
                if (not firstRestricted and token.get('octal')):
                    firstRestricted = token;
        # Save the caller's statement-context state and start a fresh one.
        oldLabelSet = self.state['labelSet']
        oldInIteration = self.state['inIteration']
        oldInSwitch = self.state['inSwitch']
        oldInFunctionBody = self.state['inFunctionBody']
        oldParenthesisCount = self.state['parenthesizedCount']
        self.state['labelSet'] = {}
        self.state['inIteration'] = false
        self.state['inSwitch'] = false
        self.state['inFunctionBody'] = true
        self.state['parenthesizedCount'] = 0
        while (self.startIndex < self.length):
            if (self.match('}')):
                break
            body.append(self.parseStatementListItem())
        self.expect('}')
        # Restore the caller's state.
        self.state['labelSet'] = oldLabelSet;
        self.state['inIteration'] = oldInIteration;
        self.state['inSwitch'] = oldInSwitch;
        self.state['inFunctionBody'] = oldInFunctionBody;
        self.state['parenthesizedCount'] = oldParenthesisCount;
        return node.finishBlockStatement(body)
    def validateParam(self, options, param, name):
        """Record strict-mode naming violations for one formal parameter.

        In strict mode the offending parameter is stored in
        options['stricted']; in sloppy mode the first potential offender
        is remembered in options['firstRestricted'] so it can be reported
        later if the function body turns out to enable 'use strict'.
        """
        key = '$' + name
        if (self.strict):
            if (isRestrictedWord(name)):
                options['stricted'] = param;
                options['message'] = Messages.StrictParamName
            # Duplicate parameter names are also illegal in strict mode.
            if key in options['paramSet']:
                options['stricted'] = param;
                options['message'] = Messages.StrictParamDupe;
        elif (not options['firstRestricted']):
            if (isRestrictedWord(name)):
                options['firstRestricted'] = param;
                options['message'] = Messages.StrictParamName;
            elif (isStrictModeReservedWord(name)):
                options['firstRestricted'] = param;
                options['message'] = Messages.StrictReservedWord;
            elif key in options['paramSet']:
                options['firstRestricted']= param
                options['message'] = Messages.StrictParamDupe;
        options['paramSet'][key] = true
    def parseParam(self, options):
        """Parse one formal parameter into *options*.

        Returns False when the parameter list ends here (a rest element,
        or ')' reached), True when another parameter should follow.
        """
        token = self.lookahead
        de = None  # default value expression, if any
        if (token['value'] == '...'):
            # Rest parameter: must be the last one, so stop afterwards.
            param = self.parseRestElement();
            self.validateParam(options, param.argument, param.argument.name);
            options['params'].append(param);
            options['defaults'].append(null);
            return false
        param = self.parsePatternWithDefault();
        # NOTE(review): validated with the raw lookahead token, not the
        # parsed pattern -- destructured names appear to skip the check.
        self.validateParam(options, token, token['value']);
        if (param.type == Syntax.AssignmentPattern):
            # Split 'name = default' into the pattern and its default.
            de = param.right;
            param = param.left;
            options['defaultCount'] += 1
        options['params'].append(param);
        options['defaults'].append(de)
        return not self.match(')')
def parseParams(self, firstRestricted):
options = {
'params': [],
'defaultCount': 0,
'defaults': [],
'firstRestricted': firstRestricted}
self.expect('(');
if (not self.match(')')):
options['paramSet'] = {};
while (self.startIndex < self.length):
if (not self.parseParam(options)):
break
self.expect(',');
self.expect(')');
if (options['defaultCount'] == 0):
options['defaults'] = [];
return {
'params': options['params'],
'defaults': options['defaults'],
'stricted': options.get('stricted'),
'firstRestricted': options.get('firstRestricted'),
'message': options.get('message')}
    def parseFunctionDeclaration(self, node, identifierIsOptional=None):
        """Parse a function declaration (ECMA-262 clause 13).

        When *identifierIsOptional* is truthy the function name may be
        omitted.
        """
        d = null  # the function identifier node ('id' -> 'd', see header)
        params = []
        defaults = []
        message = None
        firstRestricted = None
        self.expectKeyword('function');
        if (identifierIsOptional or not self.match('(')):
            token = self.lookahead;
            d = self.parseVariableIdentifier();
            if (self.strict):
                if (isRestrictedWord(token['value'])):
                    self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
            else:
                # Remember a potentially illegal name in case the body
                # switches into strict mode via 'use strict'.
                if (isRestrictedWord(token['value'])):
                    firstRestricted = token;
                    message = Messages.StrictFunctionName;
                elif (isStrictModeReservedWord(token['value'])):
                    firstRestricted = token;
                    message = Messages.StrictReservedWord;
        tmp = self.parseParams(firstRestricted);
        params = tmp['params']
        defaults = tmp['defaults']
        stricted = tmp['stricted']
        firstRestricted = tmp['firstRestricted']
        if (tmp.get('message')):
            message = tmp['message'];
        previousStrict = self.strict;
        body = self.parseFunctionSourceElements();
        # The body may have enabled strict mode; re-check deferred errors.
        if (self.strict and firstRestricted):
            self.throwUnexpectedToken(firstRestricted, message);
        if (self.strict and stricted):
            self.tolerateUnexpectedToken(stricted, message);
        self.strict = previousStrict;
        return node.finishFunctionDeclaration(d, params, defaults, body);
def parseFunctionExpression(self):
id = null
params = []
defaults = []
node = Node();
firstRestricted = None
message = None
self.expectKeyword('function');
if (not self.match('(')):
token = self.lookahead;
id = self.parseVariableIdentifier();
if (self.strict):
if (isRestrictedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
else:
if (isRestrictedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictFunctionName;
elif (isStrictModeReservedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictReservedWord;
tmp = self.parseParams(firstRestricted);
params = tmp['params']
defaults = tmp['defaults']
stricted = tmp['stricted']
firstRestricted = tmp['firstRestricted']
if (tmp.get('message')):
message = tmp['message']
previousStrict = self.strict;
body = self.parseFunctionSourceElements();
if (self.strict and firstRestricted):
self.throwUnexpectedToken(firstRestricted, message);
if (self.strict and stricted):
self.tolerateUnexpectedToken(stricted, message);
self.strict = previousStrict;
return node.finishFunctionExpression(id, params, defaults, body);
# todo Translate parse class functions!
    def parseClassExpression(self):
        # ES6 'class' expressions are not implemented by this parser.
        raise NotImplementedError()
    def parseClassDeclaration(self):
        # ES6 'class' declarations are not implemented by this parser.
        raise NotImplementedError()
# 14 Program
    def parseScriptBody(self):
        """Parse the top-level statement list of a script (ECMA-262 14).

        Handles the script-level directive prologue ('use strict' etc.)
        before parsing the remaining statements.
        """
        body = []
        firstRestricted = None
        # Directive prologue: leading string-literal expression statements.
        while (self.startIndex < self.length):
            token = self.lookahead;
            if (token['type'] != Token.StringLiteral):
                break
            statement = self.parseStatementListItem();
            body.append(statement);
            if (statement.expression.type != Syntax.Literal):
                # this is not directive
                break
            # Strip the surrounding quotes to get the directive text.
            directive = self.source[token['start'] + 1: token['end'] - 1]
            if (directive == 'use strict'):
                self.strict = true;
                # An octal literal seen before 'use strict' becomes illegal.
                if (firstRestricted):
                    self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
            else:
                if (not firstRestricted and token.get('octal')):
                    firstRestricted = token;
        while (self.startIndex < self.length):
            statement = self.parseStatementListItem();
            # istanbul ignore if
            if (statement is None):
                break
            body.append(statement);
        return body;
def parseProgram(self):
self.peek()
node = Node()
body = self.parseScriptBody()
return node.finishProgram(body)
# DONE!!!
def parse(self, code, options={}):
if options:
raise NotImplementedError('Options not implemented! You can only use default settings.')
self.clean()
self.source = unicode(code) + ' \n ; //END' # I have to add it in order not to check for EOF every time
self.index = 0
self.lineNumber = 1 if len(self.source) > 0 else 0
self.lineStart = 0
self.startIndex = self.index
self.startLineNumber = self.lineNumber;
self.startLineStart = self.lineStart;
self.length = len(self.source)
self.lookahead = null;
self.state = {
'allowIn': true,
'labelSet': {},
'inFunctionBody': false,
'inIteration': false,
'inSwitch': false,
'lastCommentStart': -1,
'curlyStack': [],
'parenthesizedCount': None}
self.sourceType = 'script';
self.strict = false;
program = self.parseProgram();
return node_to_dict(program)
if __name__=='__main__':
    # Ad-hoc benchmark / demo: parse a file (if test_path is set) or a
    # tiny inline snippet, and report parsing throughput.
    import time
    test_path = None
    if test_path:
        # BUG FIX: the file was read in binary mode and the raw bytes were
        # handed to parse(); on Python 3, str(bytes) produces "b'...'".
        # Decode explicitly so parse() always receives text.
        with open(test_path, 'rb') as f:
            x = f.read().decode('utf-8')
    else:
        x = 'var $ = "Hello!"'
    p = PyJsParser()
    t = time.time()
    res = p.parse(x)
    # Tiny epsilon keeps the chars-per-second division from hitting zero.
    dt = time.time() - t + 0.000000001
    if test_path:
        print(len(res))
    else:
        pprint(res)
        print()
    print('Parsed everyting in', round(dt, 5), 'seconds.')
    print('Thats %d characters per second' % int(len(x)/dt))
| repotvsupertuga/tvsupertuga.repository | script.module.universalscrapers/lib/universalscrapers/modules/js2py/translators/pyjsparser.py | Python | gpl-2.0 | 105,094 | [
"VisIt"
] | 42c86e792476c9b3735d2436fcdef514f3b033d639fd07e820b958af852a0818 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
translate.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingOutputRasterLayer)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class gdaladdo(GdalAlgorithm):
    """Processing algorithm wrapping the 'gdaladdo' utility, which builds
    overview images (pyramids) for a raster layer."""

    # Parameter / output identifiers.
    INPUT = 'INPUT'
    LEVELS = 'LEVELS'
    CLEAN = 'CLEAN'
    RESAMPLING = 'RESAMPLING'
    FORMAT = 'FORMAT'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's parameters and outputs."""
        # (label, gdaladdo '-r' argument) pairs; order defines enum indices.
        self.methods = ((self.tr('Nearest neighbour'), 'nearest'),
                        (self.tr('Average'), 'average'),
                        (self.tr('Gaussian'), 'gauss'),
                        (self.tr('Cubic convolution.'), 'cubic'),
                        (self.tr('B-Spline convolution'), 'cubicspline'),
                        (self.tr('Lanczos windowed sinc'), 'lanczos'),
                        (self.tr('Average MP'), 'average_mp'),
                        (self.tr('Average in mag/phase space'), 'average_magphase'),
                        (self.tr('Mode'), 'mode'))

        # Overview storage formats; indices are consumed in getConsoleCommands.
        self.formats = (self.tr('Internal (if possible)'),
                        self.tr('External (GTiff .ovr)'),
                        self.tr('External (ERDAS Imagine .aux)'))

        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterString(self.LEVELS,
                                                       self.tr('Overview levels'),
                                                       defaultValue='2 4 8 16'))
        self.addParameter(QgsProcessingParameterBoolean(self.CLEAN,
                                                        self.tr('Remove all existing overviews'),
                                                        defaultValue=False))

        params = []
        params.append(QgsProcessingParameterEnum(self.RESAMPLING,
                                                 self.tr('Resampling method'),
                                                 options=[i[0] for i in self.methods],
                                                 allowMultiple=False,
                                                 defaultValue=0))
        params.append(QgsProcessingParameterEnum(self.FORMAT,
                                                 self.tr('Overviews format'),
                                                 options=self.formats,
                                                 allowMultiple=False,
                                                 defaultValue=0))
        # BUG FIX: these parameters were built but never registered, so the
        # RESAMPLING/FORMAT choices always silently fell back to defaults.
        for param in params:
            self.addParameter(param)

        self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT, self.tr('Pyramidized')))

    def name(self):
        """Internal algorithm id used in scripts and the toolbox."""
        return 'overviews'

    def displayName(self):
        """Human-readable name shown in the toolbox."""
        return self.tr('Build overviews (pyramids)')

    def group(self):
        return self.tr('Raster miscellaneous')

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-overview.png'))

    def getConsoleCommands(self, parameters, context, feedback):
        """Translate the parameter values into a gdaladdo command line."""
        inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        fileName = inLayer.source()

        arguments = []
        arguments.append(fileName)
        arguments.append('-r')
        arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1])

        ovrFormat = self.parameterAsEnum(parameters, self.FORMAT, context)
        if ovrFormat == 1:
            # External GTiff overviews: open the source read-only (.ovr).
            arguments.append('-ro')
        elif ovrFormat == 2:
            # External ERDAS Imagine overviews (.aux).
            arguments.extend('--config USE_RRD YES'.split(' '))

        if self.parameterAsBool(parameters, self.CLEAN, context):
            arguments.append('-clean')

        arguments.extend(self.parameterAsString(parameters, self.LEVELS, context).split(' '))

        self.setOutputValue(self.OUTPUT, fileName)
        return ['gdaladdo', GdalUtils.escapeAndJoin(arguments)]
| nirvn/QGIS | python/plugins/processing/algs/gdal/gdaladdo.py | Python | gpl-2.0 | 5,341 | [
"Gaussian"
] | 1f5c129ab8787ff2aa86832b6bb8e3f4d30330846d6575ff51abc5d7b984d607 |
"""
Unit tests for the sumatra.programs module
"""
from __future__ import unicode_literals
from builtins import str
from builtins import object
try:
import unittest2 as unittest
except ImportError:
import unittest
import distutils.spawn
import sys
import os
try:
from subprocess import check_output
except ImportError:
check_output = False
from sumatra.programs import Executable, version_in_command_line_output, get_executable, \
PythonExecutable, NESTSimulator, RExecutable
class TestVersionRegExp(unittest.TestCase):
    """Checks version_in_command_line_output() against real-world banners."""

    def test_common_cases(self):
        # (command-line output, expected extracted version) pairs.
        cases = (
            ("NEURON -- Release 7.1 (359:7f113b76a94b) 2009-10-26", "7.1"),
            ("NEST version 1.9.8498, built on Mar 2 2010 09:40:15 for x86_64-unknown-linux-gnu\nCopyright (C) 1995-2008 The NEST Initiative", "1.9.8498"),
            ("Python 2.6.2", "2.6.2"),
            ("abcdefg", "unknown"),
            ("usage: ls [-ABCFGHLPRSTWabcdefghiklmnopqrstuwx1] [file ...]", "unknown"),
            ("4.2rc3", "4.2rc3"),
            ("R scripting front-end version 3.1.2 (2014-10-31)", "3.1.2"),
            ("First version that reads numbers from 0..1", "unknown"),
            ("Mature Tool 12.10", "12.10"),
            ("Beta Tool 0.9.0.dev", "0.9.0.dev"),
            ("Another Tool 0.8.4.Clumsy message.", "0.8.4"),
        )
        for banner, expected in cases:
            self.assertEqual(version_in_command_line_output(banner), expected)
class TestExecutable(unittest.TestCase):
    """Tests for the generic Executable wrapper."""

    def test__init__with_a_full_path_should_just_set_it(self):
        prog = Executable("/bin/ls")
        self.assertEqual(prog.path, "/bin/ls")

    @unittest.skipUnless(check_output, "test requires Python 2.7")
    def test__init__with_only_prog_name__should_try_to_find_full_path(self):
        prog = Executable("ls")
        actual_path = check_output("which ls", shell=True).decode('utf-8').strip()
        self.assertEqual(prog.path, actual_path)

    def test__init__should_find_version_if_possible(self):
        #prog = Executable("/bin/ls")
        #self.assertEqual(prog.version, None) # this is true on Darwin, but not on Ubuntu
        prog = Executable(sys.executable)
        python_version = "%d.%d.%d" % tuple(sys.version_info[:3])
        self.assertEqual(prog.version, python_version)

    def test__str(self):
        # str() must not raise; assert the result is an actual string
        # (previously the return value was silently discarded).
        prog = Executable(sys.executable)
        self.assertIsInstance(str(prog), str)

    def test__eq(self):
        prog1 = Executable(sys.executable)
        prog2 = Executable(sys.executable)
        prog3 = Executable("/bin/ls")
        self.assertEqual(prog1, prog2)
        # Use assertNotEqual for a proper failure message (was a bare assert).
        self.assertNotEqual(prog1, prog3)
class TestRExecutable(unittest.TestCase):
    # Placeholder: no R-specific behaviour is covered yet.
    pass
class TestNEURONSimulator(unittest.TestCase):
    # Placeholder: no NEURON-specific behaviour is covered yet.
    pass
class TestNESTSimulator(unittest.TestCase):
    # Placeholder: no NEST-specific behaviour is covered yet.
    pass
class MockParameterSet(object):
    """Minimal ParameterSet stand-in that only records whether save()
    was called."""

    saved = False  # class default; flipped per-instance by save()

    def save(self, filename, add_extension=False):
        # Only the fact of the call matters to the tests; arguments are
        # accepted for signature compatibility and ignored.
        self.saved = True
class TestPythonExecutable(unittest.TestCase):
    """Tests for the PythonExecutable wrapper."""

    def test__write_parameters__should_call_save_on_the_parameter_set(self):
        prog = PythonExecutable(None)
        params = MockParameterSet()
        prog.write_parameters(params, "test_parameters")
        # assertTrue replaces the long-deprecated assert_ alias.
        self.assertTrue(params.saved)
class TestModuleFunctions(unittest.TestCase):
    """Tests for get_executable(); several cases depend on which
    interpreters happen to be installed on the host system."""
    def test__get_executable__with_path_of_registered_executable(self):
        prog = get_executable("/usr/bin/python")
        assert isinstance(prog, PythonExecutable)
        # Optional checks, only exercised when NEST / Rscript are installed.
        if os.path.exists("/usr/local/bin/nest"):
            prog = get_executable("/usr/local/bin/nest")
            assert isinstance(prog, NESTSimulator)
        rscript_path = distutils.spawn.find_executable('Rscript')
        if rscript_path is not None:
            prog = get_executable(rscript_path)
            assert isinstance(prog, RExecutable)
    def test__get_executable__with_path_of_unregistered_executable(self):
        # Unknown programs fall back to the generic Executable wrapper.
        prog = get_executable("/bin/cat")
        assert isinstance(prog, Executable)
        self.assertEqual(prog.name, "cat")
    def test__get_executable__with_script_file(self):
        # The executable type is inferred from the script extension.
        prog = get_executable(script_file="test.py")
        assert isinstance(prog, PythonExecutable)
        prog_r = get_executable(script_file="test.r")
        assert isinstance(prog_r, RExecutable)
        prog_R = get_executable(script_file="test.R")
        assert isinstance(prog_R, RExecutable)
    def test__get_executable__with_nonregistered_extension__should_raise_Exception(self):
        self.assertRaises(Exception, get_executable, script_file="test.foo")
    def test__get_executable__requires_at_least_one_arg(self):
        self.assertRaises(Exception, get_executable)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| dpad/sumatra | test/unittests/test_programs.py | Python | bsd-2-clause | 4,677 | [
"NEURON"
] | 4042a30ad3b86806479630ad10169dca1e107029e55683fa576645ff7fc5a159 |
import unittest
from funcparserlib.parser import finished, skip
from tango.parser import parse
from tango.scope_binder import ScopeBinder, SymbolsExtractor
from tango.utils import find
class TestScopeBinder(unittest.TestCase):
    """Checks that the ScopeBinder annotates parsed modules with the
    expected 'scope' and 'symbols' metadata."""
    @classmethod
    def setUpClass(cls):
        # One binder/extractor pair is shared by all tests in the class.
        cls.scope_binder = ScopeBinder()
        cls.symbols_extractor = SymbolsExtractor()
    def prepare(self, source):
        """Parse *source*, run symbol extraction and scope binding on it,
        and return the annotated module."""
        module = parse(source)
        module.name = 'main'
        self.__class__.symbols_extractor.visit(module)
        self.__class__.scope_binder.visit(module)
        return module
    def test_module_decl(self):
        # An empty module still gets scope/symbols annotations.
        module = self.prepare('')
        self.assertIn('scope', module.body.__info__)
        self.assertIn('symbols', module.body.__info__)
        module = self.prepare('cst x')
        self.assertEqual(module.body.__info__['symbols'], {'x'})
        # Properties, functions and structs all contribute symbols.
        module = self.prepare(
        '''
        cst x
        fun f() { }
        struct S { }
        '''
        )
        self.assertEqual(module.body.__info__['symbols'], {'x', 'f', 'S'})
    def test_property_decl(self):
        # A property declaration is bound to the module's scope; so are
        # its type annotation and initializer when present.
        module = self.prepare('cst x')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])
        module = self.prepare('cst x: Int')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])
        self.assertIn('scope', decl.type_annotation.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.type_annotation.__info__['scope'])
        module = self.prepare('cst x = Int')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])
        self.assertIn('scope', decl.initializer.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.initializer.__info__['scope'])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| kyouko-taiga/tango | tests/test_scope_binder.py | Python | apache-2.0 | 2,119 | [
"VisIt"
] | 520095a06c477ac67900f9e629bbcf70a15e1613b9b8ce4bf2de42236a6dbc28 |
# -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(7654321)
def beautify_layer(l, fig=None, xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Decorate the current axes of *fig* for a layer plot.

    Assume either x and ylims/ticks given or none; when none are given
    they are derived from the layer's grid geometry.

    BUG FIX: the default for *fig* used to be ``plt.gcf()``, which is
    evaluated once at import time and therefore always referred to the
    figure current when the module was loaded; it is now resolved at
    call time.
    """
    if fig is None:
        fig = plt.gcf()
    top = nest.GetStatus(l)[0]['topology']
    ctr = top['center']
    ext = top['extent']

    if xticks is None:
        if 'rows' in top:
            # Grid layer: place one tick per row/column centre.
            dx = float(ext[0]) / top['columns']
            dy = float(ext[1]) / top['rows']
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                top['columns'])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                top['rows'])

    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
            0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]

    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
                xticks=range(-5, 6), yticks=range(-5, 6),
                xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """Plot *layer* together with the connection targets of source nodes.

    :param connd: connection dict; its 'mask'/'kernel' entries are drawn
        when showmask/showkern are set.
    :param targets: iterable of (source element, colour) pairs; defaults
        to the layer's centre element in red.
    NOTE(review): the list defaults (xlim/ylim) are shared between calls;
    harmless here since they are never mutated.
    """
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=60)
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=20,
                       kernel_color='green')
    beautify_layer(layer, fig,
                   xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
                   xlabel='', ylabel='')
    fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
# Figure 1: a rectangular mask, with and without periodic boundaries.
# NOTE: the '#{ ... #}' markers appear to delimit snippets extracted into
# the topology manual -- code between them is left untouched.
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                    'elements': 'iaf_psc_alpha'})
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
            targets=((tp.FindCenterElement(l), 'red'),
                     (tp.FindNearestElement(l, [4., 5.]), 'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                       'elements': 'iaf_psc_alpha', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
            targets=((tp.FindCenterElement(lpbc), 'red'),
                     (tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
    """Draw a connectivity demo for *cdict* on subplot *loc* of *fig*,
    using a fresh 11x11 iaf_psc_alpha layer each time."""
    nest.ResetKernel()
    layer = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                            'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(layer, layer, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, layer, cdict,
                xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
# Figure 2: free masks (rectangular / circular / doughnut), each with and
# without an anchor offset; code inside the '#{ ... #}' markers is kept
# unchanged because it appears to be extracted into the manual.
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]},
                     'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 2.0},
                     'anchor': [-2.0, 0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
            'mask': {'doughnut': {'inner_radius': 1.5,
                                  'outer_radius': 3.},
                     'anchor': [1.5, 1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
                   showkern=False,
                   xticks=range(-5, 6), yticks=range(-5, 6),
                   xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """3D counterpart of conn_figure(): plot *layer* and the connection
    targets of the given source nodes on a 3D subplot.

    :param targets: iterable of (source element, colour) pairs; defaults
        to the layer's centre element in red.
    """
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=60,
                       kernel_color='green')
    ax = fig.gca()
    ax.set_aspect('equal', 'box')
    plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
    """Draw a 3D connectivity demo for *cdict* on subplot *loc* of *fig*,
    using a fresh 11x11x11 iaf_psc_alpha layer each time."""
    nest.ResetKernel()
    layer = tp.CreateLayer(
        {'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
         'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(layer, layer, cdict)
    fig.add_subplot(loc, projection='3d')
    conn_figure_3d(fig, layer, cdict,
                   xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
# Figure: 3D masks (box and sphere); snippet markers kept unchanged.
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
            'mask': {'box': {'lower_left': [-2., -1., -1.],
                             'upper_right': [2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
            'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
    """Draw a grid-mask connectivity demo for *cdict* on subplot *loc*.

    Grid masks are not drawable, so the mask overlay is suppressed.
    """
    nest.ResetKernel()
    layer = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                            'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(layer, layer, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, layer, cdict,
                xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showmask=False)
# Figure 3: grid masks with different anchors; snippet markers unchanged.
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
            'mask': {'grid': {'rows': 3, 'columns': 5},
                     'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
    """Draw a kernel/mask connectivity demo for *cdict* on subplot *loc*
    of *fig*; set showkern=False to omit the kernel overlay."""
    nest.ResetKernel()
    layer = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                            'elements': 'iaf_psc_alpha'})
    tp.ConnectLayers(layer, layer, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, layer, cdict,
                xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showkern=showkern)
# Figure 4: probabilistic kernels (constant, gaussian, anchored, cutoff,
# 2D gaussian); snippet markers kept unchanged.
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0,
                                    'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.},
                     'anchor': [1.5, 1.5]},
            'kernel': {'gaussian': {'p_center': 1.0,
                                    'sigma': 1.,
                                    'anchor': [1.5, 1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian': {'p_center': 1.0,
                                    'sigma': 1.,
                                    'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
            'mask': {'circular': {'radius': 4.}},
            'kernel': {'gaussian2D': {'p_center': 1.0,
                                      'sigma_x': 1.,
                                      'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
           xlim=None, ylim=None, xticks=None,
           yticks=None, clr='blue',
           label=''):
    """Plot weight or delay vs. target x-position for one driver node.

    A fresh layer built from ``ldict`` is connected to itself with ``cdict``;
    the connection attribute ``what`` ('weight' or 'delay') of the node at
    ``rpos`` (or the first node when ``rpos`` is None) is then plotted into
    subplot ``loc`` of ``fig``.
    """
    # None sentinels replace the former mutable list/ndarray defaults
    # (which were shared across calls); effective values are unchanged.
    if xlim is None:
        xlim = [-1, 51]
    if ylim is None:
        ylim = [0, 1]
    if xticks is None:
        xticks = range(0, 51, 5)
    if yticks is None:
        yticks = np.arange(0., 1.1, 0.2)
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    if rpos is None:
        rn = nest.GetLeaves(l)[0][:1]  # first node
    else:
        rn = tp.FindNearestElement(l, rpos)
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    vals = np.array([sd[what] for sd in cstat])
    tgts = [sd['target'] for sd in cstat]
    locs = np.array(tp.GetPosition(tgts))
    ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
fig = plt.figure()
# Linear distance-dependent weights and delays on a 1x51 line of neurons.
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
         'extent': [51., 1.], 'center': [25., 0.],
         'elements': 'iaf_psc_alpha'}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0,
                                'a': -0.05,
                                'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
# Same layer but with periodic boundary conditions (edge_wrap).
lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
          'elements': 'iaf_psc_alpha', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0,
                                'a': -0.05,
                                'cutoff': 0.0}},
         'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend(loc=1)
# Compare weight profiles (linear / exponential / gaussian / uniform)
# for the center node at [25., 0.] in the bottom subplot.
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
       rpos=[25., 0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
       rpos=[25., 0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',
       rpos=[25., 0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
         'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
                                  'upper_right': [25.5, 0.5]}},
         'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',
       rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
           xlim=None, ylim=None, xticks=None,
           yticks=None, clr='blue',
           label=''):
    """Histogram source-target distances against the analytic probability.

    Builds a layer from ``ldict``, connects it to itself with ``cdict`` and
    plots a normalized distance histogram of the resulting connections plus
    the theoretical pconn(d) curve into subplot ``loc`` of ``fig``.
    """
    # None sentinels replace the former mutable list/ndarray defaults
    # (shared across calls at def time); effective values are unchanged.
    # Note: xticks/yticks are currently unused (see commented code below).
    if xlim is None:
        xlim = [0., .5]
    if ylim is None:
        ylim = [0, 3.5]
    if xticks is None:
        xticks = range(0, 51, 5)
    if yticks is None:
        yticks = np.arange(0., 1.1, 0.2)
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    rn = nest.GetLeaves(l)[0]
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    srcs = [sd['source'] for sd in cstat]
    tgts = [sd['target'] for sd in cstat]
    dist = np.array(tp.Distance(srcs, tgts))
    # NOTE(review): 'normed' was removed in matplotlib >= 3.1 in favor of
    # 'density'; kept here for compatibility with the matplotlib version
    # this script targets -- confirm before upgrading.
    ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
    # Analytic curve for the linear kernel with periodic boundaries.
    r = np.arange(0., 0.51, 0.01)
    plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
             zorder=-10)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    """ax.set_xticks(xticks)
    ax.set_yticks(yticks)"""
    # ax.set_aspect(100, 'box')
    ax.set_xlabel('Source-target distance d')
    ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
# Random layer with a linear distance-dependent kernel and a fixed
# fan-out of 50 connections per node.
#{ conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
       for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
         'elements': 'iaf_psc_alpha', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
         'mask': {'circular': {'radius': 1.0}},
         'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
         'number_of_connections': 50,
         'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
# Composite layer: restrict sources/targets by model (pyr -> in, in -> pyr).
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
# Same network, now with explicit excitatory/inhibitory synapse models.
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'},
             'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'},
             'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
# Stimulation: a single poisson_generator drives a small anchored region.
#{ conn9 #}
nrn_layer = tp.CreateLayer({'rows': 20,
                            'columns': 20,
                            'elements': 'iaf_psc_alpha'})
stim = tp.CreateLayer({'rows': 1,
                       'columns': 1,
                       'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
              'mask': {'circular': {'radius': 0.1},
                       'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrn_layer, cdict_stim)
#{ end #}
# ----------------------------
# Recording: a spike_detector layer collects from an anchored region
# (note the convergent connection direction: neurons -> recorder).
#{ conn10 #}
rec = tp.CreateLayer({'rows': 1,
                      'columns': 1,
                      'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
             'mask': {'circular': {'radius': 0.1},
                      'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrn_layer, rec, cdict_rec)
#{ end #}
# ----------------------------
# Plain-NEST alternative: connect all local layer neurons to one recorder.
#{ conn11 #}
rec = nest.Create('spike_detector')
nrns = nest.GetLeaves(nrn_layer, local_only=True)[0]
nest.Connect(nrns, rec)
#{ end #}
| tobikausk/nest-simulator | topology/doc/user_manual_scripts/connections.py | Python | gpl-2.0 | 18,862 | [
"Gaussian"
] | 3e1c055da002c045ed2093f972ca27e83c7fb8fe2623686578369a6deccec65a |
import string
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from . import _hmmc
from .utils import normalize
# Decoder algorithms accepted by decode()/predict().
decoder_algorithms = frozenset(("viterbi", "map"))
ZEROLOGPROB = -1e200  # log-probability floor marking "effectively impossible"
EPS = np.finfo(float).eps  # machine epsilon for float64
NEGINF = -np.inf  # marks zero-probability transitions in log space
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    See the instance documentation for details specific to a
    particular object.

    Attributes
    ----------
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.
    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.
    algorithm : string, one of the decoder_algorithms
        Decoder algorithm.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, and other characters for subclass-specific
        emmission parameters. Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, and other characters for
        subclass-specific emmission parameters. Defaults to all
        parameters.

    See Also
    --------
    GMM : Gaussian mixture model
    """
    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms. Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        # TODO: move all validation from descriptors to 'fit' and 'predict'.
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        # Assigning through the trailing-underscore properties triggers
        # validation/normalization (see _set_startprob/_set_transmat below);
        # None selects a uniform distribution.
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self.algorithm = algorithm
        self.random_state = random_state

    def eval(self, X):
        # Backwards-compatible alias for score_samples.
        return self.score_samples(X)

    def score_samples(self, obs):
        """Compute the log probability under the model and compute posteriors.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        logprob : float
            Log likelihood of the sequence ``obs``.
        posteriors : array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation

        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        # Log-space product of forward and backward messages.
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float64).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors

    def score(self, obs):
        """Compute the log probability under the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Log likelihood of the ``obs``.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        # Only the forward pass is needed for the likelihood.
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob

    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM.
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence

    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the maximum a posteriori estimation.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        _, posteriors = self.score_samples(obs)
        # Per-frame argmax of the posteriors (no path consistency enforced).
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence

    def decode(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the selected algorithm for decoding.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.
        algorithm : string, one of the `decoder_algorithms`
            decoder algorithm to be used

        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        # NOTE: the estimator-level self.algorithm takes precedence over the
        # ``algorithm`` argument whenever it is one of decoder_algorithms.
        if self.algorithm in decoder_algorithms:
            algorithm = self.algorithm
        elif algorithm in decoder_algorithms:
            algorithm = algorithm
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence

    def predict(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence

    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the model.
        """
        _, posteriors = self.score_samples(obs)
        return posteriors

    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.
        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used

        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples
        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)
        # Initial state: sample by inverting the startprob CDF.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]
        for _ in range(n - 1):
            # Next state: invert the CDF of the current state's transition row.
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))
        return np.array(obs), np.array(hidden_states, dtype=int)

    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small). You can fix this by getting more training data,
        or strengthening the appropriate subclass-specific regularization
        parameter.
        """
        self._init(obs, self.init_params)
        logprob = []
        for i in range(self.n_iter):
            # Expectation step: accumulate sufficient statistics over all
            # observation sequences.
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)

            # Check for convergence (before the M-step, so the final
            # parameter update is skipped once converged).
            if i > 0 and logprob[-1] - logprob[-2] < self.thresh:
                break

            # Maximization step
            self._do_mstep(stats, self.params)

        return self

    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm

    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm

    algorithm = property(_get_algorithm, _set_algorithm)

    def _get_startprob(self):
        """Mixing startprob for each state."""
        return np.exp(self._log_startprob)

    def _set_startprob(self, startprob):
        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            # NOTE(review): np.float was removed in NumPy >= 1.24; this line
            # requires an older NumPy -- confirm before upgrading.
            startprob = np.asarray(startprob, dtype=np.float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(startprob):
            normalize(startprob)

        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        # Stored in log space; see _get_startprob.
        self._log_startprob = np.log(np.asarray(startprob).copy())

    startprob_ = property(_get_startprob, _set_startprob)

    def _get_transmat(self):
        """Matrix of transition probabilities."""
        return np.exp(self._log_transmat)

    def _set_transmat(self, transmat):
        if transmat is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(transmat):
            normalize(transmat, axis=1)

        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')

        self._log_transmat = np.log(np.asarray(transmat).copy())
        # log(0) produces nan with a warning; replace by -inf explicitly.
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF

    transmat_ = property(_get_transmat, _set_transmat)

    def _do_viterbi_pass(self, framelogprob):
        # Delegates to the Cython implementation in _hmmc.
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence

    def _do_forward_pass(self, framelogprob):
        # Fills fwdlattice in place via the Cython kernel; the sequence
        # log-likelihood is the logsumexp of the last row.
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        return logsumexp(fwdlattice[-1]), fwdlattice

    def _do_backward_pass(self, framelogprob):
        # Fills bwdlattice in place via the Cython kernel.
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        return bwdlattice

    def _compute_log_likelihood(self, obs):
        # Subclass hook: per-frame emission log-likelihoods.
        pass

    def _generate_sample_from_state(self, state, random_state=None):
        # Subclass hook: draw one observation from the given state.
        pass

    def _init(self, obs, params):
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)

    # Methods used by self.fit()

    def _initialize_sufficient_statistics(self):
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations <= 1:
                return
            # lneta[t, i, j] = log P(q_t = i, q_{t+1} = j | obs).
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            stats['trans'] += np.exp(logsumexp(lneta, axis=0))

    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0

        if 's' in params:
            # MAP estimate with Dirichlet prior; 1e-20 floor avoids log(0).
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
| emmaggie/hmmlearn | hmmlearn/base.py | Python | bsd-3-clause | 19,348 | [
"Gaussian"
] | a59631285e07017182aed8affe2a275a5152205a96ec33c85432de55221057d4 |
# =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2019 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http:#projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Asher Elmquist
# =============================================================================
#
# Chrono demonstration of a camera sensor.
# Generates a mesh object and rotates camera sensor around the mesh.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.sensor as sens
import math
import time
def main():
    """Orbit a camera sensor around a static HMMWV chassis mesh.

    Builds a Chrono system, attaches a camera through the sensor manager,
    runs a filter graph (noise -> visualize -> RGBA8 access -> save ->
    grayscale -> resize -> R8 access) and steps the simulation until
    ``end_time``.

    NOTE: relies on the module-level configuration globals (update_rate,
    image_width, noise_model, ...) defined below this function; they are
    assigned before main() is invoked at the bottom of the file.
    """
    # -----------------
    # Create the system
    # -----------------
    mphysicalSystem = chrono.ChSystemNSC()

    # -----------------------------------
    # add a mesh to be sensed by a camera
    # -----------------------------------
    mmesh = chrono.ChTriangleMeshConnected()
    mmesh.LoadWavefrontMesh(chrono.GetChronoDataFile(
        "vehicle/hmmwv/hmmwv_chassis.obj"), False, True)
    # scale to a different size
    mmesh.Transform(chrono.ChVectorD(0, 0, 0), chrono.ChMatrix33D(2))

    trimesh_shape = chrono.ChTriangleMeshShape()
    trimesh_shape.SetMesh(mmesh)
    trimesh_shape.SetName("HMMWV Chassis Mesh")
    trimesh_shape.SetStatic(True)

    mesh_body = chrono.ChBody()
    mesh_body.SetPos(chrono.ChVectorD(0, 0, 0))
    mesh_body.AddAsset(trimesh_shape)
    mesh_body.SetBodyFixed(True)
    mphysicalSystem.Add(mesh_body)

    # -----------------------
    # Create a sensor manager
    # -----------------------
    manager = sens.ChSensorManager(mphysicalSystem)
    # Four point lights along the scene so the mesh is lit from any angle.
    intensity = 1.0
    manager.scene.AddPointLight(chrono.ChVectorF(
        2, 2.5, 100), chrono.ChVectorF(intensity, intensity, intensity), 500.0)
    manager.scene.AddPointLight(chrono.ChVectorF(
        9, 2.5, 100), chrono.ChVectorF(intensity, intensity, intensity), 500.0)
    manager.scene.AddPointLight(chrono.ChVectorF(
        16, 2.5, 100), chrono.ChVectorF(intensity, intensity, intensity), 500.0)
    manager.scene.AddPointLight(chrono.ChVectorF(
        23, 2.5, 100), chrono.ChVectorF(intensity, intensity, intensity), 500.0)

    # ------------------------------------------------
    # Create a camera and add it to the sensor manager
    # ------------------------------------------------
    offset_pose = chrono.ChFrameD(
        chrono.ChVectorD(-5, 0, 2), chrono.Q_from_AngAxis(2, chrono.ChVectorD(0, 1, 0)))
    cam = sens.ChCameraSensor(
        mesh_body,              # body camera is attached to
        update_rate,            # update rate in Hz
        offset_pose,            # offset pose
        image_width,            # image width
        image_height,           # image height
        fov                     # camera's horizontal field of view
    )
    cam.SetName("Camera Sensor")
    cam.SetLag(lag)
    cam.SetCollectionWindow(exposure_time)

    # ------------------------------------------------------------------
    # Create a filter graph for post-processing the data from the camera
    # ------------------------------------------------------------------
    if noise_model == "CONST_NORMAL":
        cam.PushFilter(sens.ChFilterCameraNoiseConstNormal(0.0, 0.02))
    elif noise_model == "PIXEL_DEPENDENT":
        cam.PushFilter(sens.ChFilterCameraNoisePixDep(0, 0.02, 0.03))
    elif noise_model == "NONE":
        # Don't add any noise models
        pass

    # Renders the image at current point in the filter graph
    if vis:
        cam.PushFilter(sens.ChFilterVisualize(
            image_width, image_height, "Before Grayscale Filter"))

    # Provides the host access to this RGBA8 buffer
    cam.PushFilter(sens.ChFilterRGBA8Access())

    # Save the current image to a png file at the specified path
    if save:
        cam.PushFilter(sens.ChFilterSave(out_dir + "rgb/"))

    # Filter the sensor to grayscale
    cam.PushFilter(sens.ChFilterGrayscale())

    # Render the buffer again to see the new grayscaled image
    if vis:
        cam.PushFilter(sens.ChFilterVisualize(
            int(image_width / 2), int(image_height / 2), "Grayscale Image"))

    # Save the grayscaled image at the specified path
    if save:
        cam.PushFilter(sens.ChFilterSave(out_dir + "gray/"))

    # Resizes the image to the provided width and height
    cam.PushFilter(sens.ChFilterImageResize(
        int(image_width / 2), int(image_height / 2)))

    # Access the grayscaled buffer as R8 pixels
    cam.PushFilter(sens.ChFilterR8Access())

    # add sensor to manager
    manager.AddSensor(cam)

    # ---------------
    # Simulate system
    # ---------------
    orbit_radius = 10
    orbit_rate = 0.5
    ch_time = 0.0

    t1 = time.time()
    while (ch_time < end_time):
        # Circular orbit around the mesh, always facing the origin.
        cam.SetOffsetPose(chrono.ChFrameD(
            chrono.ChVectorD(-orbit_radius * math.cos(ch_time * orbit_rate), -
                             orbit_radius * math.sin(ch_time * orbit_rate), 1),
            chrono.Q_from_AngAxis(ch_time * orbit_rate, chrono.ChVectorD(0, 0, 1))))

        # Access the RGBA8 buffer from the camera
        rgba8_buffer = cam.GetMostRecentRGBA8Buffer()
        if (rgba8_buffer.HasData()):
            rgba8_data = rgba8_buffer.GetRGBA8Data()
            print('RGBA8 buffer recieved from cam. Camera resolution: {0}x{1}'
                  .format(rgba8_buffer.Width, rgba8_buffer.Height))
            print('First Pixel: {0}'.format(rgba8_data[0, 0, :]))

        # Update sensor manager
        # Will render/save/filter automatically
        manager.Update()

        # Perform step of dynamics
        mphysicalSystem.DoStepDynamics(step_size)

        # Get the current time of the simulation
        ch_time = mphysicalSystem.GetChTime()

    print("Sim time:", end_time, "Wall time:", time.time() - t1)
# -----------------
# Camera parameters
# -----------------
# These module-level globals are read by main() above.

# Noise model attached to the sensor
# TODO: Noise models haven't been implemented in python
# noise_model="CONST_NORMAL"       # Gaussian noise with constant mean and standard deviation
# noise_model="PIXEL_DEPENDENT"    # Pixel dependent gaussian noise
# noise_model="RESPONSE_FUNCTION"  # Noise model based on camera's response and parameters
noise_model = "NONE"  # No noise model

# Camera lens model
# Either CameraLensModelType_PINHOLE or CameraLensModelType_FOV_LENS
lens_model = sens.CameraLensModelType_PINHOLE

# Update rate in Hz
update_rate = 30

# Image width and height
image_width = 1280
image_height = 720

# Camera's horizontal field of view (radians)
fov = 1.408

# Lag (in seconds) between sensing and when data becomes accessible
lag = 0

# Exposure (in seconds) of each image
exposure_time = 0

# ---------------------
# Simulation parameters
# ---------------------

# Simulation step size
step_size = 1e-3

# Simulation end time
end_time = 20.0

# Save camera images
save = False

# Render camera images
vis = True

# Output directory
out_dir = "SENSOR_OUTPUT/"

# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')

main()
| projectchrono/chrono | src/demos/python/sensor/demo_SEN_camera.py | Python | bsd-3-clause | 7,508 | [
"Gaussian"
] | e499ea562ac786555689da97613d6b7ea3acf71c888e5a78873a962f8c840405 |
# A simple CLI runner for slurm that can be used when running Galaxy from a
# non-submit host and using a Slurm cluster.
from logging import getLogger
try:
from galaxy.model import Job
job_states = Job.states
except ImportError:
# Not in Galaxy, map Galaxy job states to Pulsar ones.
from pulsar.util import enum
job_states = enum(RUNNING='running', OK='complete', QUEUED='queued', ERROR="failed")
from ..job import BaseJobExec
log = getLogger(__name__)
# Map generic CLI-runner parameter names to the corresponding bsub(1) flags.
argmap = {
    'memory': '-M',  # There is code in job_script_kwargs relying on this name's setting
    'cores': '-n',
    'queue': '-q',
    'working_dir': '-cwd',
    'project': '-P'
}
class LSF(BaseJobExec):
    """CLI job plugin that builds and parses LSF commands (bsub/bjobs/bkill).

    Each ``submit``/``delete``/``get_*`` method returns a shell command
    string to be executed on the submit host; the ``parse_*`` methods
    interpret the output of those commands and map LSF job states onto
    Galaxy/Pulsar job states.
    """

    def __init__(self, **params):
        # Keep a plain copy of the destination parameters.
        self.params = {}
        for k, v in params.items():
            self.params[k] = v

    def job_script_kwargs(self, ofile, efile, job_name):
        """Return the ``#BSUB`` header block for the job script template.

        ofile/efile are the stdout/stderr paths; job_name becomes -J.
        """
        scriptargs = {'-o': ofile,
                      '-e': efile,
                      '-J': job_name}

        # Map long-form destination parameters onto bsub flags via argmap.
        for k, v in self.params.items():
            if k == 'plugin':
                continue
            try:
                if k == 'memory':
                    # Memory requires both -M and -R rusage[mem=v] request
                    scriptargs['-R'] = "\"rusage[mem=%s]\"" % v
                if not k.startswith('-'):
                    k = argmap[k]
                scriptargs[k] = v
            except Exception:
                log.warning('Unrecognized long argument passed to LSF CLI plugin: %s' % k)

        # Render one "#BSUB <flag> <value>" line per argument.
        template_scriptargs = ''
        for k, v in scriptargs.items():
            template_scriptargs += '#BSUB {} {}\n'.format(k, v)
        return dict(headers=template_scriptargs)

    def submit(self, script_file):
        # bsub returns Job <9147983> is submitted to default queue <research-rh7>.
        # This should be really handled outside with something like
        # parse_external. Currently CLI runner expect this to just send it in the last position
        # of the string.
        return "bsub <%s | awk '{ print $2}' | sed 's/[<>]//g'" % script_file

    def delete(self, job_id):
        """Return the command that kills the given LSF job."""
        return 'bkill %s' % job_id

    def get_status(self, job_ids=None):
        """Return the command listing '<id> <state>' for all jobs."""
        return "bjobs -a -o \"id stat\" -noheader"  # check this

    def get_single_status(self, job_id):
        """Return the command printing only the state of one job."""
        return "bjobs -o stat -noheader " + job_id

    def parse_status(self, status, job_ids):
        """Map bjobs bulk output to {job_id: galaxy_state} for known ids."""
        rval = {}
        for line in status.splitlines():
            job_id, state = line.split()
            if job_id in job_ids:
                # map job states to Galaxy job states.
                rval[job_id] = self._get_job_state(state)
        return rval

    def parse_single_status(self, status, job_id):
        """Map a single bjobs state string to a Galaxy job state."""
        if not status:
            # Job not found in LSF, most probably finished and forgotten.
            # lsf outputs: Job <num> is not found -- but that is on the stderr
            # Note: a very old failed job job will not be shown here either,
            # which would be badly handled here. So this only works well when Galaxy
            # is constantly monitoring the jobs. The logic here is that DONE jobs get forgotten
            # faster than failed jobs.
            log.warning("Job id '%s' not found LSF status check" % job_id)
            return job_states.OK
        return self._get_job_state(status)

    def get_failure_reason(self, job_id):
        """Return the command printing the long-form job description."""
        return "bjobs -l " + job_id

    def parse_failure_reason(self, reason, job_id):
        """Detect memory-limit kills in bjobs -l output, else None."""
        # LSF will produce the following in the job output file:
        # TERM_MEMLIMIT: job killed after reaching LSF memory usage limit.
        # Exited with exit code 143.
        for line in reason.splitlines():
            if "TERM_MEMLIMIT" in line:
                from galaxy.jobs import JobState
                return JobState.runner_states.MEMORY_LIMIT_REACHED
        return None

    def _get_job_state(self, state):
        """Translate an LSF state code into a Galaxy/Pulsar job state.

        Raises KeyError for unrecognized codes.
        """
        # based on:
        # https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.3/lsf_admin/job_state_lsf.html
        # https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.2/lsf_command_ref/bjobs.1.html
        mapping = {
            'EXIT': job_states.ERROR,
            'RUN': job_states.RUNNING,
            'PEND': job_states.QUEUED,
            'DONE': job_states.OK,
            'PSUSP': job_states.ERROR,
            'USUSP': job_states.ERROR,
            'SSUSP': job_states.ERROR,
            'UNKWN': job_states.ERROR,
            'WAIT': job_states.QUEUED,
            'ZOMBI': job_states.ERROR
        }
        try:
            # BUGFIX: the previous implementation used dict.get(state), which
            # returns None instead of raising, so the KeyError branch was
            # unreachable and unknown codes were silently mapped to None.
            return mapping[state]
        except KeyError:
            raise KeyError("Failed to map LSF status code [%s] to job state." % state)
__all__ = ('LSF',)
| galaxyproject/pulsar | pulsar/managers/util/cli/job/lsf.py | Python | apache-2.0 | 4,846 | [
"Galaxy"
] | d7d84a97839adc4d3c594150581f14febe44be3447abe3e4236d8c94e915adf4 |
import os, sys
import socket
import logging
import marshal
import cPickle
import threading, Queue
import time
import random
import getpass
import urllib
import warnings
import weakref
import multiprocessing
import zmq
import pymesos as mesos
import pymesos.mesos_pb2 as mesos_pb2
from dpark.util import compress, decompress, spawn
from dpark.dependency import NarrowDependency, ShuffleDependency
from dpark.accumulator import Accumulator
from dpark.task import ResultTask, ShuffleMapTask
from dpark.job import SimpleJob
from dpark.env import env
logger = logging.getLogger("scheduler")
MAX_FAILED = 3
EXECUTOR_MEMORY = 64 # cache
POLL_TIMEOUT = 0.1
RESUBMIT_TIMEOUT = 60
MAX_IDLE_TIME = 60 * 30
class TaskEndReason: pass
class Success(TaskEndReason): pass
class FetchFailed(TaskEndReason):
    """Outcome: fetching shuffle output from *serverUri* failed.

    Carries enough identity (shuffle id, map id, reduce id) for the
    scheduler to mark the producing map stage's output unavailable and
    resubmit it.
    """

    def __init__(self, serverUri, shuffleId, mapId, reduceId):
        self.serverUri = serverUri
        self.shuffleId = shuffleId
        self.mapId = mapId
        self.reduceId = reduceId

    def __str__(self):
        return '<FetchFailed(%s, %d, %d, %d)>' % (
            self.serverUri, self.shuffleId, self.mapId, self.reduceId)
class OtherFailure(TaskEndReason):
    """Catch-all task failure carrying a human-readable message."""

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return '<OtherFailure %s>' % self.message
class Stage:
    """A stage in the computation DAG.

    A stage groups tasks that can run without an intervening shuffle.
    *shuffleDep* is None for a result (final) stage and a
    ShuffleDependency for a shuffle-map stage; *parents* are the stages
    whose shuffle output this stage consumes.
    """

    def __init__(self, rdd, shuffleDep, parents):
        self.id = self.newId()
        self.rdd = rdd
        self.shuffleDep = shuffleDep
        self.parents = parents
        self.numPartitions = len(rdd)
        # outputLocs[p]: hosts currently holding the output of partition p.
        self.outputLocs = [[] for i in range(self.numPartitions)]

    def __str__(self):
        return '<Stage(%d) for %s>' % (self.id, self.rdd)

    def __getstate__(self):
        # Stages hold scheduler-local state and must never cross processes.
        raise Exception("should not pickle stage")

    @property
    def isAvailable(self):
        # A parentless, shuffle-free stage is trivially runnable; otherwise
        # every partition needs at least one recorded output location.
        # (Fixed: identity comparison with None, was `== None`.)
        if not self.parents and self.shuffleDep is None:
            return True
        return all(self.outputLocs)

    def addOutputLoc(self, partition, host):
        """Record that *host* holds the output of *partition*."""
        self.outputLocs[partition].append(host)

    def removeHost(self, host):
        """Forget all outputs stored on *host* (e.g. after a slave loss)."""
        becameUnavailable = False
        for ls in self.outputLocs:
            if host in ls:
                ls.remove(host)
                becameUnavailable = True
        if becameUnavailable:
            logger.info("%s is now unavailable on host %s", self, host)

    # Monotonically increasing id shared by all Stage instances.
    nextId = 0

    @classmethod
    def newId(cls):
        cls.nextId += 1
        return cls.nextId
class Scheduler:
    """Abstract scheduler interface implemented by the concrete schedulers."""

    def start(self):
        pass

    def runJob(self, rdd, func, partitions, allowLocal):
        pass

    def clear(self):
        pass

    def stop(self):
        pass

    def defaultParallelism(self):
        # Conservative default; subclasses override with a real value.
        return 2
class CompletionEvent:
    """Value object pairing a finished task with its outcome.

    Queued by taskEnded() and drained by DAGScheduler.runJob().
    """

    def __init__(self, task, reason, result, accumUpdates):
        self.task, self.reason = task, reason
        self.result, self.accumUpdates = result, accumUpdates
class DAGScheduler(Scheduler):
    """Builds a DAG of Stages from an RDD's lineage and drives it to completion.

    runJob() is a generator that yields result partitions incrementally, in
    partition order, as their tasks finish.  Subclasses supply submitTasks();
    workers report back through taskEnded().
    """

    def __init__(self):
        # Finished-task events pushed by workers, drained by runJob().
        self.completionEvents = Queue.Queue()
        # Weak values so completed jobs' stages can be garbage collected.
        self.idToStage = weakref.WeakValueDictionary()
        self.shuffleToMapStage = {}
        self.cacheLocs = {}
        self._shutdown = False

    def check(self):
        # Hook for subclasses; called on every idle poll in runJob().
        pass

    def clear(self):
        self.idToStage.clear()
        self.shuffleToMapStage.clear()
        self.cacheLocs.clear()
        self.cacheTracker.clear()
        self.mapOutputTracker.clear()

    def shutdown(self):
        # Observed by runJob's polling loop, which then exits the process.
        self._shutdown = True

    @property
    def cacheTracker(self):
        return env.cacheTracker

    @property
    def mapOutputTracker(self):
        return env.mapOutputTracker

    def submitTasks(self, tasks):
        # Implemented by LocalScheduler / MultiProcessScheduler / MesosScheduler.
        raise NotImplementedError

    def taskEnded(self, task, reason, result, accumUpdates):
        """Called (possibly from another thread) when a task finishes."""
        self.completionEvents.put(CompletionEvent(task, reason, result, accumUpdates))

    def getCacheLocs(self, rdd):
        # Default: no cached locations for any partition.
        return self.cacheLocs.get(rdd.id, [[] for i in range(len(rdd))])

    def updateCacheLocs(self):
        self.cacheLocs = self.cacheTracker.getLocationsSnapshot()

    def newStage(self, rdd, shuffleDep):
        """Create a Stage for *rdd*, wiring in its parent (shuffle) stages."""
        stage = Stage(rdd, shuffleDep, self.getParentStages(rdd))
        self.idToStage[stage.id] = stage
        logger.debug("new stage: %s", stage)
        return stage

    def getParentStages(self, rdd):
        """Walk the lineage of *rdd*; each ShuffleDependency starts a parent stage."""
        parents = set()
        visited = set()
        def visit(r):
            if r.id in visited:
                return
            visited.add(r.id)
            if r.shouldCache:
                self.cacheTracker.registerRDD(r.id, len(r))
            for dep in r.dependencies:
                if isinstance(dep, ShuffleDependency):
                    # Shuffle boundary: the producing side becomes a parent stage.
                    parents.add(self.getShuffleMapStage(dep))
                else:
                    # Narrow dependency: stays inside the current stage.
                    visit(dep.rdd)
        visit(rdd)
        return list(parents)

    def getShuffleMapStage(self, dep):
        # One map stage per shuffle id, created lazily and cached.
        stage = self.shuffleToMapStage.get(dep.shuffleId, None)
        if stage is None:
            stage = self.newStage(dep.rdd, dep)
            self.shuffleToMapStage[dep.shuffleId] = stage
        return stage

    def getMissingParentStages(self, stage):
        """Return parent stages whose shuffle output is not yet fully available."""
        missing = set()
        visited = set()
        def visit(r):
            if r.id in visited:
                return
            visited.add(r.id)
            if r.shouldCache and all(self.getCacheLocs(r)):
                # Fully cached: no need to recompute anything beneath it.
                return
            for dep in r.dependencies:
                if isinstance(dep, ShuffleDependency):
                    stage = self.getShuffleMapStage(dep)
                    if not stage.isAvailable:
                        missing.add(stage)
                elif isinstance(dep, NarrowDependency):
                    visit(dep.rdd)
        visit(stage.rdd)
        return list(missing)

    def runJob(self, finalRdd, func, partitions, allowLocal):
        """Run *func* over the given partitions of *finalRdd*.

        Generator: yields each partition's result as soon as all lower-indexed
        partitions have been yielded (in-order, incremental delivery).
        Handles stage submission, fetch-failure resubmission and accumulators.
        """
        outputParts = list(partitions)
        numOutputParts = len(partitions)
        finalStage = self.newStage(finalRdd, None)
        results = [None]*numOutputParts
        finished = [None]*numOutputParts
        lastFinished = 0
        numFinished = 0
        waiting = set()   # stages blocked on missing parents
        running = set()   # stages with tasks in flight
        failed = set()    # map stages to resubmit after a fetch failure
        pendingTasks = {} # stage -> set of outstanding task ids
        lastFetchFailureTime = 0
        self.updateCacheLocs()
        logger.debug("Final stage: %s, %d", finalStage, numOutputParts)
        logger.debug("Parents of final stage: %s", finalStage.parents)
        logger.debug("Missing parents: %s", self.getMissingParentStages(finalStage))
        # Fast path: single partition with no missing parents runs inline.
        if allowLocal and (not finalStage.parents or not self.getMissingParentStages(finalStage)) and numOutputParts == 1:
            split = finalRdd.splits[outputParts[0]]
            yield func(finalRdd.iterator(split))
            return
        def submitStage(stage):
            # Submit the stage if runnable, else recurse into missing parents.
            logger.debug("submit stage %s", stage)
            if stage not in waiting and stage not in running:
                missing = self.getMissingParentStages(stage)
                if not missing:
                    submitMissingTasks(stage)
                    running.add(stage)
                else:
                    for parent in missing:
                        submitStage(parent)
                    waiting.add(stage)
        def submitMissingTasks(stage):
            # Build one task per not-yet-finished partition of the stage.
            myPending = pendingTasks.setdefault(stage, set())
            tasks = []
            have_prefer = True
            if stage == finalStage:
                for i in range(numOutputParts):
                    if not finished[i]:
                        part = outputParts[i]
                        # Once one partition reports no preferred locations,
                        # stop asking (assume none of the rest have any).
                        if have_prefer:
                            locs = self.getPreferredLocs(finalRdd, part)
                            if not locs:
                                have_prefer = False
                        else:
                            locs = []
                        tasks.append(ResultTask(finalStage.id, finalRdd,
                            func, part, locs, i))
            else:
                for p in range(stage.numPartitions):
                    if not stage.outputLocs[p]:
                        if have_prefer:
                            locs = self.getPreferredLocs(stage.rdd, p)
                            if not locs:
                                have_prefer = False
                        else:
                            locs = []
                        tasks.append(ShuffleMapTask(stage.id, stage.rdd,
                            stage.shuffleDep, p, locs))
            logger.debug("add to pending %s tasks", len(tasks))
            myPending |= set(t.id for t in tasks)
            self.submitTasks(tasks)
        submitStage(finalStage)
        # Event loop: drain completion events until all output partitions done.
        while numFinished != numOutputParts:
            try:
                evt = self.completionEvents.get(False)
            except Queue.Empty:
                # No events: poll subclass health, honor shutdown, and
                # periodically resubmit stages that lost shuffle outputs.
                self.check()
                if self._shutdown:
                    sys.exit(1)
                if failed and time.time() > lastFetchFailureTime + RESUBMIT_TIMEOUT:
                    self.updateCacheLocs()
                    for stage in failed:
                        logger.info("Resubmitting failed stages: %s", stage)
                        submitStage(stage)
                    failed.clear()
                else:
                    time.sleep(0.1)
                continue
            task, reason = evt.task, evt.reason
            stage = self.idToStage[task.stageId]
            if stage not in pendingTasks: # stage from other job
                continue
            logger.debug("remove from pedding %s from %s", task, stage)
            pendingTasks[stage].remove(task.id)
            if isinstance(reason, Success):
                Accumulator.merge(evt.accumUpdates)
                if isinstance(task, ResultTask):
                    finished[task.outputId] = True
                    numFinished += 1
                    results[task.outputId] = evt.result
                    # Yield any contiguous run of finished partitions, freeing
                    # the stored results as they are handed out.
                    while lastFinished < numOutputParts and finished[lastFinished]:
                        yield results[lastFinished]
                        results[lastFinished] = None
                        lastFinished += 1
                elif isinstance(task, ShuffleMapTask):
                    stage = self.idToStage[task.stageId]
                    stage.addOutputLoc(task.partition, evt.result)
                    if not pendingTasks[stage] and all(stage.outputLocs):
                        logger.debug("%s finished; looking for newly runnable stages", stage)
                        running.remove(stage)
                        # NOTE(review): `!= None` should be `is not None`.
                        if stage.shuffleDep != None:
                            self.mapOutputTracker.registerMapOutputs(
                                stage.shuffleDep.shuffleId,
                                [l[-1] for l in stage.outputLocs])
                        self.updateCacheLocs()
                        newlyRunnable = set(stage for stage in waiting if not self.getMissingParentStages(stage))
                        waiting -= newlyRunnable
                        running |= newlyRunnable
                        logger.debug("newly runnable: %s, %s", waiting, newlyRunnable)
                        for stage in newlyRunnable:
                            submitMissingTasks(stage)
            elif isinstance(reason, FetchFailed):
                # Lost shuffle output: park this stage and schedule the
                # producing map stage for resubmission after a cool-down.
                if stage in running:
                    waiting.add(stage)
                mapStage = self.shuffleToMapStage[reason.shuffleId]
                mapStage.removeHost(reason.serverUri)
                failed.add(mapStage)
                lastFetchFailureTime = time.time()
            else:
                logger.error("task %s failed: %s %s %s", task, reason, type(reason), reason.message)
                raise Exception(reason.message)
        assert not any(results)
        return

    def getPreferredLocs(self, rdd, partition):
        """Hosts where *partition* of *rdd* would run with data locality."""
        return rdd.preferredLocations(rdd.splits[partition])
def run_task(task, aid):
    """Execute *task* (attempt id *aid*) and package the outcome.

    Returns ``(task.id, Success(), result, accumUpdates)`` on success, or
    ``(task.id, OtherFailure(...), None, None)`` on any exception, so the
    caller can forward it to DAGScheduler.taskEnded().
    """
    logger.debug("Running task %r", task)
    try:
        # Accumulators are process-global; reset before the task so the
        # updates collected afterwards belong only to this task.
        Accumulator.clear()
        result = task.run(aid)
        accumUpdates = Accumulator.values()
        return (task.id, Success(), result, accumUpdates)
    except Exception, e:
        logger.error("error in task %s", task)
        import traceback
        traceback.print_exc()
        return (task.id, OtherFailure("exception:" + str(e)), None, None)
class LocalScheduler(DAGScheduler):
    """Scheduler that executes every task synchronously in this process."""

    attemptId = 0

    def nextAttempId(self):
        self.attemptId += 1
        return self.attemptId

    def submitTasks(self, tasks):
        logger.debug("submit tasks %s in LocalScheduler", tasks)
        for task in tasks:
            # Run inline and immediately feed the outcome back to runJob().
            tid, reason, result, update = run_task(task, self.nextAttempId())
            self.taskEnded(task, reason, result, update)
def run_task_in_process(task, tid, environ):
    """Worker-pool entry point: initialize the env, then run the task.

    env.start() must happen before run_task because the task may rely on
    trackers configured from *environ* in the child process.
    """
    from dpark.env import env
    env.start(False, environ)
    logger.debug("run task in process %s %s", task, tid)
    try:
        return run_task(task, tid)
    except KeyboardInterrupt:
        # Ctrl-C in a pool worker: exit quietly instead of tracebacking.
        sys.exit(0)
class MultiProcessScheduler(LocalScheduler):
    """Scheduler that fans tasks out to a local multiprocessing pool."""

    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        # tid -> task, so the async callback can recover the task object.
        self.tasks = {}
        self.pool = multiprocessing.Pool(self.threads or 2)

    def submitTasks(self, tasks):
        if not tasks:
            return
        logger.info("Got a job with %d tasks: %s", len(tasks), tasks[0].rdd)
        total, self.finished, start = len(tasks), 0, time.time()
        def callback(args):
            # Runs in the pool's result-handler thread for each finished task.
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.finished += 1
            # \x1b[1A moves the cursor up one line for in-place progress.
            logger.info("Task %s finished (%d/%d) \x1b[1A",
                tid, self.finished, total)
            if self.finished == total:
                logger.info("Job finished in %.1f seconds" + " "*20, time.time() - start)
            self.taskEnded(task, reason, result, update)
        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                [task, self.nextAttempId(), env.environ],
                callback=callback)

    def stop(self):
        # terminate() kills workers immediately; join() waits for cleanup.
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
def profile(f):
    """Decorator: run *f* under cProfile, print the top-20 hot spots by
    time and by cumulative time, and return *f*'s result.

    Stats are also written to /tmp/worker-<pid>.prof for offline analysis.
    """
    def func(*args, **kwargs):
        path = '/tmp/worker-%s.prof' % os.getpid()
        import cProfile
        import pstats
        # Run under the profiler and capture the return value; the original
        # wrapper discarded it, always returning None.
        ctx = {'f': f, 'args': args, 'kwargs': kwargs}
        cProfile.runctx('result = f(*args, **kwargs)', ctx, ctx, path)
        stats = pstats.Stats(path)
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
        stats.sort_stats('cumulative')
        stats.print_stats(20)
        return ctx.get('result')
    return func
def safe(f):
    """Decorator serializing method calls through ``self.lock``."""
    def wrapper(self, *args, **kwargs):
        with self.lock:
            return f(self, *args, **kwargs)
    return wrapper
def int2ip(n):
    """Render a packed IPv4 address (low byte first) as a dotted quad."""
    octets = (n & 0xff, (n >> 8) & 0xff, (n >> 16) & 0xff, n >> 24)
    return '.'.join('%d' % octet for octet in octets)
class MesosScheduler(DAGScheduler):
    """Scheduler that runs tasks on a Mesos cluster.

    Acts as both a DAGScheduler and a Mesos framework scheduler: the
    resourceOffers/statusUpdate/... callbacks are invoked by the Mesos
    driver thread, so every state-touching method is wrapped with @safe
    to serialize through self.lock.
    """

    def __init__(self, master, options):
        DAGScheduler.__init__(self)
        self.master = master
        self.use_self_as_exec = options.self
        self.cpus = options.cpus
        self.mem = options.mem
        self.task_per_node = options.parallel or multiprocessing.cpu_count()
        self.group = options.group
        self.logLevel = options.logLevel
        self.options = options
        self.started = False
        self.last_finish_time = 0
        self.isRegistered = False
        self.executor = None
        self.driver = None
        self.out_logger = None
        self.err_logger = None
        self.lock = threading.RLock()
        self.init_job()

    def init_job(self):
        # Per-job bookkeeping, reset by clear().
        self.activeJobs = {}
        self.activeJobsQueue = []
        self.taskIdToJobId = {}
        self.taskIdToSlaveId = {}
        self.jobTasks = {}
        self.slaveTasks = {}   # slave id -> number of running tasks
        self.slaveFailed = {}  # slave id -> failure count (blacklist at MAX_FAILED)

    def clear(self):
        DAGScheduler.clear(self)
        self.init_job()

    def start(self):
        # Start the zmq log collectors; the driver itself starts lazily on
        # the first submitTasks() call.
        if not self.out_logger:
            self.out_logger = self.start_logger(sys.stdout)
        if not self.err_logger:
            self.err_logger = self.start_logger(sys.stderr)

    def start_driver(self):
        """Register this process as a Mesos framework and start the driver."""
        name = '[dpark@%s] ' % socket.gethostname()
        name += os.path.abspath(sys.argv[0]) + ' ' + ' '.join(sys.argv[1:])
        if len(name) > 512:
            name = name[:512] + '...'
        framework = mesos_pb2.FrameworkInfo()
        framework.user = getpass.getuser()
        if framework.user == 'root':
            raise Exception("dpark is not allowed to run as 'root'")
        framework.name = name
        self.driver = mesos.MesosSchedulerDriver(self, framework,
                                                self.master)
        self.driver.start()
        logger.debug("Mesos Scheudler driver started")
        self.started = True
        self.last_finish_time = time.time()
        def check():
            # Background watchdog: shut down after MAX_IDLE_TIME with no jobs.
            while self.started:
                now = time.time()
                if not self.activeJobs and now - self.last_finish_time > MAX_IDLE_TIME:
                    logger.info("stop mesos scheduler after %d seconds idle",
                            now - self.last_finish_time)
                    self.stop()
                    break
                time.sleep(1)
        spawn(check)

    def start_logger(self, output):
        """Bind a zmq PULL socket that forwards executor log lines to *output*.

        Returns the tcp address executors should PUSH their log lines to.
        """
        sock = env.ctx.socket(zmq.PULL)
        port = sock.bind_to_random_port("tcp://0.0.0.0")
        def collect_log():
            while True:
                line = sock.recv()
                output.write(line)
        spawn(collect_log)
        host = socket.gethostname()
        addr = "tcp://%s:%d" % (host, port)
        logger.debug("log collecter start at %s", addr)
        return addr

    @safe
    def registered(self, driver, frameworkId, masterInfo):
        # Mesos callback: framework accepted by the master.
        self.isRegistered = True
        logger.debug("connect to master %s:%s(%s), registered as %s",
            int2ip(masterInfo.ip), masterInfo.port, masterInfo.id,
            frameworkId.value)
        self.executor = self.getExecutorInfo(str(frameworkId.value))

    @safe
    def reregistered(self, driver, masterInfo):
        logger.warning("re-connect to mesos master %s:%s(%s)",
            int2ip(masterInfo.ip), masterInfo.port, masterInfo.id)

    @safe
    def disconnected(self, driver):
        logger.debug("framework is disconnected")

    @safe
    def getExecutorInfo(self, framework_id):
        """Describe the executor each slave should launch for our tasks."""
        info = mesos_pb2.ExecutorInfo()
        if hasattr(info, 'framework_id'):
            info.framework_id.value = framework_id
        if self.use_self_as_exec:
            info.command.value = os.path.abspath(sys.argv[0])
            info.executor_id.value = sys.argv[0]
        else:
            dir = os.path.dirname(__file__)
            # Pick the executor script matching the running Python version.
            info.command.value = os.path.abspath(os.path.join(dir, 'bin/executor%d%d.py' % sys.version_info[:2]))
            info.executor_id.value = "default"
        mem = info.resources.add()
        mem.name = 'mem'
        mem.type = 0 #mesos_pb2.Value.SCALAR
        mem.scalar.value = EXECUTOR_MEMORY
        # Bootstrap payload unpacked by the executor on the slave side.
        info.data = marshal.dumps((os.path.realpath(sys.argv[0]), os.getcwd(), sys.path, dict(os.environ),
            self.task_per_node, self.out_logger, self.err_logger, self.logLevel, env.environ))
        return info

    @safe
    def submitTasks(self, tasks):
        """Wrap *tasks* in a SimpleJob and (lazily) start the Mesos driver."""
        if not tasks:
            return
        job = SimpleJob(self, tasks, self.cpus, tasks[0].rdd.mem or self.mem)
        self.activeJobs[job.id] = job
        self.activeJobsQueue.append(job)
        self.jobTasks[job.id] = set()
        logger.info("Got job %d with %d tasks: %s", job.id, len(tasks), tasks[0].rdd)
        need_revive = self.started
        if not self.started:
            self.start_driver()
        # Busy-wait for registration, releasing our lock so the @safe
        # registered() callback can run.
        while not self.isRegistered:
            self.lock.release()
            time.sleep(0.01)
            self.lock.acquire()
        if need_revive:
            self.requestMoreResources()

    def requestMoreResources(self):
        logger.debug("reviveOffers")
        self.driver.reviveOffers()

    @safe
    def resourceOffers(self, driver, offers):
        """Mesos callback: match outstanding job tasks against offers."""
        rf = mesos_pb2.Filters()
        if not self.activeJobs:
            # Nothing to run: decline everything for 5 minutes.
            rf.refuse_seconds = 60 * 5
            for o in offers:
                driver.launchTasks(o.id, [], rf)
            return
        start = time.time()
        random.shuffle(offers)
        cpus = [self.getResource(o.resources, 'cpus') for o in offers]
        # Reserve room for the executor itself on slaves we have not used yet.
        mems = [self.getResource(o.resources, 'mem')
                - (o.slave_id.value not in self.slaveTasks
                    and EXECUTOR_MEMORY or 0)
                for o in offers]
        logger.debug("get %d offers (%s cpus, %s mem), %d jobs",
            len(offers), sum(cpus), sum(mems), len(self.activeJobs))
        tasks = {}
        for job in self.activeJobsQueue:
            # Keep looping over the offers until no task can be placed.
            while True:
                launchedTask = False
                for i,o in enumerate(offers):
                    sid = o.slave_id.value
                    if self.group and (self.getAttribute(o.attributes, 'group') or 'none') not in self.group:
                        continue
                    if self.slaveFailed.get(sid, 0) >= MAX_FAILED:
                        continue
                    if self.slaveTasks.get(sid, 0) >= self.task_per_node:
                        continue
                    if mems[i] < self.mem or cpus[i]+1e-4 < self.cpus:
                        continue
                    t = job.slaveOffer(str(o.hostname), cpus[i], mems[i])
                    if not t:
                        continue
                    task = self.createTask(o, job, t, cpus[i])
                    tasks.setdefault(o.id.value, []).append(task)
                    logger.debug("dispatch %s into %s", t, o.hostname)
                    tid = task.task_id.value
                    self.jobTasks[job.id].add(tid)
                    self.taskIdToJobId[tid] = job.id
                    self.taskIdToSlaveId[tid] = sid
                    self.slaveTasks[sid] = self.slaveTasks.get(sid, 0) + 1
                    cpus[i] -= min(cpus[i], t.cpus)
                    mems[i] -= t.mem
                    launchedTask = True
                if not launchedTask:
                    break
        used = time.time() - start
        if used > 10:
            logger.error("use too much time in slaveOffer: %.2fs", used)
        rf.refuse_seconds = 5
        for o in offers:
            driver.launchTasks(o.id, tasks.get(o.id.value, []), rf)
        logger.debug("reply with %d tasks, %s cpus %s mem left",
            sum(len(ts) for ts in tasks.values()), sum(cpus), sum(mems))

    @safe
    def offerRescinded(self, driver, offer_id):
        logger.debug("rescinded offer: %s", offer_id)
        if self.activeJobs:
            self.requestMoreResources()

    def getResource(self, res, name):
        # Returns None when the named scalar resource is absent.
        for r in res:
            if r.name == name:
                return r.scalar.value

    def getAttribute(self, attrs, name):
        for r in attrs:
            if r.name == name:
                return r.text.value

    def createTask(self, o, job, t, available_cpus):
        """Build the mesos TaskInfo for internal task *t* on offer *o*."""
        task = mesos_pb2.TaskInfo()
        # Task id encodes job id, task id and attempt number.
        tid = "%s:%s:%s" % (job.id, t.id, t.tried)
        task.name = "task %s" % tid
        task.task_id.value = tid
        task.slave_id.value = o.slave_id.value
        task.data = compress(cPickle.dumps((t, t.tried), -1))
        task.executor.MergeFrom(self.executor)
        if len(task.data) > 1000*1024:
            logger.warning("task too large: %s %d",
                t, len(task.data))
        cpu = task.resources.add()
        cpu.name = 'cpus'
        cpu.type = 0 #mesos_pb2.Value.SCALAR
        cpu.scalar.value = min(t.cpus, available_cpus)
        mem = task.resources.add()
        mem.name = 'mem'
        mem.type = 0 #mesos_pb2.Value.SCALAR
        mem.scalar.value = t.mem
        return task

    @safe
    def statusUpdate(self, driver, status):
        """Mesos callback: route a task status change to its SimpleJob.

        Terminal states carry a pickled (reason, result, accUpdate) payload;
        a result flagged >= 2 is fetched over HTTP, and the low bit selects
        marshal (0) vs cPickle (1) decoding.
        """
        tid = status.task_id.value
        state = status.state
        logger.debug("status update: %s %s", tid, state)
        jid = self.taskIdToJobId.get(tid)
        if jid not in self.activeJobs:
            logger.debug("Ignoring update from TID %s " +
                "because its job is gone", tid)
            return
        job = self.activeJobs[jid]
        _, task_id, tried = map(int, tid.split(':'))
        if state == mesos_pb2.TASK_RUNNING:
            return job.statusUpdate(task_id, tried, state)
        # Terminal state: drop all bookkeeping for this task id.
        del self.taskIdToJobId[tid]
        self.jobTasks[jid].remove(tid)
        slave_id = self.taskIdToSlaveId[tid]
        if slave_id in self.slaveTasks:
            self.slaveTasks[slave_id] -= 1
        del self.taskIdToSlaveId[tid]
        if state in (mesos_pb2.TASK_FINISHED, mesos_pb2.TASK_FAILED) and status.data:
            try:
                reason,result,accUpdate = cPickle.loads(status.data)
                if result:
                    flag, data = result
                    if flag >= 2:
                        # Result too large for the status message: fetch by URL.
                        try:
                            data = urllib.urlopen(data).read()
                        except IOError:
                            # try again
                            data = urllib.urlopen(data).read()
                        flag -= 2
                    data = decompress(data)
                    if flag == 0:
                        result = marshal.loads(data)
                    else:
                        result = cPickle.loads(data)
            except Exception, e:
                logger.warning("error when cPickle.loads(): %s, data:%s", e, len(status.data))
                state = mesos_pb2.TASK_FAILED
                return job.statusUpdate(task_id, tried, mesos_pb2.TASK_FAILED, 'load failed: %s' % e)
            else:
                return job.statusUpdate(task_id, tried, state,
                    reason, result, accUpdate)
        # killed, lost, load failed
        job.statusUpdate(task_id, tried, state, status.data)
        #if state in (mesos_pb2.TASK_FAILED, mesos_pb2.TASK_LOST):
        #    self.slaveFailed[slave_id] = self.slaveFailed.get(slave_id,0) + 1

    def jobFinished(self, job):
        """Drop all state for *job*; reset slave counters when idle."""
        logger.debug("job %s finished", job.id)
        if job.id in self.activeJobs:
            del self.activeJobs[job.id]
            self.activeJobsQueue.remove(job)
            for id in self.jobTasks[job.id]:
                del self.taskIdToJobId[id]
                del self.taskIdToSlaveId[id]
            del self.jobTasks[job.id]
            self.last_finish_time = time.time()
            if not self.activeJobs:
                self.slaveTasks.clear()
                self.slaveFailed.clear()

    @safe
    def check(self):
        # Called from the DAG event loop; re-request offers on task timeout.
        for job in self.activeJobs.values():
            if job.check_task_timeout():
                self.requestMoreResources()

    @safe
    def error(self, driver, code, message):
        logger.warning("Mesos error message: %s (code: %s)", message, code)
        #if self.activeJobs:
        #    self.requestMoreResources()

    #@safe
    def stop(self):
        if not self.started:
            return
        logger.debug("stop scheduler")
        self.started = False
        self.isRegistered = False
        self.driver.stop(False)
        self.driver = None

    def defaultParallelism(self):
        return 16

    def frameworkMessage(self, driver, slave, executor, data):
        logger.warning("[slave %s] %s", slave.value, data)

    def executorLost(self, driver, executorId, slaveId, status):
        logger.warning("executor at %s %s lost: %s", slaveId.value, executorId.value, status)
        self.slaveTasks.pop(slaveId.value, None)
        self.slaveFailed.pop(slaveId.value, None)

    def slaveLost(self, driver, slaveId):
        logger.warning("slave %s lost", slaveId.value)
        self.slaveTasks.pop(slaveId.value, None)
        self.slaveFailed.pop(slaveId.value, None)

    def killTask(self, job_id, task_id, tried):
        """Ask Mesos to kill one attempt of a task."""
        tid = mesos_pb2.TaskID()
        tid.value = "%s:%s:%s" % (job_id, task_id, tried)
        self.driver.killTask(tid)
| ee08b397/dpark | dpark/schedule.py | Python | bsd-3-clause | 28,302 | [
"VisIt"
] | 0a3a0a795c4f1f8108100d9e73076aa997ee63ea08d6640cf3d60f4124a547c6 |
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import errno
import os
import sys
import json
import signal
import controller.framework.fxlib as fxlib
import controller.framework.ipoplib as ipoplib
import argparse
import threading
import importlib
from getpass import getpass
from collections import OrderedDict
from controller.framework.CBT import CBT as _CBT
from controller.framework.CFxHandle import CFxHandle
class CFX(object):
    """Controller framework core: loads controller modules (CMs), wires
    their CFxHandles together, and routes CBT messages between them.
    """

    def __init__(self):
        self.CONFIG = {}
        self.parse_config()
        '''
        CFxHandleDict is a dict containing the references to CFxHandles of all CMs with key as the module name and
        value as the CFxHandle reference
        '''
        self.CFxHandleDict = {}
        self.vpn_type = self.CONFIG['CFx']['Model']
        self.loaded_modules = ['CFx']  # list of modules already loaded
        self.event = None

    def submitCBT(self, CBT):
        """Deliver *CBT* to its recipient module's work queue."""
        recipient = CBT.recipient
        self.CFxHandleDict[recipient].CMQueue.put(CBT)

    def createCBT(self, initiator='', recipient='', action='', data=''):
        # create and return an empty CBT
        cbt = _CBT(initiator, recipient, action, data)
        return cbt

    def freeCBT(self):
        # deallocate CBT (use python's automatic garbage collector)
        pass

    def initialize(self,):
        """Validate the config, load every enabled module, start all threads."""
        # check for circular dependencies in the configuration file
        dependency_graph = {}
        for key in self.CONFIG:
            if key != 'CFx':
                try:
                    dependency_graph[key] = self.CONFIG[key]['dependencies']
                except Exception as error:
                    pass
        if self.detect_cyclic_dependency(dependency_graph):
            print("Circular dependency detected in config.json. Exiting")
            sys.exit()
        # iterate and load the modules specified in the configuration file
        for key in self.CONFIG:
            if key not in self.loaded_modules:
                self.load_module(key)
        # start all the worker and timer threads
        for handle in self.CFxHandleDict:
            self.CFxHandleDict[handle].CMThread.start()
            if self.CFxHandleDict[handle].timer_thread:
                self.CFxHandleDict[handle].timer_thread.start()

    def load_module(self, module_name):
        """Import, instantiate and register one controller module.

        Dependencies load first; import falls back from the generic modules
        package to the model-specific (gvpn/svpn/custom) package.
        """
        if 'Enabled' in self.CONFIG[module_name]:
            module_enabled = self.CONFIG[module_name]['Enabled']
        else:
            module_enabled = True
        if (module_name not in self.loaded_modules) and module_enabled and module_name != "Tincan":
            # load the dependencies of the module
            self.load_dependencies(module_name)
            # import the modules dynamically
            try:
                module = importlib.import_module("controller.modules.{0}".format(module_name))
            except ImportError as error:
                if self.vpn_type == "GroupVPN":
                    module = importlib.import_module("controller.modules.gvpn.{0}".format(module_name))
                elif self.vpn_type == "SocialVPN":
                    module = importlib.import_module("controller.modules.svpn.{0}".format(module_name))
                else:
                    module = importlib.import_module("controller.modules.{0}.{1}".format(self.vpn_type, module_name))
            # get the class with name key from module
            module_class = getattr(module, module_name)
            # create a CFxHandle object for each module
            handle = CFxHandle(self)
            instance = module_class(handle, self.CONFIG[module_name], module_name)
            handle.CMInstance = instance
            handle.CMConfig = self.CONFIG[module_name]
            # store the CFxHandle object references in the
            # dict with module name as the key
            self.CFxHandleDict[module_name] = handle
            # intialize all the CFxHandles which in turn initialize the CMs
            handle.initialize()
            self.loaded_modules.append(module_name)

    def load_dependencies(self, module_name):
        # load the dependencies of the module as specified in the configuration file
        try:
            dependencies = self.CONFIG[module_name]['dependencies']
            for module_name in dependencies:
                if module_name not in self.loaded_modules:
                    self.load_module(module_name)
        except KeyError:
            pass

    def detect_cyclic_dependency(self, g):
        # test if the directed graph g has a cycle (DFS with a path set)
        path = set()

        def visit(vertex):
            path.add(vertex)
            for neighbour in g.get(vertex, ()):
                if (neighbour in path) or visit(neighbour):
                    return True
            path.remove(vertex)
            return False

        return any(visit(v) for v in g)

    def __handler(self, signum=None, frame=None):
        # SIGINT handler installed by waitForShutdownEvent (POSIX path).
        print('Signal handler called with signal ', signum)

    def parse_config(self):
        """Merge the built-in defaults with -c/-s command-line configuration."""
        self.CONFIG = fxlib.CONFIG
        parser = argparse.ArgumentParser()
        parser.add_argument("-c", help="load configuration from a file",
                            dest="config_file", metavar="config_file")
        parser.add_argument("-u", help="update configuration file if needed",
                            dest="update_config", action="store_true")
        parser.add_argument("-p", help="load remote ip configuration file",
                            dest="ip_config", metavar="ip_config")
        parser.add_argument("-s", help="configuration as json string"
                            " (overrides configuration from file)",
                            dest="config_string", metavar="config_string")
        parser.add_argument("--pwdstdout", help="use stdout as "
                            "password stream",
                            dest="pwdstdout", action="store_true")
        args = parser.parse_args()
        if args.config_file:
            # load the configuration file
            with open(args.config_file) as f:
                # load the configuration file into an OrderedDict with the
                # modules in the order in which they appear
                self.json_data = json.load(f, object_pairs_hook=OrderedDict)
                for key in self.json_data:
                    if self.CONFIG.get(key, False):
                        self.CONFIG[key].update(self.json_data[key])
                    else:
                        self.CONFIG[key] = self.json_data[key]
        if args.config_string:
            loaded_config = json.loads(args.config_string)
            for key in loaded_config:
                if self.CONFIG.get(key, None):
                    self.CONFIG[key].update(loaded_config[key])
        need_save = self.setup_config(self.CONFIG)
        if need_save and args.config_file and args.update_config:
            with open(args.config_file, "w") as f:
                json.dump(self.CONFIG, f, indent=4, sort_keys=True)
        '''
        if args.ip_config:
            fxlib.load_peer_ip_config(args.ip_config)
        '''

    def setup_config(self, config):
        # validate config; return true if the config is modified
        if not config['CFx']['local_uid']:
            uid = ipoplib.uid_b2a(os.urandom(self.CONFIG['CFx']['uid_size'] // 2))
            self.CONFIG['CFx']["local_uid"] = uid
            return True  # modified
        return False

    def waitForShutdownEvent(self):
        """Block the main thread until Ctrl-C / SIGINT is received."""
        self.event = threading.Event()
        # Since signal.pause() is not avaialble on windows, use event.wait()
        # with a timeout to catch KeyboardInterrupt. Without timeout, it's
        # not possible to catch KeyboardInterrupt because event.wait() is
        # a blocking call without timeout. The if condition checks if the os
        # is windows.
        if os.name == 'nt':
            while True:
                try:
                    self.event.wait(1)
                except (KeyboardInterrupt, SystemExit) as e:
                    print("Controller shutdown event: {0}".format(str(e)))
                    break
        else:
            for sig in [signal.SIGINT]:
                signal.signal(sig, self.__handler)
            # signal.pause() sleeps until SIGINT is received
            signal.pause()

    def terminate(self):
        """Send TERMINATE to every module, join all threads, exit."""
        for key in self.CFxHandleDict:
            # create a special terminate CBT to terminate all the CMs
            terminateCBT = self.createCBT('CFx', key, 'TERMINATE', '')
            # clear all the queues and put the terminate CBT in all the queues
            self.CFxHandleDict[key].CMQueue.queue.clear()
            self.submitCBT(terminateCBT)
        # wait for the threads to process their current CBTs and exit
        print("waiting for timer threads to exit gracefully...")
        for handle in self.CFxHandleDict:
            if self.CFxHandleDict[handle].joinEnabled:
                self.CFxHandleDict[handle].CMThread.join()
            if self.CFxHandleDict[handle].timer_thread:
                self.CFxHandleDict[handle].timer_thread.join()
        sys.exit(0)

    def queryParam(self, ModuleName, ParamName=""):
        """Return CONFIG[ModuleName][ParamName], or None on any miss/error."""
        try:
            if ModuleName in [None, ""]:
                return None
            else:
                if ParamName == "":
                    return None
                else:
                    return self.CONFIG[ModuleName][ParamName]
        except Exception as error:
            print("Exception occurred while querying data."+str(error))
            return None
if __name__ == "__main__":
cf = CFX()
cf.initialize()
| cstapler/Controllers | controller/framework/CFx.py | Python | mit | 10,683 | [
"VisIt"
] | 72e3fcc0a15aa79c4c16118115446785a9fd40d3e76478ca117498166ad1965d |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import pyscf
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import grad
from pyscf.qmmm import itrf
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = ''' H -0.00000000 -0.000 0.
H -0.00000000 -0.000 1.
H -0.00000000 -0.82 0.
H -0.91000000 -0.020 0.''',
basis = 'cc-pvdz')
class KnowValues(unittest.TestCase):
    """Regression tests for QM/MM point-charge embedding (pyscf.qmmm.itrf).

    Reference values are pinned numbers from previous correct runs.
    """
    def test_energy(self):
        # RHF energy of the 4-H system with a single +1 MM charge at (0, 0.1, 0).
        coords = [(0.0,0.1,0.0)]
        charges = [1.00]
        mf = itrf.mm_charge(scf.RHF(mol), coords, charges)
        self.assertAlmostEqual(mf.kernel(), 2.0042702433049024, 9)
    def test_grad(self):
        # Analytic QM/MM nuclear gradient, cross-checked against a central
        # finite difference of the scanner energy along z of the first atom.
        coords = [(0.0,0.1,0.0)]
        charges = [1.00]
        mf = itrf.mm_charge(scf.RHF(mol), coords, charges).run()
        hfg = itrf.mm_charge_grad(grad.RHF(mf), coords, charges).run()
        self.assertAlmostEqual(numpy.linalg.norm(hfg.de), 26.978089280783195, 9)
        mfs = mf.as_scanner()
        e1 = mfs('''
 H              -0.00000000   -0.000    0.001
 H              -0.00000000   -0.000    1.
 H              -0.00000000   -0.82     0.
 H              -0.91000000   -0.020    0.
                 ''')
        e2 = mfs('''
 H              -0.00000000   -0.000    -0.001
 H              -0.00000000   -0.000    1.
 H              -0.00000000   -0.82     0.
 H              -0.91000000   -0.020    0.
                 ''')
        self.assertAlmostEqual((e1 - e2)/0.002*lib.param.BOHR, hfg.de[0,2], 5)
        # get_hcore must give identical results with pyscf.DEBUG on and off.
        bak = pyscf.DEBUG
        pyscf.DEBUG = 1
        ref = hfg.get_hcore()
        pyscf.DEBUG = 0
        v = hfg.get_hcore()
        self.assertAlmostEqual(abs(ref-v).max(), 0, 12)
        pyscf.DEBUG = bak
    def test_hcore_cart(self):
        # Core Hamiltonian (value and gradient shape) with Cartesian GTOs.
        # The local `mol` deliberately shadows the module-level molecule.
        coords = [(0.0,0.1,0.0)]
        charges = [1.00]
        mol = gto.M(
            verbose = 0,
            atom = '''C              0.000    -0.300    0.2
                      Ne             0.310     0.820    0.1''',
            basis = 'cc-pvdz',
            cart = True)
        mf = itrf.mm_charge(scf.RHF(mol), coords, charges)
        h = mf.get_hcore()
        self.assertAlmostEqual(lib.finger(h), -147.92831183612765, 9)
        h = mf.nuc_grad_method().get_hcore()
        self.assertEqual(h.shape, (3,30,30))
        self.assertAlmostEqual(lib.finger(h), -178.29768724184771, 9)
    def test_casci(self):
        # MM charges added either to the underlying SCF or to the CASCI
        # object directly must give the same total energy.
        mol = gto.Mole()
        mol.atom = ''' O                  0.00000000    0.00000000   -0.11081188
                   H                 -0.00000000   -0.84695236    0.59109389
                   H                 -0.00000000    0.89830571    0.52404783 '''
        mol.verbose = 0
        mol.basis = '6-31g'
        mol.build()
        coords = [(0.5,0.6,0.1)]
        charges = [-0.1]
        mf = itrf.add_mm_charges(scf.RHF(mol), coords, charges).run()
        mc = mcscf.CASCI(mf, 4, 4).run()
        self.assertAlmostEqual(mc.e_tot, -75.98156095286714, 9)
        mf = scf.RHF(mol).run()
        mc = itrf.add_mm_charges(mcscf.CASCI(mf, 4, 4), coords, charges).run()
        self.assertAlmostEqual(mc.e_tot, -75.98156095286714, 9)
    def test_casscf(self):
        # Same consistency check as test_casci, but for CASSCF (looser
        # tolerance because of the orbital optimization).
        mol = gto.Mole()
        mol.atom = ''' O                  0.00000000    0.00000000   -0.11081188
                   H                 -0.00000000   -0.84695236    0.59109389
                   H                 -0.00000000    0.89830571    0.52404783 '''
        mol.verbose = 0
        mol.basis = '6-31g'
        mol.build()
        coords = [(0.5,0.6,0.1)]
        charges = [-0.1]
        mf = itrf.add_mm_charges(scf.RHF(mol), coords, charges).run()
        mc = mcscf.CASSCF(mf, 4, 4).run()
        self.assertAlmostEqual(mc.e_tot, -76.0461574155984, 7)
        mf = scf.RHF(mol).run()
        mc = itrf.add_mm_charges(mcscf.CASSCF(mf, 4, 4), coords, charges).run()
        self.assertAlmostEqual(mc.e_tot, -76.0461574155984, 7)
# Run the full test suite when executed as a script.
if __name__ == "__main__":
    print("Full Tests for qmmm")
    unittest.main()
| gkc1000/pyscf | pyscf/qmmm/test/test_itrf.py | Python | apache-2.0 | 4,809 | [
"PySCF"
] | f4f706e1134c1ac76f269cbd3ab1035c5212fabe33f0a7813d27d1b49f77cec7 |
from galaxy.webapps.galaxy.controllers.user import *
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/tool_shed/controllers/user.py | Python | gpl-3.0 | 53 | [
"Galaxy"
] | c22e44b073b82f1c1c6405fedc853294077037ac357e7f361ca4a1dd0b6e0b7e |
#!python3
# File skipper.py
class SkipObject:
    """Iterable wrapper over a sequence that yields every other item.

    Each call to iter() returns a fresh, independent SkipIterator, so
    nested loops over the same SkipObject do not interfere.
    """
    def __init__(self, wrapped):                # Save item to be used
        self.wrapped = wrapped
    def __iter__(self):
        return SkipIterator(self.wrapped)       # New iterator each time
class SkipIterator:
    """Iterator yielding items at offsets 0, 2, 4, ... of a sequence.

    Each instance keeps its own position, so several iterators over the
    same underlying sequence advance independently.
    """
    def __init__(self, wrapped):
        self.wrapped = wrapped          # Sequence being traversed
        self.offset = 0                 # Next index to yield
    def __iter__(self):
        # Fix: iterators must themselves be iterable (iterator protocol).
        # Without this, `for x in SkipIterator(...)` / list(...) raised
        # TypeError even though __next__ was defined.
        return self
    def __next__(self):
        if self.offset >= len(self.wrapped):    # Terminate iterations
            raise StopIteration
        item = self.wrapped[self.offset]        # else return and skip
        self.offset += 2
        return item
if __name__ == '__main__':
    # Demo: skip every other character of a short string.
    letters = 'abcdef'
    skipper = SkipObject(letters)       # Wrap the sequence
    it = iter(skipper)                  # Obtain an explicit iterator
    print(next(it), next(it), next(it))  # Offsets 0, 2, 4
    # Nested loops each get their own iterator with independent state.
    for first in skipper:
        for second in skipper:
            print(first + second, end=' ')
| simontakite/sysadmin | pythonscripts/learningPython/skipper.py | Python | gpl-2.0 | 1,209 | [
"VisIt"
] | edc907a5276876f2d36878d9897272f2f424849f9490704c386c677d1fc2c3a0 |
#!/usr/bin/env python
import os
from setuptools import setup
# Read the Docs cannot compile numpy / C extensions, so all requirement
# lists are emptied when building there (detected via the READTHEDOCS env var).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    setup_requires = []
    install_requires = []
    tests_require = []
else:
    setup_requires = [
        'nose',
        'coverage',
    ]
    install_requires = [
        'six',
        'bistiming',
        'numpy',
    ]
    tests_require = [
        'scikit-learn',
        'scipy',
    ]
description = ("A Python serialization tool containing many serialization "
               "and deserialization shortcuts with timing.")
# Long description shown on PyPI: a pointer to GitHub plus the README contents.
long_description = """\
Please visit the `GitHub repository <https://github.com/ianlini/serialtime>`_
for more information.\n
"""
with open('README.rst') as fp:
    long_description += fp.read()
# Package metadata for setuptools.
setup(
    name='serialtime',
    version="0.1.1",
    description=description,
    long_description=long_description,
    author='ianlini',
    url='https://github.com/ianlini/serialtime',
    setup_requires=setup_requires,
    install_requires=install_requires,
    tests_require=tests_require,
    license="BSD 2-Clause License",
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: BSD License',
    ],
    test_suite='nose.collector',
    packages=[
        'serialtime',
    ],
    package_dir={
        'serialtime': 'serialtime',
    },
)
| ianlini/serialtime | setup.py | Python | bsd-2-clause | 1,493 | [
"VisIt"
] | 09cce3d349dc72607ab48df14ffcdc5c45ad1441d802beaff8fc35d590e229fd |
"""
Geosoft vox display handling, which manages the rendering of a `geosoft.gxpy.vox.Vox` in a 3d view.
:Classes:
:`VoxDisplay`: 3D visualization of a vox, which can be placed `geosoft.gxpy.view.View_3d`
:Constants:
:ZONE_DEFAULT: 0
:ZONE_LINEAR: 1
:ZONE_NORMAL: 2
:ZONE_EQUALAREA: 3
:ZONE_SHADE: 4
:ZONE_LOGLINEAR: 5
:ZONE_LAST: 6
:RENDER_FILL: 0
:RENDER_EDGE: 1
:RENDER_FILL_EDGE: 2
:RENDER_SMOOTH: 3
.. seealso:: `geosoft.gxpy.vox.Vox`, `geosoft.gxpy.view.View_3d`, `geosoft.gxapi.GXVOXD`
.. note::
Regression tests provide usage examples:
`vox_display tests <https://github.com/GeosoftInc/gxpy/blob/master/geosoft/gxpy/tests/test_vox_display.py>`_
.. versionadded:: 9.3.1
"""
import os
import geosoft
import geosoft.gxapi as gxapi
from . import gx
from . import view as gxview
from . import group as gxgroup
from . import vox as gxvox
from . import map as gxmap
__version__ = geosoft.__version__
def _t(s):
    # Translate a user-facing message string via the gxpy i18n machinery.
    return geosoft.gxpy.system.translate(s)
class VoxDisplayException(geosoft.GXRuntimeError):
    """
    Exception raised for errors in :mod:`geosoft.gxpy.vox_display`.

    .. versionadded:: 9.2
    """
    pass
# Colour-distribution (zone) methods for solid vox displays
# (see VoxDisplay.solid / VoxDisplay.vector):
ZONE_DEFAULT = 0    # as set by user global default settings
ZONE_LINEAR = 1     # linearly distributed
ZONE_NORMAL = 2     # normal (Gaussian) distribution
ZONE_EQUALAREA = 3  # each colour occupies an equal area on the image
ZONE_SHADE = 4
ZONE_LOGLINEAR = 5  # logarithmic linear distribution
ZONE_LAST = 6       # last-used colouring for this vox
# Rendering modes (see VoxDisplay.render_mode):
RENDER_FILL = 0
RENDER_EDGE = 1
RENDER_FILL_EDGE = 2
RENDER_SMOOTH = 3
class VoxDisplay:
    """
    Creation and handling of vox displays. Vox displays can be placed into a 3D view for display.

    :Constructors:

        :`solid`: create as a solid, each cell colored from a `geosoft.gxpy.group.Color_map`
        :`vector`: create as a vector voxel as vectors colored from a `geosoft.gxpy.group.Color_map`
        :`gxapi_gxvoxd`: create from an existing `geosoft.gxapi.GXVOXD` instance

    .. versionadded:: 9.3.1
    """

    def __repr__(self):
        return "{}({})".format(self.__class__, self.__dict__)

    def __str__(self):
        return self.name

    def __enter__(self):
        return self

    def __exit__(self, _type, _value, _traceback):
        self.__del__()

    def __del__(self):
        # Guard with hasattr: __init__ may have raised before _close existed.
        if hasattr(self, '_close'):
            self._close()

    def _close(self):
        # Release GX handles and unregister from the gx resource tracker.
        if hasattr(self, '_open'):
            if self._open:
                gx.pop_resource(self._open)
                self._gxvoxd = None
                self._vox = None
                self._open = None

    def __init__(self, vox, name=None):
        # Base constructor; use the `solid`/`vector`/`gxapi_gxvoxd`
        # classmethods to create usable instances.
        self._gxvoxd = None
        self._vox = vox
        if name is None:
            if vox is not None:
                name = vox.name
        self._name = name
        self._vector = False
        self._vector_cone_specs = (1., 4., 0.25, 5000)
        self._open = gx.track_resource(self.__class__.__name__, name)

    @classmethod
    def solid(cls,
              vox,
              color_map=None,
              zone=ZONE_DEFAULT,
              contour=None):
        """
        Create a solid colored vox_display from a `geosoft.gxpy.vox.Vox` instance.

        :param vox:         `geosoft.gxpy.vox.Vox` instance
        :param color_map:   `gxpy.group.Color_map` instance, or the name of a file, which may be
                            `.tbl`, `.zon`, `.itr`, or `.agg`.
        :param zone:        Colour distribution method:

            =================== ==================================================
            ZONE_DEFAULT        as set by user global default settings
            ZONE_LINEAR         linearly distributed
            ZONE_NORMAL         normal (Gaussian) distribution
            ZONE_EQUALAREA      each color will occupy an equal area on the image
            ZONE_LOGLINEAR      logarithmic linear distribution
            ZONE_LAST           last used coloring for this vox
            =================== ==================================================

        :param contour:     break colours on even multiples of contour

        .. versionadded:: 9.3.1
        """
        voxd = cls(vox)
        if (color_map is None) or (isinstance(color_map, str)):
            color_map = geosoft.gxpy.group.Color_map(color_map)
        color_map_file = color_map.save_file()
        if contour is None:
            contour = gxapi.rDUMMY
        voxd._gxvoxd = gxapi.GXVOXD.create(vox.gxvox, color_map_file, zone, contour)
        return voxd

    @classmethod
    def vector(cls,
               vox,
               vector_cone_specs=(1., 4., 0.25, 5000),
               color_map=None,
               zone=ZONE_DEFAULT,
               contour=None):
        """
        Create a vector symbol vox_display from a `geosoft.gxpy.vox.Vox` instance.

        :param vox:                 `geosoft.gxpy.vox.Vox` instance
        :param vector_cone_specs:   Vector plotting specs
                                    (scale_cell_ratio, height_base_ratio, base_cell_ratio, max_cones).
                                    Default is (1., 4., 0.25, 5000). See `vector_cone_specs` property.
        :param color_map:           `gxpy.group.Color_map` instance, or the name of a file, which may be
                                    `.tbl`, `.zon`, `.itr`, or `.agg`.
        :param zone:                Colour distribution method:

            ::

                ZONE_DEFAULT        as set by user global default settings
                ZONE_LINEAR         linearly distributed
                ZONE_NORMAL         normal (Gaussian) distribution
                ZONE_EQUALAREA      each color will occupy an equal area on the image
                ZONE_LOGLINEAR      logarithmic linear distribution
                ZONE_LAST           last used coloring for this vox

        :param contour:             break colours on even multiples of contour

        .. versionadded:: 9.3.1
        """
        if not vox.is_vectorvox:
            raise VoxDisplayException(_t('vox must be a vectorvoxel to create a vector swarm'))
        voxd = VoxDisplay.solid(vox, color_map, zone, contour)
        voxd._vector = True
        voxd._vector_cone_specs = vector_cone_specs
        return voxd

    @classmethod
    def gxapi_gxvoxd(cls, gxapi_voxd, name=None):
        """
        Create a VoxDisplay instance from a `geosoft.gxapi.GXVOXD` or a `geosoft.gxapi.GXVECTOR3D` instance.

        :param gxapi_voxd:  `geosoft.gxapi.VOXD` or `geosoft.gxapi.GXVECTOR3D` instance
        :param name:        name of the voxel, required for a vector voxel.

        .. versionadded 9.3.1
        """
        if isinstance(gxapi_voxd, gxapi.GXVOXD):
            if name is None:
                name = gxapi.str_ref()
                gxapi_voxd.get_name(name)
                name = name.value
        else:
            if not name:
                raise VoxDisplayException(_t('a name is required to open a GXVECTOR3D object'))
        try:
            vox = gxvox.Vox.open(name)
        except Exception:
            # The underlying vox file may not be accessible; keep only the name.
            vox = None
            name = os.path.splitext(os.path.basename(name))[0]
        voxd = cls(vox, name=name)
        voxd._gxvoxd = gxapi_voxd
        return voxd

    @property
    def vox(self):
        """ `geosoft.gxpy.vox.Vox` instance"""
        return self._vox

    @property
    def name(self):
        """ instance name, same as the contained Vox name"""
        return self._name

    @property
    def unit_of_measure(self):
        """Unit of data measurement for the contained vox data."""
        return self.color_map.unit_of_measure

    @property
    def is_vector(self):
        """True if this is a vector style display"""
        return self._vector

    @property
    def vector_cone_specs(self):
        """
        Vector plotting specs: (scale_cell_ratio, height_base_ratio, base_cell_ratio, max_cones). Can be set.

        scale_cell_ratio scales the maximum cone length to the size of the smallest cell. If None, default is 1.

        height_base_ratio is the ratio of the cone height to the base size. If None, default is 4.

        base_cell_ratio is the maximum base size relative to the minimum cell size. If None, default is 0.25.

        max_cones is the maximum number of cones to draw. Voxel is decimated to limit the cones. None to plot all
        cones, though typically this is limited to about 2000 to improve display performance.

        .. versionadded:: 9.3.1
        """
        return self._vector_cone_specs

    @vector_cone_specs.setter
    def vector_cone_specs(self, specs):
        # Normalize each spec, substituting documented defaults for
        # None/non-positive values.
        sc, hb, bc, mx = specs
        if sc is None or sc <= 0.:
            sc = 1.0
        if hb is None or hb <= 0.:
            hb = 4.
        if bc is None or bc <= 0.:
            bc = 0.25
        if mx is not None and mx <= 0:
            mx = None
        self._vector_cone_specs = (sc, hb, bc, mx)

    @property
    def draw_controls(self):
        """
        Vox drawing settings, returned as a tuple:

        (box_on, opacity, extent) as (boolean, float, (min_x, min_y, min_z, max_x, max_y, max_z))

        Can be set.  Always (None, None, None) for a vector display.

        .. versionadded:: 9.3.1
        """
        if self.is_vector:
            return None, None, None
        box = gxapi.int_ref()
        trans = gxapi.float_ref()
        x0 = gxapi.float_ref()
        x1 = gxapi.float_ref()
        y0 = gxapi.float_ref()
        y1 = gxapi.float_ref()
        z0 = gxapi.float_ref()
        z1 = gxapi.float_ref()
        self.gxvoxd.get_draw_controls(box, trans, x0, y0, z0, x1, y1, z1)
        return bool(box.value), trans.value, (x0.value, y0.value, z0.value, x1.value, y1.value, z1.value)

    @draw_controls.setter
    def draw_controls(self, controls):
        if self.is_vector:
            raise VoxDisplayException(_t('cannot set draw controls for a vector display'))
        box, trans, extent = controls
        x0, y0, z0, x1, y1, z1 = extent
        self.gxvoxd.set_draw_controls(box, trans, x0, y0, z0, x1, y1, z1)

    @property
    def render_mode(self):
        """Render mode, one of RENDER_FILL, RENDER_EDGE, RENDER_FILL_EDGE or RENDER_SMOOTH. Can be set."""
        rm = gxapi.int_ref()
        self.gxvoxd.get_render_mode(rm)
        return rm.value

    @render_mode.setter
    def render_mode(self, mode):
        if mode not in (RENDER_FILL, RENDER_EDGE, RENDER_FILL_EDGE, RENDER_SMOOTH):
            raise VoxDisplayException(_t('Invalid render mode {}').format(mode))
        self.gxvoxd.set_render_mode(mode)

    @property
    def gxvoxd(self):
        """The :class:`geosoft.gxapi.GXVOXD` instance handle, None for a vector display."""
        return self._gxvoxd

    @property
    def is_thematic(self):
        """True if this is a thematic vox display"""
        if self.is_vector:
            return False
        return bool(self.gxvoxd.is_thematic())

    @property
    def opacity(self):
        """Opacity between 0. (invisible) and 1. (opaque) can be set."""
        return self.draw_controls[1]

    @opacity.setter
    def opacity(self, t):
        controls = list(self.draw_controls)
        controls[1] = t
        self.draw_controls = controls

    @property
    def color_map(self):
        """Return the colour map for this vox"""
        itr = gxapi.GXITR.create()
        self.gxvoxd.get_itr(itr)
        cmap = geosoft.gxpy.group.Color_map(itr)
        cmap.title = self.name
        if self.vox:
            cmap.unit_of_measure = self.vox.unit_of_measure
        return cmap

    @property
    def shell_limits(self):
        """
        The data limits of the visible data shell for scalar data. Can be set.

        returns: (min, max) limits, data outside this range is transparent, None for no limit

        .. versionadded 9.3.1
        """
        vmin = gxapi.float_ref()
        vmax = gxapi.float_ref()
        self.gxvoxd.get_shell_controls(vmin, vmax)
        vmin = vmin.value
        vmax = vmax.value
        # Map GX dummy values back to Python None.
        if vmin == gxapi.rDUMMY:
            vmin = None
        if vmax == gxapi.rDUMMY:
            vmax = None
        return vmin, vmax

    @shell_limits.setter
    def shell_limits(self, limits):
        vmin, vmax = limits
        if vmin is None:
            vmin = gxapi.rDUMMY
        if vmax is None:
            vmax = gxapi.rDUMMY
        self.gxvoxd.set_shell_controls(vmin, vmax)

    def view_3d(self, file_name=None, overwrite=True, plane_2d=False):
        """
        Create a 3d view (`geosoft.gxpy.view.View_3d`) from the instance.

        :param file_name:   the name of a file for the 3d view. If None a temporary 3d view created.
        :param overwrite:   True to overwrite existing file
        :param plane_2d:    True to keep the 2D plane.  Only keep it if you intend to draw on it otherwise a grey
                            plane will appear in the view.

        .. versionadded:: 9.3
        """
        v3d = gxview.View_3d.new(file_name, overwrite=overwrite)
        gxgroup.VoxDisplayGroup.new(v3d, self)
        if not plane_2d:
            v3d.delete_plane(0)

        return v3d

    def figure_map(self, file_name=None, overwrite=True,
                   title=None, legend_label=None,
                   features=('LEGEND', 'NEATLINE'), **kwargs):
        """
        Create a figure view file from the instance.

        :param file_name:       the name of a file for the 3d view.  If None a temporary 3d view created.
        :param overwrite:       True to overwrite existing file
        :param title:           Title added to the image
        :param legend_label:    If plotting a legend make this the legend title.  The default is the title in the
                                first aggregate layer colour map.
        :param features:        list of features to place on the map, default is ('SCALE', 'LEGEND', 'NEATLINE')

                                    =========== =========================================
                                    'LEGEND'    show the colour legend
                                    'NEATLINE'  draw a neat-line around the image
                                    =========== =========================================

        :param kwargs:          passed to `geosoft.gxpy.map.Map.new`

        .. versionadded:: 9.3
        """

        # uppercase features, use a dict so we pop things we use and report error
        if isinstance(features, str):
            features = (features,)
        feature_list = {}
        if features is not None:
            for f in features:
                feature_list[f.upper()] = None
        features = list(feature_list.keys())

        # setup margins
        if not ('margins' in kwargs):

            bottom_margin = 1.0
            if title:
                bottom_margin += len(title.split('\n')) * 1.0

            right_margin = 1
            if 'LEGEND' in feature_list:
                right_margin += 3.5
            kwargs['margins'] = (1, right_margin, bottom_margin, 1)

        gmap = gxmap.Map.figure((0, 0, 100, 100),
                                file_name=file_name,
                                features=features,
                                title=title,
                                overwrite=overwrite,
                                **kwargs)

        with gxview.View.open(gmap, "data") as v:
            if 'LEGEND' in features:
                gxgroup.legend_color_bar(v, 'legend',
                                         title=legend_label,
                                         location=(1, 0),
                                         cmap=self.color_map)

        # NOTE(review): this View is not closed via a context manager like the
        # one above — confirm whether extent_map_cm() releases the handle.
        area = gxview.View.open(gmap, gmap.current_data_view).extent_map_cm()
        # Convert the cm extent to mm for the linked 3D view placement.
        area = (area[0] * 10., area[1] * 10., area[2] * 10., area[3] * 10.)

        gmap.create_linked_3d_view(self.view_3d(), area_on_map=area)

        return gmap
| GeosoftInc/gxpy | geosoft/gxpy/vox_display.py | Python | bsd-2-clause | 15,566 | [
"Gaussian"
] | 99d1974296326d577fa3c34db3660128cb3dc62c34c1fbdb4e457956c3eac895 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
    """Build vtkPlanes two ways (camera frustum, dataset bounds), generate a
    convex hull from each, and render the results side by side."""
    colors = vtk.vtkNamedColors()

    planes = list()
    titles = list()

    # Using frustum planes.
    titles.append('Using frustum planes')
    camera = vtk.vtkCamera()
    planesArray = [0] * 24
    camera.GetFrustumPlanes(1, planesArray)
    planes.append(vtk.vtkPlanes())
    planes[0].SetFrustumPlanes(planesArray)

    # Using bounds.
    titles.append('Using bounds')
    sphereSource = vtk.vtkSphereSource()
    sphereSource.Update()
    bounds = [0] * 6
    sphereSource.GetOutput().GetBounds(bounds)
    planes.append(vtk.vtkPlanes())
    planes[1].SetBounds(bounds)

    # At this point we have the planes created by both of the methods above.
    # You can do whatever you want with them.

    # For visualisation we will produce an n-sided convex hull
    # and visualise it.

    # Create a common text property.
    textProperty = vtk.vtkTextProperty()
    textProperty.SetFontSize(16)
    textProperty.SetJustificationToCentered()

    renWin = vtk.vtkRenderWindow()
    renWin.SetSize(600, 600)
    renWin.SetWindowName("Planes")

    iRen = vtk.vtkRenderWindowInteractor()
    iRen.SetRenderWindow(renWin)

    # One pipeline (hull -> polydata -> mapper -> actor -> renderer -> label)
    # per plane set.
    hulls = list()
    pds = list()
    mappers = list()
    actors = list()
    renderers = list()
    textMappers = list()
    textActors = list()
    for i in range(0, len(planes)):
        hulls.append(vtk.vtkHull())
        hulls[i].SetPlanes(planes[i])

        pds.append(vtk.vtkPolyData())

        # To generate the convex hull we supply a vtkPolyData object and a bounding box.
        # We define the bounding box to be where we expect the resulting polyhedron to lie.
        # Make it a generous fit as it is only used to create the initial
        # polygons that are eventually clipped.
        hulls[i].GenerateHull(pds[i], -200, 200, -200, 200, -200, 200)

        mappers.append(vtk.vtkPolyDataMapper())
        mappers[i].SetInputData(pds[i])

        actors.append(vtk.vtkActor())
        actors[i].SetMapper(mappers[i])
        actors[i].GetProperty().SetColor(colors.GetColor3d("Moccasin"))
        actors[i].GetProperty().SetSpecular(0.8)
        actors[i].GetProperty().SetSpecularPower(30)

        renderers.append(vtk.vtkRenderer())
        renderers[i].AddActor(actors[i])

        textMappers.append(vtk.vtkTextMapper())
        textMappers[i].SetInput(titles[i])
        textMappers[i].SetTextProperty(textProperty)

        textActors.append(vtk.vtkActor2D())
        textActors[i].SetMapper(textMappers[i])
        textActors[i].SetPosition(100, 10)
        renderers[i].AddViewProp(textActors[i])

        renWin.AddRenderer(renderers[i])

    # Setup the viewports
    xGridDimensions = 2
    yGridDimensions = 1
    rendererSize = 300
    renWin.SetSize(rendererSize * xGridDimensions, rendererSize * yGridDimensions)
    for row in range(0, yGridDimensions):
        for col in range(0, xGridDimensions):
            index = row * xGridDimensions + col

            # (xmin, ymin, xmax, ymax)
            viewport = [float(col) / xGridDimensions,
                        float(yGridDimensions - (row + 1)) / yGridDimensions,
                        float(col + 1) / xGridDimensions,
                        float(yGridDimensions - row) / yGridDimensions]

            if index > (len(actors) - 1):
                # Add a renderer even if there is no actor.
                # This makes the render window background all the same color.
                ren = vtk.vtkRenderer()
                ren.SetBackground(colors.GetColor3d("DarkSlateGray"))
                ren.SetViewport(viewport)
                renWin.AddRenderer(ren)
                continue

            renderers[index].SetViewport(viewport)
            renderers[index].SetBackground(colors.GetColor3d("DarkSlateGray"))
            renderers[index].ResetCamera()
            renderers[index].GetActiveCamera().Azimuth(30)
            renderers[index].GetActiveCamera().Elevation(-30)
            renderers[index].ResetCameraClippingRange()

    iRen.Initialize()
    renWin.Render()
    iRen.Start()
# Entry point when run as a script.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/GeometricObjects/Planes.py | Python | apache-2.0 | 4,142 | [
"VTK"
] | fdc7b81eacd86dbeda41b15ddb2e5915e132bc619db661f76a161037f834e983 |
#!/usr/bin/env python
# File: plot_icd_vs_mass.py
# Created on: Mon 12 Mar 2012 11:50:09 AM CDT
# Last Change: Thu Feb 28 17:11:01 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
def plot_icd_vs_mass():
    """Scatter-plot ICD[F775W, F160W] (%) against stellar mass (log x-axis)
    for galaxies with I-band S/N > 30, then show the figure."""
    galaxies = mk_galaxy_struc()
    # Keep only high signal-to-noise galaxies.
    galaxies = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
    # Mass vs ICD plot I-H
    f1 = pyl.figure(1,figsize=(6,4))
    f1s1 = f1.add_subplot(111)

    # Upper and Lower limit arrow verts (kept for the commented-out
    # clipping markers below).
    arrowup_verts = [[0.,0.], [-1., -1], [0.,0.], [0.,-2.],[0.,0.], [1, -1]]
    arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.], [0.,2.],[0.,0.], [1, 1]]

    for galaxy in galaxies:
        f1s1.scatter(galaxy.Mass, galaxy.ICD_IH*100., c='r', s=50)
        #if galaxies[i].ICD_IH > 0.25:
        #    f1s1.scatter(galaxies[i].Mass,0.25*100.,s=100,marker=None,
        #    verts=arrowup_verts)
        #if galaxies[i].ICD_IH < -0.05:
        #    f1s1.scatter(galaxies[i].Mass,-0.05*100.,s=100,marker=None,
        #    verts=arrowdown_verts)

    ############
    # FIGURE 1 #
    ############
    pyl.figure(1)
    #f1s1.axvspan(3e7,1e9,facecolor='#FFFDD0',ec='None',zorder=0)
    #f1s1.axvspan(1e11,1e12,facecolor='#FFFDD0',ec='None',zorder=0)
    # Axis limits and a zero reference line.
    f1s1.set_xscale('log')
    f1s1.set_xlim(3e7,1e12)
    f1s1.set_ylim(-5,25)
    f1s1.hlines(0.0,3e7,1e12)

    #f1s2.set_xscale('log')
    #f1s2.set_xlim(3e7,1e12)
    #f1s2.set_ylim(-0.1,0.4)
    #f1s2.hlines(0.0,3e7,1e12)

    f1s1.set_xlabel(r"Mass ($M_{\odot})$")
    f1s1.set_ylabel("ICD[F775W, F160W] (%)")

    pyl.tight_layout()
    pyl.show()
# Entry point when run as a script.
if __name__=='__main__':
    plot_icd_vs_mass()
| boada/ICD | sandbox/legacy_plot_code/plot_icd_vs_mass_p1ar.py | Python | mit | 1,673 | [
"Galaxy"
] | 6226571fcf6e749e071abd7406e0eeba8f91a6f7a988b54b18187a4a9874b4cb |
import os, sys
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
from CIP_PointsLabelling import CIP_PointsLabelling, CIP_PointsLabellingWidget, CIP_PointsLabellingLogic
# Note: this is necessary in development because of the python path dependency
# try:
# from CIP_PointsLabelling import CIP_PointsLabelling, CIP_PointsLabellingWidget, CIP_PointsLabellingLogic
# except:
# import inspect
# path = os.path.dirname(inspect.getfile(inspect.currentframe()))
# path = os.path.normpath(os.path.join(path, '../CIP_PointsLabelling')) # We assume that CIP_Common is a sibling folder of the one that contains this module
# sys.path.append(path)
# from CIP_PointsLabelling import CIP_PointsLabelling, CIP_PointsLabellingWidget, CIP_PointsLabellingLogic
from CIP_ParenchymaSubtypeTrainingLogic.SubtypingParameters import SubtypingParameters
#
# CIP_ParenchymaSubtypeTraining
class CIP_ParenchymaSubtypeTraining(CIP_PointsLabelling):
    """Slicer module descriptor for the Parenchyma Subtype Training module.

    Registers the module metadata (title, category, dependencies, credits)
    with the Slicer module framework.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "Parenchyma Subtype Training"
        self.parent.categories = SlicerUtil.CIP_ModulesCategory
        self.parent.dependencies = [SlicerUtil.CIP_ModuleName, "CIP_PointsLabelling"]
        self.parent.contributors = ["Jorge Onieva (jonieva@bwh.harvard.edu)", "Applied Chest Imaging Laboratory",
                                    "Brigham and Women's Hospital"]
        # Fixed defects in the displayed help text: the tutorial URL was
        # missing the leading 'h' ("ttps://") and carried a stray '+'
        # diff artifact at the start of the second line.
        self.parent.helpText = """Training parenchyma subtypes done quickly by an expert<br>
        A quick tutorial of the module can be found <a href='https://chestimagingplatform.org/files/chestimagingplatform/files/parenchymasubtypetraining_tutorial.pdf'>here</a>"""
        self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
#
# CIP_ParenchymaSubtypeTrainingWidget
#
class CIP_ParenchymaSubtypeTrainingWidget(CIP_PointsLabellingWidget):
    """GUI widget for the Parenchyma Subtype Training module.

    Inherits the common points-labelling GUI and fills the radio-button
    area with type / subtype / region / artifact selectors.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        CIP_PointsLabellingWidget.__init__(self, parent)

    def _initLogic_(self):
        """Create a new logic object for the plugin"""
        self.logic = CIP_ParenchymaSubtypeTrainingLogic()

    def setup(self):
        """This is called one time when the module GUI is initialized
        """
        CIP_PointsLabellingWidget.setup(self)

        # Part of the GUI will be inherited. We just fill the radio buttons area
        # Radio buttons frame
        self.radioButtonsLayout = qt.QHBoxLayout(self.radioButtonsFrame)
        self.typesFrame = qt.QFrame()
        self.radioButtonsLayout.addWidget(self.typesFrame)
        self.typesLayout = qt.QVBoxLayout(self.typesFrame)

        labelsStyle = "font-weight: bold; margin: 0 0 10px 0px;"
        # Types Radio Buttons
        typesLabel = qt.QLabel("Select type")
        typesLabel.setStyleSheet(labelsStyle)
        self.typesLayout.addWidget(typesLabel)
        self.typesRadioButtonGroup = qt.QButtonGroup()
        for key in self.logic.params.mainTypes.keys():
            rbitem = qt.QRadioButton(self.logic.params.getMainTypeLabel(key))
            self.typesRadioButtonGroup.addButton(rbitem, key)
            self.typesLayout.addWidget(rbitem)
        self.typesRadioButtonGroup.buttons()[0].setChecked(True)

        # Subtypes Radio buttons
        # The content will be loaded dynamically every time the main type is modified
        self.subtypesFrame = qt.QFrame()
        self.radioButtonsLayout.addWidget(self.subtypesFrame)
        self.subtypesLayout = qt.QVBoxLayout(self.subtypesFrame)
        subtypesLabel = qt.QLabel("Select subtype")
        subtypesLabel.setStyleSheet(labelsStyle)
        self.subtypesLayout.addWidget(subtypesLabel)
        self.subtypesLayout.setAlignment(SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.subtypesRadioButtonGroup = qt.QButtonGroup()
        # Add all the subtypes (we will filter later in "updateState" function)
        for key in self.logic.params.subtypes.keys():
            # Build the description
            rbitem = qt.QRadioButton(self.logic.params.getSubtypeLabel(key))
            self.subtypesRadioButtonGroup.addButton(rbitem, key)
            self.subtypesLayout.addWidget(rbitem, SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.subtypesLayout.addStretch()

        # Region radio buttons
        self.regionsFrame = qt.QFrame()
        self.radioButtonsLayout.addWidget(self.regionsFrame)
        self.regionsLayout = qt.QVBoxLayout(self.regionsFrame)
        regionsLabel = qt.QLabel("Select region")
        regionsLabel.setStyleSheet(labelsStyle)
        self.regionsLayout.addWidget(regionsLabel)
        self.regionsLayout.setAlignment(SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.regionsLayout.setStretch(0, 0)
        self.regionsRadioButtonGroup = qt.QButtonGroup()
        self.regionsFrame = qt.QFrame()
        # Add all the regions
        for key in self.logic.params.regions.keys():
            # Build the description
            rbitem = qt.QRadioButton(self.logic.params.getRegionLabel(key))
            self.regionsRadioButtonGroup.addButton(rbitem, key)
            self.regionsLayout.addWidget(rbitem, SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.regionsLayout.addStretch()
        self.regionsRadioButtonGroup.buttons()[0].setChecked(True)

        # Artifact radio buttons (Add them to the same layout as the type)
        self.separatorLabel = qt.QLabel("------------")
        labelsStyle = "margin: 5px 0 5px 0;"
        self.separatorLabel.setStyleSheet(labelsStyle)
        self.typesLayout.addWidget(self.separatorLabel)
        self.artifactsLabel = qt.QLabel("Select artifact")
        labelsStyle = "font-weight: bold; margin: 15px 0 10px 0;"
        self.artifactsLabel.setStyleSheet(labelsStyle)
        self.typesLayout.addWidget(self.artifactsLabel)
        self.artifactsRadioButtonGroup = qt.QButtonGroup()
        for artifactId in self.logic.params.artifacts.keys():
            rbitem = qt.QRadioButton(self.logic.params.getArtifactLabel(artifactId))
            self.artifactsRadioButtonGroup.addButton(rbitem, artifactId)
            self.typesLayout.addWidget(rbitem)
        self.artifactsRadioButtonGroup.buttons()[0].setChecked(True)

        self.typesLayout.setAlignment(SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.typesLayout.addStretch()

        # Connections
        self.typesRadioButtonGroup.connect("buttonClicked (QAbstractButton*)", self.__onTypesRadioButtonClicked__)
        self.subtypesRadioButtonGroup.connect("buttonClicked (QAbstractButton*)",
                                              self.__onSecondaryRadioButtonClicked__)
        self.regionsRadioButtonGroup.connect("buttonClicked (QAbstractButton*)", self.__onSecondaryRadioButtonClicked__)
        self.artifactsRadioButtonGroup.connect("buttonClicked (QAbstractButton*)",
                                               self.__onSecondaryRadioButtonClicked__)

        self.updateState()

    def cleanup(self):
        # No module-specific resources to release.
        pass

    def updateState(self):
        """ Refresh the markups state, activate the right fiducials list node (depending on the
        current selected type) and creates it when necessary
        :return:
        """
        # Load the subtypes for this type
        subtypesDict = self.logic.getSubtypes(self.typesRadioButtonGroup.checkedId())
        # Hide/Show the subtypes for this type
        for b in self.subtypesRadioButtonGroup.buttons():
            id = self.subtypesRadioButtonGroup.id(b)
            if id in subtypesDict:
                b.show()
            else:
                b.hide()
        # Check first element by default
        self.subtypesRadioButtonGroup.buttons()[0].setChecked(True)

        # Set the correct state for fiducials
        if self.currentVolumeLoaded is not None:
            typesList = (self.typesRadioButtonGroup.checkedId(), self.subtypesRadioButtonGroup.checkedId()
                         , self.regionsRadioButtonGroup.checkedId(), self.artifactsRadioButtonGroup.checkedId())
            self.logic.setActiveFiducialsListNode(self.currentVolumeLoaded, typesList)

    # def _getColorTable_(self):
    #     """ Color table for this module for a better labelmap visualization.
    #     This must be implemented by child classes"""
    #     colorTableNode = SlicerUtil.getNode("CIP_ILDClassification_ColorMap*")
    #     if colorTableNode is None:
    #         # Load the node from disk
    #         p = os.path.join(os.path.dirname(os.path.realpath(__file__)),
    #                          "Resources/CIP_ILDClassification_ColorMap.ctbl")
    #         colorTableNode = slicer.modules.colors.logic().LoadColorFile(p)
    #     return colorTableNode

    # def __onNewILDClassificationLabelmapLoaded__(self, labelmapNode, split1, split2):
    #     """ Load a new ILD classification labelmap volume.
    #     If the labelmap is a known labelmap type, set the right colors and opacity
    #     @param labelmapNode:
    #     """
    #     if SlicerUtil.isExtensionMatch(labelmapNode, "ILDClassificationLabelmap"):
    #         colorNode = self._getColorTable_()
    #         displayNode = labelmapNode.GetDisplayNode()
    #         displayNode.SetAndObserveColorNodeID(colorNode.GetID())
    #         # Change Opacity
    #         SlicerUtil.displayLabelmapVolume(labelmapNode.GetID())
    #         SlicerUtil.changeLabelmapOpacity(0.3)

    def __onTypesRadioButtonClicked__(self, button):
        """ One of the radio buttons has been pressed
        :param button:
        :return:
        """
        self.updateState()

    def __onSecondaryRadioButtonClicked__(self, button):
        """ One of the subtype radio buttons has been pressed
        :param button:
        :return:
        """
        selectedVolume = self.volumeSelector.currentNode()
        if selectedVolume is not None:
            typesList = (self.typesRadioButtonGroup.checkedId(), self.subtypesRadioButtonGroup.checkedId(),
                         self.regionsRadioButtonGroup.checkedId(), self.artifactsRadioButtonGroup.checkedId())
            self.logic.setActiveFiducialsListNode(selectedVolume, typesList)
# CIP_ParenchymaSubtypeTrainingLogic
#
class CIP_ParenchymaSubtypeTrainingLogic(CIP_PointsLabellingLogic):
    """Logic for the parenchyma subtype training module.

    Manages one markups (fiducials) node per volume and per
    type/subtype/region/artifact combination, and translates between a
    fiducial's description string and GeometryTopologyData Point metadata.
    The "typesList" used throughout is the tuple
    (typeId, subtypeId, regionId, artifactId).
    """
    def __init__(self):
        CIP_PointsLabellingLogic.__init__(self)
        # Parameters manager with the types/subtypes/regions/artifacts catalog
        self._params_ = SubtypingParameters()
    @property
    def _xmlFileExtensionKey_(self):
        """Overridden. Key of the dictionary of file conventions that will be used in this module"""
        return "ParenchymaTrainingFiducialsXml"
    @property
    def params(self):
        """Overridden. Params manager object"""
        if self._params_ is None:
            raise NotImplementedError("Object _params_ should be initialized in a child class")
        return self._params_
    def getSubtypes(self, typeId):
        """ Get all the subtypes for the specified type
        :param typeId: type id
        :return: Dictionary with Key=subtype_id and Value=tuple with subtypes features """
        return self.params.getSubtypes(typeId)
    # Accessors for the positional fields of a typesList tuple
    def getTypeId(self, typesList):
        """Main type id (position 0 of the typesList tuple)"""
        return typesList[0]
    def getSubtypeId(self, typesList):
        """Subtype id (position 1 of the typesList tuple)"""
        return typesList[1]
    def getRegionId(self, typesList):
        """Region id (position 2 of the typesList tuple)"""
        return typesList[2]
    def getArtifactId(self, typesList):
        """Artifact id (position 3 of the typesList tuple)"""
        return typesList[3]
    def getEffectiveType(self, typeId, subtypeId):
        """ Return the subtype id unless it's 0. In this case, return the main type id
        :param typeId: main type id (fallback)
        :param subtypeId: subtype id (0 means "Any", i.e. no specific subtype)
        :return: the id that effectively identifies the point's chest type
        """
        return typeId if subtypeId == 0 else subtypeId
    def _createFiducialsListNode_(self, nodeName, typesList):
        """ Overridden. Create a fiducials node based on the types list specified.
        Depending on the child class, the number of types-subtypes will change, so every child class should
        have its own implementation
        :param nodeName: name of the fiducial node, created like Subtype_Region_Artifact (Subtype could be a Main type)
        :param typesList: list of types (typeId, subtypeId, regionId, artifactId)
        :return: fiducials list node
        """
        fidListID = self.markupsLogic.AddNewFiducialNode(nodeName, slicer.mrmlScene)
        fidNode = slicer.mrmlScene.GetNodeByID(fidListID)
        displayNode = fidNode.GetDisplayNode()
        typeId = self.getTypeId(typesList)
        artifactId = self.getArtifactId(typesList)
        # The color will be based just in the main type and if it's an artifact
        displayNode.SetSelectedColor(self.params.getColor(typeId, artifactId))
        displayNode.SetTextScale(2)
        # Add an observer when a new markup is added
        fidNode.AddObserver(fidNode.PointPositionDefinedEvent, self.onMarkupAdded)
        return fidNode
    def setActiveFiducialsListNode(self, volumeNode, typesList, createIfNotExists=True):
        """ Overridden. Create a fiducials list node corresponding to this volume and this type list.
        The node name encodes volume name, type, region and (optionally) artifact.
        :param volumeNode: Scalar volume node
        :param typesList: list of types-subtypes. It can be a region-type-artifact or any other combination
        :param createIfNotExists: create the fiducials node if it doesn't exist yet for this subtype list
        :return: fiducials volume node (or None if volumeNode is None)
        """
        typeId = self.getTypeId(typesList)
        artifactId = self.getArtifactId(typesList)
        regionId = self.getRegionId(typesList)
        if volumeNode is not None:
            if artifactId == -1:
                # No artifact
                nodeName = "{}_fiducials_{}_{}".format(volumeNode.GetName(), typeId, regionId)
            else:
                # Artifact. Add the type of artifact to the node name
                nodeName = "{}_fiducials_{}_{}_{}".format(volumeNode.GetName(), typeId, regionId, artifactId)
            fid = SlicerUtil.getNode(nodeName)
            if fid is None and createIfNotExists:
                SlicerUtil.logDevelop("DEBUG: creating a new fiducials node: " + nodeName)
                fid = self._createFiducialsListNode_(nodeName, typesList)
                # Add the volume to the list of "managed" cases
                self.savedVolumes[volumeNode.GetName()] = False
            self.currentVolumeId = volumeNode.GetID()
            self.currentTypesList = typesList
            # Mark the node list as the active one
            self.markupsLogic.SetActiveListID(fid)
            return fid
    def getPointMetadataFromFiducialDescription(self, description):
        """
        Overridden. Get the main metadata for a GeometryTopologyObject Point object (region, type, feature, description)
        from a fiducial description
        :param description: fiducial description, formatted as "type_region_artifact"
        :return: (region, type, feature, description) tuple for a point initialization
        """
        spl = description.split("_")
        typeId = int(spl[0])
        regionId = int(spl[1])
        artifactId = int(spl[2])
        # Point description will not be used
        return (regionId, typeId, artifactId, None)
    def getMarkupLabel(self, typesList):
        """
        Overridden. Get the text that will be displayed in the fiducial for the corresponding types-subtypes combination.
        The format is:
        TypeAbbreviation[-RegionAbbreviation][-Artifact]
        :param typesList: tuple (type-subtype-region-artifact)
        :return: label string for this fiducial
        """
        typeId = self.getTypeId(typesList)
        subtypeId = self.getSubtypeId(typesList)
        regionId = self.getRegionId(typesList)
        artifactId = self.getArtifactId(typesList)
        if subtypeId == 0:
            # No subtype. Just add the general type description
            typeLabel = self.params.getMainTypeAbbreviation(typeId)
        else:
            # Initials of the subtype
            typeLabel = self.params.getSubtypeAbbreviation(subtypeId)
        # Region and artifact abbreviations are only appended when non-zero
        regionLabel = "-{}".format(self.params.getRegionAbbreviation(regionId)) if regionId != 0 else ""
        artifactLabel = "-{}".format(self.params.getArtifactAbbreviation(artifactId)) if artifactId != 0 else ""
        return typeLabel + regionLabel + artifactLabel
    def getTypesListFromXmlPoint(self, geometryTopologyDataPoint):
        """
        Overridden. Get a list of types that the module will use to operate from a Point object in a GeometryTopologyData object
        :param geometryTopologyDataPoint: GeometryTopologyData.Point object
        :return: tuple (typeId, subtypeId, regionId, artifactId)
        """
        subtype = geometryTopologyDataPoint.chest_type
        if subtype in list(self.params.mainTypes.keys()):
            # Main type. The subtype will be "Any"
            mainType = subtype
            subtype = 0
        else:
            mainType = self.params.getMainTypeForSubtype(subtype)
        return (mainType, subtype, geometryTopologyDataPoint.chest_region, geometryTopologyDataPoint.feature_type)
    def onMarkupAdded(self, markupListNode, event):
        """
        New markup node added. It will be renamed based on the type-subtype
        :param markupListNode: Markup LIST Node that was added
        :param event: VTK event id (unused)
        :return:
        """
        label = self.getMarkupLabel(self.currentTypesList)
        # Get the last added markup (there is no index in the event!)
        n = markupListNode.GetNumberOfFiducials()
        # Change the label
        markupListNode.SetNthMarkupLabel(n - 1, label)
        # Use the description to store the type of the fiducial that will be saved in
        # the GeometryTopolyData object
        currentTypeId = self.getTypeId(self.currentTypesList)
        currentSubTypeId = self.getSubtypeId(self.currentTypesList)
        currentRegionId = self.getRegionId(self.currentTypesList)
        currentArtifactId = self.getArtifactId(self.currentTypesList)
        markupListNode.SetNthMarkupDescription(n - 1,
                                               "{}_{}_{}".format(
                                                   self.getEffectiveType(currentTypeId, currentSubTypeId),
                                                   currentRegionId,
                                                   currentArtifactId))
        # Markup added. Mark the current volume as state modified
        # NOTE(review): savedVolumes is keyed by volume *name* in
        # setActiveFiducialsListNode but by currentVolumeId (a node ID) here --
        # confirm which key the saving/cleanup logic actually expects.
        self.savedVolumes[self.currentVolumeId] = False
class CIP_ParenchymaSubtypeTrainingTest(ScriptedLoadableModuleTest):
    """Placeholder test case for the CIP_ParenchymaSubtypeTraining module."""
    def setUp(self):
        """ Reset the application state. Clearing the MRML scene is enough here.
        """
        slicer.mrmlScene.Clear(0)
    def runTest(self):
        """Entry point that runs every test of this suite.
        """
        self.setUp()
        self.test_CIP_ParenchymaSubtypeTraining()
    def test_CIP_ParenchymaSubtypeTraining(self):
        # No real assertions yet; just report that the test is a stub
        self.delayDisplay('Test not implemented!')
| acil-bwh/SlicerCIP | Scripted/CIP_ParenchymaSubtypeTraining/CIP_ParenchymaSubtypeTraining.py | Python | bsd-3-clause | 19,171 | [
"VTK"
] | 94cac648aa74f2d89a1bbd209e5dcd08fd0cb015b22cc0e2e71c4142d040a968 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.base.PluginManager import PluginManager
from peacock.base.TabPlugin import TabPlugin
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from PyQt5.QtCore import pyqtSignal
from peacock.utils import ExeFinder
from .ExecuteOptionsPlugin import ExecuteOptionsPlugin
from .ExecuteRunnerPlugin import ExecuteRunnerPlugin
from .ConsoleOutputViewerPlugin import ConsoleOutputViewerPlugin
from peacock.Input.ExecutableInfo import ExecutableInfo
import os
class ExecuteTabPlugin(QWidget, PluginManager, TabPlugin):
    """
    The GUI for running an executable.
    It contains a JobRunner object that actually
    runs the process and it communicates via signals.
    Signals:
        executableInfoChanged: The executable changed. Argument is the new ExecutableInfo
        needInputFile: Emitted when this widget needs an input file written to disk. Argument is the path to the file to be written.
        startJob: Emitted when a job is started. Arguments are (bool, str, float)
            -- presumably whether CSV output is enabled, the input filename, and a
            numeric parameter forwarded from ExecuteRunnerPlugin; confirm against
            ExecuteRunnerPlugin.startJob.
    """
    # Qt signal declarations (class attributes turned into bound signals by PyQt)
    needInputFile = pyqtSignal(str)
    executableInfoChanged = pyqtSignal(ExecutableInfo)
    startJob = pyqtSignal(bool, str, float)
    @staticmethod
    def commandLineArgs(parser):
        """
        Add the option to specify the executable and method on the command line
        Input:
            parser[argparse.ArgumentParser]: The parse to add options to
        """
        group = parser.add_argument_group("Executable", "Finding or setting the initial executable")
        group.add_argument('-e', '--executable', dest='executable', type=str, help='Executable file')
        group.add_argument('--run', dest='auto_run', action='store_true', help='If an input file and executable are also given on the command line, then immediately run it.')
        group.add_argument('--no-exe-search', action='store_false', dest='exe_search', help='Do not do an automatic executable search')
        group.add_argument('-m', '--method',
                           dest='method',
                           choices=('opt', 'dbg', 'oprof', 'devel'),
                           help="Works the same as setting the $METHOD environment variable. This setting will take precedence over $METHOD")
    # NOTE(review): mutable default argument; safe only as long as neither
    # callers nor PluginManager ever mutate the list in place.
    def __init__(self, plugins=[ExecuteOptionsPlugin, ExecuteRunnerPlugin, ConsoleOutputViewerPlugin]):
        super(ExecuteTabPlugin, self).__init__(plugins=plugins)
        self.MainLayout = QVBoxLayout()
        self.setLayout(self.MainLayout)
        self.setup()
        # Wire sub-plugin signals to this tab's handlers / forwarded signals
        self.ExecuteOptionsPlugin.executableInfoChanged.connect(self.onExecutableInfoChanged)
        self.ExecuteRunnerPlugin.needInputFile.connect(self.needInputFile)
        self.ExecuteRunnerPlugin.needCommand.connect(self.onNeedCommand)
        self.ExecuteRunnerPlugin.startJob.connect(self.startJob)
        # Directory from which the automatic executable search starts
        self.search_from_dir = os.getcwd()
    def tabName(self):
        """Name shown on this tab in the main window."""
        return "Execute"
    def onExecutableInfoChanged(self, exe_info):
        """
        Executable has changed.
        Input:
            exe_info[ExecutableInfo]: New ExecutableInfo object
        """
        if exe_info.valid():
            self.executableInfoChanged.emit(exe_info)
        # Only allow running when the executable is valid
        self.ExecuteRunnerPlugin.runEnabled(exe_info.valid())
    def onNeedCommand(self):
        """Build the command line from the options plugin and hand it to the runner."""
        cmd, args = self.ExecuteOptionsPlugin.buildCommandWithNoInputFile()
        csv = self.ExecuteOptionsPlugin.csv_checkbox.isChecked()
        if self.ExecuteOptionsPlugin.test_checkbox.isChecked():
            args.append("--allow-test-objects")
        self.ExecuteRunnerPlugin.setCommand(cmd, args, csv)
    def onNumTimeStepsChanged(self, num_steps):
        """
        This will get auto connected to InputFileEditorWithMesh:numTimeStepsChanged signal
        Input:
            num_steps[int]: new number of time steps
        """
        self.ExecuteRunnerPlugin.onNumTimeStepsChanged(num_steps)
    def setExe(self, options):
        """
        Tries to find an executable.
        It first looks in the command line options.
        If not found it will search up the directory path.
        Input:
            options[argparse namespace]: The command line options as returned by ArgumentParser.parse_args()
        """
        # Make a relative --executable path absolute w.r.t. the start directory
        if options.executable and not os.path.isabs(options.executable):
            options.executable = os.path.abspath(os.path.join(options.start_dir, options.executable))
        exe_path = ExeFinder.getExecutablePath(options, start_dir=self.search_from_dir)
        if exe_path:
            self.ExecuteOptionsPlugin.setExecutablePath(exe_path)
    def initialize(self, options):
        """
        Initialize this widget.
        It will search kwargs for "cmd_line_options" to help initialize
        this object from arguments on the command line.
        """
        super(ExecuteTabPlugin, self).initialize(options)
        self.setExe(options)
        self.setEnabled(True)
    def clearRecentlyUsed(self):
        """
        Clear the recently used menus
        """
        self.ExecuteOptionsPlugin.clearRecentlyUsed()
    def addToMainMenu(self, menubar):
        """
        Adds menu entries specific to this tab to the menubar.
        """
        executeMenu = menubar.addMenu("E&xecute")
        self.ExecuteOptionsPlugin.addToMenu(executeMenu)
    def closing(self):
        """
        Gets called when the user tries to quit.
        Make sure things are saved before quitting.
        """
        self.ExecuteRunnerPlugin.closing()
if __name__ == "__main__":
    # Stand-alone demo: show the Execute tab in its own window.
    from PyQt5.QtWidgets import QApplication, QMainWindow
    from peacock.utils import Testing
    import argparse
    import sys
    parser = argparse.ArgumentParser(description='Execute tab')
    main_win = QMainWindow()
    ExecuteTabPlugin.commandLineArgs(parser)
    # Locate a MOOSE test executable to run against
    exe = Testing.find_moose_test_exe()
    def needInputFile(input_file):
        # Copy a known-good input file to wherever the widget asks for one
        this_dir = os.path.dirname(os.path.abspath(__file__))
        peacock_dir = os.path.dirname(this_dir)
        test_file = os.path.join(peacock_dir, "tests", "common", "transient.i")
        with open(test_file, "r") as fin:
            data = fin.read()
        with open(input_file, "w") as fout:
            fout.write(data)
    parsed = parser.parse_args(["-e", exe])
    qapp = QApplication(sys.argv)
    w = ExecuteTabPlugin()
    w.needInputFile.connect(needInputFile)
    main_win.setCentralWidget(w)
    menubar = main_win.menuBar()
    # Keep the menu inside the window (needed on macOS for this demo)
    menubar.setNativeMenuBar(False)
    w.addToMainMenu(menubar)
    main_win.show()
    w.initialize(parsed)
    sys.exit(qapp.exec_())
| nuclear-wizard/moose | python/peacock/Execute/ExecuteTabPlugin.py | Python | lgpl-2.1 | 6,818 | [
"MOOSE"
] | 59ea9a360c1d3b3c26a9c3e1000ec0485ef16241766c42910f34de95e8e5b363 |
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import chigger
# Fixed camera position/orientation so the rendered image is reproducible
camera = vtk.vtkCamera()
camera.SetViewUp(-0.01297019406812408, 0.87867984226827, 0.4772352762079132)
camera.SetPosition(10.331000991784688, -5.473421359648077, 10.483371124667542)
camera.SetFocalPoint(0.16947273724857123, 0.07124492441302266, -0.0015694043706061533)
# Read only the 'bottom' and 'top' boundaries from the Exodus file
reader = chigger.exodus.ExodusReader('../../input/mug_blocks_out.e', boundary=['bottom', 'top'])
# Render the 'convected' variable on the 'top' boundary with the coolwarm colormap
mug = chigger.exodus.ExodusResult(reader, block=None, boundary=['top'], variable='convected', cmap='coolwarm', camera=camera)
window = chigger.RenderWindow(mug, size=[300,300], test=True)
# Write the rendered image, then start the (test-mode) render loop
window.write('boundary.png')
window.start()
| liuwenf/moose | python/chigger/tests/exodus/blocks/boundary.py | Python | lgpl-2.1 | 1,540 | [
"MOOSE",
"VTK"
] | c24100b34a748cbeefcba0c9500760fbb01ce91c7e8bdb8dd580485d967c34f5 |
########################################################################
# File : HTCondorCEComputingElement.py
# Author : A.S.
########################################################################
"""HTCondorCE Computing Element
Allows direct submission to HTCondorCE Computing Elements with a SiteDirector Agent
Needs the condor grid middleware (condor_submit, condor_history, condor_q, condor_rm)
Configuration for the HTCondorCE submission can be done via the configuration system ::
WorkingDirectory: Location to store the pilot and condor log files
DaysToKeepLogs: how long to keep the log files until they are removed
ExtraSubmitString: Additional option for the condor submit file, separate options with '\\n', for example:
request_cpus = 8 \\n periodic_remove = ...
UseLocalSchedd: If False, directly submit to a remote condor schedule daemon,
then one does not need to run condor daemons on the submit machine
see :ref:`res-comp-htcondor`
"""
# Note: if you read this documentation in the source code and not via the sphinx
# created documentation, there should only be one slash when setting the option,
# but "\n" gets rendered as a linebreak in sphinx
import os
import tempfile
import commands
import errno
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.Grid import executeGridCommand
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.Utilities.List import breakListIntoChunks
# BEWARE: this import makes it impossible to instantiate this CE client side
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import WAITING_PILOT_STATUS
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.Subprocess import Subprocess
from DIRAC.Resources.Computing.BatchSystems.Condor import parseCondorStatus, treatCondorHistory
__RCSID__ = "$Id$"
# Name under which this computing element type is registered
CE_NAME = 'HTCondorCE'
# CE configuration options that must be present
MANDATORY_PARAMETERS = ['Queue']
# Fallback location for pilot and condor log files when not set in the CS
DEFAULT_WORKINGDIRECTORY = '/opt/dirac/pro/runit/WorkloadManagement/SiteDirectorHT'
# Fallback number of days out/err/log files are kept before cleanup removes them
DEFAULT_DAYSTOKEEPLOGS = 15
def condorIDFromJobRef(jobRef):
  """Split a pilot job reference into its job URL and condor job ID.

  :param str jobRef: pilot reference, e.g. "htcondorce://ce.example.org/123.0:::stamp"
  :return: tuple (jobURL, condorID)
  """
  # Everything before the ":::" stamp separator is the job URL
  jobURL = jobRef.partition(":::")[0]
  # The condor ID is the last path component of the URL
  condorID = jobURL.rpartition("/")[2]
  return jobURL, condorID
def findFile(workingDir, fileName):
  """Locate a pilot out, err, or log file below workingDir.

  :param str workingDir: directory tree to search
  :param str fileName: file name pattern passed to find -name
  :return: S_OK( list of matching paths ) or S_ERROR
  """
  res = Subprocess().systemCall("find %s -name '%s'" % (workingDir, fileName), shell=True)
  if not res['OK']:
    return res
  # stdout of find, one matching path per line
  paths = res['Value'][1].splitlines()
  if paths:
    return S_OK(paths)
  return S_ERROR(errno.ENOENT, "Could not find %s in directory %s" % (fileName, workingDir))
def getCondorLogFile(pilotRef):
  """Locate the condor log file belonging to the given pilot reference.

  :param str pilotRef: pilot job reference
  :return: S_OK( list of log file paths ) or S_ERROR
  """
  condorID = condorIDFromJobRef(pilotRef)[1]
  # FIXME: This gets called from the WMSAdministrator, so we don't have the same
  # working directory as for the SiteDirector unless we force it, there is also
  # no CE instantiated when this function is called so we can only pick this option up from one place
  workingDirectory = gConfig.getValue("Resources/Computing/HTCondorCE/WorkingDirectory",
                                      DEFAULT_WORKINGDIRECTORY)
  return findFile(workingDirectory, '%s.log' % condorID)
class HTCondorCEComputingElement(ComputingElement):
  """ HTCondorCE computing element.

  Submits pilots via condor_submit (to a local or remote schedd), tracks them
  with condor_q/condor_history, kills them with condor_rm, and retrieves their
  output from the working directory.
  """
  #############################################################################
  def __init__(self, ceUniqueID):
    """ Standard constructor.

    :param str ceUniqueID: unique name of this CE
    """
    super(HTCondorCEComputingElement, self).__init__(ceUniqueID)
    self.ceType = CE_NAME
    self.submittedJobs = 0
    self.mandatoryParameters = MANDATORY_PARAMETERS
    self.pilotProxy = ''
    self.queue = ''
    self.outputURL = 'gsiftp://localhost'
    self.gridEnv = ''
    self.proxyRenewal = 0
    # How long out/err/log files are kept before __cleanup removes them
    self.daysToKeepLogs = DEFAULT_DAYSTOKEEPLOGS
    # Extra lines appended verbatim to the condor submit file
    self.extraSubmitString = ''
    # see note on getCondorLogFile, why we can only use the global setting
    self.workingDirectory = gConfig.getValue("Resources/Computing/HTCondorCE/WorkingDirectory",
                                             DEFAULT_WORKINGDIRECTORY)
    # If False, submit directly to the remote schedd (no local condor daemons)
    self.useLocalSchedd = True
    # "-pool ... -name ..." options for condor_q/condor_rm in remote mode
    self.remoteScheddOptions = ""
  #############################################################################
  def __writeSub(self, executable, nJobs):
    """ Create the submit file for condor_submit.

    :param str executable: name of the pilot wrapper (relative to workingDirectory)
    :param int nJobs: number of pilots to queue
    :return: path of the created .sub file (caller must unlink it)
    """
    self.log.debug("Working directory: %s " % self.workingDirectory)
    # We randomize the location of the pilotoutput and log, because there are just too many of them
    pre1 = makeGuid()[:3]
    pre2 = makeGuid()[:3]
    mkDir(os.path.join(self.workingDirectory, pre1, pre2))
    initialDirPrefix = "%s/%s" % (pre1, pre2)
    self.log.debug("InitialDir: %s" % os.path.join(self.workingDirectory, initialDirPrefix))
    self.log.debug("ExtraSubmitString:\n### \n %s \n###" % self.extraSubmitString)
    fd, name = tempfile.mkstemp(suffix='.sub', prefix='HTCondorCE_', dir=self.workingDirectory)
    subFile = os.fdopen(fd, 'w')
    executable = os.path.join(self.workingDirectory, executable)
    # With a local schedd condor transfers the files back for us
    localScheddOptions = """
ShouldTransferFiles = YES
WhenToTransferOutput = ON_EXIT_OR_EVICT
""" if self.useLocalSchedd else ""
    targetUniverse = "grid" if self.useLocalSchedd else "vanilla"
    sub = """
executable = %(executable)s
universe = %(targetUniverse)s
use_x509userproxy = true
output = $(Cluster).$(Process).out
error = $(Cluster).$(Process).err
log = $(Cluster).$(Process).log
environment = "HTCONDOR_JOBID=$(Cluster).$(Process)"
initialdir = %(initialDir)s
grid_resource = condor %(ceName)s %(ceName)s:9619
transfer_output_files = ""
%(localScheddOptions)s
kill_sig=SIGTERM
%(extraString)s
Queue %(nJobs)s
""" % dict(executable=executable,
           nJobs=nJobs,
           ceName=self.ceName,
           extraString=self.extraSubmitString,
           initialDir=os.path.join(self.workingDirectory, initialDirPrefix),
           localScheddOptions=localScheddOptions,
           targetUniverse=targetUniverse,
           )
    subFile.write(sub)
    subFile.close()
    return name
  def _reset(self):
    """Pick up the CE options from self.ceParameters (called by the base class)."""
    self.queue = self.ceParameters['Queue']
    self.outputURL = self.ceParameters.get('OutputURL', 'gsiftp://localhost')
    self.gridEnv = self.ceParameters['GridEnv']
    self.daysToKeepLogs = self.ceParameters.get('DaysToKeepLogs', DEFAULT_DAYSTOKEEPLOGS)
    # decode escaped "\n" sequences so multi-line submit options work
    self.extraSubmitString = self.ceParameters.get('ExtraSubmitString', '').decode('string_escape')
    self.useLocalSchedd = self.ceParameters.get('UseLocalSchedd', self.useLocalSchedd)
    # The CS value arrives as a string; only "False" disables the local schedd
    if isinstance(self.useLocalSchedd, basestring):
      if self.useLocalSchedd == "False":
        self.useLocalSchedd = False
    self.remoteScheddOptions = "" if self.useLocalSchedd else "-pool %s:9619 -name %s " % (self.ceName, self.ceName)
    self.log.debug("Using local schedd: %r " % self.useLocalSchedd)
    self.log.debug("Remote scheduler option: '%s' " % self.remoteScheddOptions)
  #############################################################################
  def submitJob(self, executableFile, proxy, numberOfJobs=1):
    """ Submit numberOfJobs pilots running executableFile.

    :param str executableFile: pilot wrapper script
    :param proxy: unused here; the proxy in self.proxy is used by executeGridCommand
    :param int numberOfJobs: how many pilots to queue
    :return: S_OK( list of pilot references ) with a 'PilotStampDict' entry, or S_ERROR
    """
    self.log.verbose("Executable file path: %s" % executableFile)
    # Make sure the wrapper is executable
    if not os.access(executableFile, 5):
      os.chmod(executableFile, 0o755)
    subName = self.__writeSub(executableFile, numberOfJobs)
    jobStamps = []
    for _i in range(numberOfJobs):
      jobStamps.append(makeGuid()[:8])
    cmd = ['condor_submit', '-terse', subName]
    # the options for submit to remote are different than the other remoteScheddOptions
    scheddOptions = [] if self.useLocalSchedd else ['-pool', '%s:9619' % self.ceName, '-remote', self.ceName]
    for op in scheddOptions:
      cmd.insert(-1, op)
    result = executeGridCommand(self.proxy, cmd, self.gridEnv)
    self.log.verbose(result)
    os.unlink(subName)
    if not result['OK']:
      self.log.error("Failed to submit jobs to htcondor", result['Message'])
      return result
    if result['Value'][0]:
      # We have got a non-zero status code
      errorString = result['Value'][2] if result['Value'][2] else result['Value'][1]
      return S_ERROR('Pilot submission failed with error: %s ' % errorString.strip())
    pilotJobReferences = self.__getPilotReferences(result['Value'][1].strip())
    if not pilotJobReferences['OK']:
      return pilotJobReferences
    pilotJobReferences = pilotJobReferences['Value']
    self.log.verbose("JobStamps: %s " % jobStamps)
    self.log.verbose("pilotRefs: %s " % pilotJobReferences)
    result = S_OK(pilotJobReferences)
    result['PilotStampDict'] = dict(zip(pilotJobReferences, jobStamps))
    self.log.verbose("Result for submission: %s " % result)
    return result
  def killJob(self, jobIDList):
    """ Kill the specified jobs via condor_rm.

    :param jobIDList: single pilot reference or a list of them
    :return: S_OK or S_ERROR (stops at the first failure)
    """
    if isinstance(jobIDList, basestring):
      jobIDList = [jobIDList]
    self.log.verbose("KillJob jobIDList: %s" % jobIDList)
    for jobRef in jobIDList:
      job, jobID = condorIDFromJobRef(jobRef)
      self.log.verbose("Killing pilot %s " % job)
      status, stdout = commands.getstatusoutput('condor_rm %s %s' % (self.remoteScheddOptions, jobID))
      if status != 0:
        return S_ERROR("Failed to kill pilot %s: %s" % (job, stdout))
    return S_OK()
  #############################################################################
  def getCEStatus(self):
    """ Return information on running and pending jobs.

    Counts are taken from the PilotAgentsDB, not from condor itself.
    :return: S_OK with 'SubmittedJobs', 'RunningJobs', 'WaitingJobs' entries
    """
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0
    # getWaitingPilots
    condDict = {'DestinationSite': self.ceName,
                'Status': WAITING_PILOT_STATUS}
    res = PilotAgentsDB().countPilots(condDict)
    if res['OK']:
      result['WaitingJobs'] = int(res['Value'])
    else:
      self.log.warn("Failure getting pilot count for %s: %s " % (self.ceName, res['Message']))
    # getRunningPilots
    condDict = {'DestinationSite': self.ceName,
                'Status': 'Running'}
    res = PilotAgentsDB().countPilots(condDict)
    if res['OK']:
      result['RunningJobs'] = int(res['Value'])
    else:
      self.log.warn("Failure getting pilot count for %s: %s " % (self.ceName, res['Message']))
    return result
  def getJobStatus(self, jobIDList):
    """ Get the status information for the given list of jobs.

    :param jobIDList: single pilot reference or a list of them
    :return: S_OK( dict pilotRef -> status string ) or S_ERROR
    """
    self.__cleanup()
    self.log.verbose("Job ID List for status: %s " % jobIDList)
    if isinstance(jobIDList, basestring):
      jobIDList = [jobIDList]
    resultDict = {}
    condorIDs = {}
    # Get all condorIDs so we can just call condor_q and condor_history once
    for jobRef in jobIDList:
      job, jobID = condorIDFromJobRef(jobRef)
      condorIDs[job] = jobID
    qList = []
    for _condorIDs in breakListIntoChunks(condorIDs.values(), 100):
      # This will return a list of 1245.75 3
      status, stdout_q = commands.getstatusoutput('condor_q %s %s -af:j JobStatus ' % (self.remoteScheddOptions,
                                                                                      ' '.join(_condorIDs)))
      if status != 0:
        return S_ERROR(stdout_q)
      _qList = stdout_q.strip().split('\n')
      qList.extend(_qList)
      # FIXME: condor_history does only support j for autoformat from 8.5.3,
      # format adds whitespace for each field This will return a list of 1245 75 3
      # needs to concatenate the first two with a dot
      condorHistCall = 'condor_history %s %s -af ClusterId ProcId JobStatus' % (self.remoteScheddOptions,
                                                                               ' '.join(_condorIDs))
      treatCondorHistory(condorHistCall, qList)
    for job, jobID in condorIDs.iteritems():
      pilotStatus = parseCondorStatus(qList, jobID)
      if pilotStatus == 'HELD':
        # make sure the pilot stays dead and gets taken out of the condor_q
        _rmStat, _rmOut = commands.getstatusoutput('condor_rm %s %s ' % (self.remoteScheddOptions, jobID))
        #self.log.debug( "condor job killed: job %s, stat %s, message %s " % ( jobID, rmStat, rmOut ) )
        pilotStatus = 'Aborted'
      resultDict[job] = pilotStatus
    self.log.verbose("Pilot Statuses: %s " % resultDict)
    return S_OK(resultDict)
  def getJobOutput(self, jobID, _localDir=None):
    """ Retrieve the pilot's stdout and stderr.

    With a local schedd condor has already copied the files back; with a remote
    schedd condor_transfer_data is invoked first. The files are then read from
    the working directory.
    TODO: condor can copy the output automatically back to the
    submission, so we just need to pick it up from the proper folder
    :param str jobID: pilot reference
    :param _localDir: unused
    :return: S_OK( (stdout, stderr) ) or S_ERROR
    """
    self.log.verbose("Getting job output for jobID: %s " % jobID)
    _job, condorID = condorIDFromJobRef(jobID)
    # FIXME: the WMSAdministrator does not know about the
    # SiteDirector WorkingDirectory, it might not even run on the
    # same machine
    #workingDirectory = self.ceParameters.get( 'WorkingDirectory', DEFAULT_WORKINGDIRECTORY )
    if not self.useLocalSchedd:
      iwd = None
      # Ask the remote schedd for the job's initial dir so we can recreate it locally
      status, stdout_q = commands.getstatusoutput('condor_q %s %s -af SUBMIT_Iwd' % (self.remoteScheddOptions,
                                                                                    condorID))
      self.log.verbose('condor_q:', stdout_q)
      if status != 0:
        return S_ERROR(stdout_q)
      if self.workingDirectory in stdout_q:
        iwd = stdout_q
        try:
          os.makedirs(iwd)
        except OSError as e:
          # directory probably exists already
          self.log.verbose(str(e))
      if iwd is None:
        return S_ERROR("Failed to find condor job %s" % condorID)
      cmd = ['condor_transfer_data', '-pool', '%s:9619' % self.ceName, '-name', self.ceName, condorID]
      result = executeGridCommand(self.proxy, cmd, self.gridEnv)
      self.log.verbose(result)
      if not result['OK']:
        self.log.error("Failed to get job output from htcondor", result['Message'])
        return result
    output = ''
    error = ''
    resOut = findFile(self.workingDirectory, '%s.out' % condorID)
    if not resOut['OK']:
      self.log.error("Failed to find output file for condor job", jobID)
      return resOut
    outputfilename = resOut['Value'][0]
    resErr = findFile(self.workingDirectory, '%s.err' % condorID)
    if not resErr['OK']:
      self.log.error("Failed to find error file for condor job", jobID)
      return resErr
    errorfilename = resErr['Value'][0]
    try:
      with open(outputfilename) as outputfile:
        output = outputfile.read()
    except IOError as e:
      self.log.error("Failed to open outputfile", str(e))
      return S_ERROR("Failed to get pilot output")
    try:
      with open(errorfilename) as errorfile:
        error = errorfile.read()
    except IOError as e:
      self.log.error("Failed to open errorfile", str(e))
      return S_ERROR("Failed to get pilot error")
    return S_OK((output, error))
  def __getPilotReferences(self, jobString):
    """get the references from the condor_submit output
    cluster ids look like " 107.0 - 107.0 " or " 107.0 - 107.4 "
    :param str jobString: "-terse" output of condor_submit
    :return: S_OK( list of "htcondorce://<ce>/<cluster>.<proc>" references ) or S_ERROR
    """
    self.log.verbose("getPilotReferences: %s" % jobString)
    clusterIDs = jobString.split('-')
    if len(clusterIDs) != 2:
      return S_ERROR("Something wrong with the condor_submit output: %s" % jobString)
    clusterIDs = [clu.strip() for clu in clusterIDs]
    self.log.verbose("Cluster IDs parsed: %s " % clusterIDs)
    try:
      clusterID = clusterIDs[0].split('.')[0]
      numJobs = clusterIDs[1].split('.')[1]
    except IndexError:
      return S_ERROR("Something wrong with the condor_submit output: %s" % jobString)
    cePrefix = "htcondorce://%s/" % self.ceName
    jobReferences = ["%s%s.%s" % (cePrefix, clusterID, i) for i in range(int(numJobs) + 1)]
    return S_OK(jobReferences)
  def __cleanup(self):
    """ clean the working directory of old jobs"""
    # FIXME: again some issue with the working directory...
    # workingDirectory = self.ceParameters.get( 'WorkingDirectory', DEFAULT_WORKINGDIRECTORY )
    self.log.debug("Cleaning working directory: %s" % self.workingDirectory)
    # remove all files older than 120 minutes starting with DIRAC_ Condor will
    # push files on submission, but it takes at least a few seconds until this
    # happens so we can't directly unlink after condor_submit
    status, stdout = commands.getstatusoutput('find %s -mmin +120 -name "DIRAC_*" -delete ' % self.workingDirectory)
    if status:
      self.log.error("Failure during HTCondorCE __cleanup", stdout)
    # remove all out/err/log files older than "DaysToKeepLogs" days
    findPars = dict(workDir=self.workingDirectory, days=self.daysToKeepLogs)
    status, stdout = commands.getstatusoutput(
        r'find %(workDir)s -mtime +%(days)s -type f \( -name "*.out" -o -name "*.err" -o -name "*.log" \) -delete ' %
        findPars)
    if status:
      self.log.error("Failure during HTCondorCE __cleanup", stdout)
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| chaen/DIRAC | Resources/Computing/HTCondorCEComputingElement.py | Python | gpl-3.0 | 17,070 | [
"DIRAC"
] | 0adff4ba94c8f8be7ed66f4551769e2e7647169239b2111afa870d3c7b7bbeb0 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds two rule-based remarketing user lists.
Adds two rule-based remarketing user lists; one with no site visit date
restrictions and another that will only include users who visit your site in
the next six months.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdwordsUserListService.mutate
"""
__author__ = 'Mark Saniscalchi'
import calendar
from datetime import date
from datetime import datetime
from datetime import timedelta
from googleads import adwords
def main(client):
  """Add two rule-based remarketing user lists via AdwordsUserListService.

  Builds an ExpressionRuleUserList (no site-visit date restriction) and a
  DateSpecificRuleUserList (restricted to site visits within the next six
  months), submits both in a single mutate call, and prints the results.

  Args:
    client: an initialized adwords.AdWordsClient instance.
  """
  # Initialize appropriate service.
  adwords_user_list_service = client.GetService(
      'AdwordsUserListService', version='v201409')
  # First rule item group - users who visited the checkout page and had more
  # than one item in their shopping cart.
  checkout_rule_item = {
      'StringRuleItem': {
          'key': {
              'name': 'ecomm_pagetype'
          },
          'op': 'EQUALS',
          'value': 'checkout'
      }
  }
  cart_size_rule_item = {
      'NumberRuleItem': {
          'key': {
              'name': 'cartsize'
          },
          'op': 'GREATER_THAN',
          'value': '1.0'
      }
  }
  # Combine the two rule items into a RuleItemGroup so AdWords will logically
  # AND the rules together.
  checkout_multiple_item_group = {
      'items': [checkout_rule_item, cart_size_rule_item]
  }
  # Second rule item group - users who checked out within the next 3 months.
  today = date.today()
  start_date_rule_item = {
      'DateRuleItem': {
          'key': {
              'name': 'checkoutdate'
          },
          'op': 'AFTER',
          'value': today.strftime('%Y%m%d')
      }
  }
  three_months_later = AddMonths(today, 3)
  three_months_later_rule_item = {
      'DateRuleItem': {
          'key': {
              'name': 'checkoutdate'
          },
          'op': 'BEFORE',
          'value': three_months_later.strftime('%Y%m%d')
      }
  }
  # Combine the date rule items into a RuleItemGroup
  checked_out_date_range_item_group = {
      'items': [start_date_rule_item, three_months_later_rule_item]
  }
  # Combine the rule item groups into a Rule so AdWords will logically OR the
  # groups together.
  rule = {
      'groups': [
          checkout_multiple_item_group,
          checked_out_date_range_item_group
      ]
  }
  # Create the user list with no restrictions on site visit date.
  expression_user_list = {
      'xsi_type': 'ExpressionRuleUserList',
      'name': 'Expression-based user list created at %s'
              % datetime.today().strftime('%Y%m%d %H:%M:%S'),
      'description': 'Users who checked out in three month window OR visited'
                     ' the checkout page with more than one item in their'
                     ' cart.',
      'rule': rule
  }
  # Create the user list restricted to users who visit your site within the next
  # six months.
  end_date = AddMonths(today, 6)
  date_user_list = {
      'xsi_type': 'DateSpecificRuleUserList',
      'name': 'Date rule user list created at %s'
              % datetime.today().strftime('%Y%m%d %H:%M:%S'),
      'description': 'Users who visited the site between %s and %s and checked'
                     ' out in three month window OR visited the checkout page'
                     ' with more than one item in their cart.'
                     % (today.strftime('%Y%m%d'), end_date.strftime('%Y%m%d')),
      'rule': rule,
      'startDate': today.strftime('%Y%m%d'),
      'endDate': end_date.strftime('%Y%m%d')
  }
  # Create operations to add the user lists.
  operations = [
      {
          'operand': user_list,
          'operator': 'ADD',
      } for user_list in [expression_user_list, date_user_list]
  ]
  # Submit the operations.
  user_lists = adwords_user_list_service.mutate(operations)
  # Display results.
  for user_list in user_lists['value']:
    print (('User list added with ID %d, name "%s", status "%s", list type'
            ' "%s", accountUserListStatus "%s", description "%s".') %
           (user_list['id'], user_list['name'],
            user_list['status'], user_list['listType'],
            user_list['accountUserListStatus'], user_list['description']))
def AddMonths(start_date, months):
  """Return *start_date* advanced by the given number of calendar months.

  Each month step adds the number of days in the date's current month, so
  the day-of-month may shift when crossing shorter months (e.g. Jan 31 plus
  one month lands in early March rather than on Feb 28/29).

  Args:
    start_date: date The date months are being added to.
    months: int The number of months to add.

  Returns:
    A date equal to the start date incremented by the given number of months.
  """
  result = start_date
  for _ in range(months):
    days_in_month = calendar.monthrange(result.year, result.month)[1]
    result = result + timedelta(days=days_in_month)
  return result
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/properties come from the "googleads.yaml" file in the home
  # directory (see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client)
| dietrichc/streamline-ppc-reports | examples/adwords/v201409/remarketing/add_rule_based_user_lists.py | Python | apache-2.0 | 5,782 | [
"VisIt"
] | 8cbab0021ae10d358335dd4c818fbe7d98d0d64e75809e4bbda6e2354fe9f673 |
# Exploratory script: extract a red seal (stamp) from photographs and render
# it on a transparent background.  Lines appear to be meant for interactive
# (cell-by-cell) execution; several assignments intentionally overwrite
# earlier ones while experimenting.
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, draw, color, transform, feature, measure, \
    filters, morphology, segmentation, restoration, io, img_as_float
# Each imread overwrites the previous image; only the last file takes effect
# when the script is run top-to-bottom.
image = io.imread('./testzie/dataset/seal_real.jpg')
image = io.imread('./testzie/dataset/seal_bad.jpg')
image = io.imread('./testzie/dataset/seal.jpg')
image = io.imread('./testzie/dataset/seal_fuck.jpg')
# image_gray = color.rgb2gray(image)
# Invert a green/blue-weighted grayscale so the red seal becomes bright.
image_gray = (255 - np.dot(image, [0, 1/2, 1/2])).astype('uint8')
# Threshold candidates; the final (effective) value is the Otsu/Yen average.
thresh = filters.threshold_otsu(image_gray)
thresh = filters.threshold_yen(image_gray)
thresh = (filters.threshold_otsu(image_gray) + filters.threshold_yen(image_gray)) / 2
# Binary mask of seal pixels.
bw = image_gray > thresh
# bw = morphology.closing(bw, morphology.square(3))
# bw = morphology.opening(bw, morphology.square(3))
# bw = morphology.closing(bw, morphology.square(3))
# bw = morphology.opening(bw, morphology.square(3))
# bw = morphology.opening(bw, morphology.square(3))
# bw = morphology.opening(bw, morphology.square(3))
plt.imshow(image_gray)
plt.imshow(bw)
# NOTE(review): np.meshgrid() with no arguments is a no-op returning [];
# looks like a leftover line.
np.meshgrid()
# Normalize the grayscale image to [0, 1].
image_gray_fix = image_gray - image_gray.min()
image_gray_fix = image_gray_fix / image_gray_fix.max()
plt.imshow(image_gray_fix, 'gray')
# Build an RGBA image: pure red hue, intensity from the grayscale, fixed
# 0.75 alpha, then masked to the thresholded seal pixels.
image_red = np.zeros(image_gray_fix.shape + (4, ), 'float32')
image_red[:, :, 0] = 1
image_red[:, :, 1] = (1 - image_gray_fix)
image_red[:, :, 2] = (1 - image_gray_fix)
image_red[:, :, 3] = 0.75
image_red = image_red * bw[:, :, np.newaxis]
plt.imshow(image_red)
# Side-by-side comparison of the original photo and the extracted seal.
fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(8, 6))
ax0.imshow(image)
ax1.imshow(image_red)
plt.imsave('c:/users/dell/desktop/seal_real.png', image_red)
# Unrelated experiment: unsharp masking of the photo.
image = img_as_float(image)
blurred = filters.gaussian(image, sigma=1, multichannel=True)
sharper = np.clip(image * 1.5 - blurred * 0.5, 0, 1.0)
io.imshow(blurred)
io.imshow(sharper)
image_gray = (1 - np.dot(sharper, [0, 1/2, 1/2]))
"Gaussian"
] | f640afe18bab60208b0b12415a26d25ab963d97000eefaac550f9c7b328c5905 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2008 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Based on:
# "sane.py", part of the Python Imaging Library (PIL)
# http://www.pythonware.com/products/pil/
# Python wrapper on top of the _sane module, which is in turn a very
# thin wrapper on top of the SANE library. For a complete understanding
# of SANE, consult the documentation at the SANE home page:
# http://www.mostang.com/sane/ .#
#
# Modified to work without PIL by Don Welch
#
# (C) Copyright 2003 A.M. Kuchling. All Rights Reserved
# (C) Copyright 2004 A.M. Kuchling, Ralph Heinkel All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of A.M. Kuchling and
# Ralph Heinkel not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
# A.M. KUCHLING, R.H. HEINKEL DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# Python wrapper on top of the scanext module, which is in turn a very
# thin wrapper on top of the SANE library. For a complete understanding
# of SANE, consult the documentation at the SANE home page:
# http://www.mostang.com/sane/ .
#
# Original authors: Andrew Kuchling, Ralph Heinkel
# Modified by: Don Welch, Sarbeswar Meher
#
# Std Lib
import scanext
import threading
import time
import os
# Local
from base.g import *
from base import utils
from base.sixext import to_bytes_utf8
from base.sixext.moves import queue
# Event value posted on the scan event queue to request cancellation
# (consumed by ScanThread.checkCancel).
EVENT_SCAN_CANCELED = 1
# Human-readable names for scanext option value types (used by Option.__repr__).
TYPE_STR = { scanext.TYPE_BOOL: "TYPE_BOOL", scanext.TYPE_INT: "TYPE_INT",
             scanext.TYPE_FIXED: "TYPE_FIXED", scanext.TYPE_STRING: "TYPE_STRING",
             scanext.TYPE_BUTTON: "TYPE_BUTTON", scanext.TYPE_GROUP: "TYPE_GROUP" }
# Human-readable names for scanext option units (used by Option.__repr__).
UNIT_STR = { scanext.UNIT_NONE: "UNIT_NONE",
             scanext.UNIT_PIXEL: "UNIT_PIXEL",
             scanext.UNIT_BIT: "UNIT_BIT",
             scanext.UNIT_MM: "UNIT_MM",
             scanext.UNIT_DPI: "UNIT_DPI",
             scanext.UNIT_PERCENT: "UNIT_PERCENT",
             scanext.UNIT_MICROSECOND: "UNIT_MICROSECOND" }
# Maximum read size in bytes; not referenced in this module -- presumably
# used by callers. TODO confirm.
MAX_READSIZE = 65536
class Option:
    """Class representing a SANE option.

    Attributes:
      index -- number from 0 to n, giving the option number
      name -- a string uniquely identifying the option
      title -- single-line string containing a title for the option
      desc -- a long string describing the option; useful as a help message
      type -- type of this option. Possible values: TYPE_BOOL,
              TYPE_INT, TYPE_STRING, and so forth.
      unit -- units of this option. Possible values: UNIT_NONE,
              UNIT_PIXEL, etc.
      size -- size of the value in bytes
      cap -- capabilities available; CAP_EMULATED, CAP_SOFT_SELECT, etc.
      constraint -- constraint on values. Possible values:
        None : No constraint
        (min,max,step)  Integer values, from min to max, stepping by
        list of integers or strings: only the listed values are allowed
    """

    def __init__(self, args, cur_device):
        # args is the per-option tuple returned by the device's getOptions().
        self.cur_device = cur_device
        self.index, self.name, self.title, self.desc, self.type, \
            self.unit, self.size, self.cap, self.constraint = args
        # Backend may hand back a non-str name; normalize for dict keys.
        if type(self.name) != type(''):
            self.name = str(self.name)

    def isActive(self):
        """Return True if the backend currently exposes this option."""
        return scanext.isOptionActive(self.cap)

    def isSettable(self):
        """Return True if the option can be set by software."""
        return scanext.isOptionSettable(self.cap)

    def __repr__(self):
        settable = 'yes' if self.isSettable() else 'no'
        if self.isActive():
            active = 'yes'
            curValue = self.cur_device.getOption(self.name)
        else:
            active = 'no'
            curValue = '<not available, inactive option>'
        # Fix: the original had an unreachable "return s" (s undefined) after
        # this return statement; it has been removed.
        return """\nName: %s
Cur value: %s
Index: %d
Title: %s
Desc: %s
Type: %s
Unit: %s
Constr: %s
isActive: %s
isSettable: %s\n""" % (self.name, curValue,
                       self.index, self.title, self.desc,
                       TYPE_STR[self.type], UNIT_STR[self.unit],
                       self.constraint, active, settable)

    def limitAndSet(self, value):
        """Clamp value to this option's constraint, set it on the device,
        and return the value actually used.

        If value is None, the option's current device value is returned
        (and nothing is set).
        """
        if value is not None and self.constraint is not None:
            if type(self.constraint) == type(()):
                # Range constraint (min, max, step): clamp to [min, max].
                requested = value
                if value < self.constraint[0]:
                    value = self.constraint[0]
                    # Fix: report the requested value and the actual bound
                    # (the original logged self.name twice and the clamped
                    # value for both numbers).
                    log.warn("Invalid value for %s (%s < min value of %d). Using %d." %
                             (self.name, requested, self.constraint[0], value))
                elif value > self.constraint[1]:
                    value = self.constraint[1]
                    log.warn("Invalid value for %s (%s > max value of %d). Using %d." %
                             (self.name, requested, self.constraint[1], value))
                self.cur_device.setOption(self.name, value)
            elif type(self.constraint) == type([]):
                # List constraint: snap to the nearest allowed value.
                if value not in self.constraint:
                    v = self.constraint[0]
                    min_dist = abs(value - v)  # avoids relying on sys via star-import
                    for x in self.constraint:
                        if abs(value - x) < min_dist:
                            min_dist = abs(value - x)
                            v = x
                    log.warn("Invalid value for %s (%s not in constraint list: %s). Using %d." %
                             (self.name, value,
                              ', '.join([str(c) for c in self.constraint]), v))
                    self.cur_device.setOption(self.name, v)
                    # Fix: return the snapped value actually set, not the
                    # invalid requested one.
                    value = v
                # NOTE(review): as in the original, a valid in-list value is
                # not pushed to the device here -- confirm callers set it
                # separately before changing this.
        else:
            value = self.cur_device.getOption(self.name)
        return value
##class _SaneIterator:
## """ intended for ADF scans.
## """
##
## def __init__(self, cur_device):
## self.cur_device = cur_device
##
## def __iter__(self):
## return self
##
## def __del__(self):
## self.cur_device.cancelScan()
##
## def next(self):
## try:
## self.cur_device.startScan()
## except error, v:
## if v == 'Document feeder out of documents':
## raise StopIteration
## else:
## raise
## return self.cur_device.performScan(1)
class ScanDevice:
    """Class representing a SANE device.

    Methods:
      startScan()  -- initiate a scan, using the current settings
      cancelScan() -- cancel an in-progress scanning operation

    Also available, but rather low-level:
      getParameters() -- get the current parameter settings of the device
      getOptions()    -- return a list of tuples describing all the options.

    Attributes:
      optlist -- list of option names

    You can also access an option name to retrieve its value, and to
    set it. For example, if one option has a .name attribute of
    imagemode, and scanner is a ScanDevice object, you can do:
        print scanner.imagemode
        scanner.imagemode = 'Full frame'
    scanner.['imagemode'] returns the corresponding Option object.
    """

    def __init__(self, dev):
        # dev is a SANE device name string understood by scanext.openDevice.
        self.scan_thread = None
        self.dev = scanext.openDevice(dev)
        self.options = {}
        self.__load_options_dict()

    def __load_options_dict(self):
        # Rebuild the {name: Option} map; called at open time and whenever
        # a set reports INFO_RELOAD_OPTIONS.
        opts = self.options
        opt_list = self.dev.getOptions()
        for t in opt_list:
            o = Option(t, self)
            if o.type != scanext.TYPE_GROUP:
                opts[o.name] = o

    def setOption(self, key, value):
        """Set the backend option named key to value.

        Errors (group/inactive/unsettable options, backend failures) are
        logged rather than raised.
        """
        opts = self.options
        if key not in opts:
            # NOTE(review): unknown keys are cached locally instead of being
            # rejected -- confirm callers rely on this behavior.
            opts[key] = value
            return
        opt = opts[key]
        if opt.type == scanext.TYPE_GROUP:
            log.error("Groups can't be set: %s" % key)
        if not scanext.isOptionActive(opt.cap):
            log.error("Inactive option: %s" % key)
        if not scanext.isOptionSettable(opt.cap):
            log.error("Option can't be set by software: %s" % key)
        if type(value) == int and opt.type == scanext.TYPE_FIXED:
            # avoid annoying errors of backend if int is given instead float:
            value = float(value)
        try:
            self.last_opt = self.dev.setOption(opt.index, value)
        except scanext.error:
            log.error("Unable to set option %s to value %s" % (key, value))
            return
        # do binary AND to find if we have to reload options:
        if self.last_opt & scanext.INFO_RELOAD_OPTIONS:
            self.__load_options_dict()

    def getOption(self, key):
        """Return the current value of the option named key.

        Raises:
            AttributeError: for unknown, inactive, button or group options.
        """
        opts = self.options
        if key == 'optlist':
            return list(opts.keys())
        if key == 'area':
            return (opts["tl-x"], opts["tl-y"]), (opts["br-x"], opts["br-y"])
        if key not in opts:
            raise AttributeError('No such attribute: %s' % key)
        opt = opts[key]
        if opt.type == scanext.TYPE_BUTTON:
            raise AttributeError("Buttons don't have values: %s" % key)
        if opt.type == scanext.TYPE_GROUP:
            raise AttributeError("Groups don't have values: %s " % key)
        if not scanext.isOptionActive(opt.cap):
            raise AttributeError('Inactive option: %s' % key)
        return self.dev.getOption(opt.index)

    def getOptionObj(self, key):
        """Return the Option object for key, or None if unknown."""
        opts = self.options
        if key in opts:
            return opts[key]

    def getParameters(self):
        """Return a 6-tuple holding all the current device settings:
        (format, format_name, last_frame, (pixels_per_line, lines), depth, bytes_per_line)

        - format is the SANE frame type
        - format is one of 'grey', 'color' (RGB), 'red', 'green', 'blue'.
        - last_frame [bool] indicates if this is the last frame of a multi frame image
        - (pixels_per_line, lines) specifies the size of the scanned image (x,y)
        - lines denotes the number of scanlines per frame
        - depth gives number of pixels per sample
        """
        return self.dev.getParameters()

    def getOptions(self):
        "Return a list of tuples describing all the available options"
        return self.dev.getOptions()

    def startScan(self, byte_format='BGRA', update_queue=None, event_queue=None):
        """
        Perform a scan with the current device.
        Calls sane_start().

        Returns (started, expected_byte_count, status).  If a scan is
        already active, returns (False, 0, SANE_STATUS_DEVICE_BUSY).
        """
        if not self.isScanActive():
            status = self.dev.startScan()
            self.format, self.format_name, self.last_frame, self.pixels_per_line, \
                self.lines, self.depth, self.bytes_per_line = self.dev.getParameters()
            self.scan_thread = ScanThread(self.dev, byte_format, update_queue, event_queue)
            self.scan_thread.scan_active = True
            self.scan_thread.start()
            return True, self.lines * self.bytes_per_line, status
        else:
            # Already active
            return False, 0, scanext.SANE_STATUS_DEVICE_BUSY

    def cancelScan(self):
        "Cancel an in-progress scanning operation."
        return self.dev.cancelScan()

    def getScan(self):
        "Get the output buffer and info about a completed scan."
        if not self.isScanActive():
            s = self.scan_thread
            return s.buffer, s.format, s.format_name, s.pixels_per_line, \
                s.lines, s.depth, s.bytes_per_line, s.pad_bytes, s.total_read, s.total_write

    def freeScan(self):
        "Cleanup the scan file after a completed scan."
        if not self.isScanActive():
            s = self.scan_thread
            try:
                s.buffer.close()
                os.remove(s.buffer_path)
            except (IOError, AttributeError):
                pass

    def isScanActive(self):
        """Return True while the scan worker thread is running."""
        # Fix: use is_alive() (available since Python 2.6) instead of the
        # isAlive() alias, which was removed in Python 3.9.
        if self.scan_thread is not None:
            return self.scan_thread.is_alive() and self.scan_thread.scan_active
        else:
            return False

    def waitForScanDone(self):
        """Block until the scan worker thread finishes."""
        if self.scan_thread is not None and \
                self.scan_thread.is_alive() and \
                self.scan_thread.scan_active:
            try:
                self.scan_thread.join()
            except KeyboardInterrupt:
                pass

    def waitForScanActive(self):
        """Block (polling) until the scan worker thread has started running."""
        if self.scan_thread is not None:
            while True:
                if self.scan_thread.is_alive() and \
                        self.scan_thread.scan_active:
                    return
                time.sleep(0.1)

    ##    def scanMulti(self):
    ##        return _SaneIterator(self)

    def closeScan(self):
        "Close the SANE device after a scan."
        self.dev.closeScan()
class ScanThread(threading.Thread):
    """Worker thread that reads raw scan data from the SANE device and
    expands it into 32-bit BGRA/RGBA pixels written to a temp-file buffer.

    Progress (status, bytes_read) tuples are posted to update_queue; an
    EVENT_SCAN_CANCELED value on event_queue aborts the scan.
    """

    def __init__(self, device, byte_format='BGRA', update_queue=None, event_queue=None):
        threading.Thread.__init__(self)
        self.scan_active = True
        self.dev = device
        self.update_queue = update_queue  # optional progress queue
        self.event_queue = event_queue    # optional cancel-request queue
        # Decoded pixel data is spooled to a temp file, not kept in memory.
        self.buffer_fd, self.buffer_path = utils.make_temp_file(prefix='hpscan')
        self.buffer = os.fdopen(self.buffer_fd, "w+b")
        self.format = -1
        self.format_name = ''
        self.last_frame = -1
        self.pixels_per_line = -1
        self.lines = -1
        self.depth = -1
        self.bytes_per_line = -1
        self.pad_bytes = -1
        self.total_read = 0
        self.byte_format = byte_format
        self.total_write = 0

    def updateQueue(self, status, bytes_read):
        """Post a (status, bytes_read) progress tuple, if a queue was given."""
        if self.update_queue is not None:
            try:
                status = int(status)
            except (ValueError, TypeError):
                status = -1 #scanext.SANE_STATUS_GOOD
            self.update_queue.put((status, bytes_read))

    def run(self):
        from base.sixext import to_bytes_utf8
        #self.scan_active = True
        self.format, self.format_name, self.last_frame, self.pixels_per_line, \
            self.lines, self.depth, self.bytes_per_line = self.dev.getParameters()
        log.debug("format=%d" % self.format)
        log.debug("format_name=%s" % self.format_name)
        log.debug("last_frame=%d" % self.last_frame)
        log.debug("ppl=%d" % self.pixels_per_line)
        log.debug("lines=%d" % self.lines)
        log.debug("depth=%d" % self.depth)
        log.debug("bpl=%d" % self.bytes_per_line)
        log.debug("byte_format=%s" % self.byte_format)
        w = self.buffer.write
        # Read one scanline's worth of bytes per call.
        readbuffer = self.bytes_per_line
        if self.format == scanext.FRAME_RGB: # "Color"
            if self.depth == 8: # 8 bpp (32bit)
                self.pad_bytes = self.bytes_per_line - 3 * self.pixels_per_line
                log.debug("pad_bytes=%d" % self.pad_bytes)
                # Step direction through each RGB triplet: reversed for BGRA.
                dir = -1
                if self.byte_format == 'RGBA':
                    dir = 1
                try:
                    st, t = self.dev.readScan(readbuffer)
                except scanext.error as stObj:
                    st = stObj.args[0]
                self.updateQueue(st, 0)
                while st == scanext.SANE_STATUS_GOOD:
                    if t:
                        len_t = len(t)
                        # Expand packed RGB triplets to 4-byte pixels with an
                        # opaque alpha channel.
                        w(b"".join([t[index:index+3:dir] + b'\xff' for index in range(0,len_t - self.pad_bytes,3)]))
                        self.total_read += len_t
                        self.total_write += len_t+(len_t - self.pad_bytes)/3
                        self.updateQueue(st, self.total_read)
                        log.debug("Color Read %d bytes" % self.total_read)
                    else:
                        time.sleep(0.1)
                    try:
                        st, t = self.dev.readScan(readbuffer)
                    except scanext.error as stObj:
                        st = stObj.args[0]
                        self.updateQueue(st, self.total_read)
                        break
                    if self.checkCancel():
                        break
        elif self.format == scanext.FRAME_GRAY:
            if self.depth == 1: # 1 bpp lineart
                self.pad_bytes = self.bytes_per_line - (self.pixels_per_line + 7) // 8
                log.debug("pad_bytes=%d" % self.pad_bytes)
                try:
                    st, t = self.dev.readScan(readbuffer)
                except scanext.error as stObj:
                    st = stObj.args[0]
                self.updateQueue(st, 0)
                while st == scanext.SANE_STATUS_GOOD:
                    if t:
                        len_t = len(t)
                        # Expand each bit (MSB first) to an opaque black or
                        # white 4-byte pixel.
                        w(b''.join([b''.join([b"\x00\x00\x00\xff" if k & ord(t[index:index+1]) else b"\xff\xff\xff\xff" for k in [0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1]]) for index in range(0, len_t - self.pad_bytes)]))
                        self.total_read += len_t
                        self.total_write += ((len_t - self.pad_bytes) * 32)
                        self.updateQueue(st, self.total_read)
                        log.debug("Lineart Read %d bytes" % self.total_read)
                    else:
                        time.sleep(0.1)
                    try:
                        st, t = self.dev.readScan(readbuffer)
                    except scanext.error as stObj:
                        st = stObj.args[0]
                        self.updateQueue(st, self.total_read)
                        break
                    if self.checkCancel():
                        break
            elif self.depth == 8: # 8 bpp grayscale
                self.pad_bytes = self.bytes_per_line - self.pixels_per_line
                log.debug("pad_bytes=%d" % self.pad_bytes)
                try:
                    st, t = self.dev.readScan(readbuffer)
                except scanext.error as stObj:
                    st = stObj.args[0]
                self.updateQueue(st, 0)
                while st == scanext.SANE_STATUS_GOOD:
                    if t:
                        len_t = len(t)
                        # Replicate each gray byte into R, G and B plus alpha.
                        w(b"".join([3*t[index:index+1] + b'\xff' for index in range(0, len_t - self.pad_bytes)]))
                        self.total_read += len_t
                        self.total_write += ((len_t - self.pad_bytes) * 4)
                        self.updateQueue(st, self.total_read)
                        log.debug("Gray Read %d bytes" % self.total_read)
                    else:
                        time.sleep(0.1)
                    try:
                        st, t = self.dev.readScan(readbuffer)
                    except scanext.error as stObj:
                        st = stObj.args[0]
                        self.updateQueue(st, self.total_read)
                        break
                    if self.checkCancel():
                        break
        #self.dev.cancelScan()
        self.buffer.seek(0)
        self.scan_active = False
        log.debug("Scan thread exiting...")

    def checkCancel(self):
        """Drain the event queue; cancel the device scan if requested.

        Returns:
            True if an EVENT_SCAN_CANCELED event was seen.
        """
        canceled = False
        if self.event_queue is None:
            # Robustness fix: no event queue was supplied, nothing to check.
            return canceled
        while self.event_queue.qsize():
            try:
                event = self.event_queue.get(0)
                if event == EVENT_SCAN_CANCELED:
                    canceled = True
                    log.debug("Cancel pressed!")
                    # Fix: was misspelled "canclScan()", which raised
                    # AttributeError whenever a cancel was requested.
                    self.dev.cancelScan()
            except queue.Empty:
                break
        return canceled
def init():
    """Initialize the SANE subsystem via the scanext extension."""
    return scanext.init()
def deInit():
    """Shut down the SANE subsystem via the scanext extension."""
    return scanext.deInit()
def openDevice(dev):
    "Open a device for scanning"
    # dev is a SANE device name string; returns a ScanDevice wrapper.
    return ScanDevice(dev)
def getDevices(local_only=0):
    """Return the list of available SANE devices (local only if requested)."""
    return scanext.getDevices(local_only)
def reportError(code):
    """Log the human-readable message for a SANE status code."""
    log.error("SANE: %s (code=%d)" % (scanext.getErrorMessage(code), code))
| matrumz/RPi_Custom_Files | Printing/hplip-3.15.2/scan/sane.py | Python | gpl-2.0 | 21,004 | [
"ADF"
] | b4ba8183a78a558f1d5d3b5428fc760f67475542425db29d5612a62afb4e5352 |
#!/usr/bin/env python
#<examples/doc_with_itercb.py>
from numpy import linspace, random
import matplotlib.pylab as pylab
from lmfit.models import LinearModel, GaussianModel
from lmfit.lineshapes import gaussian
def per_iteration(pars, iter, resid, *args, **kws):
    """lmfit iteration callback: print parameter values on the first few
    iterations and every 10th one thereafter.

    Args:
        pars: lmfit Parameters; valuesdict() maps names to current values.
        iter: iteration counter supplied by lmfit.
        resid: residual array (unused here).
        *args, **kws: extra fit arguments, echoed for inspection.
    """
    if iter < 3 or iter % 10 == 0:
        out = ['== %i ' % iter]
        for key, val in pars.valuesdict().items():
            out.append('%s=%.3f' % (key, val))
        # Fix: the original used Python-2-only print statements, which are a
        # SyntaxError under Python 3.  These call forms run on both.
        print(', '.join(out))
        print((args, kws))
# Build synthetic data: a Gaussian peak on a sloped, noisy linear background.
x = linspace(0., 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y = y - .20*x + 3.333 + random.normal(scale=0.23, size=len(x))
# Composite model: Gaussian peak plus linear background, with prefixed
# parameter names (peak_*, bkg_*).
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
pars = mod.make_params()
# Deliberately rough starting guesses so the iteration callback has visible
# progress to report.
pars['peak_amplitude'].value = 3.0
pars['peak_center'].value = 6.0
pars['peak_sigma'].value = 2.0
pars['bkg_intercept'].value = 0.0
pars['bkg_slope'].value = 0.0
# iter_cb is invoked by lmfit on every fit iteration.
out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
pylab.plot(x, y, 'b--')
# print(' Nfev = ', out.nfev)
print( out.fit_report())
pylab.plot(x, out.best_fit, 'k-')
pylab.show()
#<end examples/doc_with_itercb.py>
| DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/simulate_data/lmfit-py/examples/doc_model_with_iter_callback.py | Python | apache-2.0 | 1,109 | [
"Gaussian"
] | 2b32bcfe49c61b9edb7a78daa0d5f67628bc825f30f62745c4b9430538be111e |
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
import functools
from io import StringIO
import itertools
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
Iterator,
Literal,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
lib,
properties,
)
from pandas._libs.hashtable import duplicated
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
AnyArrayLike,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
DtypeObj,
FilePath,
FillnaOptions,
FloatFormatType,
FormattersType,
Frequency,
IndexKeyFunc,
IndexLabel,
Level,
PythonFuncType,
ReadBuffer,
Renamer,
Scalar,
StorageOptions,
Suffixes,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
WriteBuffer,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
rewrite_axis_style_signature,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_ascending,
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
LossySetitemError,
can_hold_element,
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_native,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
infer_dtype_from_object,
is_1d_only_ea_dtype,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
common as com,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.apply import (
reconstruct_func,
relabel_result,
)
from pandas.core.array_algos.take import take_2d_multi
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import (
extract_array,
sanitize_array,
sanitize_masked_array,
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import check_key_length
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
default_index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import (
MultiIndex,
maybe_droplevels,
)
from pandas.core.indexing import (
check_bool_indexer,
check_deprecated_indexers,
convert_to_index_sliceable,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
)
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
dict_to_mgr,
mgr_to_mgr,
ndarray_to_mgr,
nested_data_to_arrays,
rec_array_to_mgr,
reorder_arrays,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import (
get_group_index,
lexsort_indexer,
nargsort,
)
from pandas.io.common import get_handle
from pandas.io.formats import (
console,
format as fmt,
)
from pandas.io.formats.info import (
INFO_DOCSTRING,
DataFrameInfo,
frame_sub_kwargs,
)
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.internals import SingleDataManager
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : bool or None, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
A named Series object is treated as a DataFrame with a single named column.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
.. warning::
If both key columns contain rows where the key is a null value, those
rows will be matched against each other. This is different from usual SQL
join behaviour and can lead to unexpected results.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order. If a dict contains Series
which have an index defined, it is aligned by its index.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame when data does not have them,
defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels,
will perform column selection instead.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool or None, default None
Copy data from inputs.
For dict data, the default of None behaves like ``copy=True``. For DataFrame
or 2d ndarray input, the default of None behaves like ``copy=False``.
If data is a dict containing one or more Series (possibly of different dtypes),
``copy=False`` will ensure that these inputs are not copied.
.. versionchanged:: 1.3.0
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from a dictionary including Series:
>>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])}
>>> pd.DataFrame(data=d, index=[0, 1, 2, 3])
col1 col2
0 0 NaN
1 1 NaN
2 2 2.0
3 3 3.0
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from a numpy ndarray that has labeled columns:
>>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
>>> df3 = pd.DataFrame(data, columns=['c', 'a'])
...
>>> df3
c a
0 3 1
1 6 4
2 9 7
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
# Class-level pandas-internal configuration for DataFrame.
# Names here are consumed by NDFrame machinery (attribute lookup,
# pickling, accessor registration) — not part of the public API.
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
# Types __array_ufunc__ dispatch is willing to handle alongside DataFrame.
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
_accessors: set[str] = {"sparse"}
_hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([])
_mgr: BlockManager | ArrayManager
@property
def _constructor(self) -> Callable[..., DataFrame]:
    """Class used to build results of the same dimensionality as self."""
    return DataFrame
# Class used to build 1-D slices (rows/columns) of this frame.
_constructor_sliced: Callable[..., Series] = Series
# ----------------------------------------------------------------------
# Constructors
def __init__(
    self,
    data=None,
    index: Axes | None = None,
    columns: Axes | None = None,
    dtype: Dtype | None = None,
    copy: bool | None = None,
):
    """
    Build the internal data manager from any supported ``data`` input.

    Normalizes ``data`` — dict, existing Manager, masked array,
    ndarray/Series/Index/ExtensionArray, generic list-like, or scalar —
    into a BlockManager/ArrayManager and hands it to ``NDFrame.__init__``.

    Parameters
    ----------
    data : object, optional
        Input data; ``None`` is treated as an empty dict.
    index, columns : Axes, optional
        Axis labels for the resulting frame.
    dtype : Dtype, optional
        Single dtype to force; validated via ``self._validate_dtype``.
    copy : bool, optional
        Whether to copy the input. ``None`` selects a per-input default:
        True for dicts (pre-GH#38939 behavior), otherwise mostly False.
    """
    if data is None:
        data = {}
    if dtype is not None:
        dtype = self._validate_dtype(dtype)
    if isinstance(data, DataFrame):
        # unwrap to the underlying manager; handled by the branches below
        data = data._mgr
    if isinstance(data, (BlockManager, ArrayManager)):
        # first check if a Manager is passed without any other arguments
        # -> use fastpath (without checking Manager type)
        if index is None and columns is None and dtype is None and not copy:
            # GH#33357 fastpath
            NDFrame.__init__(self, data)
            return
    manager = get_option("mode.data_manager")
    if copy is None:
        if isinstance(data, dict):
            # retain pre-GH#38939 default behavior
            copy = True
        elif (
            manager == "array"
            and isinstance(data, (np.ndarray, ExtensionArray))
            and data.ndim == 2
        ):
            # INFO(ArrayManager) by default copy the 2D input array to get
            # contiguous 1D arrays
            copy = True
        else:
            copy = False
    if isinstance(data, (BlockManager, ArrayManager)):
        mgr = self._init_mgr(
            data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
        )
    elif isinstance(data, dict):
        # GH#38939 de facto copy defaults to False only in non-dict cases
        mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
    elif isinstance(data, ma.MaskedArray):
        import numpy.ma.mrecords as mrecords

        # masked recarray
        if isinstance(data, mrecords.MaskedRecords):
            mgr = rec_array_to_mgr(
                data,
                index,
                columns,
                dtype,
                copy,
                typ=manager,
            )
            warnings.warn(
                "Support for MaskedRecords is deprecated and will be "
                "removed in a future version. Pass "
                "{name: data[name] for name in data.dtype.names} instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        # a masked array
        else:
            data = sanitize_masked_array(data)
            mgr = ndarray_to_mgr(
                data,
                index,
                columns,
                dtype=dtype,
                copy=copy,
                typ=manager,
            )
    elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)):
        if data.dtype.names:
            # i.e. numpy structured array
            data = cast(np.ndarray, data)
            mgr = rec_array_to_mgr(
                data,
                index,
                columns,
                dtype,
                copy,
                typ=manager,
            )
        elif getattr(data, "name", None) is not None:
            # i.e. Series/Index with non-None name
            mgr = dict_to_mgr(
                # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no
                # attribute "name"
                {data.name: data},  # type: ignore[union-attr]
                index,
                columns,
                dtype=dtype,
                typ=manager,
            )
        else:
            mgr = ndarray_to_mgr(
                data,
                index,
                columns,
                dtype=dtype,
                copy=copy,
                typ=manager,
            )
    # For data is list-like, or Iterable (will consume into list)
    elif is_list_like(data):
        if not isinstance(data, (abc.Sequence, ExtensionArray)):
            if hasattr(data, "__array__"):
                # GH#44616 big perf improvement for e.g. pytorch tensor
                data = np.asarray(data)
            else:
                data = list(data)
        if len(data) > 0:
            if is_dataclass(data[0]):
                data = dataclasses_to_dicts(data)
            if not isinstance(data, np.ndarray) and treat_as_nested(data):
                # exclude ndarray as we may have cast it a few lines above
                if columns is not None:
                    # error: Argument 1 to "ensure_index" has incompatible type
                    # "Collection[Any]"; expected "Union[Union[Union[ExtensionArray,
                    # ndarray], Index, Series], Sequence[Any]]"
                    columns = ensure_index(columns)  # type: ignore[arg-type]
                arrays, columns, index = nested_data_to_arrays(
                    # error: Argument 3 to "nested_data_to_arrays" has incompatible
                    # type "Optional[Collection[Any]]"; expected "Optional[Index]"
                    data,
                    columns,
                    index,  # type: ignore[arg-type]
                    dtype,
                )
                mgr = arrays_to_mgr(
                    arrays,
                    columns,
                    index,
                    dtype=dtype,
                    typ=manager,
                )
            else:
                mgr = ndarray_to_mgr(
                    data,
                    index,
                    columns,
                    dtype=dtype,
                    copy=copy,
                    typ=manager,
                )
        else:
            # empty list-like: build an empty frame honoring the given axes
            mgr = dict_to_mgr(
                {},
                index,
                columns,
                dtype=dtype,
                typ=manager,
            )
    # For data is scalar
    else:
        # a scalar can only be broadcast when both axes are given explicitly
        if index is None or columns is None:
            raise ValueError("DataFrame constructor not properly called!")
        # Argument 1 to "ensure_index" has incompatible type "Collection[Any]";
        # expected "Union[Union[Union[ExtensionArray, ndarray],
        # Index, Series], Sequence[Any]]"
        index = ensure_index(index)  # type: ignore[arg-type]
        # Argument 1 to "ensure_index" has incompatible type "Collection[Any]";
        # expected "Union[Union[Union[ExtensionArray, ndarray],
        # Index, Series], Sequence[Any]]"
        columns = ensure_index(columns)  # type: ignore[arg-type]
        if not dtype:
            dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
        # For data is a scalar extension dtype
        if isinstance(dtype, ExtensionDtype):
            # TODO(EA2D): special case not needed with 2D EAs
            values = [
                construct_1d_arraylike_from_scalar(data, len(index), dtype)
                for _ in range(len(columns))
            ]
            mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager)
        else:
            arr2d = construct_2d_arraylike_from_scalar(
                data,
                len(index),
                len(columns),
                dtype,
                copy,
            )
            mgr = ndarray_to_mgr(
                arr2d,
                index,
                columns,
                dtype=arr2d.dtype,
                copy=False,
                typ=manager,
            )
    # ensure correct Manager type according to settings
    mgr = mgr_to_mgr(mgr, typ=manager)
    NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> list[Index]:
    """
    Return the row and column axis labels as a two-element list.

    The row axis (index) comes first, the column axis second.

    Examples
    --------
    >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    >>> df.axes
    [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
    dtype='object')]
    """
    row_labels = self.index
    column_labels = self.columns
    return [row_labels, column_labels]
@property
def shape(self) -> tuple[int, int]:
    """
    Return the dimensionality of the DataFrame as ``(n_rows, n_columns)``.

    See Also
    --------
    ndarray.shape : Tuple of array dimensions.

    Examples
    --------
    >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    >>> df.shape
    (2, 2)
    >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
    ...                    'col3': [5, 6]})
    >>> df.shape
    (2, 3)
    """
    n_rows = len(self.index)
    n_cols = len(self.columns)
    return (n_rows, n_cols)
@property
def _is_homogeneous_type(self) -> bool:
    """
    Whether every column of the DataFrame has the same dtype.

    Returns
    -------
    bool

    See Also
    --------
    Index._is_homogeneous_type : Whether the object has a single
        dtype.
    MultiIndex._is_homogeneous_type : Whether all the levels of a
        MultiIndex have the same dtype.

    Examples
    --------
    >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
    True
    >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
    False

    Items with the same type but different sizes (e.g. int32 vs int64)
    are considered different types.
    """
    mgr = self._mgr
    if isinstance(mgr, ArrayManager):
        distinct_dtypes = {arr.dtype for arr in mgr.arrays}
        return len(distinct_dtypes) == 1
    if mgr.any_extension_types:
        distinct_dtypes = {block.dtype for block in mgr.blocks}
        return len(distinct_dtypes) == 1
    return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
    """
    True when this frame can be transposed without allocating new arrays.

    Requires Block-backed storage with exactly one block whose dtype is
    not restricted to 1-D ExtensionArrays.
    """
    mgr = self._mgr
    if isinstance(mgr, ArrayManager):
        return False
    if len(mgr.blocks) != 1:
        return False
    # TODO(EA2D) special case would be unnecessary with 2D EAs
    single_dtype = mgr.blocks[0].dtype
    return not is_1d_only_ea_dtype(single_dtype)
# error: Return type "Union[ndarray, DatetimeArray, TimedeltaArray]" of
# "_values" incompatible with return type "ndarray" in supertype "NDFrame"
@property
def _values(  # type: ignore[override]
    self,
) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:
    """
    Analogue to ._values that may return a 2D ExtensionArray.

    When the frame is backed by exactly one homogeneous block/array, the
    backing array itself is returned (transposed for Block storage, since
    blocks are stored column-major); otherwise this falls back to the
    copying ``self.values`` path.
    """
    self._consolidate_inplace()
    mgr = self._mgr
    if isinstance(mgr, ArrayManager):
        if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype):
            # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]"
            # has no attribute "reshape"
            return mgr.arrays[0].reshape(-1, 1)  # type: ignore[union-attr]
        return self.values
    blocks = mgr.blocks
    if len(blocks) != 1:
        # heterogeneous storage: no single backing array to expose
        return self.values
    arr = blocks[0].values
    if arr.ndim == 1:
        # non-2D ExtensionArray
        return self.values
    # more generally, whatever we allow in NDArrayBackedExtensionBlock
    arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr)
    return arr.T
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
    """
    True when the number of rows does not exceed ``display.max_rows``.
    """
    row_limit = get_option("display.max_rows")
    return len(self) <= row_limit
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
    """
    Check if full repr fits in horizontal boundaries imposed by the display
    options width and max_columns.

    In case of non-interactive session, no boundaries apply.

    `ignore_width` is here so ipynb+HTML output can behave the way
    users expect. display.max_columns remains in effect.
    GH3541, GH3573
    """
    width, height = console.get_console_size()
    max_columns = get_option("display.max_columns")
    nb_columns = len(self.columns)
    # exceed max columns
    if (max_columns and nb_columns > max_columns) or (
        (not ignore_width) and width and nb_columns > (width // 2)
    ):
        return False
    # used by repr_html under IPython notebook or scripts ignore terminal
    # dims
    if ignore_width or not console.in_interactive_session():
        return True
    if get_option("display.width") is not None or console.in_ipython_frontend():
        # check at least the column row for excessive width
        max_rows = 1
    else:
        max_rows = get_option("display.max_rows")
    # when auto-detecting, so width=None and not in ipython front end
    # check whether repr fits horizontal by actually checking
    # the width of the rendered repr
    buf = StringIO()
    # only care about the stuff we'll actually print out
    # and to_string on entire frame may be expensive
    d = self
    if max_rows is not None:  # rows are bounded: truncate before rendering
        # min of two, where one may be None
        d = d.iloc[: min(max_rows, len(d))]
    else:
        # unlimited rows: rendering the whole frame just to measure its
        # width could be arbitrarily expensive, so assume it fits
        return True
    d.to_string(buf=buf)
    value = buf.getvalue()
    repr_width = max(len(line) for line in value.split("\n"))
    return repr_width < width
def _info_repr(self) -> bool:
    """
    Whether ``repr`` should fall back to the ``info`` view.

    That happens when ``display.large_repr`` is set to "info" and the
    full repr does not fit the configured display dimensions.
    """
    if get_option("display.large_repr") != "info":
        return False
    fits = self._repr_fits_horizontal_() and self._repr_fits_vertical_()
    return not fits
def __repr__(self) -> str:
    """
    Return the console string representation of this DataFrame.
    """
    if self._info_repr():
        stream = StringIO()
        self.info(buf=stream)
        return stream.getvalue()
    params = fmt.get_dataframe_repr_params()
    return self.to_string(**params)
def _repr_html_(self) -> str | None:
    """
    Return an HTML representation for a particular DataFrame.

    Mainly for IPython notebook.

    Returns
    -------
    str or None
        HTML markup, or None when ``display.notebook_repr_html`` is off
        (so IPython falls back to the plain-text repr).
    """
    if self._info_repr():
        buf = StringIO()
        self.info(buf=buf)
        # need to escape the <class>, should be the first line.
        # FIX: previously these replacements were no-ops ("<" -> "<"),
        # leaving the first line's tag to be swallowed by the browser;
        # escape to HTML entities instead.
        val = buf.getvalue().replace("<", r"&lt;", 1)
        val = val.replace(">", r"&gt;", 1)
        return "<pre>" + val + "</pre>"
    if get_option("display.notebook_repr_html"):
        max_rows = get_option("display.max_rows")
        min_rows = get_option("display.min_rows")
        max_cols = get_option("display.max_columns")
        show_dimensions = get_option("display.show_dimensions")
        formatter = fmt.DataFrameFormatter(
            self,
            columns=None,
            col_space=None,
            na_rep="NaN",
            formatters=None,
            float_format=None,
            sparsify=None,
            justify=None,
            index_names=True,
            header=True,
            index=True,
            bold_rows=True,
            escape=True,
            max_rows=max_rows,
            min_rows=min_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
            decimal=".",
        )
        return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
    else:
        return None
# Overload: with ``buf=None`` (the default) the rendered table is
# returned as a string.
@overload
def to_string(
    self,
    buf: None = ...,
    columns: Sequence[str] | None = ...,
    col_space: int | list[int] | dict[Hashable, int] | None = ...,
    header: bool | Sequence[str] = ...,
    index: bool = ...,
    na_rep: str = ...,
    formatters: fmt.FormattersType | None = ...,
    float_format: fmt.FloatFormatType | None = ...,
    sparsify: bool | None = ...,
    index_names: bool = ...,
    justify: str | None = ...,
    max_rows: int | None = ...,
    max_cols: int | None = ...,
    show_dimensions: bool = ...,
    decimal: str = ...,
    line_width: int | None = ...,
    min_rows: int | None = ...,
    max_colwidth: int | None = ...,
    encoding: str | None = ...,
) -> str:
    ...
# Overload: when a path or writable buffer is supplied, output is
# written there and None is returned.
@overload
def to_string(
    self,
    buf: FilePath | WriteBuffer[str],
    columns: Sequence[str] | None = ...,
    col_space: int | list[int] | dict[Hashable, int] | None = ...,
    header: bool | Sequence[str] = ...,
    index: bool = ...,
    na_rep: str = ...,
    formatters: fmt.FormattersType | None = ...,
    float_format: fmt.FloatFormatType | None = ...,
    sparsify: bool | None = ...,
    index_names: bool = ...,
    justify: str | None = ...,
    max_rows: int | None = ...,
    max_cols: int | None = ...,
    show_dimensions: bool = ...,
    decimal: str = ...,
    line_width: int | None = ...,
    min_rows: int | None = ...,
    max_colwidth: int | None = ...,
    encoding: str | None = ...,
) -> None:
    ...
# NOTE: the %(...)s placeholders in the docstring below are filled in by
# the @Substitution decorators — do not convert them to f-strings.
@Substitution(
    header_type="bool or sequence of str",
    header="Write out the column names. If a list of strings "
    "is given, it is assumed to be aliases for the "
    "column names",
    col_space_type="int, list or dict of int",
    col_space="The minimum width of each column. If a list of ints is given "
    "every integers corresponds with one column. If a dict is given, the key "
    "references the column, while the value defines the space to use.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    columns: Sequence[str] | None = None,
    col_space: int | list[int] | dict[Hashable, int] | None = None,
    header: bool | Sequence[str] = True,
    index: bool = True,
    na_rep: str = "NaN",
    formatters: fmt.FormattersType | None = None,
    float_format: fmt.FloatFormatType | None = None,
    sparsify: bool | None = None,
    index_names: bool = True,
    justify: str | None = None,
    max_rows: int | None = None,
    max_cols: int | None = None,
    show_dimensions: bool = False,
    decimal: str = ".",
    line_width: int | None = None,
    min_rows: int | None = None,
    max_colwidth: int | None = None,
    encoding: str | None = None,
) -> str | None:
    """
    Render a DataFrame to a console-friendly tabular output.
    %(shared_params)s
    line_width : int, optional
        Width to wrap a line in characters.
    min_rows : int, optional
        The number of rows to display in the console in a truncated repr
        (when number of rows is above `max_rows`).
    max_colwidth : int, optional
        Max width to truncate each column in characters. By default, no limit.
        .. versionadded:: 1.0.0
    encoding : str, default "utf-8"
        Set character encoding.
        .. versionadded:: 1.0
    %(returns)s
    See Also
    --------
    to_html : Convert DataFrame to HTML.
    Examples
    --------
    >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
    >>> df = pd.DataFrame(d)
    >>> print(df.to_string())
       col1  col2
    0     1     4
    1     2     5
    2     3     6
    """
    from pandas import option_context

    # honor max_colwidth only for the duration of this render
    with option_context("display.max_colwidth", max_colwidth):
        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            formatters=formatters,
            float_format=float_format,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            header=header,
            index=index,
            min_rows=min_rows,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
            decimal=decimal,
        )
        return fmt.DataFrameRenderer(formatter).to_string(
            buf=buf,
            encoding=encoding,
            line_width=line_width,
        )
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
    """
    Return a ``Styler`` object for this DataFrame.

    The Styler holds methods for building a styled HTML representation
    of the frame.

    See Also
    --------
    io.formats.style.Styler : Helps style a DataFrame or Series according to the
        data with HTML and CSS.
    """
    # imported lazily to keep DataFrame import cheap
    from pandas.io.formats.style import Styler

    styler = Styler(self)
    return styler
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[tuple[Hashable, Series]]:
    # Fast path: unique columns with a warm item cache lets us serve
    # cached Series objects; otherwise extract each column by position.
    if self.columns.is_unique and hasattr(self, "_item_cache"):
        for label in self.columns:
            yield label, self._get_item_cache(label)
    else:
        for position, label in enumerate(self.columns):
            yield label, self._ixs(position, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[tuple[Hashable, Series]]:
    # Deprecated alias of .items(); warn, then delegate.
    warnings.warn(
        "iteritems is deprecated and will be removed in a future version. "
        "Use .items instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    yield from self.items()
def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
    """
    Iterate over DataFrame rows as (index, Series) pairs.

    Yields
    ------
    index : label or tuple of label
        The index of the row. A tuple for a `MultiIndex`.
    data : Series
        The data of the row as a Series.

    See Also
    --------
    DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
    DataFrame.items : Iterate over (column name, Series) pairs.

    Notes
    -----
    1. Because ``iterrows`` returns a Series for each row, it does
       **not** preserve dtypes across the rows (dtypes are preserved
       across columns for DataFrames). For example, a row mixing an int
       and a float column comes back entirely as float64. To preserve
       dtypes while iterating over the rows, prefer :meth:`itertuples`,
       which returns namedtuples of the values and is generally faster
       than ``iterrows``.
    2. You should **never modify** something you are iterating over.
       This is not guaranteed to work in all cases. Depending on the
       data types, the iterator returns a copy and not a view, and
       writing to it will have no effect.
    """
    column_labels = self.columns
    row_klass = self._constructor_sliced
    for row_label, row_values in zip(self.index, self.values):
        row_series = row_klass(row_values, index=column_labels, name=row_label)
        yield row_label, row_series
def itertuples(
    self, index: bool = True, name: str | None = "Pandas"
) -> Iterable[tuple[Any, ...]]:
    """
    Iterate over DataFrame rows as namedtuples.

    Parameters
    ----------
    index : bool, default True
        If True, return the index as the first element of the tuple.
    name : str or None, default "Pandas"
        The name of the returned namedtuples, or None to yield regular
        tuples.

    Returns
    -------
    iterator
        Yields one (named)tuple per row; the first field is the index
        when ``index`` is True, and the remaining fields are the column
        values.

    See Also
    --------
    DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
        pairs.
    DataFrame.items : Iterate over (column name, Series) pairs.

    Notes
    -----
    The column names will be renamed to positional names if they are
    invalid Python identifiers, repeated, or start with an underscore.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
    ...                   index=['dog', 'hawk'])
    >>> for row in df.itertuples():
    ...     print(row)
    ...
    Pandas(Index='dog', num_legs=4, num_wings=0)
    Pandas(Index='hawk', num_legs=2, num_wings=2)

    Pass ``index=False`` to drop the index field, or ``name='Animal'``
    to rename the yielded namedtuples.
    """
    field_names = list(self.columns)
    # use integer indexing because of possible duplicate column names
    column_arrays: list[Any] = [
        self.iloc[:, pos] for pos in range(len(self.columns))
    ]
    if index:
        field_names.insert(0, "Index")
        column_arrays.insert(0, self.index)
    if name is None:
        # fallback to regular tuples
        return zip(*column_arrays)
    # https://github.com/python/mypy/issues/9046
    # error: namedtuple() expects a string literal as the first argument
    row_cls = collections.namedtuple(  # type: ignore[misc]
        name, field_names, rename=True
    )
    return map(row_cls._make, zip(*column_arrays))
def __len__(self) -> int:
    """
    Return the number of rows, i.e. the length of the index.
    """
    n_rows = len(self.index)
    return n_rows
@overload
def dot(self, other: Series) -> Series:
    ...
@overload
def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
    ...
def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
    """
    Compute the matrix multiplication between the DataFrame and other.

    This method computes the matrix product between the DataFrame and the
    values of an other Series, DataFrame or a numpy array.

    It can also be called using ``self @ other`` in Python >= 3.5.

    Parameters
    ----------
    other : Series, DataFrame or array-like
        The other object to compute the matrix product with.

    Returns
    -------
    Series or DataFrame
        If other is a Series, return the matrix product between self and
        other as a Series. If other is a DataFrame or a numpy.array, return
        the matrix product of self and other in a DataFrame of a np.array.

    See Also
    --------
    Series.dot: Similar method for Series.

    Notes
    -----
    The dimensions of DataFrame and other must be compatible in order to
    compute the matrix multiplication. In addition, the column names of
    DataFrame and the index of other must contain the same values, as they
    will be aligned prior to the multiplication.

    The dot method for Series computes the inner product, instead of the
    matrix product here.

    Examples
    --------
    >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
    >>> s = pd.Series([1, 1, 2, 1])
    >>> df.dot(s)
    0    -4
    1     5
    dtype: int64
    >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
    >>> df.dot(other)
       0  1
    0  1  4
    1  2  2

    The result is the same as with ``df @ other`` or a plain np.array,
    and reindexing/shuffling `other` does not change the result because
    the operands are aligned before multiplying.
    """
    if isinstance(other, (Series, DataFrame)):
        # align self's columns with other's index before multiplying;
        # the union must not introduce labels missing from either side
        common = self.columns.union(other.index)
        if len(common) > len(self.columns) or len(common) > len(other.index):
            raise ValueError("matrices are not aligned")
        left = self.reindex(columns=common, copy=False)
        right = other.reindex(index=common, copy=False)
        lvals = left.values
        rvals = right._values
    else:
        # array-like: no labels to align on, use positional values
        left = self
        lvals = self.values
        rvals = np.asarray(other)
    if lvals.shape[1] != rvals.shape[0]:
        raise ValueError(
            f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
        )
    # wrap the raw product in the type matching the operand kind
    if isinstance(other, DataFrame):
        return self._constructor(
            np.dot(lvals, rvals), index=left.index, columns=other.columns
        )
    elif isinstance(other, Series):
        return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
    elif isinstance(rvals, (np.ndarray, Index)):
        result = np.dot(lvals, rvals)
        if result.ndim == 2:
            return self._constructor(result, index=left.index)
        else:
            return self._constructor_sliced(result, index=left.index)
    else:  # pragma: no cover
        raise TypeError(f"unsupported type: {type(other)}")
@overload
def __matmul__(self, other: Series) -> Series:
    ...
@overload
def __matmul__(
    self, other: AnyArrayLike | DataFrame | Series
) -> DataFrame | Series:
    ...
def __matmul__(
    self, other: AnyArrayLike | DataFrame | Series
) -> DataFrame | Series:
    """
    Matrix multiplication using binary `@` operator in Python>=3.5.

    Delegates to :meth:`DataFrame.dot`.
    """
    return self.dot(other)
def __rmatmul__(self, other):
    """
    Matrix multiplication via `@` with the DataFrame on the right side.

    Implemented as ``(self.T @ other.T).T`` so :meth:`dot` handles the
    alignment and multiplication.
    """
    transposed = self.T
    try:
        product = transposed.dot(np.transpose(other))
    except ValueError as err:
        if "shape mismatch" not in str(err):
            raise
        # GH#21581 give exception message for original shapes
        raise ValueError(
            f"shapes {np.shape(other)} and {self.shape} not aligned"
        ) from err
    return product.T
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(
    cls,
    data,
    orient: str = "columns",
    dtype: Dtype | None = None,
    columns=None,
) -> DataFrame:
    """
    Construct DataFrame from dict of array-like or dicts.

    Creates DataFrame object from dictionary by columns or by index
    allowing dtype specification.

    Parameters
    ----------
    data : dict
        Of the form {field : array-like} or {field : dict}.
    orient : {'columns', 'index', 'tight'}, default 'columns'
        The "orientation" of the data. If the keys of the passed dict
        should be the columns of the resulting DataFrame, pass 'columns'
        (default). Otherwise if the keys should be rows, pass 'index'.
        If 'tight', assume a dict with keys ['index', 'columns', 'data',
        'index_names', 'column_names'].

        .. versionadded:: 1.4.0
           'tight' as an allowed value for the ``orient`` argument

    dtype : dtype, default None
        Data type to force, otherwise infer.
    columns : list, default None
        Column labels to use when ``orient='index'``. Raises a ValueError
        if used with ``orient='columns'`` or ``orient='tight'``.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.from_records : DataFrame from structured ndarray, sequence
        of tuples or dicts, or DataFrame.
    DataFrame : DataFrame object creation using constructor.
    DataFrame.to_dict : Convert the DataFrame to a dictionary.

    Examples
    --------
    By default the keys of the dict become the DataFrame columns:

    >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
    >>> pd.DataFrame.from_dict(data)
       col_1 col_2
    0      3     a
    1      2     b
    2      1     c
    3      0     d

    Specify ``orient='index'`` to create the DataFrame using dictionary
    keys as rows:

    >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
    >>> pd.DataFrame.from_dict(data, orient='index')
           0  1  2  3
    row_1  3  2  1  0
    row_2  a  b  c  d

    When using the 'index' orientation, the column names can be
    specified manually:

    >>> pd.DataFrame.from_dict(data, orient='index',
    ...                        columns=['A', 'B', 'C', 'D'])
           A  B  C  D
    row_1  3  2  1  0
    row_2  a  b  c  d

    Specify ``orient='tight'`` to create the DataFrame using a 'tight'
    format:

    >>> data = {'index': [('a', 'b'), ('a', 'c')],
    ...         'columns': [('x', 1), ('y', 2)],
    ...         'data': [[1, 3], [2, 4]],
    ...         'index_names': ['n1', 'n2'],
    ...         'column_names': ['z1', 'z2']}
    >>> pd.DataFrame.from_dict(data, orient='tight')
    z1     x  y
    z2     1  2
    n1 n2
    a  b   1  3
       c   2  4
    """
    index = None
    orient = orient.lower()
    if orient == "index":
        if len(data) > 0:
            # TODO speed up Series case
            # Peek at the first value without materializing all of them.
            if isinstance(next(iter(data.values())), (Series, dict)):
                data = _from_nested_dict(data)
            else:
                data, index = list(data.values()), list(data.keys())
    elif orient == "columns" or orient == "tight":
        if columns is not None:
            raise ValueError(f"cannot use columns parameter with orient='{orient}'")
    else:  # pragma: no cover
        # Previous message omitted 'tight', which has been a valid value
        # for ``orient`` since 1.4.0 — list all accepted values.
        raise ValueError(
            f"Expected 'index', 'columns' or 'tight' for orient parameter. "
            f"Got '{orient}' instead"
        )

    if orient != "tight":
        return cls(data, index=index, columns=columns, dtype=dtype)
    else:
        realdata = data["data"]

        def create_index(indexlist, namelist):
            # Rebuild a (Multi)Index from the 'tight' representation lists.
            index: Index
            if len(namelist) > 1:
                index = MultiIndex.from_tuples(indexlist, names=namelist)
            else:
                index = Index(indexlist, name=namelist[0])
            return index

        index = create_index(data["index"], data["index_names"])
        columns = create_index(data["columns"], data["column_names"])
        return cls(realdata, index=index, columns=columns, dtype=dtype)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
if dtype is not None:
dtype = np.dtype(dtype)
result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient: str = "dict", into=dict):
    """
    Convert the DataFrame to a dictionary.

    The type of the key-value pairs can be customized with the parameters
    (see below).

    Parameters
    ----------
    orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
        Determines the type of the values of the dictionary.

        - 'dict' (default) : dict like {column -> {index -> value}}
        - 'list' : dict like {column -> [values]}
        - 'series' : dict like {column -> Series(values)}
        - 'split' : dict like
          {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
        - 'tight' : dict like
          {'index' -> [index], 'columns' -> [columns], 'data' -> [values],
          'index_names' -> [index.names], 'column_names' -> [column.names]}
        - 'records' : list like
          [{column -> value}, ... , {column -> value}]
        - 'index' : dict like {index -> {column -> value}}

        Abbreviations are allowed. `s` indicates `series` and `sp`
        indicates `split`.

        .. versionadded:: 1.4.0
           'tight' as an allowed value for the ``orient`` argument

    into : class, default dict
        The collections.abc.Mapping subclass used for all Mappings
        in the return value.  Can be the actual class or an empty
        instance of the mapping type you want.  If you want a
        collections.defaultdict, you must pass it initialized.

    Returns
    -------
    dict, list or collections.abc.Mapping
        Return a collections.abc.Mapping object representing the DataFrame.
        The resulting transformation depends on the `orient` parameter.

    See Also
    --------
    DataFrame.from_dict: Create a DataFrame from a dictionary.
    DataFrame.to_json: Convert a DataFrame to JSON format.

    Examples
    --------
    >>> df = pd.DataFrame({'col1': [1, 2],
    ...                    'col2': [0.5, 0.75]},
    ...                   index=['row1', 'row2'])
    >>> df
          col1  col2
    row1     1  0.50
    row2     2  0.75
    >>> df.to_dict()
    {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}

    You can specify the return orientation.

    >>> df.to_dict('series')
    {'col1': row1    1
             row2    2
    Name: col1, dtype: int64,
    'col2': row1    0.50
            row2    0.75
    Name: col2, dtype: float64}

    >>> df.to_dict('split')
    {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
     'data': [[1, 0.5], [2, 0.75]]}

    >>> df.to_dict('records')
    [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]

    >>> df.to_dict('index')
    {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}

    >>> df.to_dict('tight')
    {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
     'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]}

    You can also specify the mapping type.

    >>> from collections import OrderedDict, defaultdict
    >>> df.to_dict(into=OrderedDict)
    OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
                 ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])

    If you want a `defaultdict`, you need to initialize it:

    >>> dd = defaultdict(list)
    >>> df.to_dict('records', into=dd)
    [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
     defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
    """
    if not self.columns.is_unique:
        warnings.warn(
            "DataFrame columns are not unique, some columns will be omitted.",
            UserWarning,
            stacklevel=find_stack_level(),
        )
    # GH16122
    into_c = com.standardize_mapping(into)

    orient = orient.lower()
    # GH32515
    if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
        "dict",
        "list",
        "series",
        "split",
        "records",
        "index",
    }:
        warnings.warn(
            # Previous message left `list` unquoted, unlike its siblings.
            "Using short name for 'orient' is deprecated. Only the "
            "options: ('dict', 'list', 'series', 'split', 'records', 'index') "
            "will be used in a future version. Use one of the above "
            "to silence this warning.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

        if orient.startswith("d"):
            orient = "dict"
        elif orient.startswith("l"):
            orient = "list"
        elif orient.startswith("sp"):
            orient = "split"
        elif orient.startswith("s"):
            orient = "series"
        elif orient.startswith("r"):
            orient = "records"
        elif orient.startswith("i"):
            orient = "index"

    if orient == "dict":
        return into_c((k, v.to_dict(into)) for k, v in self.items())

    elif orient == "list":
        return into_c((k, v.tolist()) for k, v in self.items())

    elif orient in ("split", "tight"):
        # 'split' and 'tight' share the same index/columns/data payload;
        # 'tight' additionally records the index and column names.
        data = [
            list(map(maybe_box_native, t))
            for t in self.itertuples(index=False, name=None)
        ]
        items = [
            ("index", self.index.tolist()),
            ("columns", self.columns.tolist()),
            ("data", data),
        ]
        if orient == "tight":
            items.append(("index_names", list(self.index.names)))
            items.append(("column_names", list(self.columns.names)))
        return into_c(items)

    elif orient == "series":
        return into_c((k, v) for k, v in self.items())

    elif orient == "records":
        columns = self.columns.tolist()
        rows = (
            dict(zip(columns, row))
            for row in self.itertuples(index=False, name=None)
        )
        return [
            into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows
        ]

    elif orient == "index":
        if not self.index.is_unique:
            raise ValueError("DataFrame index must be unique for orient='index'.")
        return into_c(
            (t[0], dict(zip(self.columns, t[1:])))
            for t in self.itertuples(name=None)
        )

    else:
        raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
    self,
    destination_table: str,
    project_id: str | None = None,
    chunksize: int | None = None,
    reauth: bool = False,
    if_exists: str = "fail",
    auth_local_webserver: bool = False,
    table_schema: list[dict[str, str]] | None = None,
    location: str | None = None,
    progress_bar: bool = True,
    credentials=None,
) -> None:
    """
    Write a DataFrame to a Google BigQuery table.

    This function requires the `pandas-gbq package
    <https://pandas-gbq.readthedocs.io>`__.

    See the `How to authenticate with Google BigQuery
    <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
    guide for authentication instructions.

    Parameters
    ----------
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists raise pandas_gbq.gbq.TableCreationError.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        *New in version 0.2.0 of pandas-gbq*.
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``. If schema is not provided, it will be
        generated according to dtypes of DataFrame columns. See
        BigQuery API documentation on available names of a field.

        *New in version 0.3.1 of pandas-gbq*.
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        *New in version 0.5.0 of pandas-gbq*.
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        *New in version 0.5.0 of pandas-gbq*.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to
        override default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service
        Account :class:`google.oauth2.service_account.Credentials`
        directly.

        *New in version 0.8.0 of pandas-gbq*.

    See Also
    --------
    pandas_gbq.to_gbq : This function in the pandas-gbq library.
    read_gbq : Read a DataFrame from Google BigQuery.
    """
    # Imported lazily so pandas does not require pandas-gbq at import time;
    # pandas.io.gbq raises a helpful ImportError when it is missing.
    from pandas.io import gbq

    # Thin delegation: every argument is forwarded unchanged to pandas-gbq.
    gbq.to_gbq(
        self,
        destination_table,
        project_id=project_id,
        chunksize=chunksize,
        reauth=reauth,
        if_exists=if_exists,
        auth_local_webserver=auth_local_webserver,
        table_schema=table_schema,
        location=location,
        progress_bar=progress_bar,
        credentials=credentials,
    )
@classmethod
def from_records(
    cls,
    data,
    index=None,
    exclude=None,
    columns=None,
    coerce_float: bool = False,
    nrows: int | None = None,
) -> DataFrame:
    """
    Convert structured or record ndarray to DataFrame.

    Creates a DataFrame object from a structured ndarray, sequence of
    tuples or dicts, or DataFrame.

    Parameters
    ----------
    data : structured ndarray, sequence of tuples or dicts, or DataFrame
        Structured input data.
    index : str, list of fields, array-like
        Field of array to use as the index, alternately a specific set of
        input labels to use.
    exclude : sequence, default None
        Columns or fields to exclude.
    columns : sequence, default None
        Column names to use. If the passed data do not have names
        associated with them, this argument provides names for the
        columns. Otherwise this argument indicates the order of the columns
        in the result (any names not found in the data will become all-NA
        columns).
    coerce_float : bool, default False
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point, useful for SQL result sets.
    nrows : int, default None
        Number of rows to read if data is an iterator.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.from_dict : DataFrame from dict of array-like or dicts.
    DataFrame : DataFrame object creation using constructor.

    Examples
    --------
    Data can be provided as a structured ndarray:

    >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
    ...                 dtype=[('col_1', 'i4'), ('col_2', 'U1')])
    >>> pd.DataFrame.from_records(data)
       col_1 col_2
    0      3     a
    1      2     b
    2      1     c
    3      0     d

    Data can be provided as a list of dicts:

    >>> data = [{'col_1': 3, 'col_2': 'a'},
    ...         {'col_1': 2, 'col_2': 'b'},
    ...         {'col_1': 1, 'col_2': 'c'},
    ...         {'col_1': 0, 'col_2': 'd'}]
    >>> pd.DataFrame.from_records(data)
       col_1 col_2
    0      3     a
    1      2     b
    2      1     c
    3      0     d

    Data can be provided as a list of tuples with corresponding columns:

    >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
    >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
       col_1 col_2
    0      3     a
    1      2     b
    2      1     c
    3      0     d
    """
    result_index = None

    # Make a copy of the input columns so we can modify it
    if columns is not None:
        columns = ensure_index(columns)

    def maybe_reorder(
        arrays: list[ArrayLike], arr_columns: Index, columns: Index, index
    ) -> tuple[list[ArrayLike], Index, Index | None]:
        """
        If our desired 'columns' do not match the data's pre-existing 'arr_columns',
        we re-order our arrays. This is like a pre-emptive (cheap) reindex.
        """
        if len(arrays):
            length = len(arrays[0])
        else:
            length = 0

        result_index = None
        if len(arrays) == 0 and index is None and length == 0:
            # for backward compat use an object Index instead of RangeIndex
            result_index = Index([])

        arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length)
        return arrays, arr_columns, result_index

    if is_iterator(data):
        if nrows == 0:
            return cls()

        try:
            first_row = next(data)
        except StopIteration:
            return cls(index=index, columns=columns)

        dtype = None
        if hasattr(first_row, "dtype") and first_row.dtype.names:
            dtype = first_row.dtype

        values = [first_row]

        if nrows is None:
            values += data
        else:
            values.extend(itertools.islice(data, nrows - 1))

        if dtype is not None:
            data = np.array(values, dtype=dtype)
        else:
            data = values

    if isinstance(data, dict):
        if columns is None:
            columns = arr_columns = ensure_index(sorted(data))
            arrays = [data[k] for k in columns]
        else:
            arrays = []
            arr_columns_list = []
            for k, v in data.items():
                if k in columns:
                    arr_columns_list.append(k)
                    arrays.append(v)

            arr_columns = Index(arr_columns_list)
            arrays, arr_columns, result_index = maybe_reorder(
                arrays, arr_columns, columns, index
            )

    elif isinstance(data, (np.ndarray, DataFrame)):
        arrays, columns = to_arrays(data, columns)
        arr_columns = columns
    else:
        arrays, arr_columns = to_arrays(data, columns)
        if coerce_float:
            for i, arr in enumerate(arrays):
                if arr.dtype == object:
                    # error: Argument 1 to "maybe_convert_objects" has
                    # incompatible type "Union[ExtensionArray, ndarray]";
                    # expected "ndarray"
                    arrays[i] = lib.maybe_convert_objects(
                        arr,  # type: ignore[arg-type]
                        try_float=True,
                    )

        arr_columns = ensure_index(arr_columns)
        if columns is None:
            columns = arr_columns
        else:
            arrays, arr_columns, result_index = maybe_reorder(
                arrays, arr_columns, columns, index
            )

    if exclude is None:
        exclude = set()
    else:
        exclude = set(exclude)

    if index is not None:
        if isinstance(index, str) or not hasattr(index, "__iter__"):
            i = columns.get_loc(index)
            exclude.add(index)
            if len(arrays) > 0:
                result_index = Index(arrays[i], name=index)
            else:
                result_index = Index([], name=index)
        else:
            try:
                index_data = [arrays[arr_columns.get_loc(field)] for field in index]
            except (KeyError, TypeError):
                # raised by get_loc, see GH#29258
                result_index = index
            else:
                result_index = ensure_index_from_sequences(index_data, names=index)
                exclude.update(index)

    # Test for a non-empty set, not for any truthy element: ``any(exclude)``
    # wrongly skipped exclusion when every label was falsy (e.g. column 0
    # or ""), silently keeping columns the caller asked to drop.
    if exclude:
        arr_exclude = [x for x in exclude if x in arr_columns]
        to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
        arrays = [v for i, v in enumerate(arrays) if i not in to_remove]

        columns = columns.drop(exclude)

    manager = get_option("mode.data_manager")
    mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager)

    return cls(mgr)
def to_records(
    self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
    """
    Convert DataFrame to a NumPy record array.

    Index will be included as the first field of the record array if
    requested.

    Parameters
    ----------
    index : bool, default True
        Include index in resulting record array, stored in 'index'
        field or using the index label, if set.
    column_dtypes : str, type, dict, default None
        If a string or type, the data type to store all columns. If
        a dictionary, a mapping of column names and indices (zero-indexed)
        to specific data types.
    index_dtypes : str, type, dict, default None
        If a string or type, the data type to store all index levels. If
        a dictionary, a mapping of index level names and indices
        (zero-indexed) to specific data types.

        This mapping is applied only if `index=True`.

    Returns
    -------
    numpy.recarray
        NumPy ndarray with the DataFrame labels as fields and each row
        of the DataFrame as entries.

    See Also
    --------
    DataFrame.from_records: Convert structured or record ndarray
        to DataFrame.
    numpy.recarray: An ndarray that allows field access using
        attributes, analogous to typed columns in a
        spreadsheet.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
    ...                   index=['a', 'b'])
    >>> df
       A     B
    a  1  0.50
    b  2  0.75
    >>> df.to_records()
    rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
              dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])

    If the DataFrame index has no label then the recarray field name
    is set to 'index'. If the index has a label then this is used as the
    field name:

    >>> df.index = df.index.rename("I")
    >>> df.to_records()
    rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
              dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])

    The index can be excluded from the record array:

    >>> df.to_records(index=False)
    rec.array([(1, 0.5 ), (2, 0.75)],
              dtype=[('A', '<i8'), ('B', '<f8')])

    Data types can be specified for the columns:

    >>> df.to_records(column_dtypes={"A": "int32"})
    rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
              dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])

    As well as for the index:

    >>> df.to_records(index_dtypes="<S2")
    rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
              dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])

    >>> index_dtypes = f"<S{df.index.str.len().max()}"
    >>> df.to_records(index_dtypes=index_dtypes)
    rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
              dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
    """
    if index:
        # Index levels come first in the record array, then the columns.
        if isinstance(self.index, MultiIndex):
            # array of tuples to numpy cols. copy copy copy
            level_arrays = list(map(np.array, zip(*self.index._values)))
        else:
            # error: List item 0 has incompatible type "ArrayLike"; expected
            # "ndarray"
            level_arrays = [self.index.values]  # type: ignore[list-item]

        column_arrays = [
            np.asarray(self.iloc[:, col]) for col in range(len(self.columns))
        ]
        arrays = level_arrays + column_arrays

        index_names = list(self.index.names)
        if isinstance(self.index, MultiIndex):
            index_names = com.fill_missing_names(index_names)
        elif index_names[0] is None:
            index_names = ["index"]

        names = [str(n) for n in itertools.chain(index_names, self.columns)]
    else:
        arrays = [np.asarray(self.iloc[:, col]) for col in range(len(self.columns))]
        names = [str(c) for c in self.columns]
        index_names = []

    index_len = len(index_names)
    formats = []

    for pos, arr in enumerate(arrays):
        # Positions below index_len refer to index levels; everything
        # after that refers to DataFrame columns (shifted by index_len).
        if pos < index_len:
            dtype_mapping = index_dtypes
            name = index_names[pos]
            lookup_key = pos
        else:
            dtype_mapping = column_dtypes
            lookup_key = pos - index_len
            name = self.columns[lookup_key]

        # A dict-like mapping may key dtypes either by label or by
        # zero-indexed position within the index levels / columns.
        if is_dict_like(dtype_mapping):
            if name in dtype_mapping:
                dtype_mapping = dtype_mapping[name]
            elif lookup_key in dtype_mapping:
                dtype_mapping = dtype_mapping[lookup_key]
            else:
                dtype_mapping = None

        if dtype_mapping is None:
            # No mapping found: fall back to the array's own dtype.
            formats.append(arr.dtype)
        elif isinstance(dtype_mapping, (type, np.dtype, str)):
            # Argument 1 to "append" of "list" has incompatible type
            # "Union[type, dtype[Any], str]"; expected "dtype[_SCT]" [arg-type]
            formats.append(dtype_mapping)  # type: ignore[arg-type]
        else:
            element = "row" if pos < index_len else "column"
            raise ValueError(
                f"Invalid dtype {dtype_mapping} specified for {element} {name}"
            )

    return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
    cls,
    arrays,
    columns,
    index,
    dtype: Dtype | None = None,
    verify_integrity: bool = True,
) -> DataFrame:
    """
    Create DataFrame from a list of arrays corresponding to the columns.

    Parameters
    ----------
    arrays : list-like of arrays
        Each array in the list corresponds to one column, in order.
    columns : list-like, Index
        The column names for the resulting DataFrame.
    index : list-like, Index
        The rows labels for the resulting DataFrame.
    dtype : dtype, optional
        Optional dtype to enforce for all arrays.
    verify_integrity : bool, default True
        Validate and homogenize all input. If set to False, it is assumed
        that all elements of `arrays` are actual arrays how they will be
        stored in a block (numpy ndarray or ExtensionArray), have the same
        length as and are aligned with the index, and that `columns` and
        `index` are ensured to be an Index object.

    Returns
    -------
    DataFrame
    """
    # Normalize the requested dtype up front so an invalid dtype fails
    # before any other validation.
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    columns = ensure_index(columns)
    if len(columns) != len(arrays):
        raise ValueError("len(columns) must match len(arrays)")

    mgr = arrays_to_mgr(
        arrays,
        columns,
        index,
        dtype=dtype,
        verify_integrity=verify_integrity,
        typ=get_option("mode.data_manager"),
    )
    return cls(mgr)
@doc(
    storage_options=_shared_docs["storage_options"],
    compression_options=_shared_docs["compression_options"] % "path",
)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
    self,
    path: FilePath | WriteBuffer[bytes],
    convert_dates: dict[Hashable, str] | None = None,
    write_index: bool = True,
    byteorder: str | None = None,
    time_stamp: datetime.datetime | None = None,
    data_label: str | None = None,
    variable_labels: dict[Hashable, str] | None = None,
    version: int | None = 114,
    convert_strl: Sequence[Hashable] | None = None,
    compression: CompressionOptions = "infer",
    storage_options: StorageOptions = None,
    *,
    value_labels: dict[Hashable, dict[float | int, str]] | None = None,
) -> None:
    """
    Export DataFrame object to Stata dta format.

    Writes the DataFrame to a Stata dataset file.
    "dta" files contain a Stata dataset.

    Parameters
    ----------
    path : str, path object, or buffer
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function.

        .. versionchanged:: 1.0.0

        Previously this was "fname"

    convert_dates : dict
        Dictionary mapping columns containing datetime types to stata
        internal format to use when writing the dates. Options are 'tc',
        'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
        or a name. Datetime columns that do not have a conversion type
        specified will be converted to 'tc'. Raises NotImplementedError if
        a datetime column has timezone information.
    write_index : bool
        Write the index to Stata dataset.
    byteorder : str
        Can be ">", "<", "little", or "big". default is `sys.byteorder`.
    time_stamp : datetime
        A datetime to use as file creation date.  Default is the current
        time.
    data_label : str, optional
        A label for the data set.  Must be 80 characters or smaller.
    variable_labels : dict
        Dictionary containing columns as keys and variable labels as
        values. Each label must be 80 characters or smaller.
    version : {{114, 117, 118, 119, None}}, default 114
        Version to use in the output dta file. Set to None to let pandas
        decide between 118 or 119 formats depending on the number of
        columns in the frame. Version 114 can be read by Stata 10 and
        later. Version 117 can be read by Stata 13 or later. Version 118
        is supported in Stata 14 and later. Version 119 is supported in
        Stata 15 and later. Version 114 limits string variables to 244
        characters or fewer while versions 117 and later allow strings
        with lengths up to 2,000,000 characters. Versions 118 and 119
        support Unicode characters, and version 119 supports more than
        32,767 variables.

        Version 119 should usually only be used when the number of
        variables exceeds the capacity of dta format 118. Exporting
        smaller datasets in format 119 may have unintended consequences,
        and, as of November 2020, Stata SE cannot read version 119 files.

        .. versionchanged:: 1.0.0

            Added support for formats 118 and 119.

    convert_strl : list, optional
        List of column names to convert to string columns to Stata StrL
        format. Only available if version is 117.  Storing strings in the
        StrL format can produce smaller dta files if strings have more than
        8 characters and values are repeated.
    {compression_options}

        .. versionadded:: 1.1.0

        .. versionchanged:: 1.4.0 Zstandard support.

    {storage_options}

        .. versionadded:: 1.2.0

    value_labels : dict of dicts
        Dictionary containing columns as keys and dictionaries of column value
        to labels as values. Labels for a single variable must be 32,000
        characters or smaller.

        .. versionadded:: 1.4.0

    Raises
    ------
    NotImplementedError
        * If datetimes contain timezone information
        * Column dtype is not representable in Stata
    ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          or datetime.datetime
        * Column listed in convert_dates is not in DataFrame
        * Categorical label contains more than 32,000 characters

    See Also
    --------
    read_stata : Import Stata data files.
    io.stata.StataWriter : Low-level writer for Stata data files.
    io.stata.StataWriter117 : Low-level writer for version 117 files.

    Examples
    --------
    >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
    ...                               'parrot'],
    ...                    'speed': [350, 18, 361, 15]}})
    >>> df.to_stata('animals.dta')  # doctest: +SKIP
    """
    if version not in (114, 117, 118, 119, None):
        raise ValueError("Only formats 114, 117, 118 and 119 are supported.")

    # Pick the writer implementation and build its version-specific
    # keyword arguments in one pass.
    kwargs: dict[str, Any] = {}
    if version == 114:
        if convert_strl is not None:
            raise ValueError("strl is not supported in format 114")
        from pandas.io.stata import StataWriter as statawriter
    elif version == 117:
        # strl conversion is only supported >= 117
        # mypy: Name 'statawriter' already defined (possibly by an import)
        from pandas.io.stata import (  # type: ignore[no-redef]
            StataWriter117 as statawriter,
        )

        kwargs["convert_strl"] = convert_strl
    else:  # versions 118 and 119, or None (let pandas pick 118/119)
        # mypy: Name 'statawriter' already defined (possibly by an import)
        from pandas.io.stata import (  # type: ignore[no-redef]
            StataWriterUTF8 as statawriter,
        )

        # Specifying the version is only supported for UTF8 (118 or 119)
        kwargs["convert_strl"] = convert_strl
        kwargs["version"] = version

    writer = statawriter(
        path,
        self,
        convert_dates=convert_dates,
        byteorder=byteorder,
        time_stamp=time_stamp,
        data_label=data_label,
        write_index=write_index,
        variable_labels=variable_labels,
        compression=compression,
        storage_options=storage_options,
        value_labels=value_labels,
        **kwargs,
    )
    writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
    """
    Write a DataFrame to the binary Feather format.

    Parameters
    ----------
    path : str, path object, file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function. If a string or a path,
        it will be used as Root Directory path when writing a partitioned dataset.
    **kwargs :
        Additional keywords passed to :func:`pyarrow.feather.write_feather`.
        Starting with pyarrow 0.17, this includes the `compression`,
        `compression_level`, `chunksize` and `version` keywords.

        .. versionadded:: 1.1.0

    Notes
    -----
    This function writes the dataframe as a `feather file
    <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
    index. For saving the DataFrame with your custom index use a method that
    supports custom indices e.g. `to_parquet`.
    """
    # Lazy import keeps pyarrow optional; aliased to avoid shadowing the
    # method name inside this scope.
    from pandas.io.feather_format import to_feather as feather_to_feather

    feather_to_feather(self, path, **kwargs)
@doc(
    Series.to_markdown,
    klass=_shared_doc_kwargs["klass"],
    storage_options=_shared_docs["storage_options"],
    examples="""Examples
    --------
    >>> df = pd.DataFrame(
    ...     data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
    ... )
    >>> print(df.to_markdown())
    |    | animal_1   | animal_2   |
    |---:|:-----------|:-----------|
    |  0 | elk        | dog        |
    |  1 | pig        | quetzal    |

    Output markdown with a tabulate option.

    >>> print(df.to_markdown(tablefmt="grid"))
    +----+------------+------------+
    |    | animal_1   | animal_2   |
    +====+============+============+
    |  0 | elk        | dog        |
    +----+------------+------------+
    |  1 | pig        | quetzal    |
    +----+------------+------------+""",
)
def to_markdown(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    mode: str = "wt",
    index: bool = True,
    storage_options: StorageOptions = None,
    **kwargs,
) -> str | None:
    if "showindex" in kwargs:
        warnings.warn(
            "'showindex' is deprecated. Only 'index' will be used "
            "in a future version. Use 'index' to silence this warning.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    # Fill in tabulate defaults without clobbering caller-supplied values.
    for key, default in (
        ("headers", "keys"),
        ("tablefmt", "pipe"),
        ("showindex", index),
    ):
        kwargs.setdefault(key, default)

    tabulate = import_optional_dependency("tabulate")
    result = tabulate.tabulate(self, **kwargs)
    if buf is None:
        # No target given: return the rendered markdown string.
        return result

    # Write to the given path/buffer and return None, mirroring to_csv.
    with get_handle(buf, mode, storage_options=storage_options) as handles:
        handles.handle.write(result)
    return None
@doc(storage_options=_shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
partition_cols: list[str] | None = None,
storage_options: StorageOptions = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If None, the result is
returned as bytes. If a string or path, it will be used as Root Directory
path when writing a partitioned dataset.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use a io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: ColspaceArgType | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: FormattersType | None = None,
float_format: FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool | str = False,
decimal: str = ".",
bold_rows: bool = True,
classes: str | list | tuple | None = None,
escape: bool = True,
notebook: bool = False,
border: int | None = None,
table_id: str | None = None,
render_links: bool = False,
encoding: str | None = None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter wld b in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
@doc(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path_or_buffer",
)
def to_xml(
self,
path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
index: bool = True,
root_name: str | None = "data",
row_name: str | None = "row",
na_rep: str | None = None,
attr_cols: list[str] | None = None,
elem_cols: list[str] | None = None,
namespaces: dict[str | None, str] | None = None,
prefix: str | None = None,
encoding: str = "utf-8",
xml_declaration: bool | None = True,
pretty_print: bool | None = True,
parser: str | None = "lxml",
stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> str | None:
"""
Render a DataFrame to an XML document.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a ``write()`` function. If None, the result is returned
as a string.
index : bool, default True
Whether to include index in XML document.
root_name : str, default 'data'
The name of root element in XML document.
row_name : str, default 'row'
The name of row element in XML document.
na_rep : str, optional
Missing data representation.
attr_cols : list-like, optional
List of columns to write as attributes in row element.
Hierarchical columns will be flattened with underscore
delimiting the different levels.
elem_cols : list-like, optional
List of columns to write as children in row element. By default,
all columns output as children of row element. Hierarchical
columns will be flattened with underscore delimiting the
different levels.
namespaces : dict, optional
All namespaces to be defined in root element. Keys of dict
should be prefix names and values of dict corresponding URIs.
Default namespaces should be given empty string key. For
example, ::
namespaces = {{"": "https://example.com"}}
prefix : str, optional
Namespace prefix to be used for every element and/or attribute
in document. This should be one of the keys in ``namespaces``
dict.
encoding : str, default 'utf-8'
Encoding of the resulting document.
xml_declaration : bool, default True
Whether to include the XML declaration at start of document.
pretty_print : bool, default True
Whether output should be pretty printed with indentation and
line breaks.
parser : {{'lxml','etree'}}, default 'lxml'
Parser module to use for building of tree. Only 'lxml' and
'etree' are supported. With 'lxml', the ability to use XSLT
stylesheet is supported.
stylesheet : str, path object or file-like object, optional
A URL, file-like object, or a raw string containing an XSLT
script used to transform the raw XML output. Script should use
layout of elements and attributes from original output. This
argument requires ``lxml`` to be installed. Only XSLT 1.0
scripts and not later versions is currently supported.
{compression_options}
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
Returns
-------
None or str
If ``io`` is None, returns the resulting XML format as a
string. Otherwise returns None.
See Also
--------
to_json : Convert the pandas object to a JSON string.
to_html : Convert DataFrame to a html.
Examples
--------
>>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
... 'degrees': [360, 360, 180],
... 'sides': [4, np.nan, 3]}})
>>> df.to_xml() # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>
>>> df.to_xml(attr_cols=[
... 'index', 'shape', 'degrees', 'sides'
... ]) # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
<row index="1" shape="circle" degrees="360"/>
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>
>>> df.to_xml(namespaces={{"doc": "https://example.com"}},
... prefix="doc") # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="https://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>
"""
from pandas.io.formats.xml import (
EtreeXMLFormatter,
LxmlXMLFormatter,
)
lxml = import_optional_dependency("lxml.etree", errors="ignore")
TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter]
if parser == "lxml":
if lxml is not None:
TreeBuilder = LxmlXMLFormatter
else:
raise ImportError(
"lxml not found, please install or use the etree parser."
)
elif parser == "etree":
TreeBuilder = EtreeXMLFormatter
else:
raise ValueError("Values for parser can only be lxml or etree.")
xml_formatter = TreeBuilder(
self,
path_or_buffer=path_or_buffer,
index=index,
root_name=root_name,
row_name=row_name,
na_rep=na_rep,
attr_cols=attr_cols,
elem_cols=elem_cols,
namespaces=namespaces,
prefix=prefix,
encoding=encoding,
xml_declaration=xml_declaration,
pretty_print=pretty_print,
stylesheet=stylesheet,
compression=compression,
storage_options=storage_options,
)
return xml_formatter.write_output()
# ----------------------------------------------------------------------
@doc(INFO_DOCSTRING, **frame_sub_kwargs)
def info(
self,
verbose: bool | None = None,
buf: WriteBuffer[str] | None = None,
max_cols: int | None = None,
memory_usage: bool | str | None = None,
show_counts: bool | None = None,
null_counts: bool | None = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=find_stack_level(),
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Notes
-----
See the :ref:`Frequently Asked Questions <df-memory-usage>` for more
details.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
index_memory_usage = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
)
result = index_memory_usage._append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._can_fast_transpose:
# Note: tests pass without this, but this improves perf quite a bit.
new_vals = self._values.T
if copy:
new_vals = new_vals.copy()
result = self._constructor(new_vals, index=self.columns, columns=self.index)
elif (
self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0])
):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = type(self)._from_arrays(
new_values, index=self.columns, columns=self.index
)
else:
new_arr = self.values.T
if copy:
new_arr = new_arr.copy()
result = self._constructor(new_arr, index=self.columns, columns=self.index)
return result.__finalize__(self, method="transpose")
    @property
    def T(self) -> DataFrame:
        """The transpose of the DataFrame; accessor alias for :meth:`transpose`."""
        return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
    def _ixs(self, i: int, axis: int = 0):
        """
        Positionally retrieve the ``i``-th row (axis=0) or column (axis=1)
        as a Series.

        Parameters
        ----------
        i : int
            Positional index along ``axis``.
        axis : int
            0 for a row, anything else for a column.

        Notes
        -----
        If slice passed, the resulting data will be a view.
        """
        # irow
        if axis == 0:
            new_values = self._mgr.fast_xs(i)
            # if we are a copy, mark as such
            copy = isinstance(new_values, np.ndarray) and new_values.base is None
            result = self._constructor_sliced(
                new_values,
                index=self.columns,
                name=self.index[i],
                dtype=new_values.dtype,
            )
            # Propagate copy/view status so chained-assignment checks work.
            result._set_is_copy(self, copy=copy)
            return result
        # icol
        else:
            label = self.columns[i]
            col_mgr = self._mgr.iget(i)
            result = self._box_col_values(col_mgr, i)
            # this is a cached value, mark it so
            result._set_as_cached(label, self)
            return result
    def _get_column_array(self, i: int) -> ArrayLike:
        """
        Get the values of the i'th column (ndarray or ExtensionArray, as stored
        in the Block).

        NOTE(review): presumably a direct reference to the stored values, not a
        copy — callers should treat the result as read-only; confirm against
        ``_mgr.iget_values``.
        """
        return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
    def __getitem__(self, key):
        """
        Select from the frame: ``df[key]``.

        Dispatch order matters: hashable column label -> cached single column;
        row slice / partial date string -> row slice; boolean DataFrame ->
        ``where`` mask; 1-D boolean indexer -> row mask; otherwise a single
        key or a collection of column keys.
        """
        check_deprecated_indexers(key)
        key = lib.item_from_zerodim(key)
        key = com.apply_if_callable(key, self)
        if is_hashable(key) and not is_iterator(key):
            # is_iterator to exclude generator e.g. test_getitem_listlike
            # shortcut if the key is in columns
            if self.columns.is_unique and key in self.columns:
                if isinstance(self.columns, MultiIndex):
                    return self._getitem_multilevel(key)
                return self._get_item_cache(key)
        # Do we have a slicer (on rows)?
        indexer = convert_to_index_sliceable(self, key)
        if indexer is not None:
            if isinstance(indexer, np.ndarray):
                # Try to collapse the positional array into a real slice.
                indexer = lib.maybe_indices_to_slice(
                    indexer.astype(np.intp, copy=False), len(self)
                )
                if isinstance(indexer, np.ndarray):
                    # GH#43223 If we can not convert, use take
                    return self.take(indexer, axis=0)
            # either we have a slice or we have a string that can be converted
            # to a slice for partial-string date indexing
            return self._slice(indexer, axis=0)
        # Do we have a (boolean) DataFrame?
        if isinstance(key, DataFrame):
            return self.where(key)
        # Do we have a (boolean) 1d indexer?
        if com.is_bool_indexer(key):
            return self._getitem_bool_array(key)
        # We are left with two options: a single key, and a collection of keys,
        # We interpret tuples as collections only for non-MultiIndex
        is_single_key = isinstance(key, tuple) or not is_list_like(key)
        if is_single_key:
            if self.columns.nlevels > 1:
                return self._getitem_multilevel(key)
            indexer = self.columns.get_loc(key)
            if is_integer(indexer):
                indexer = [indexer]
        else:
            if is_iterator(key):
                key = list(key)
            indexer = self.columns._get_indexer_strict(key, "columns")[1]
        # take() does not accept boolean indexers
        if getattr(indexer, "dtype", None) == bool:
            indexer = np.where(indexer)[0]
        data = self._take_with_is_copy(indexer, axis=1)
        if is_single_key:
            # What does looking for a single key in a non-unique index return?
            # The behavior is inconsistent. It returns a Series, except when
            # - the key itself is repeated (test on data.shape, #9519), or
            # - we have a MultiIndex on columns (test on self.columns, #21309)
            if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
                # GH#26490 using data[key] can cause RecursionError
                return data._get_item_cache(key)
        return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=find_stack_level(),
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
    def _getitem_multilevel(self, key):
        """
        Select from a frame whose columns are a MultiIndex; ``key`` may match
        a single column (int loc) or a whole sub-frame (slice/array loc).
        """
        # self.columns is a MultiIndex
        loc = self.columns.get_loc(key)
        if isinstance(loc, (slice, np.ndarray)):
            # Partial key: returns a sub-frame with the matched level(s) dropped.
            new_columns = self.columns[loc]
            result_columns = maybe_droplevels(new_columns, key)
            if self._is_mixed_type:
                result = self.reindex(columns=new_columns)
                result.columns = result_columns
            else:
                new_values = self.values[:, loc]
                result = self._constructor(
                    new_values, index=self.index, columns=result_columns
                )
            result = result.__finalize__(self)

            # If there is only one column being returned, and its name is
            # either an empty string, or a tuple with an empty string as its
            # first element, then treat the empty string as a placeholder
            # and return the column as if the user had provided that empty
            # string in the key. If the result is a Series, exclude the
            # implied empty string from its name.
            if len(result.columns) == 1:
                top = result.columns[0]
                if isinstance(top, tuple):
                    top = top[0]
                if top == "":
                    result = result[""]
                    if isinstance(result, Series):
                        result = self._constructor_sliced(
                            result, index=self.index, name=key
                        )

            result._set_is_copy(self)
            return result
        else:
            # loc is neither a slice nor ndarray, so must be an int
            return self._ixs(loc, axis=1)
    def _get_value(self, index, col, takeable: bool = False) -> Scalar:
        """
        Quickly retrieve single value at passed column and index.

        Parameters
        ----------
        index : row label
        col : column label
        takeable : interpret the index/col as indexers, default False

        Returns
        -------
        scalar

        Notes
        -----
        Assumes that both `self.index._index_as_unique` and
        `self.columns._index_as_unique`; Caller is responsible for checking.
        """
        if takeable:
            # Positional path: fetch the column, index directly into its values.
            series = self._ixs(col, axis=1)
            return series._values[index]

        series = self._get_item_cache(col)
        engine = self.index._engine

        if not isinstance(self.index, MultiIndex):
            # CategoricalIndex: Trying to use the engine fastpath may give incorrect
            # results if our categories are integers that dont match our codes
            # IntervalIndex: IntervalTree has no get_loc
            row = self.index.get_loc(index)
            return series._values[row]

        # For MultiIndex going through engine effectively restricts us to
        # same-length tuples; see test_get_set_value_no_partial_indexing
        loc = engine.get_loc(index)
        return series._values[loc]
    def __setitem__(self, key, value):
        """
        Assign into the frame: ``df[key] = value``.

        Dispatch order matters: row slice first, then 2-D boolean mask,
        then list-like column keys, then a DataFrame value, then the
        duplicated-column special case, and finally a single column set.
        """
        key = com.apply_if_callable(key, self)

        # see if we can slice the rows
        indexer = convert_to_index_sliceable(self, key)
        if indexer is not None:
            # either we have a slice or we have a string that can be converted
            # to a slice for partial-string date indexing
            return self._setitem_slice(indexer, value)

        if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
            # 2-D boolean mask, e.g. df[df > other] = 0
            self._setitem_frame(key, value)
        elif isinstance(key, (Series, np.ndarray, list, Index)):
            self._setitem_array(key, value)
        elif isinstance(value, DataFrame):
            self._set_item_frame_value(key, value)
        elif (
            is_list_like(value)
            and not self.columns.is_unique
            and 1 < len(self.columns.get_indexer_for([key])) == len(value)
        ):
            # Column to set is duplicated
            self._setitem_array([key], value)
        else:
            # set column
            self._set_item(key, value)
    def _setitem_slice(self, key: slice, value):
        """
        Set rows selected by a positional slice: ``df[a:b] = value``.
        """
        # NB: we can't just use self.loc[key] = value because that
        # operates on labels and we need to operate positional for
        # backwards-compat, xref GH#31469
        self._check_setitem_copy()
        self.iloc[key] = value
    def _setitem_array(self, key, value):
        """
        Set with a list-like key: either a 1-D boolean row mask or a
        collection of column labels.
        """
        # also raises Exception if object array with NA values
        if com.is_bool_indexer(key):
            # bool indexer is indexing along rows
            if len(key) != len(self.index):
                raise ValueError(
                    f"Item wrong length {len(key)} instead of {len(self.index)}!"
                )
            key = check_bool_indexer(self.index, key)
            indexer = key.nonzero()[0]
            self._check_setitem_copy()
            if isinstance(value, DataFrame):
                # GH#39931 reindex since iloc does not align
                value = value.reindex(self.index.take(indexer))
            self.iloc[indexer] = value
        else:
            # Note: unlike self.iloc[:, indexer] = value, this will
            # never try to overwrite values inplace
            if isinstance(value, DataFrame):
                # Pair up key labels with the value's columns one-to-one.
                check_key_length(self.columns, key, value)
                for k1, k2 in zip(key, value.columns):
                    self[k1] = value[k2]
            elif not is_list_like(value):
                # Scalar value: broadcast to every listed column.
                for col in key:
                    self[col] = value
            elif isinstance(value, np.ndarray) and value.ndim == 2:
                self._iset_not_inplace(key, value)
            elif np.ndim(value) > 1:
                # list of lists
                value = DataFrame(value).values
                return self._setitem_array(key, value)
            else:
                self._iset_not_inplace(key, value)
    def _iset_not_inplace(self, key, value):
        """
        Set several columns from a list-like ``value``, one column at a time,
        always inserting fresh arrays rather than writing into existing ones.
        """
        # GH#39510 when setting with df[key] = obj with a list-like key and
        # list-like value, we iterate over those listlikes and set columns
        # one at a time. This is different from dispatching to
        # `self.loc[:, key]= value` because loc.__setitem__ may overwrite
        # data inplace, whereas this will insert new arrays.
        def igetitem(obj, i: int):
            # Note: we catch DataFrame obj before getting here, but
            # hypothetically would return obj.iloc[:, i]
            if isinstance(obj, np.ndarray):
                return obj[..., i]
            else:
                return obj[i]

        if self.columns.is_unique:
            if np.shape(value)[-1] != len(key):
                raise ValueError("Columns must be same length as key")
            for i, col in enumerate(key):
                self[col] = igetitem(value, i)
        else:
            # Duplicate column labels: resolve to positional locations first.
            ilocs = self.columns.get_indexer_non_unique(key)[0]
            if (ilocs < 0).any():
                # key entries not in self.columns
                raise NotImplementedError

            if np.shape(value)[-1] != len(ilocs):
                raise ValueError("Columns must be same length as key")

            assert np.ndim(value) <= 2

            orig_columns = self.columns

            # Using self.iloc[:, i] = ... may set values inplace, which
            # by convention we do not do in __setitem__
            # Temporarily relabel columns with unique integers so each
            # positional target can be set by label, then restore.
            try:
                self.columns = Index(range(len(self.columns)))
                for i, iloc in enumerate(ilocs):
                    self[iloc] = igetitem(value, i)
            finally:
                self.columns = orig_columns
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
    def _set_item_frame_value(self, key, value: DataFrame) -> None:
        """
        Set column(s) ``key`` from a DataFrame ``value``, aligning the
        value's columns (for MultiIndex sub-frames) and rows first.
        """
        self._ensure_valid_index(value)

        # align columns
        if key in self.columns:
            loc = self.columns.get_loc(key)
            cols = self.columns[loc]
            len_cols = 1 if is_scalar(cols) else len(cols)
            if len_cols != len(value.columns):
                raise ValueError("Columns must be same length as key")

            # align right-hand-side columns if self.columns
            # is multi-index and self[key] is a sub-frame
            if isinstance(self.columns, MultiIndex) and isinstance(
                loc, (slice, Series, np.ndarray, Index)
            ):
                cols = maybe_droplevels(cols, key)
                if len(cols) and not cols.equals(value.columns):
                    value = value.reindex(cols, axis=1)

        # now align rows
        arraylike = _reindex_for_setitem(value, self.index)
        self._set_item_mgr(key, arraylike)
    def _iset_item_mgr(
        self, loc: int | slice | np.ndarray, value, inplace: bool = False
    ) -> None:
        """
        Set column(s) at positional ``loc`` on the manager and invalidate the
        item cache so stale Series are not handed out.
        """
        # when called from _set_item_mgr loc can be anything returned from get_loc
        self._mgr.iset(loc, value, inplace=inplace)
        self._clear_item_cache()
    def _set_item_mgr(self, key, value: ArrayLike) -> None:
        """
        Insert or overwrite column ``key`` with an already-sanitized array.
        """
        try:
            # EAFP: overwrite if the label already exists ...
            loc = self._info_axis.get_loc(key)
        except KeyError:
            # This item wasn't present, just insert at end
            self._mgr.insert(len(self._info_axis), key, value)
        else:
            self._iset_item_mgr(loc, value)

        # check if we are modifying a copy
        # try to set first as we want an invalid
        # value exception to occur first
        if len(self):
            self._check_setitem_copy()
    def _iset_item(self, loc: int, value) -> None:
        """
        Set the column at position ``loc`` in place, sanitizing ``value``
        into an array first.
        """
        arraylike = self._sanitize_column(value)
        self._iset_item_mgr(loc, arraylike, inplace=True)

        # check if we are modifying a copy
        # try to set first as we want an invalid
        # value exception to occur first
        if len(self):
            self._check_setitem_copy()
    def _set_item(self, key, value) -> None:
        """
        Add series to DataFrame in specified column.

        If series is a numpy-array (not a Series/TimeSeries), it must be the
        same length as the DataFrames index or an error will be thrown.

        Series/TimeSeries will be conformed to the DataFrames index to
        ensure homogeneity.
        """
        value = self._sanitize_column(value)

        if (
            key in self.columns
            and value.ndim == 1
            and not is_extension_array_dtype(value)
        ):
            # broadcast across multiple columns if necessary
            # (duplicate labels or a MultiIndex key may address several
            # physical columns at once)
            if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
                existing_piece = self[key]
                if isinstance(existing_piece, DataFrame):
                    # Replicate the 1-D value into one column per match.
                    value = np.tile(value, (len(existing_piece.columns), 1)).T

        self._set_item_mgr(key, value)
    def _set_value(
        self, index: IndexLabel, col, value: Scalar, takeable: bool = False
    ) -> None:
        """
        Put single value at passed column and index.

        Parameters
        ----------
        index : Label
            row label
        col : Label
            column label
        value : scalar
        takeable : bool, default False
            Sets whether or not index/col interpreted as indexers
        """
        try:
            # Fast path: write directly into the column's own manager.
            if takeable:
                series = self._ixs(col, axis=1)
                loc = index
            else:
                series = self._get_item_cache(col)
                loc = self.index.get_loc(index)

            # setitem_inplace will do validation that may raise TypeError,
            # ValueError, or LossySetitemError
            series._mgr.setitem_inplace(loc, value)

        except (KeyError, TypeError, ValueError, LossySetitemError):
            # set using a non-recursive method & reset the cache
            # (slow path covers missing labels and dtype-incompatible values)
            if takeable:
                self.iloc[index, col] = value
            else:
                self.loc[index, col] = value
            # The indexer may have replaced the column; drop the stale cache.
            self._item_cache.pop(col, None)
def _ensure_valid_index(self, value) -> None:
    """
    Ensure that if we don't have an index, that we can create one from the
    passed value.

    Raises
    ------
    ValueError
        If the frame has no index and ``value`` cannot be converted to a
        Series (so no index can be inferred from it).
    """
    # GH5632, make sure that we are a Series convertible
    if not len(self.index) and is_list_like(value) and len(value):
        if not isinstance(value, DataFrame):
            try:
                value = Series(value)
            except (ValueError, NotImplementedError, TypeError) as err:
                raise ValueError(
                    "Cannot set a frame with no defined index "
                    "and a value that cannot be converted to a Series"
                ) from err

            # GH31368 preserve name of index
            index_copy = value.index.copy()
            if self.index.name is not None:
                index_copy.name = self.index.name

            # NOTE(review): manager axis 1 appears to correspond to the
            # frame's rows (the index) here — confirm against the
            # BlockManager axis convention before changing.
            self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values: SingleDataManager, loc: int) -> Series:
    """
    Provide boxed values for a column.

    Parameters
    ----------
    values : SingleDataManager
        Manager holding the single column's data.
    loc : int
        Positional index of the column (used only to look up its label).
    """
    # Lookup in columns so that if e.g. a str datetime was passed
    # we attach the Timestamp object as the name.
    name = self.columns[loc]
    klass = self._constructor_sliced
    # We get index=self.index bc values is a SingleDataManager
    return klass(values, name=name, fastpath=True).__finalize__(self)
# ----------------------------------------------------------------------
# Lookup Caching
def _clear_item_cache(self) -> None:
self._item_cache.clear()
def _get_item_cache(self, item: Hashable) -> Series:
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
# All places that call _get_item_cache have unique columns,
# pending resolution of GH#33047
loc = self.columns.get_loc(item)
res = self._ixs(loc, axis=1)
cache[item] = res
# for a chain
res._is_copy = self._is_copy
return res
def _reset_cacher(self) -> None:
# no-op for DataFrame
pass
def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None:
    """
    The object has called back to us saying maybe it has changed.

    Parameters
    ----------
    item : label
        Column label that may have changed.
    value : Series
        The (possibly new) column values.
    inplace : bool
        Whether the manager may overwrite the existing array in place.
    """
    loc = self._info_axis.get_loc(item)
    arraylike = value._values

    old = self._ixs(loc, axis=1)
    if old._values is value._values and inplace:
        # GH#46149 avoid making unnecessary copies/block-splitting
        return

    self._mgr.iset(loc, arraylike, inplace=inplace)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr: str, inplace: bool = False, **kwargs):
    """
    Query the columns of a DataFrame with a boolean expression.

    Parameters
    ----------
    expr : str
        The query string to evaluate.

        You can refer to variables
        in the environment by prefixing them with an '@' character like
        ``@a + b``.

        You can refer to column names that are not valid Python variable names
        by surrounding them in backticks. Thus, column names containing spaces
        or punctuations (besides underscores) or starting with digits must be
        surrounded by backticks. (For example, a column named "Area (cm^2)" would
        be referenced as ```Area (cm^2)```). Column names which are Python keywords
        (like "list", "for", "import", etc) cannot be used.

        For example, if one of your columns is called ``a a`` and you want
        to sum it with ``b``, your query should be ```a a` + b``.

        .. versionadded:: 0.25.0
            Backtick quoting introduced.

        .. versionadded:: 1.0.0
            Expanding functionality of backtick quoting for more than only spaces.

    inplace : bool
        Whether the query should modify the data in place or return
        a modified copy.
    **kwargs
        See the documentation for :func:`eval` for complete details
        on the keyword arguments accepted by :meth:`DataFrame.query`.

    Returns
    -------
    DataFrame or None
        DataFrame resulting from the provided query expression or
        None if ``inplace=True``.

    See Also
    --------
    eval : Evaluate a string describing operations on
        DataFrame columns.
    DataFrame.eval : Evaluate a string describing operations on
        DataFrame columns.

    Notes
    -----
    The result of the evaluation of this expression is first passed to
    :attr:`DataFrame.loc` and if that fails because of a
    multidimensional key (e.g., a DataFrame) then the result will be passed
    to :meth:`DataFrame.__getitem__`.

    This method uses the top-level :func:`eval` function to
    evaluate the passed query.

    The :meth:`~pandas.DataFrame.query` method uses a slightly
    modified Python syntax by default. For example, the ``&`` and ``|``
    (bitwise) operators have the precedence of their boolean cousins,
    :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
    however the semantics are different.

    You can change the semantics of the expression by passing the keyword
    argument ``parser='python'``. This enforces the same semantics as
    evaluation in Python space. Likewise, you can pass ``engine='python'``
    to evaluate an expression using Python itself as a backend. This is not
    recommended as it is inefficient compared to using ``numexpr`` as the
    engine.

    The :attr:`DataFrame.index` and
    :attr:`DataFrame.columns` attributes of the
    :class:`~pandas.DataFrame` instance are placed in the query namespace
    by default, which allows you to treat both the index and columns of the
    frame as a column in the frame.
    The identifier ``index`` is used for the frame index; you can also
    use the name of the index to identify it in a query. Please note that
    Python keywords may not be used as identifiers.

    For further details and examples see the ``query`` documentation in
    :ref:`indexing <indexing.query>`.

    *Backtick quoted variables*

    Backtick quoted variables are parsed as literal Python code and
    are converted internally to a Python valid identifier.
    This can lead to the following problems.

    During parsing a number of disallowed characters inside the backtick
    quoted string are replaced by strings that are allowed as a Python identifier.
    These characters include all operators in Python, the space character, the
    question mark, the exclamation mark, the dollar sign, and the euro sign.
    For other characters that fall outside the ASCII range (U+0001..U+007F)
    and those that are not further specified in PEP 3131,
    the query parser will raise an error.
    This excludes whitespace different than the space character,
    but also the hashtag (as it is used for comments) and the backtick
    itself (backtick can also not be escaped).

    In a special case, quotes that make a pair around a backtick can
    confuse the parser.
    For example, ```it's` > `that's``` will raise an error,
    as it forms a quoted string (``'s > `that'``) with a backtick inside.

    See also the Python documentation about lexical analysis
    (https://docs.python.org/3/reference/lexical_analysis.html)
    in combination with the source code in :mod:`pandas.core.computation.parsing`.

    Examples
    --------
    >>> df = pd.DataFrame({'A': range(1, 6),
    ...                    'B': range(10, 0, -2),
    ...                    'C C': range(10, 5, -1)})
    >>> df
       A   B  C C
    0  1  10   10
    1  2   8    9
    2  3   6    8
    3  4   4    7
    4  5   2    6
    >>> df.query('A > B')
       A  B  C C
    4  5  2    6

    The previous expression is equivalent to

    >>> df[df.A > df.B]
       A  B  C C
    4  5  2    6

    For columns with spaces in their name, you can use backtick quoting.

    >>> df.query('B == `C C`')
       A   B  C C
    0  1  10   10

    The previous expression is equivalent to

    >>> df[df.B == df['C C']]
       A   B  C C
    0  1  10   10
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if not isinstance(expr, str):
        msg = f"expr must be a string to be evaluated, {type(expr)} given"
        raise ValueError(msg)
    # bump the stack level so @-prefixed variables resolve in the
    # caller's frame rather than in this method
    kwargs["level"] = kwargs.pop("level", 0) + 1
    # query never assigns; make sure eval does not mutate a target
    kwargs["target"] = None
    res = self.eval(expr, **kwargs)

    try:
        result = self.loc[res]
    except ValueError:
        # when res is multi-dimensional loc raises, but this is sometimes a
        # valid query
        result = self[res]

    if inplace:
        self._update_inplace(result)
        return None
    else:
        return result
def eval(self, expr: str, inplace: bool = False, **kwargs):
    """
    Evaluate a string describing operations on DataFrame columns.

    Operates on columns only, not specific rows or elements. This allows
    `eval` to run arbitrary code, which can make you vulnerable to code
    injection if you pass user input to this function.

    Parameters
    ----------
    expr : str
        The expression string to evaluate.
    inplace : bool, default False
        If the expression contains an assignment, whether to perform the
        operation inplace and mutate the existing DataFrame. Otherwise,
        a new DataFrame is returned.
    **kwargs
        See the documentation for :func:`eval` for complete details
        on the keyword arguments accepted by
        :meth:`~pandas.DataFrame.query`.

    Returns
    -------
    ndarray, scalar, pandas object, or None
        The result of the evaluation or None if ``inplace=True``.

    See Also
    --------
    DataFrame.query : Evaluates a boolean expression to query the columns
        of a frame.
    DataFrame.assign : Can evaluate an expression or function to create new
        values for a column.
    eval : Evaluate a Python expression as a string using various
        backends.

    Notes
    -----
    For more details see the API documentation for :func:`~eval`.
    For detailed examples see :ref:`enhancing performance with eval
    <enhancingperf.eval>`.

    Examples
    --------
    >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
    >>> df
       A   B
    0  1  10
    1  2   8
    2  3   6
    3  4   4
    4  5   2
    >>> df.eval('A + B')
    0    11
    1    10
    2     9
    3     8
    4     7
    dtype: int64

    Assignment is allowed though by default the original DataFrame is not
    modified.

    >>> df.eval('C = A + B')
       A   B   C
    0  1  10  11
    1  2   8  10
    2  3   6   9
    3  4   4   8
    4  5   2   7
    >>> df
       A   B
    0  1  10
    1  2   8
    2  3   6
    3  4   4
    4  5   2

    Use ``inplace=True`` to modify the original DataFrame.

    >>> df.eval('C = A + B', inplace=True)
    >>> df
       A   B   C
    0  1  10  11
    1  2   8  10
    2  3   6   9
    3  4   4   8
    4  5   2   7

    Multiple columns can be assigned to using multi-line expressions:

    >>> df.eval(
    ...     '''
    ... C = A + B
    ... D = A - B
    ... '''
    ... )
       A   B   C  D
    0  1  10  11 -9
    1  2   8  10 -6
    2  3   6   9 -3
    3  4   4   8  0
    4  5   2   7  3
    """
    from pandas.core.computation.eval import eval as _eval

    inplace = validate_bool_kwarg(inplace, "inplace")
    # bump the stack level so variables resolve in the caller's frame
    kwargs["level"] = kwargs.pop("level", 0) + 1
    # expose index/columns (and cleaned column names) to the expression
    index_resolvers = self._get_index_resolvers()
    column_resolvers = self._get_cleaned_column_resolvers()
    resolvers = column_resolvers, index_resolvers
    if "target" not in kwargs:
        kwargs["target"] = self
    # caller-supplied resolvers take precedence (listed first)
    kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers

    return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
    """
    Return a subset of the DataFrame's columns based on the column dtypes.

    Parameters
    ----------
    include, exclude : scalar or list-like
        A selection of dtypes or strings to be included/excluded. At least
        one of these parameters must be supplied.

    Returns
    -------
    DataFrame
        The subset of the frame including the dtypes in ``include`` and
        excluding the dtypes in ``exclude``.

    Raises
    ------
    ValueError
        * If both of ``include`` and ``exclude`` are empty
        * If ``include`` and ``exclude`` have overlapping elements
        * If any kind of string dtype is passed in.

    See Also
    --------
    DataFrame.dtypes: Return Series with the data type of each column.

    Notes
    -----
    * To select all *numeric* types, use ``np.number`` or ``'number'``
    * To select strings you must use the ``object`` dtype, but note that
      this will return *all* object dtype columns
    * See the `numpy dtype hierarchy
      <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
    * To select datetimes, use ``np.datetime64``, ``'datetime'`` or
      ``'datetime64'``
    * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
      ``'timedelta64'``
    * To select Pandas categorical dtypes, use ``'category'``
    * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
      0.20.0) or ``'datetime64[ns, tz]'``

    Examples
    --------
    >>> df = pd.DataFrame({'a': [1, 2] * 3,
    ...                    'b': [True, False] * 3,
    ...                    'c': [1.0, 2.0] * 3})
    >>> df
            a      b  c
    0       1   True  1.0
    1       2  False  2.0
    2       1   True  1.0
    3       2  False  2.0
    4       1   True  1.0
    5       2  False  2.0

    >>> df.select_dtypes(include='bool')
       b
    0  True
    1  False
    2  True
    3  False
    4  True
    5  False

    >>> df.select_dtypes(include=['float64'])
       c
    0  1.0
    1  2.0
    2  1.0
    3  2.0
    4  1.0
    5  2.0

    >>> df.select_dtypes(exclude=['int64'])
           b    c
    0   True  1.0
    1  False  2.0
    2   True  1.0
    3  False  2.0
    4   True  1.0
    5  False  2.0
    """
    # normalize scalars to 1-tuples; None to empty
    if not is_list_like(include):
        include = (include,) if include is not None else ()
    if not is_list_like(exclude):
        exclude = (exclude,) if exclude is not None else ()

    selection = (frozenset(include), frozenset(exclude))

    if not any(selection):
        raise ValueError("at least one of include or exclude must be nonempty")

    # convert the myriad valid dtypes object to a single representation
    def check_int_infer_dtype(dtypes):
        # Normalize user-supplied dtype descriptors to concrete dtype
        # classes so the subclass checks below are well-defined.
        converted_dtypes: list[type] = []
        for dtype in dtypes:
            # Numpy maps int to different types (int32, in64) on Windows and Linux
            # see https://github.com/numpy/numpy/issues/9464
            if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
                converted_dtypes.append(np.int32)
                converted_dtypes.append(np.int64)
            elif dtype == "float" or dtype is float:
                # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20
                converted_dtypes.extend([np.float64, np.float32])
            else:
                converted_dtypes.append(infer_dtype_from_object(dtype))
        return frozenset(converted_dtypes)

    include = check_int_infer_dtype(include)
    exclude = check_int_infer_dtype(exclude)

    for dtypes in (include, exclude):
        invalidate_string_dtypes(dtypes)

    # can't both include AND exclude!
    if not include.isdisjoint(exclude):
        raise ValueError(f"include and exclude overlap on {(include & exclude)}")

    def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool:
        # np.number in the set additionally matches extension dtypes
        # that flag themselves numeric via `_is_numeric`
        return issubclass(dtype.type, tuple(dtypes_set)) or (
            np.number in dtypes_set and getattr(dtype, "_is_numeric", False)
        )

    def predicate(arr: ArrayLike) -> bool:
        # keep the column iff it matches include (when given) and does
        # not match exclude (when given)
        dtype = arr.dtype
        if include:
            if not dtype_predicate(dtype, include):
                return False

        if exclude:
            if dtype_predicate(dtype, exclude):
                return False

        return True

    mgr = self._mgr._get_data_subset(predicate)
    return type(self)(mgr).__finalize__(self)
def insert(
    self,
    loc: int,
    column: Hashable,
    value: Scalar | AnyArrayLike,
    allow_duplicates: bool | lib.NoDefault = lib.no_default,
) -> None:
    """
    Insert column into DataFrame at specified location.

    Raises a ValueError if `column` is already contained in the DataFrame,
    unless `allow_duplicates` is set to True.

    Parameters
    ----------
    loc : int
        Insertion index. Must verify 0 <= loc <= len(columns).
    column : str, number, or hashable object
        Label of the inserted column.
    value : Scalar, Series, or array-like
    allow_duplicates : bool, optional, default lib.no_default

    See Also
    --------
    Index.insert : Insert new item by index.

    Examples
    --------
    >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    >>> df
       col1  col2
    0     1     3
    1     2     4
    >>> df.insert(1, "newcol", [99, 99])
    >>> df
       col1  newcol  col2
    0     1      99     3
    1     2      99     4
    >>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
    >>> df
       col1  col1  newcol  col2
    0   100     1      99     3
    1   100     2      99     4

    Notice that pandas uses index alignment in case of `value` from type `Series`:

    >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2]))
    >>> df
       col0  col1  col1  newcol  col2
    0   NaN   100     1      99     3
    1   5.0   100     2      99     4
    """
    # sentinel default: behave as allow_duplicates=False unless passed
    if allow_duplicates is lib.no_default:
        allow_duplicates = False
    if allow_duplicates and not self.flags.allows_duplicate_labels:
        raise ValueError(
            "Cannot specify 'allow_duplicates=True' when "
            "'self.flags.allows_duplicate_labels' is False."
        )
    if not allow_duplicates and column in self.columns:
        # Should this be a different kind of error??
        raise ValueError(f"cannot insert {column}, already exists")
    if not isinstance(loc, int):
        raise TypeError("loc must be int")

    value = self._sanitize_column(value)
    self._mgr.insert(loc, column, value)
def assign(self, **kwargs) -> DataFrame:
    r"""
    Assign new columns to a DataFrame.

    Returns a new object with all original columns in addition to new ones.
    Existing columns that are re-assigned will be overwritten.

    Parameters
    ----------
    **kwargs : dict of {str: callable or Series}
        The column names are keywords. If the values are
        callable, they are computed on the DataFrame and
        assigned to the new columns. The callable must not
        change input DataFrame (though pandas doesn't check it).
        If the values are not callable, (e.g. a Series, scalar, or array),
        they are simply assigned.

    Returns
    -------
    DataFrame
        A new DataFrame with the new columns in addition to
        all the existing columns.

    Notes
    -----
    Assigning multiple columns within the same ``assign`` is possible.
    Later items in '\*\*kwargs' may refer to newly created or modified
    columns in 'df'; items are computed and assigned into 'df' in order.

    Examples
    --------
    >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
    ...                   index=['Portland', 'Berkeley'])
    >>> df
              temp_c
    Portland    17.0
    Berkeley    25.0

    Where the value is a callable, evaluated on `df`:

    >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
              temp_c  temp_f
    Portland    17.0    62.6
    Berkeley    25.0    77.0

    Alternatively, the same behavior can be achieved by directly
    referencing an existing Series or sequence:

    >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
              temp_c  temp_f
    Portland    17.0    62.6
    Berkeley    25.0    77.0

    You can create multiple columns within the same assign where one
    of the columns depends on another one defined within the same assign:

    >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
    ...           temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
              temp_c  temp_f  temp_k
    Portland    17.0    62.6  290.15
    Berkeley    25.0    77.0  298.15
    """
    # Work on a copy so the caller's frame is never mutated; assign
    # columns one at a time so later kwargs can see earlier results.
    result = self.copy()
    for column, spec in kwargs.items():
        result[column] = com.apply_if_callable(spec, result)
    return result
def _sanitize_column(self, value) -> ArrayLike:
    """
    Ensures new columns (which go into the BlockManager as new blocks) are
    always copied and converted into an array.

    Parameters
    ----------
    value : scalar, Series, or array-like

    Returns
    -------
    numpy.ndarray or ExtensionArray
    """
    self._ensure_valid_index(value)

    # We should never get here with DataFrame value
    if isinstance(value, Series):
        # align the Series to our index before extracting its values
        return _reindex_for_setitem(value, self.index)

    if is_list_like(value):
        com.require_length_match(value, self.index)
    # scalars skip the length check — presumably broadcast to len(index)
    # by sanitize_array (confirm against its docs if changing)
    return sanitize_array(value, self.index, copy=True, allow_2d=True)
@property
def _series(self):
    """Map each column label to a Series backed by this frame's manager."""
    boxed = {}
    for position, label in enumerate(self.columns):
        boxed[label] = Series(
            self._mgr.iget(position), index=self.index, name=label, fastpath=True
        )
    return boxed
def lookup(
    self, row_labels: Sequence[IndexLabel], col_labels: Sequence[IndexLabel]
) -> np.ndarray:
    """
    Label-based "fancy indexing" function for DataFrame.

    Given equal-length arrays of row and column labels, return an
    array of the values corresponding to each (row, col) pair.

    .. deprecated:: 1.2.0
        DataFrame.lookup is deprecated,
        use pandas.factorize and NumPy indexing instead.
        For further details see
        :ref:`Looking up values by index/column labels <indexing.lookup>`.

    Parameters
    ----------
    row_labels : sequence
        The row labels to use for lookup.
    col_labels : sequence
        The column labels to use for lookup.

    Returns
    -------
    numpy.ndarray
        The found values.
    """
    msg = (
        "The 'lookup' method is deprecated and will be "
        "removed in a future version. "
        "You can use DataFrame.melt and DataFrame.loc "
        "as a substitute."
    )
    warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())

    n = len(row_labels)
    if n != len(col_labels):
        raise ValueError("Row labels must have same size as column labels")
    if not (self.index.is_unique and self.columns.is_unique):
        # GH#33041
        raise ValueError("DataFrame.lookup requires unique index and columns")

    # fast path: flatten the 2D values and index once; only worthwhile
    # when the frame is homogeneous or the number of lookups is large
    thresh = 1000
    if not self._is_mixed_type or n > thresh:
        values = self.values
        ridx = self.index.get_indexer(row_labels)
        cidx = self.columns.get_indexer(col_labels)
        if (ridx == -1).any():
            raise KeyError("One or more row labels was not found")
        if (cidx == -1).any():
            raise KeyError("One or more column labels was not found")
        # row-major flat offset into the 2D values array
        flat_index = ridx * len(self.columns) + cidx
        result = values.flat[flat_index]
    else:
        # slow path: scalar lookup per pair, preserving column dtypes
        result = np.empty(n, dtype="O")
        for i, (r, c) in enumerate(zip(row_labels, col_labels)):
            result[i] = self._get_value(r, c)

    if is_object_dtype(result):
        result = lib.maybe_convert_objects(result)

    return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
    """Reindex columns first, then the index, returning the rebuilt frame."""
    result = self

    new_columns = axes["columns"]
    if new_columns is not None:
        result = result._reindex_columns(
            new_columns, method, copy, level, fill_value, limit, tolerance
        )

    new_index = axes["index"]
    if new_index is not None:
        result = result._reindex_index(
            new_index, method, copy, level, fill_value, limit, tolerance
        )

    return result
def _reindex_index(
    self,
    new_index,
    method,
    copy: bool,
    level: Level,
    fill_value=np.nan,
    limit=None,
    tolerance=None,
):
    """Conform the rows to ``new_index`` (axis 0), filling with ``fill_value``."""
    new_index, indexer = self.index.reindex(
        new_index, method=method, level=level, limit=limit, tolerance=tolerance
    )
    return self._reindex_with_indexers(
        {0: [new_index, indexer]},
        copy=copy,
        fill_value=fill_value,
        allow_dups=False,
    )
def _reindex_columns(
    self,
    new_columns,
    method,
    copy: bool,
    level: Level,
    fill_value=None,
    limit=None,
    tolerance=None,
):
    """Conform the columns to ``new_columns`` (axis 1), filling with ``fill_value``."""
    new_columns, indexer = self.columns.reindex(
        new_columns, method=method, level=level, limit=limit, tolerance=tolerance
    )
    return self._reindex_with_indexers(
        {1: [new_columns, indexer]},
        copy=copy,
        fill_value=fill_value,
        allow_dups=False,
    )
def _reindex_multi(
    self, axes: dict[str, Index], copy: bool, fill_value
) -> DataFrame:
    """
    We are guaranteed non-Nones in the axes.
    """
    new_index, row_indexer = self.index.reindex(axes["index"])
    new_columns, col_indexer = self.columns.reindex(axes["columns"])

    if row_indexer is not None and col_indexer is not None:
        # Fastpath. By doing two 'take's at once we avoid making an
        # unnecessary copy.
        # We only get here with `not self._is_mixed_type`, which (almost)
        # ensures that self.values is cheap. It may be worth making this
        # condition more specific.
        indexer = row_indexer, col_indexer
        new_values = take_2d_multi(self.values, indexer, fill_value=fill_value)
        return self._constructor(new_values, index=new_index, columns=new_columns)
    else:
        # a None indexer means that axis is already in the desired order;
        # fall back to the general per-axis reindex
        return self._reindex_with_indexers(
            {0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
            copy=copy,
            fill_value=fill_value,
        )
# Thin wrapper: delegates entirely to NDFrame.align; @doc injects the
# shared docstring with DataFrame-specific substitutions.
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
    self,
    other,
    join: str = "outer",
    axis: Axis | None = None,
    level: Level | None = None,
    copy: bool = True,
    fill_value=None,
    method: str | None = None,
    limit=None,
    fill_axis: Axis = 0,
    broadcast_axis: Axis | None = None,
) -> DataFrame:
    return super().align(
        other,
        join=join,
        axis=axis,
        level=level,
        copy=copy,
        fill_value=fill_value,
        method=method,
        limit=limit,
        fill_axis=fill_axis,
        broadcast_axis=broadcast_axis,
    )
# @overload stubs encoding the inplace-dependent return type:
# inplace=True returns None, the default inplace=False returns a DataFrame.
@overload
def set_axis(
    self, labels, axis: Axis = ..., inplace: Literal[False] = ...
) -> DataFrame:
    ...

@overload
def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
    ...

@overload
def set_axis(self, labels, *, inplace: Literal[True]) -> None:
    ...

@overload
def set_axis(
    self, labels, axis: Axis = ..., inplace: bool = ...
) -> DataFrame | None:
    ...
# Thin wrapper over NDFrame.set_axis; the decorator stack appends the
# DataFrame examples and substitutes into the shared docstring.
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
@Appender(
    """
    Examples
    --------
    >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})

    Change the row labels.

    >>> df.set_axis(['a', 'b', 'c'], axis='index')
       A  B
    a  1  4
    b  2  5
    c  3  6

    Change the column labels.

    >>> df.set_axis(['I', 'II'], axis='columns')
       I  II
    0  1   4
    1  2   5
    2  3   6

    Now, update the labels inplace.

    >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
    >>> df
       i  ii
    0  1   4
    1  2   5
    2  3   6
    """
)
@Substitution(
    **_shared_doc_kwargs,
    extended_summary_sub=" column or",
    axis_description_sub=", and 1 identifies the columns",
    see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
    return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
    "labels",
    [
        ("method", None),
        ("copy", True),
        ("level", None),
        ("fill_value", np.nan),
        ("limit", None),
        ("tolerance", None),
    ],
)
def reindex(self, *args, **kwargs) -> DataFrame:
    # translate positional/axis-style args into index/columns kwargs
    axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
    kwargs.update(axes)
    # Pop these, since the values are in `kwargs` under different names
    kwargs.pop("axis", None)
    kwargs.pop("labels", None)
    return super().reindex(**kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def drop(
    self,
    labels=None,
    axis: Axis = 0,
    index=None,
    columns=None,
    level: Level | None = None,
    inplace: bool = False,
    errors: str = "raise",
):
    """
    Drop specified labels from rows or columns.

    Remove rows or columns by specifying label names and corresponding
    axis, or by specifying directly index or column names. When using a
    multi-index, labels on different levels can be removed by specifying
    the level. See the `user guide <advanced.shown_levels>`
    for more information about the now unused levels.

    Parameters
    ----------
    labels : single label or list-like
        Index or column labels to drop. A tuple will be used as a single
        label and not treated as a list-like.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Whether to drop labels from the index (0 or 'index') or
        columns (1 or 'columns').
    index : single label or list-like
        Alternative to specifying axis (``labels, axis=0``
        is equivalent to ``index=labels``).
    columns : single label or list-like
        Alternative to specifying axis (``labels, axis=1``
        is equivalent to ``columns=labels``).
    level : int or level name, optional
        For MultiIndex, level from which the labels will be removed.
    inplace : bool, default False
        If False, return a copy. Otherwise, do operation
        inplace and return None.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and only existing labels are
        dropped.

    Returns
    -------
    DataFrame or None
        DataFrame without the removed index or column labels or
        None if ``inplace=True``.

    Raises
    ------
    KeyError
        If any of the labels is not found in the selected axis.

    See Also
    --------
    DataFrame.loc : Label-location based indexer for selection by label.
    DataFrame.dropna : Return DataFrame with labels on given axis omitted
        where (all or any) data are missing.
    DataFrame.drop_duplicates : Return DataFrame with duplicate rows
        removed, optionally only considering certain columns.
    Series.drop : Return Series with specified index labels removed.

    Examples
    --------
    >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
    ...                   columns=['A', 'B', 'C', 'D'])
    >>> df
       A  B   C   D
    0  0  1   2   3
    1  4  5   6   7
    2  8  9  10  11

    Drop columns

    >>> df.drop(['B', 'C'], axis=1)
       A   D
    0  0   3
    1  4   7
    2  8  11

    >>> df.drop(columns=['B', 'C'])
       A   D
    0  0   3
    1  4   7
    2  8  11

    Drop a row by index

    >>> df.drop([0, 1])
       A  B   C   D
    2  8  9  10  11

    Drop columns and/or rows of MultiIndex DataFrame

    >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
    ...                              ['speed', 'weight', 'length']],
    ...                      codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                             [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
    ...                   data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
    ...                         [250, 150], [1.5, 0.8], [320, 250],
    ...                         [1, 0.8], [0.3, 0.2]])
    >>> df
                    big     small
    lama    speed   45.0    30.0
            weight  200.0   100.0
            length  1.5     1.0
    cow     speed   30.0    20.0
            weight  250.0   150.0
            length  1.5     0.8
    falcon  speed   320.0   250.0
            weight  1.0     0.8
            length  0.3     0.2

    Drop a specific index combination from the MultiIndex
    DataFrame, i.e., drop the combination ``'falcon'`` and
    ``'weight'``, which deletes only the corresponding row

    >>> df.drop(index=('falcon', 'weight'))
                    big     small
    lama    speed   45.0    30.0
            weight  200.0   100.0
            length  1.5     1.0
    cow     speed   30.0    20.0
            weight  250.0   150.0
            length  1.5     0.8
    falcon  speed   320.0   250.0
            length  0.3     0.2

    >>> df.drop(index='cow', columns='small')
                    big
    lama    speed   45.0
            weight  200.0
            length  1.5
    falcon  speed   320.0
            weight  1.0
            length  0.3

    >>> df.drop(index='length', level=1)
                    big     small
    lama    speed   45.0    30.0
            weight  200.0   100.0
    cow     speed   30.0    20.0
            weight  250.0   150.0
    falcon  speed   320.0   250.0
            weight  1.0     0.8
    """
    # all validation and the actual drop live in NDFrame.drop
    return super().drop(
        labels=labels,
        axis=axis,
        index=index,
        columns=columns,
        level=level,
        inplace=inplace,
        errors=errors,
    )
def rename(
    self,
    mapper: Renamer | None = None,
    *,
    index: Renamer | None = None,
    columns: Renamer | None = None,
    axis: Axis | None = None,
    copy: bool = True,
    inplace: bool = False,
    level: Level | None = None,
    errors: str = "ignore",
) -> DataFrame | None:
    """
    Alter axes labels.

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error.

    See the :ref:`user guide <basics.rename>` for more.

    Parameters
    ----------
    mapper : dict-like or function
        Dict-like or function transformations to apply to
        that axis' values. Use either ``mapper`` and ``axis`` to
        specify the axis to target with ``mapper``, or ``index`` and
        ``columns``.
    index : dict-like or function
        Alternative to specifying axis (``mapper, axis=0``
        is equivalent to ``index=mapper``).
    columns : dict-like or function
        Alternative to specifying axis (``mapper, axis=1``
        is equivalent to ``columns=mapper``).
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to target with ``mapper``. Can be either the axis name
        ('index', 'columns') or number (0, 1). The default is 'index'.
    copy : bool, default True
        Also copy underlying data.
    inplace : bool, default False
        Whether to return a new DataFrame. If True then value of copy is
        ignored.
    level : int or level name, default None
        In case of a MultiIndex, only rename labels in the specified
        level.
    errors : {'ignore', 'raise'}, default 'ignore'
        If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
        or `columns` contains labels that are not present in the Index
        being transformed.
        If 'ignore', existing keys will be renamed and extra keys will be
        ignored.

    Returns
    -------
    DataFrame or None
        DataFrame with the renamed axis labels or None if ``inplace=True``.

    Raises
    ------
    KeyError
        If any of the labels is not found in the selected axis and
        "errors='raise'".

    See Also
    --------
    DataFrame.rename_axis : Set the name of the axis.

    Examples
    --------
    ``DataFrame.rename`` supports two calling conventions

    * ``(index=index_mapper, columns=columns_mapper, ...)``
    * ``(mapper, axis={'index', 'columns'}, ...)``

    We *highly* recommend using keyword arguments to clarify your
    intent.

    Rename columns using a mapping:

    >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    >>> df.rename(columns={"A": "a", "B": "c"})
       a  c
    0  1  4
    1  2  5
    2  3  6

    Rename index using a mapping:

    >>> df.rename(index={0: "x", 1: "y", 2: "z"})
       A  B
    x  1  4
    y  2  5
    z  3  6

    Cast index labels to a different type:

    >>> df.index
    RangeIndex(start=0, stop=3, step=1)
    >>> df.rename(index=str).index
    Index(['0', '1', '2'], dtype='object')

    >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
    Traceback (most recent call last):
    KeyError: ['C'] not found in axis

    Using axis-style parameters:

    >>> df.rename(str.lower, axis='columns')
       a  b
    0  1  4
    1  2  5
    2  3  6

    >>> df.rename({1: 2, 2: 4}, axis='index')
       A  B
    0  1  4
    2  2  5
    4  3  6
    """
    # delegates to the shared NDFrame._rename implementation
    return super()._rename(
        mapper=mapper,
        index=index,
        columns=columns,
        axis=axis,
        copy=copy,
        inplace=inplace,
        level=level,
        errors=errors,
    )
# @overload stubs encoding the inplace-dependent return type: any call
# with inplace=True returns None; the default returns a DataFrame. The
# many permutations cover positional/keyword combinations of the
# preceding parameters.
@overload
def fillna(
    self,
    value=...,
    method: FillnaOptions | None = ...,
    axis: Axis | None = ...,
    inplace: Literal[False] = ...,
    limit=...,
    downcast=...,
) -> DataFrame:
    ...

@overload
def fillna(
    self,
    value,
    method: FillnaOptions | None,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    method: FillnaOptions | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    *,
    method: FillnaOptions | None,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    *,
    axis: Axis | None,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value,
    method: FillnaOptions | None,
    *,
    inplace: Literal[True],
    limit=...,
    downcast=...,
) -> None:
    ...

@overload
def fillna(
    self,
    value=...,
    method: FillnaOptions | None = ...,
    axis: Axis | None = ...,
    inplace: bool = ...,
    limit=...,
    downcast=...,
) -> DataFrame | None:
    ...
# Thin wrapper over NDFrame.fillna; @doc injects the shared docstring.
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
    self,
    value: object | ArrayLike | None = None,
    method: FillnaOptions | None = None,
    axis: Axis | None = None,
    inplace: bool = False,
    limit=None,
    downcast=None,
) -> DataFrame | None:
    return super().fillna(
        value=value,
        method=method,
        axis=axis,
        inplace=inplace,
        limit=limit,
        downcast=downcast,
    )
    def pop(self, item: Hashable) -> Series:
        """
        Return item and drop from frame. Raise KeyError if not found.

        Parameters
        ----------
        item : label
            Label of column to be popped.

        Returns
        -------
        Series

        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
        ...                    ('parrot', 'bird', 24.0),
        ...                    ('lion', 'mammal', 80.5),
        ...                    ('monkey', 'mammal', np.nan)],
        ...                   columns=('name', 'class', 'max_speed'))
        >>> df
             name   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN

        >>> df.pop('class')
        0      bird
        1      bird
        2    mammal
        3    mammal
        Name: class, dtype: object

        >>> df
             name  max_speed
        0  falcon      389.0
        1  parrot       24.0
        2    lion       80.5
        3  monkey        NaN
        """
        # Delegation: NDFrame.pop removes the column in place and returns it.
        return super().pop(item=item)
    @doc(NDFrame.replace, **_shared_doc_kwargs)
    def replace(
        self,
        to_replace=None,
        value=lib.no_default,
        inplace: bool = False,
        limit=None,
        regex: bool = False,
        method: str | lib.NoDefault = lib.no_default,
    ):
        # Thin wrapper over NDFrame.replace; exists so @doc renders the shared
        # docstring with DataFrame-specific substitutions.
        return super().replace(
            to_replace=to_replace,
            value=value,
            inplace=inplace,
            limit=limit,
            regex=regex,
            method=method,
        )
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
    @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
    def shift(
        self,
        periods=1,
        freq: Frequency | None = None,
        axis: Axis = 0,
        fill_value=lib.no_default,
    ) -> DataFrame:
        axis = self._get_axis_number(axis)
        ncols = len(self.columns)
        # Fast paths for horizontal (axis=1) shifts; everything else falls
        # through to the generic NDFrame.shift at the bottom.
        if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
            # We will infer fill_value to match the closest column
            # Use a column that we know is valid for our column's dtype GH#38434
            label = self.columns[0]
            if periods > 0:
                result = self.iloc[:, :-periods]
                for col in range(min(ncols, abs(periods))):
                    # TODO(EA2D): doing this in a loop unnecessary with 2D EAs
                    # Define filler inside loop so we get a copy
                    # shift(len(self)) produces an all-NA column of the right dtype
                    filler = self.iloc[:, 0].shift(len(self))
                    result.insert(0, label, filler, allow_duplicates=True)
            else:
                result = self.iloc[:, -periods:]
                for col in range(min(ncols, abs(periods))):
                    # Define filler inside loop so we get a copy
                    filler = self.iloc[:, -1].shift(len(self))
                    result.insert(
                        len(result.columns), label, filler, allow_duplicates=True
                    )
            # Restore the original column labels (inserts reused `label`).
            result.columns = self.columns.copy()
            return result
        elif (
            axis == 1
            and periods != 0
            and fill_value is not lib.no_default
            and ncols > 0
        ):
            arrays = self._mgr.arrays
            if len(arrays) > 1 or (
                # If we only have one block and we know that we can't
                # keep the same dtype (i.e. the _can_hold_element check)
                # then we can go through the reindex_indexer path
                # (and avoid casting logic in the Block method).
                # The exception to this (until 2.0) is datetimelike
                # dtypes with integers, which cast.
                not can_hold_element(arrays[0], fill_value)
                # TODO(2.0): remove special case for integer-with-datetimelike
                # once deprecation is enforced
                and not (
                    lib.is_integer(fill_value) and needs_i8_conversion(arrays[0].dtype)
                )
            ):
                # GH#35488 we need to watch out for multi-block cases
                # We only get here with fill_value not-lib.no_default
                nper = abs(periods)
                nper = min(nper, ncols)
                if periods > 0:
                    # -1 entries in the indexer become fill_value columns
                    indexer = np.array(
                        [-1] * nper + list(range(ncols - periods)), dtype=np.intp
                    )
                else:
                    indexer = np.array(
                        list(range(nper, ncols)) + [-1] * nper, dtype=np.intp
                    )
                mgr = self._mgr.reindex_indexer(
                    self.columns,
                    indexer,
                    axis=0,
                    fill_value=fill_value,
                    allow_dups=True,
                )
                res_df = self._constructor(mgr)
                return res_df.__finalize__(self, method="shift")
        # Generic path: vertical shifts, freq-based shifts, etc.
        return super().shift(
            periods=periods, freq=freq, axis=axis, fill_value=fill_value
        )
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "keys"])
    def set_index(
        self,
        keys,
        drop: bool = True,
        append: bool = False,
        inplace: bool = False,
        verify_integrity: bool = False,
    ):
        """
        Set the DataFrame index using existing columns.

        Set the DataFrame index (row labels) using one or more existing
        columns or arrays (of the correct length). The index can replace the
        existing index or expand on it.

        Parameters
        ----------
        keys : label or array-like or list of labels/arrays
            This parameter can be either a single column key, a single array of
            the same length as the calling DataFrame, or a list containing an
            arbitrary combination of column keys and arrays. Here, "array"
            encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
            instances of :class:`~collections.abc.Iterator`.
        drop : bool, default True
            Delete columns to be used as the new index.
        append : bool, default False
            Whether to append columns to existing index.
        inplace : bool, default False
            If True, modifies the DataFrame in place (do not create a new object).
        verify_integrity : bool, default False
            Check the new index for duplicates. Otherwise defer the check until
            necessary. Setting to False will improve the performance of this
            method.

        Returns
        -------
        DataFrame or None
            Changed row labels or None if ``inplace=True``.

        See Also
        --------
        DataFrame.reset_index : Opposite of set_index.
        DataFrame.reindex : Change to new indices or expand indices.
        DataFrame.reindex_like : Change to same indices as other DataFrame.

        Examples
        --------
        >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
        ...                    'year': [2012, 2014, 2013, 2014],
        ...                    'sale': [55, 40, 84, 31]})
        >>> df
           month  year  sale
        0      1  2012    55
        1      4  2014    40
        2      7  2013    84
        3     10  2014    31

        Set the index to become the 'month' column:

        >>> df.set_index('month')
               year  sale
        month
        1      2012    55
        4      2014    40
        7      2013    84
        10     2014    31

        Create a MultiIndex using columns 'year' and 'month':

        >>> df.set_index(['year', 'month'])
                    sale
        year  month
        2012  1     55
        2014  4     40
        2013  7     84
        2014  10    31

        Create a MultiIndex using an Index and a column:

        >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
                 month  sale
           year
        1  2012  1      55
        2  2014  4      40
        3  2013  7      84
        4  2014  10     31

        Create a MultiIndex using two Series:

        >>> s = pd.Series([1, 2, 3, 4])
        >>> df.set_index([s, s**2])
              month  year  sale
        1 1       1  2012    55
        2 4       4  2014    40
        3 9       7  2013    84
        4 16     10  2014    31
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        self._check_inplace_and_allows_duplicate_labels(inplace)
        if not isinstance(keys, list):
            keys = [keys]
        err_msg = (
            'The parameter "keys" may be a column key, one-dimensional '
            "array, or a list containing only valid column keys and "
            "one-dimensional arrays."
        )
        # First pass: validate every entry in `keys` before mutating anything,
        # so a bad key cannot leave the frame half-modified.
        missing: list[Hashable] = []
        for col in keys:
            if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
                # arrays are fine as long as they are one-dimensional
                # iterators get converted to list below
                if getattr(col, "ndim", 1) != 1:
                    raise ValueError(err_msg)
            else:
                # everything else gets tried as a key; see GH 24969
                try:
                    found = col in self.columns
                except TypeError as err:
                    raise TypeError(
                        f"{err_msg}. Received column of type {type(col)}"
                    ) from err
                else:
                    if not found:
                        missing.append(col)
        if missing:
            raise KeyError(f"None of {missing} are in the columns")
        if inplace:
            frame = self
        else:
            frame = self.copy()
        # Build up the per-level arrays and names for the new index.
        arrays = []
        names: list[Hashable] = []
        if append:
            # Keep the existing index level(s) in front of the new ones.
            names = list(self.index.names)
            if isinstance(self.index, MultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index._get_level_values(i))
            else:
                arrays.append(self.index)
        to_remove: list[Hashable] = []
        for col in keys:
            if isinstance(col, MultiIndex):
                for n in range(col.nlevels):
                    arrays.append(col._get_level_values(n))
                names.extend(col.names)
            elif isinstance(col, (Index, Series)):
                # if Index then not MultiIndex (treated above)
                # error: Argument 1 to "append" of "list" has incompatible type
                # "Union[Index, Series]"; expected "Index"
                arrays.append(col)  # type:ignore[arg-type]
                names.append(col.name)
            elif isinstance(col, (list, np.ndarray)):
                # error: Argument 1 to "append" of "list" has incompatible type
                # "Union[List[Any], ndarray]"; expected "Index"
                arrays.append(col)  # type: ignore[arg-type]
                names.append(None)
            elif isinstance(col, abc.Iterator):
                # error: Argument 1 to "append" of "list" has incompatible type
                # "List[Any]"; expected "Index"
                arrays.append(list(col))  # type: ignore[arg-type]
                names.append(None)
            # from here, col can only be a column label
            else:
                arrays.append(frame[col]._values)
                names.append(col)
                if drop:
                    to_remove.append(col)
            if len(arrays[-1]) != len(self):
                # check newest element against length of calling frame, since
                # ensure_index_from_sequences would not raise for append=False.
                raise ValueError(
                    f"Length mismatch: Expected {len(self)} rows, "
                    f"received array of length {len(arrays[-1])}"
                )
        index = ensure_index_from_sequences(arrays, names)
        if verify_integrity and not index.is_unique:
            duplicates = index[index.duplicated()].unique()
            raise ValueError(f"Index has duplicate keys: {duplicates}")
        # use set to handle duplicate column names gracefully in case of drop
        for c in set(to_remove):
            del frame[c]
        # clear up memory usage
        index._cleanup()
        frame.index = index
        if not inplace:
            return frame
    # ------------------------------------------------------------------
    # reset_index @overload stubs for static type checkers:
    # inplace=True -> None, inplace=False (or defaulted) -> DataFrame,
    # arbitrary bool -> DataFrame | None.
    @overload
    def reset_index(
        self,
        level: Hashable | Sequence[Hashable] | None = ...,
        drop: bool = ...,
        inplace: Literal[False] = ...,
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> DataFrame:
        ...

    @overload
    def reset_index(
        self,
        level: Hashable | Sequence[Hashable] | None,
        drop: bool,
        inplace: Literal[True],
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def reset_index(
        self,
        *,
        drop: bool,
        inplace: Literal[True],
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def reset_index(
        self,
        level: Hashable | Sequence[Hashable] | None,
        *,
        inplace: Literal[True],
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def reset_index(
        self,
        *,
        inplace: Literal[True],
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def reset_index(
        self,
        level: Hashable | Sequence[Hashable] | None = ...,
        drop: bool = ...,
        inplace: bool = ...,
        col_level: Hashable = ...,
        col_fill: Hashable = ...,
        allow_duplicates: bool | lib.NoDefault = ...,
    ) -> DataFrame | None:
        ...
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
    def reset_index(
        self,
        level: Hashable | Sequence[Hashable] | None = None,
        drop: bool = False,
        inplace: bool = False,
        col_level: Hashable = 0,
        col_fill: Hashable = "",
        allow_duplicates: bool | lib.NoDefault = lib.no_default,
    ) -> DataFrame | None:
        """
        Reset the index, or a level of it.

        Reset the index of the DataFrame, and use the default one instead.
        If the DataFrame has a MultiIndex, this method can remove one or more
        levels.

        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default.
        drop : bool, default False
            Do not try to insert index into dataframe columns. This resets
            the index to the default integer index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the
            labels are inserted into. By default it is inserted into the first
            level.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other
            levels are named. If None then the index name is repeated.
        allow_duplicates : bool, optional, default lib.no_default
            Allow duplicate column labels to be created.

            .. versionadded:: 1.5.0

        Returns
        -------
        DataFrame or None
            DataFrame with the new index or None if ``inplace=True``.

        See Also
        --------
        DataFrame.set_index : Opposite of reset_index.
        DataFrame.reindex : Change to new indices or expand indices.
        DataFrame.reindex_like : Change to same indices as other DataFrame.

        Examples
        --------
        >>> df = pd.DataFrame([('bird', 389.0),
        ...                    ('bird', 24.0),
        ...                    ('mammal', 80.5),
        ...                    ('mammal', np.nan)],
        ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
        ...                   columns=('class', 'max_speed'))
        >>> df
                 class  max_speed
        falcon    bird      389.0
        parrot    bird       24.0
        lion    mammal       80.5
        monkey  mammal        NaN

        When we reset the index, the old index is added as a column, and a
        new sequential index is used:

        >>> df.reset_index()
            index   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN

        We can use the `drop` parameter to avoid the old index being added as
        a column:

        >>> df.reset_index(drop=True)
            class  max_speed
        0    bird      389.0
        1    bird       24.0
        2  mammal       80.5
        3  mammal        NaN

        You can also use `reset_index` with `MultiIndex`.

        >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
        ...                                    ('bird', 'parrot'),
        ...                                    ('mammal', 'lion'),
        ...                                    ('mammal', 'monkey')],
        ...                                   names=['class', 'name'])
        >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
        ...                                      ('species', 'type')])
        >>> df = pd.DataFrame([(389.0, 'fly'),
        ...                    ( 24.0, 'fly'),
        ...                    ( 80.5, 'run'),
        ...                    (np.nan, 'jump')],
        ...                   index=index,
        ...                   columns=columns)
        >>> df
                       speed species
                         max    type
        class  name
        bird   falcon  389.0     fly
               parrot   24.0     fly
        mammal lion     80.5     run
               monkey    NaN    jump

        If the index has multiple levels, we can reset a subset of them:

        >>> df.reset_index(level='class')
                 class  speed species
                          max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump

        If we are not dropping the index, by default, it is placed in the top
        level. We can place it in another level:

        >>> df.reset_index(level='class', col_level=1)
                        speed species
                 class    max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump

        When the index is inserted under another level, we can specify under
        which one with the parameter `col_fill`:

        >>> df.reset_index(level='class', col_level=1, col_fill='species')
                      species  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump

        If we specify a nonexistent level for `col_fill`, it is created:

        >>> df.reset_index(level='class', col_level=1, col_fill='genus')
                        genus  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        self._check_inplace_and_allows_duplicate_labels(inplace)
        if inplace:
            new_obj = self
        else:
            new_obj = self.copy()
        if allow_duplicates is not lib.no_default:
            allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates")
        # Default replacement index is a simple RangeIndex of the same length.
        new_index = default_index(len(new_obj))
        if level is not None:
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            if len(level) < self.index.nlevels:
                # Only some levels are removed: the remainder stays as index.
                new_index = self.index.droplevel(level)
        if not drop:
            to_insert: Iterable[tuple[Any, Any | None]]
            if isinstance(self.index, MultiIndex):
                names = com.fill_missing_names(self.index.names)
                to_insert = zip(self.index.levels, self.index.codes)
            else:
                default = "index" if "index" not in self else "level_0"
                names = [default] if self.index.name is None else [self.index.name]
                to_insert = ((self.index, None),)
            multi_col = isinstance(self.columns, MultiIndex)
            # Insert levels right-to-left so each ends up at position 0
            # in original level order.
            for i, (lev, lab) in reversed(list(enumerate(to_insert))):
                if level is not None and i not in level:
                    continue
                name = names[i]
                if multi_col:
                    # Build a full-length tuple label for MultiIndex columns,
                    # padding with col_fill around the requested col_level.
                    col_name = list(name) if isinstance(name, tuple) else [name]
                    if col_fill is None:
                        if len(col_name) not in (1, self.columns.nlevels):
                            raise ValueError(
                                "col_fill=None is incompatible "
                                f"with incomplete column name {name}"
                            )
                        col_fill = col_name[0]
                    lev_num = self.columns._get_level_number(col_level)
                    name_lst = [col_fill] * lev_num + col_name
                    missing = self.columns.nlevels - len(name_lst)
                    name_lst += [col_fill] * missing
                    name = tuple(name_lst)
                # to ndarray and maybe infer different dtype
                level_values = lev._values
                if level_values.dtype == np.object_:
                    level_values = lib.maybe_convert_objects(level_values)
                if lab is not None:
                    # if we have the codes, extract the values with a mask
                    level_values = algorithms.take(
                        level_values, lab, allow_fill=True, fill_value=lev._na_value
                    )
                new_obj.insert(
                    0,
                    name,
                    level_values,
                    allow_duplicates=allow_duplicates,
                )
        new_obj.index = new_index
        if not inplace:
            return new_obj
        return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
    @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
    def isnull(self) -> DataFrame:
        """
        DataFrame.isnull is an alias for DataFrame.isna.
        """
        # Pure alias: forwards directly to isna.
        return self.isna()
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
    def notna(self) -> DataFrame:
        # Boolean inverse of isna.
        return ~self.isna()
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
    def notnull(self) -> DataFrame:
        """
        DataFrame.notnull is an alias for DataFrame.notna.
        """
        # Alias: same result as notna (boolean inverse of isna).
        return ~self.isna()
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
    def dropna(
        self,
        axis: Axis = 0,
        how: str = "any",
        thresh=None,
        subset: IndexLabel | None = None,
        inplace: bool = False,
    ):
        """
        Remove missing values.

        See the :ref:`User Guide <missing_data>` for more on which values are
        considered missing, and how to work with missing data.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine if rows or columns which contain missing values are
            removed.

            * 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing value.

            .. versionchanged:: 1.0.0

               Pass tuple or list to drop on multiple axes.
               Only a single axis is allowed.

        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame, when we have
            at least one NA or all NA.

            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.

        thresh : int, optional
            Require that many non-NA values.
        subset : column label or sequence of labels, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        DataFrame or None
            DataFrame with NA entries dropped from it or None if ``inplace=True``.

        See Also
        --------
        DataFrame.isna: Indicate missing values.
        DataFrame.notna : Indicate existing (non-missing) values.
        DataFrame.fillna : Replace missing values.
        Series.dropna : Drop missing values.
        Index.dropna : Drop missing indices.

        Examples
        --------
        >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
        ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
        ...                             pd.NaT]})
        >>> df
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Drop the rows where at least one element is missing.

        >>> df.dropna()
             name        toy       born
        1  Batman  Batmobile 1940-04-25

        Drop the columns where at least one element is missing.

        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman

        Drop the rows where all elements are missing.

        >>> df.dropna(how='all')
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Keep only the rows with at least 2 non-NA values.

        >>> df.dropna(thresh=2)
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Define in which columns to look for missing values.

        >>> df.dropna(subset=['name', 'toy'])
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Keep the DataFrame with valid entries in the same variable.

        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if isinstance(axis, (tuple, list)):
            # GH20987
            raise TypeError("supplying multiple axes to axis is no longer supported.")
        axis = self._get_axis_number(axis)
        # agg_axis is the axis along which NA counts are aggregated
        # (the opposite of the axis being dropped from).
        agg_axis = 1 - axis
        agg_obj = self
        if subset is not None:
            # subset needs to be list
            if not is_list_like(subset):
                subset = [subset]
            ax = self._get_axis(agg_axis)
            indices = ax.get_indexer_for(subset)
            check = indices == -1
            if check.any():
                raise KeyError(np.array(subset)[check].tolist())
            # Restrict the NA-counting to the requested labels only.
            agg_obj = self.take(indices, axis=agg_axis)
        # `mask` marks the rows/columns to KEEP.
        if thresh is not None:
            count = agg_obj.count(axis=agg_axis)
            mask = count >= thresh
        elif how == "any":
            # faster equivalent to 'agg_obj.count(agg_axis) == self.shape[agg_axis]'
            mask = notna(agg_obj).all(axis=agg_axis, bool_only=False)
        elif how == "all":
            # faster equivalent to 'agg_obj.count(agg_axis) > 0'
            mask = notna(agg_obj).any(axis=agg_axis, bool_only=False)
        else:
            if how is not None:
                raise ValueError(f"invalid how option: {how}")
            else:
                raise TypeError("must specify how or thresh")
        if np.all(mask):
            # Nothing to drop: still return a copy for consistency.
            result = self.copy()
        else:
            result = self.loc(axis=axis)[mask]
        if inplace:
            self._update_inplace(result)
        else:
            return result
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "subset"])
def drop_duplicates(
self,
subset: Hashable | Sequence[Hashable] | None = None,
keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> DataFrame | None:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
result = self[-duplicated]
if ignore_index:
result.index = default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
    def duplicated(
        self,
        subset: Hashable | Sequence[Hashable] | None = None,
        keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
    ) -> Series:
        """
        Return boolean Series denoting duplicate rows.

        Considering certain columns is optional.

        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns.
        keep : {'first', 'last', False}, default 'first'
            Determines which duplicates (if any) to mark.

            - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
            - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
            - False : Mark all duplicates as ``True``.

        Returns
        -------
        Series
            Boolean series for each duplicated rows.

        See Also
        --------
        Index.duplicated : Equivalent method on index.
        Series.duplicated : Equivalent method on Series.
        Series.drop_duplicates : Remove duplicate values from Series.
        DataFrame.drop_duplicates : Remove duplicate values from DataFrame.

        Examples
        --------
        Consider dataset containing ramen rating.

        >>> df = pd.DataFrame({
        ...     'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
        ...     'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
        ...     'rating': [4, 4, 3.5, 15, 5]
        ... })
        >>> df
            brand style  rating
        0  Yum Yum   cup     4.0
        1  Yum Yum   cup     4.0
        2  Indomie   cup     3.5
        3  Indomie  pack    15.0
        4  Indomie  pack     5.0

        By default, for each set of duplicated values, the first occurrence
        is set on False and all others on True.

        >>> df.duplicated()
        0    False
        1     True
        2    False
        3    False
        4    False
        dtype: bool

        By using 'last', the last occurrence of each set of duplicated values
        is set on False and all others on True.

        >>> df.duplicated(keep='last')
        0     True
        1    False
        2    False
        3    False
        4    False
        dtype: bool

        By setting ``keep`` on False, all duplicates are True.

        >>> df.duplicated(keep=False)
        0     True
        1     True
        2    False
        3    False
        4    False
        dtype: bool

        To find duplicates on specific column(s), use ``subset``.

        >>> df.duplicated(subset=['brand'])
        0    False
        1     True
        2    False
        3     True
        4     True
        dtype: bool
        """
        if self.empty:
            return self._constructor_sliced(dtype=bool)

        # Factorize one column into integer labels plus the number of
        # distinct values — inputs for building a combined group index.
        def f(vals) -> tuple[np.ndarray, int]:
            labels, shape = algorithms.factorize(vals, size_hint=len(self))
            return labels.astype("i8", copy=False), len(shape)

        if subset is None:
            # https://github.com/pandas-dev/pandas/issues/28770
            # Incompatible types in assignment (expression has type "Index", variable
            # has type "Sequence[Any]")
            subset = self.columns  # type: ignore[assignment]
        elif (
            not np.iterable(subset)
            or isinstance(subset, str)
            or isinstance(subset, tuple)
            and subset in self.columns
        ):
            # A single label (including a tuple that names one column)
            # is normalized to a one-element sequence.
            subset = (subset,)
        # needed for mypy since can't narrow types using np.iterable
        subset = cast(Sequence, subset)
        # Verify all columns in subset exist in the queried dataframe
        # Otherwise, raise a KeyError, same as if you try to __getitem__ with a
        # key that doesn't exist.
        diff = set(subset) - set(self.columns)
        if diff:
            raise KeyError(Index(diff))
        if len(subset) == 1 and self.columns.is_unique:
            # GH#45236 This is faster than get_group_index below
            result = self[subset[0]].duplicated(keep)
            result.name = None
        else:
            # Combine the per-column factorizations into a single integer id
            # per row, then detect duplicate ids.
            vals = (col.values for name, col in self.items() if name in subset)
            labels, shape = map(list, zip(*map(f, vals)))
            ids = get_group_index(
                labels,
                # error: Argument 1 to "tuple" has incompatible type "List[_T]";
                # expected "Iterable[int]"
                tuple(shape),  # type: ignore[arg-type]
                sort=False,
                xnull=False,
            )
            result = self._constructor_sliced(duplicated(ids, keep), index=self.index)
        return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "by"])
    @Substitution(**_shared_doc_kwargs)
    @Appender(NDFrame.sort_values.__doc__)
    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    def sort_values(  # type: ignore[override]
        self,
        by,
        axis: Axis = 0,
        ascending=True,
        inplace: bool = False,
        kind: str = "quicksort",
        na_position: str = "last",
        ignore_index: bool = False,
        key: ValueKeyFunc = None,
    ):
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = self._get_axis_number(axis)
        ascending = validate_ascending(ascending)
        if not isinstance(by, list):
            by = [by]
        if is_sequence(ascending) and len(by) != len(ascending):
            raise ValueError(
                f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
            )
        if len(by) > 1:
            # Multi-key sort: build one key array per label and lexsort them.
            keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
            # need to rewrap columns in Series to apply key function
            if key is not None:
                # error: List comprehension has incompatible type List[Series];
                # expected List[ndarray]
                keys = [
                    Series(k, name=name)  # type: ignore[misc]
                    for (k, name) in zip(keys, by)
                ]
            indexer = lexsort_indexer(
                keys, orders=ascending, na_position=na_position, key=key
            )
        elif len(by):
            # len(by) == 1
            by = by[0]
            k = self._get_label_or_level_values(by, axis=axis)
            # need to rewrap column in Series to apply key function
            if key is not None:
                # error: Incompatible types in assignment (expression has type
                # "Series", variable has type "ndarray")
                k = Series(k, name=by)  # type: ignore[assignment]
            if isinstance(ascending, (tuple, list)):
                ascending = ascending[0]
            indexer = nargsort(
                k, kind=kind, ascending=ascending, na_position=na_position, key=key
            )
        else:
            # Empty `by`: nothing to sort on — return an unchanged copy.
            return self.copy()
        # Reorder the underlying blocks along the sorted axis.
        new_data = self._mgr.take(
            indexer, axis=self._get_block_manager_axis(axis), verify=False
        )
        if ignore_index:
            new_data.set_axis(
                self._get_block_manager_axis(axis), default_index(len(indexer))
            )
        result = self._constructor(new_data)
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="sort_values")
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_index(
self,
axis: Axis = 0,
level: Level | None = None,
ascending: bool | int | Sequence[bool | int] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list-like of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order, to sort in descending order,
use ``ascending=False``
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
dropna : bool, default True
Don’t include counts of rows that contain NA values.
.. versionadded:: 1.3.0
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
With `dropna` set to `False` we can also count rows with NA values.
>>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'],
... 'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']})
>>> df
first_name middle_name
0 John Smith
1 Anne <NA>
2 John <NA>
3 Beth Louise
>>> df.value_counts()
first_name middle_name
Beth Louise 1
John Smith 1
dtype: int64
>>> df.value_counts(dropna=False)
first_name middle_name
Anne NaN 1
Beth Louise 1
John Smith 1
NaN 1
dtype: int64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset, dropna=dropna).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n: int, columns: IndexLabel, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : prioritize the first occurrence(s)
- ``last`` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even it means
selecting more than `n` items.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n: int, columns: IndexLabel, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even it means
selecting more than `n` items.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
@doc(
Series.swaplevel,
klass=_shared_doc_kwargs["klass"],
extra_params=dedent(
"""axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise."""
),
examples=dedent(
"""\
Examples
--------
>>> df = pd.DataFrame(
... {"Grade": ["A", "B", "A", "C"]},
... index=[
... ["Final exam", "Final exam", "Coursework", "Coursework"],
... ["History", "Geography", "History", "Geography"],
... ["January", "February", "March", "April"],
... ],
... )
>>> df
Grade
Final exam History January A
Geography February B
Coursework History March A
Geography April C
In the following example, we will swap the levels of the indices.
Here, we will swap the levels column-wise, but levels can be swapped row-wise
in a similar manner. Note that column-wise is the default behaviour.
By not supplying any arguments for i and j, we swap the last and second to
last indices.
>>> df.swaplevel()
Grade
Final exam January History A
February Geography B
Coursework March History A
April Geography C
By supplying one argument, we can choose which index to swap the last
index with. We can for example swap the first index with the last one as
follows.
>>> df.swaplevel(0)
Grade
January History Final exam A
February Geography Final exam B
March History Coursework A
April Geography Coursework C
We can also define explicitly which indices we want to swap by supplying values
for both i and j. Here, we for example swap the first and second indices.
>>> df.swaplevel(0, 1)
Grade
History Final exam January A
Geography Final exam February B
History Coursework March A
Geography Coursework April C"""
),
)
def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
Examples
--------
>>> data = {
... "class": ["Mammals", "Mammals", "Reptiles"],
... "diet": ["Omnivore", "Carnivore", "Carnivore"],
... "species": ["Humans", "Dogs", "Snakes"],
... }
>>> df = pd.DataFrame(data, columns=["class", "diet", "species"])
>>> df = df.set_index(["class", "diet"])
>>> df
species
class diet
Mammals Omnivore Humans
Carnivore Dogs
Reptiles Carnivore Snakes
Let's reorder the levels of the index:
>>> df.reorder_levels(["diet", "class"])
species
diet class
Omnivore Mammals Humans
Carnivore Mammals Dogs
Reptiles Snakes
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],))
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
    def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None):
        """
        Evaluate the frame operation func(left, right) by evaluating
        column-by-column, dispatching to the Series implementation.

        Parameters
        ----------
        right : scalar, Series, or DataFrame
        func : arithmetic or comparison operator
        axis : {None, 0, 1}
            axis=1 with a Series ``right`` means operate row-wise
            (``right`` aligned against ``self.columns``).

        Returns
        -------
        DataFrame
        """
        # Get the appropriate array-op to apply to each column/block's values.
        array_op = ops.get_array_op(func)

        # Unwrap 0-dim ndarrays so the scalar fast path below applies.
        right = lib.item_from_zerodim(right)
        if not is_list_like(right):
            # i.e. scalar, faster than checking np.ndim(right) == 0
            # Scalar path: apply blockwise via the manager, ignoring numpy
            # floating-point warnings to match pandas op semantics.
            with np.errstate(all="ignore"):
                bm = self._mgr.apply(array_op, right=right)
            return self._constructor(bm)

        elif isinstance(right, DataFrame):
            # Frame/frame path: callers must have aligned both axes already.
            assert self.index.equals(right.index)
            assert self.columns.equals(right.columns)
            # TODO: The previous assertion `assert right._indexed_same(self)`
            # fails in cases with empty columns reached via
            # _frame_arith_method_with_reindex
            # TODO operate_blockwise expects a manager of the same type
            with np.errstate(all="ignore"):
                bm = self._mgr.operate_blockwise(
                    # error: Argument 1 to "operate_blockwise" of "ArrayManager" has
                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
                    # "ArrayManager"
                    # error: Argument 1 to "operate_blockwise" of "BlockManager" has
                    # incompatible type "Union[ArrayManager, BlockManager]"; expected
                    # "BlockManager"
                    right._mgr,  # type: ignore[arg-type]
                    array_op,
                )
            return self._constructor(bm)

        elif isinstance(right, Series) and axis == 1:
            # axis=1 means we want to operate row-by-row
            assert right.index.equals(self.columns)

            right = right._values
            # maybe_align_as_frame ensures we do not have an ndarray here
            assert not isinstance(right, np.ndarray)

            with np.errstate(all="ignore"):
                # Pair each column array with the matching element of ``right``.
                arrays = [
                    array_op(_left, _right)
                    for _left, _right in zip(self._iter_column_arrays(), right)
                ]

        elif isinstance(right, Series):
            assert right.index.equals(self.index)  # Handle other cases later
            right = right._values

            with np.errstate(all="ignore"):
                # Apply the full Series values against every column.
                arrays = [array_op(left, right) for left in self._iter_column_arrays()]

        else:
            # Remaining cases have less-obvious dispatch rules
            raise NotImplementedError(right)

        # Series paths fall through to here: rebuild a frame from the
        # per-column results, reusing this frame's labels.
        return type(self)._from_arrays(
            arrays, self.columns, self.index, verify_integrity=False
        )
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
# ----------------------------------------------------------------------
# Combination-Related
    @doc(
        _shared_docs["compare"],
        """
        Returns
        -------
        DataFrame
            DataFrame that shows the differences stacked side by side.

            The resulting index will be a MultiIndex with 'self' and 'other'
            stacked alternately at the inner level.

        Raises
        ------
        ValueError
            When the two DataFrames don't have identical labels or shape.

        See Also
        --------
        Series.compare : Compare with another Series and show differences.
        DataFrame.equals : Test whether two objects contain the same elements.

        Notes
        -----
        Matching NaNs will not appear as a difference.

        Can only compare identically-labeled
        (i.e. same shape, identical row and column labels) DataFrames

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {{
        ...         "col1": ["a", "a", "b", "b", "a"],
        ...         "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
        ...         "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
        ...     }},
        ...     columns=["col1", "col2", "col3"],
        ... )
        >>> df
          col1  col2  col3
        0    a   1.0   1.0
        1    a   2.0   2.0
        2    b   3.0   3.0
        3    b   NaN   4.0
        4    a   5.0   5.0

        >>> df2 = df.copy()
        >>> df2.loc[0, 'col1'] = 'c'
        >>> df2.loc[2, 'col3'] = 4.0
        >>> df2
          col1  col2  col3
        0    c   1.0   1.0
        1    a   2.0   2.0
        2    b   3.0   4.0
        3    b   NaN   4.0
        4    a   5.0   5.0

        Align the differences on columns

        >>> df.compare(df2)
          col1       col3
          self other self other
        0    a     c  NaN   NaN
        2  NaN   NaN  3.0   4.0

        Stack the differences on rows

        >>> df.compare(df2, align_axis=0)
                col1  col3
        0 self     a   NaN
          other    c   NaN
        2 self   NaN   3.0
          other  NaN   4.0

        Keep the equal values

        >>> df.compare(df2, keep_equal=True)
          col1       col3
          self other self other
        0    a     c  1.0   1.0
        2    b     b  3.0   4.0

        Keep all original rows and columns

        >>> df.compare(df2, keep_shape=True)
          col1       col2       col3
          self other self other self other
        0    a     c  NaN   NaN  NaN   NaN
        1  NaN   NaN  NaN   NaN  NaN   NaN
        2  NaN   NaN  NaN   NaN  3.0   4.0
        3  NaN   NaN  NaN   NaN  NaN   NaN
        4  NaN   NaN  NaN   NaN  NaN   NaN

        Keep all original rows and columns and also all original values

        >>> df.compare(df2, keep_shape=True, keep_equal=True)
          col1       col2       col3
          self other self other self other
        0    a     c  1.0   1.0  1.0   1.0
        1    a     a  2.0   2.0  2.0   2.0
        2    b     b  3.0   3.0  3.0   4.0
        3    b     b  NaN   NaN  4.0   4.0
        4    a     a  5.0   5.0  5.0   5.0
        """,
        klass=_shared_doc_kwargs["klass"],
    )
    def compare(
        self,
        other: DataFrame,
        align_axis: Axis = 1,
        keep_shape: bool = False,
        keep_equal: bool = False,
    ) -> DataFrame:
        # Label/shape validation and the diff construction are shared with
        # Series and implemented in the NDFrame base class.
        return super().compare(
            other=other,
            align_axis=align_axis,
            keep_shape=keep_shape,
            keep_equal=keep_equal,
        )
    def combine(
        self, other: DataFrame, func, fill_value=None, overwrite: bool = True
    ) -> DataFrame:
        """
        Perform column-wise combine with another DataFrame.

        Combines a DataFrame with `other` DataFrame using `func`
        to element-wise combine columns. The row and column indexes of the
        resulting DataFrame will be the union of the two.

        Parameters
        ----------
        other : DataFrame
            The DataFrame to merge column-wise.
        func : function
            Function that takes two series as inputs and return a Series or a
            scalar. Used to merge the two dataframes column by columns.
        fill_value : scalar value, default None
            The value to fill NaNs with prior to passing any column to the
            merge func.
        overwrite : bool, default True
            If True, columns in `self` that do not exist in `other` will be
            overwritten with NaNs.

        Returns
        -------
        DataFrame
            Combination of the provided DataFrames.

        See Also
        --------
        DataFrame.combine_first : Combine two DataFrame objects and default to
            non-null values in frame calling the method.

        Examples
        --------
        Combine using a simple function that chooses the smaller column.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
        >>> df1.combine(df2, take_smaller)
           A  B
        0  0  3
        1  0  3

        Example using a true element-wise combine function.

        >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, np.minimum)
           A  B
        0  1  2
        1  0  3

        Using `fill_value` fills Nones prior to passing the column to the
        merge function.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, take_smaller, fill_value=-5)
           A    B
        0  0 -5.0
        1  0  4.0

        However, if the same element in both dataframes is None, that None
        is preserved

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
        >>> df1.combine(df2, take_smaller, fill_value=-5)
           A    B
        0  0 -5.0
        1  0  3.0

        Example that demonstrates the use of `overwrite` and behavior when
        the axis differ between the dataframes.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
        >>> df1.combine(df2, take_smaller)
             A    B     C
        0  NaN  NaN   NaN
        1  NaN  3.0 -10.0
        2  NaN  3.0   1.0

        >>> df1.combine(df2, take_smaller, overwrite=False)
             A    B     C
        0  0.0  NaN   NaN
        1  0.0  3.0 -10.0
        2  NaN  3.0   1.0

        Demonstrating the preference of the passed in dataframe.

        >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
        >>> df2.combine(df1, take_smaller)
             A    B   C
        0  0.0  NaN NaN
        1  0.0  3.0 NaN
        2  NaN  3.0 NaN

        >>> df2.combine(df1, take_smaller, overwrite=False)
             A    B   C
        0  0.0  NaN NaN
        1  0.0  3.0 1.0
        2  NaN  3.0 1.0
        """
        other_idxlen = len(other.index)  # save for compare

        # Align both frames to the union of their axes before combining.
        this, other = self.align(other, copy=False)
        new_index = this.index

        # Early-out short circuits: if one side contributes nothing and
        # alignment did not extend the other's index, return a plain copy.
        if other.empty and len(new_index) == len(self.index):
            return self.copy()

        if self.empty and len(other) == other_idxlen:
            return other.copy()

        # sorts if possible
        new_columns = this.columns.union(other.columns)
        do_fill = fill_value is not None
        result = {}
        for col in new_columns:
            series = this[col]
            otherSeries = other[col]

            this_dtype = series.dtype
            other_dtype = otherSeries.dtype

            this_mask = isna(series)
            other_mask = isna(otherSeries)

            # don't overwrite columns unnecessarily
            # DO propagate if this column is not in the intersection
            if not overwrite and other_mask.all():
                result[col] = this[col].copy()
                continue

            if do_fill:
                # Copy before filling so the aligned inputs are not mutated.
                series = series.copy()
                otherSeries = otherSeries.copy()
                series[this_mask] = fill_value
                otherSeries[other_mask] = fill_value

            if col not in self.columns:
                # If self DataFrame does not have col in other DataFrame,
                # try to promote series, which is all NaN, as other_dtype.
                new_dtype = other_dtype
                try:
                    series = series.astype(new_dtype, copy=False)
                except ValueError:
                    # e.g. new_dtype is integer types
                    pass
            else:
                # if we have different dtypes, possibly promote
                new_dtype = find_common_type([this_dtype, other_dtype])
                series = series.astype(new_dtype, copy=False)
                otherSeries = otherSeries.astype(new_dtype, copy=False)

            arr = func(series, otherSeries)
            if isinstance(new_dtype, np.dtype):
                # Only downcast results for plain numpy dtypes; for EA dtypes
                # `func` is expected to return the correct dtype without any
                # additional casting.
                arr = maybe_downcast_to_dtype(arr, new_dtype)

            result[col] = arr

        # convert_objects just in case
        return self._constructor(result, index=new_index, columns=new_columns)
    def combine_first(self, other: DataFrame) -> DataFrame:
        """
        Update null elements with value in the same location in `other`.

        Combine two DataFrame objects by filling null values in one DataFrame
        with non-null values from other DataFrame. The row and column indexes
        of the resulting DataFrame will be the union of the two. The resulting
        dataframe contains the 'first' dataframe values and overrides the
        second one values where both first.loc[index, col] and
        second.loc[index, col] are not missing values, upon calling
        first.combine_first(second).

        Parameters
        ----------
        other : DataFrame
            Provided DataFrame to use to fill null values.

        Returns
        -------
        DataFrame
            The result of combining the provided DataFrame with the other object.

        See Also
        --------
        DataFrame.combine : Perform series-wise operation on two DataFrames
            using a given function.

        Examples
        --------
        >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine_first(df2)
             A    B
        0  1.0  3.0
        1  0.0  4.0

        Null values still persist if the location of that null value
        does not exist in `other`

        >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
        >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
        >>> df1.combine_first(df2)
             A    B    C
        0  NaN  4.0  NaN
        1  0.0  3.0  1.0
        2  NaN  3.0  1.0
        """
        import pandas.core.computation.expressions as expressions

        def combiner(x, y):
            # Mask of missing entries in the column coming from `self`.
            mask = extract_array(isna(x))

            x_values = extract_array(x, extract_numpy=True)
            y_values = extract_array(y, extract_numpy=True)

            # If the column y in other DataFrame is not in first DataFrame,
            # just return y_values.
            if y.name not in self.columns:
                return y_values

            # Take from `other` where `self` is NA, otherwise keep `self`.
            return expressions.where(mask, y_values, x_values)

        combined = self.combine(other, combiner, overwrite=False)

        # For overlapping columns whose dtype changed during combine(),
        # cast back to the common dtype of the two inputs.
        dtypes = {
            col: find_common_type([self.dtypes[col], other.dtypes[col]])
            for col in self.columns.intersection(other.columns)
            if not is_dtype_equal(combined.dtypes[col], self.dtypes[col])
        }

        if dtypes:
            combined = combined.astype(dtypes)

        return combined
    def update(
        self,
        other,
        join: str = "left",
        overwrite: bool = True,
        filter_func=None,
        errors: str = "ignore",
    ) -> None:
        """
        Modify in place using non-NA values from another DataFrame.

        Aligns on indices. There is no return value.

        Parameters
        ----------
        other : DataFrame, or object coercible into a DataFrame
            Should have at least one matching index/column label
            with the original DataFrame. If a Series is passed,
            its name attribute must be set, and that will be
            used as the column name to align with the original DataFrame.
        join : {'left'}, default 'left'
            Only left join is implemented, keeping the index and columns of the
            original object.
        overwrite : bool, default True
            How to handle non-NA values for overlapping keys:

            * True: overwrite original DataFrame's values
              with values from `other`.
            * False: only update values that are NA in
              the original DataFrame.

        filter_func : callable(1d-array) -> bool 1d-array, optional
            Can choose to replace values other than NA. Return True for values
            that should be updated.
        errors : {'raise', 'ignore'}, default 'ignore'
            If 'raise', will raise a ValueError if the DataFrame and `other`
            both contain non-NA data in the same place.

        Returns
        -------
        None : method directly changes calling object

        Raises
        ------
        ValueError
            * When `errors='raise'` and there's overlapping non-NA data.
            * When `errors` is not either `'ignore'` or `'raise'`
        NotImplementedError
            * If `join != 'left'`

        See Also
        --------
        dict.update : Similar method for dictionaries.
        DataFrame.merge : For column(s)-on-column(s) operations.

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2, 3],
        ...                    'B': [400, 500, 600]})
        >>> new_df = pd.DataFrame({'B': [4, 5, 6],
        ...                        'C': [7, 8, 9]})
        >>> df.update(new_df)
        >>> df
           A  B
        0  1  4
        1  2  5
        2  3  6

        The DataFrame's length does not increase as a result of the update,
        only values at matching index/column labels are updated.

        >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
        ...                    'B': ['x', 'y', 'z']})
        >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
        >>> df.update(new_df)
        >>> df
           A  B
        0  a  d
        1  b  e
        2  c  f

        For Series, its name attribute must be set.

        >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
        ...                    'B': ['x', 'y', 'z']})
        >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
        >>> df.update(new_column)
        >>> df
           A  B
        0  a  d
        1  b  y
        2  c  e
        >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
        ...                    'B': ['x', 'y', 'z']})
        >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
        >>> df.update(new_df)
        >>> df
           A  B
        0  a  x
        1  b  d
        2  c  e

        If `other` contains NaNs the corresponding values are not updated
        in the original dataframe.

        >>> df = pd.DataFrame({'A': [1, 2, 3],
        ...                    'B': [400, 500, 600]})
        >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
        >>> df.update(new_df)
        >>> df
           A      B
        0  1    4.0
        1  2  500.0
        2  3    6.0
        """
        import pandas.core.computation.expressions as expressions

        # TODO: Support other joins
        if join != "left":  # pragma: no cover
            raise NotImplementedError("Only left join is supported")
        if errors not in ["ignore", "raise"]:
            raise ValueError("The parameter errors must be either 'ignore' or 'raise'")

        if not isinstance(other, DataFrame):
            other = DataFrame(other)

        # Align `other` to self's index/columns; unmatched labels become NA.
        other = other.reindex_like(self)

        for col in self.columns:
            this = self[col]._values
            that = other[col]._values

            if filter_func is not None:
                with np.errstate(all="ignore"):
                    # Keep values where filter_func is False or `other` is NA.
                    mask = ~filter_func(this) | isna(that)
            else:
                if errors == "raise":
                    # NOTE(review): the names read swapped — mask_this is the
                    # non-NA mask of `that` (other) and vice versa; the
                    # overlap test itself is symmetric, so the result is
                    # unaffected.
                    mask_this = notna(that)
                    mask_that = notna(this)
                    if any(mask_this & mask_that):
                        raise ValueError("Data overlaps.")

                if overwrite:
                    mask = isna(that)
                else:
                    mask = notna(this)

            # don't overwrite columns unnecessarily
            if mask.all():
                continue

            # Keep `this` where mask is True, otherwise take from `that`.
            self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
    @Appender(
        """
        Examples
        --------
        >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
        ...                               'Parrot', 'Parrot'],
        ...                    'Max Speed': [380., 370., 24., 26.]})
        >>> df
           Animal  Max Speed
        0  Falcon      380.0
        1  Falcon      370.0
        2  Parrot       24.0
        3  Parrot       26.0
        >>> df.groupby(['Animal']).mean()
                Max Speed
        Animal
        Falcon      375.0
        Parrot       25.0

        **Hierarchical Indexes**

        We can groupby different levels of a hierarchical index
        using the `level` parameter:

        >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
        ...           ['Captive', 'Wild', 'Captive', 'Wild']]
        >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
        >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
        ...                   index=index)
        >>> df
                        Max Speed
        Animal Type
        Falcon Captive      390.0
               Wild         350.0
        Parrot Captive       30.0
               Wild          20.0
        >>> df.groupby(level=0).mean()
                Max Speed
        Animal
        Falcon      370.0
        Parrot       25.0
        >>> df.groupby(level="Type").mean()
                Max Speed
        Type
        Captive     210.0
        Wild        185.0

        We can also choose to include NA in group keys or not by setting
        `dropna` parameter, the default setting is `True`.

        >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
        >>> df = pd.DataFrame(l, columns=["a", "b", "c"])

        >>> df.groupby(by=["b"]).sum()
             a   c
        b
        1.0  2   3
        2.0  2   5

        >>> df.groupby(by=["b"], dropna=False).sum()
             a   c
        b
        1.0  2   3
        2.0  2   5
        NaN  1   4

        >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
        >>> df = pd.DataFrame(l, columns=["a", "b", "c"])

        >>> df.groupby(by="a").sum()
            b     c
        a
        a   13.0   13.0
        b   12.3  123.0

        >>> df.groupby(by="a", dropna=False).sum()
            b     c
        a
        a   13.0   13.0
        b   12.3  123.0
        NaN 12.3   33.0
        """
    )
    @Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
    def groupby(
        self,
        by=None,
        axis: Axis = 0,
        level: Level | None = None,
        as_index: bool = True,
        sort: bool = True,
        group_keys: bool = True,
        squeeze: bool | lib.NoDefault = no_default,
        observed: bool = False,
        dropna: bool = True,
    ) -> DataFrameGroupBy:
        # Imported locally, presumably to avoid a circular import at module
        # load time — confirm before moving to module scope.
        from pandas.core.groupby.generic import DataFrameGroupBy

        # `squeeze` is deprecated: warn whenever the caller passed it
        # explicitly, then normalize the sentinel to False.
        if squeeze is not no_default:
            warnings.warn(
                (
                    "The `squeeze` parameter is deprecated and "
                    "will be removed in a future version."
                ),
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            squeeze = False

        # At least one grouping specification is required.
        if level is None and by is None:
            raise TypeError("You have to supply one of 'by' and 'level'")
        axis = self._get_axis_number(axis)

        # https://github.com/python/mypy/issues/7642
        # error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type
        # "Union[bool, NoDefault]"; expected "bool"
        return DataFrameGroupBy(
            obj=self,
            keys=by,
            axis=axis,
            level=level,
            as_index=as_index,
            sort=sort,
            group_keys=group_keys,
            squeeze=squeeze,  # type: ignore[arg-type]
            observed=observed,
            dropna=dropna,
        )
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Reference :ref:`the user guide <reshaping.pivot>` for more examples.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
# Documentation is attached from the shared template above; the empty
# Substitution fills the template's ``%s`` placeholder with nothing.
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
    # Thin delegator: the actual reshape lives in pandas.core.reshape.pivot.
    from pandas.core.reshape.pivot import pivot as pivot_impl

    return pivot_impl(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is being used as the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is being used as the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
sort : bool, default True
Specifies if the result should be sorted.
.. versionadded:: 1.3.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
Reference :ref:`the user guide <reshaping.pivot>` for more examples.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9 7.500000 6
small 5.500000 9 8.500000 8
foo large 2.000000 5 4.500000 4
small 2.333333 6 4.333333 2
"""
# Documentation is attached from the shared template above.
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
    self,
    values=None,
    index=None,
    columns=None,
    aggfunc="mean",
    fill_value=None,
    margins=False,
    dropna=True,
    margins_name="All",
    observed=False,
    sort=True,
) -> DataFrame:
    # Thin delegator: forward everything to the reshape implementation,
    # with `self` as the data argument.
    from pandas.core.reshape.pivot import pivot_table as pivot_table_impl

    options = dict(
        values=values,
        index=index,
        columns=columns,
        aggfunc=aggfunc,
        fill_value=fill_value,
        margins=margins,
        dropna=dropna,
        margins_name=margins_name,
        observed=observed,
        sort=sort,
    )
    return pivot_table_impl(self, **options)
def stack(self, level: Level = -1, dropna: bool = True):
    """
    Stack the prescribed level(s) from columns to index.
    Return a reshaped DataFrame or Series having a multi-level
    index with one or more new inner-most levels compared to the current
    DataFrame. The new inner-most levels are created by pivoting the
    columns of the current dataframe:
      - if the columns have a single level, the output is a Series;
      - if the columns have multiple levels, the new index
        level(s) is (are) taken from the prescribed level(s) and
        the output is a DataFrame.
    Parameters
    ----------
    level : int, str, list, default -1
        Level(s) to stack from the column axis onto the index
        axis, defined as one index or label, or a list of indices
        or labels.
    dropna : bool, default True
        Whether to drop rows in the resulting Frame/Series with
        missing values. Stacking a column level onto the index
        axis can create combinations of index and column values
        that are missing from the original dataframe. See Examples
        section.
    Returns
    -------
    DataFrame or Series
        Stacked dataframe or series.
    See Also
    --------
    DataFrame.unstack : Unstack prescribed level(s) from index axis
        onto column axis.
    DataFrame.pivot : Reshape dataframe from long format to wide
        format.
    DataFrame.pivot_table : Create a spreadsheet-style pivot table
        as a DataFrame.
    Notes
    -----
    The function is named by analogy with a collection of books
    being reorganized from being side by side on a horizontal
    position (the columns of the dataframe) to being stacked
    vertically on top of each other (in the index of the
    dataframe).
    Reference :ref:`the user guide <reshaping.stacking>` for more examples.
    Examples
    --------
    **Single level columns**
    >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=['weight', 'height'])
    Stacking a dataframe with a single level column axis returns a Series:
    >>> df_single_level_cols
         weight height
    cat       0      1
    dog       2      3
    >>> df_single_level_cols.stack()
    cat  weight    0
         height    1
    dog  weight    2
         height    3
    dtype: int64
    **Multi level columns: simple case**
    >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('weight', 'pounds')])
    >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol1)
    Stacking a dataframe with a multi-level column axis:
    >>> df_multi_level_cols1
         weight
             kg    pounds
    cat       1        2
    dog       2        4
    >>> df_multi_level_cols1.stack()
                weight
    cat kg           1
        pounds       2
    dog kg           2
        pounds       4
    **Missing values**
    >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('height', 'm')])
    >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol2)
    It is common to have missing values when stacking a dataframe
    with multi-level columns, as the stacked dataframe typically
    has more values than the original dataframe. Missing values
    are filled with NaNs:
    >>> df_multi_level_cols2
        weight height
            kg      m
    cat    1.0    2.0
    dog    3.0    4.0
    >>> df_multi_level_cols2.stack()
            height  weight
    cat kg     NaN     1.0
        m      2.0     NaN
    dog kg     NaN     3.0
        m      4.0     NaN
    **Prescribing the level(s) to be stacked**
    The first parameter controls which level or levels are stacked:
    >>> df_multi_level_cols2.stack(0)
                 kg    m
    cat height  NaN  2.0
        weight  1.0  NaN
    dog height  NaN  4.0
        weight  3.0  NaN
    >>> df_multi_level_cols2.stack([0, 1])
    cat  height  m     2.0
         weight  kg    1.0
    dog  height  m     4.0
         weight  kg    3.0
    dtype: float64
    **Dropping missing values**
    >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol2)
    Note that rows where all values are missing are dropped by
    default but this behaviour can be controlled via the dropna
    keyword parameter:
    >>> df_multi_level_cols3
        weight height
            kg      m
    cat    NaN    1.0
    dog    2.0    3.0
    >>> df_multi_level_cols3.stack(dropna=False)
            height  weight
    cat kg     NaN     NaN
        m      1.0     NaN
    dog kg     NaN     2.0
        m      3.0     NaN
    >>> df_multi_level_cols3.stack(dropna=True)
            height  weight
    cat m      1.0     NaN
    dog kg     NaN     2.0
        m      3.0     NaN
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.reshape.reshape import (
        stack,
        stack_multiple,
    )

    # A list/tuple of levels stacks several column levels in one call.
    if isinstance(level, (tuple, list)):
        result = stack_multiple(self, level, dropna=dropna)
    else:
        result = stack(self, level, dropna=dropna)

    # Propagate metadata (e.g. attrs) from self onto the result.
    return result.__finalize__(self, method="stack")
def explode(
    self,
    column: IndexLabel,
    ignore_index: bool = False,
) -> DataFrame:
    """
    Transform each element of a list-like to a row, replicating index values.

    .. versionadded:: 0.25.0

    Parameters
    ----------
    column : IndexLabel
        Column(s) to explode.
        For multiple columns, specify a non-empty list with each element
        be str or tuple, and all specified columns their list-like data
        on same row of the frame must have matching length.
        .. versionadded:: 1.3.0
            Multi-column explode
    ignore_index : bool, default False
        If True, the resulting index will be labeled 0, 1, …, n - 1.
        .. versionadded:: 1.1.0

    Returns
    -------
    DataFrame
        Exploded lists to rows of the subset columns;
        index will be duplicated for these rows.

    Raises
    ------
    ValueError :
        * If columns of the frame are not unique.
        * If specified columns to explode is empty list.
        * If specified columns to explode have not matching count of
          elements rowwise in the frame.

    See Also
    --------
    DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
        index labels.
    DataFrame.melt : Unpivot a DataFrame from wide format to long format.
    Series.explode : Explode a DataFrame from list-like columns to long format.

    Notes
    -----
    This routine will explode list-likes including lists, tuples, sets,
    Series, and np.ndarray. The result dtype of the subset rows will
    be object. Scalars will be returned unchanged, and empty list-likes will
    result in a np.nan for that row. In addition, the ordering of rows in the
    output will be non-deterministic when exploding sets.
    Reference :ref:`the user guide <reshaping.explode>` for more examples.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]],
    ...                    'B': 1,
    ...                    'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]})
    >>> df
               A  B          C
    0  [0, 1, 2]  1  [a, b, c]
    1        foo  1        NaN
    2         []  1         []
    3     [3, 4]  1     [d, e]
    Single-column explode.
    >>> df.explode('A')
         A  B          C
    0    0  1  [a, b, c]
    0    1  1  [a, b, c]
    0    2  1  [a, b, c]
    1  foo  1        NaN
    2  NaN  1         []
    3    3  1     [d, e]
    3    4  1     [d, e]
    Multi-column explode.
    >>> df.explode(list('AC'))
         A  B    C
    0    0  1    a
    0    1  1    b
    0    2  1    c
    1  foo  1  NaN
    2  NaN  1  NaN
    3    3  1    d
    3    4  1    e
    """
    # Duplicate column labels would make the per-column explode ambiguous.
    if not self.columns.is_unique:
        raise ValueError("columns must be unique")

    # Normalize `column` to a list of hashable labels; tuples count as a
    # single (MultiIndex) label, not a list of labels.
    columns: list[Hashable]
    if is_scalar(column) or isinstance(column, tuple):
        columns = [column]
    elif isinstance(column, list) and all(
        map(lambda c: is_scalar(c) or isinstance(c, tuple), column)
    ):
        if not column:
            raise ValueError("column must be nonempty")
        if len(column) > len(set(column)):
            raise ValueError("column must be unique")
        columns = column
    else:
        raise ValueError("column must be a scalar, tuple, or list thereof")

    # Work on a RangeIndex copy so exploded positions can be mapped back
    # onto the original index with `take` below.
    df = self.reset_index(drop=True)
    if len(columns) == 1:
        result = df[columns[0]].explode()
    else:
        # Multi-column explode requires the list-likes on each row to have
        # matching lengths across all requested columns (-1 marks scalars).
        mylen = lambda x: len(x) if is_list_like(x) else -1
        counts0 = self[columns[0]].apply(mylen)
        for c in columns[1:]:
            if not all(counts0 == self[c].apply(mylen)):
                raise ValueError("columns must have matching element counts")
        result = DataFrame({c: df[c].explode() for c in columns})
    # Re-attach the non-exploded columns, then restore the index.
    result = df.drop(columns, axis=1).join(result)
    if ignore_index:
        result.index = default_index(len(result))
    else:
        result.index = self.index.take(result.index)
    # Restore the original column order.
    result = result.reindex(columns=self.columns, copy=False)

    return result
def unstack(self, level: Level = -1, fill_value=None):
    """
    Pivot a level of the (necessarily hierarchical) index labels.
    Returns a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels.
    If the index is not a MultiIndex, the output will be a Series
    (the analogue of stack when the columns are not a MultiIndex).

    Parameters
    ----------
    level : int, str, or list of these, default -1 (last level)
        Level(s) of index to unstack, can pass level name.
    fill_value : int, str or dict
        Replace NaN with this value if the unstack produces missing values.

    Returns
    -------
    Series or DataFrame

    See Also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation
        from `unstack`).

    Notes
    -----
    Reference :ref:`the user guide <reshaping.stacking>` for more examples.

    Examples
    --------
    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
    ...                                    ('two', 'a'), ('two', 'b')])
    >>> s = pd.Series(np.arange(1.0, 5.0), index=index)
    >>> s
    one  a   1.0
         b   2.0
    two  a   3.0
         b   4.0
    dtype: float64
    >>> s.unstack(level=-1)
         a   b
    one  1.0  2.0
    two  3.0  4.0
    >>> s.unstack(level=0)
       one  two
    a  1.0   3.0
    b  2.0   4.0
    >>> df = s.unstack(level=0)
    >>> df.unstack()
    one  a  1.0
         b  2.0
    two  a  3.0
         b  4.0
    dtype: float64
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.reshape.reshape import unstack

    result = unstack(self, level, fill_value)

    # Propagate metadata (e.g. attrs) from self onto the result.
    return result.__finalize__(self, method="unstack")
# Documentation comes from the shared "melt" template, customized for the
# method form of the call.
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
    self,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level: Level | None = None,
    ignore_index: bool = True,
) -> DataFrame:
    # Thin delegator: forward every option unchanged to the module-level
    # `melt` implementation, with `self` as the frame being unpivoted.
    options = dict(
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
        ignore_index=ignore_index,
    )
    return melt(self, **options)
# ----------------------------------------------------------------------
# Time series-related
@doc(
    Series.diff,
    klass="DataFrame",
    extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n    "
    "Take difference over rows (0) or columns (1).\n",
    other_klass="Series",
    examples=dedent(
        """
        Difference with previous row
        >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
        ...                    'b': [1, 1, 2, 3, 5, 8],
        ...                    'c': [1, 4, 9, 16, 25, 36]})
        >>> df
           a  b   c
        0  1  1   1
        1  2  1   4
        2  3  2   9
        3  4  3  16
        4  5  5  25
        5  6  8  36
        >>> df.diff()
             a    b     c
        0  NaN  NaN   NaN
        1  1.0  0.0   3.0
        2  1.0  1.0   5.0
        3  1.0  1.0   7.0
        4  1.0  2.0   9.0
        5  1.0  3.0  11.0
        Difference with previous column
        >>> df.diff(axis=1)
            a  b   c
        0 NaN  0   0
        1 NaN -1   3
        2 NaN -1   7
        3 NaN -1  13
        4 NaN  0  20
        5 NaN  2  28
        Difference with 3rd previous row
        >>> df.diff(periods=3)
             a    b     c
        0  NaN  NaN   NaN
        1  NaN  NaN   NaN
        2  NaN  NaN   NaN
        3  3.0  2.0  15.0
        4  3.0  4.0  21.0
        5  3.0  6.0  27.0
        Difference with following row
        >>> df.diff(periods=-1)
             a    b     c
        0 -1.0  0.0  -3.0
        1 -1.0 -1.0  -5.0
        2 -1.0 -1.0  -7.0
        3 -1.0 -2.0  -9.0
        4 -1.0 -3.0 -11.0
        5  NaN  NaN   NaN
        Overflow in input dtype
        >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
        >>> df.diff()
             a
        0  NaN
        1  255.0"""
    ),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
    # Accept float `periods` only when it is integral (e.g. 2.0); anything
    # else is rejected explicitly.
    if not lib.is_integer(periods):
        if not (
            is_float(periods)
            # error: "int" has no attribute "is_integer"
            and periods.is_integer()  # type: ignore[attr-defined]
        ):
            raise ValueError("periods must be an integer")
        periods = int(periods)

    axis = self._get_axis_number(axis)
    # Column-wise diff is implemented via an aligned subtraction against a
    # shifted copy of the frame.
    if axis == 1 and periods != 0:
        return self - self.shift(periods, axis=axis)

    # Row-wise diff is delegated to the block manager.
    new_data = self._mgr.diff(n=periods, axis=axis)
    return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
# Function application
def _gotitem(
    self,
    key: IndexLabel,
    ndim: int,
    subset: DataFrame | Series | None = None,
) -> DataFrame | Series:
    """
    Return a sliced object for the aggregation machinery.

    Parameters
    ----------
    key : string / list of selections
    ndim : {1, 2}
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    if subset is None:
        # No explicit subset: slice the frame itself.
        # TODO: _shallow_copy(subset)?
        return self[key]
    if subset.ndim == 1:  # is Series
        # Already one-dimensional; nothing left to slice.
        return subset
    return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
    _shared_docs["aggregate"],
    klass=_shared_doc_kwargs["klass"],
    axis=_shared_doc_kwargs["axis"],
    see_also=_agg_summary_and_see_also_doc,
    examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.apply import frame_apply

    axis = self._get_axis_number(axis)

    # Named-aggregation kwargs (e.g. ``df.agg(x=('A', 'max'))``) are
    # unpacked here; `relabeling` tells us whether renaming is needed.
    relabeling, func, columns, order = reconstruct_func(func, **kwargs)

    applier = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
    result = applier.agg()

    if relabeling:
        # This is to keep the order to columns occurrence unchanged, and also
        # keep the order of new columns occurrence unchanged

        # For the return values of reconstruct_func, if relabeling is
        # False, columns and order will be None.
        assert columns is not None
        assert order is not None

        relabeled = relabel_result(result, func, columns, order)
        result = DataFrame(relabeled, index=columns)

    return result

# Public alias: `agg` and `aggregate` are the same method.
agg = aggregate
@doc(
    _shared_docs["transform"],
    klass=_shared_doc_kwargs["klass"],
    axis=_shared_doc_kwargs["axis"],
)
def transform(
    self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.apply import frame_apply

    applier = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
    transformed = applier.transform()
    # frame_apply.transform always yields a DataFrame for a DataFrame input.
    assert isinstance(transformed, DataFrame)
    return transformed
def apply(
    self,
    func: AggFuncType,
    axis: Axis = 0,
    raw: bool = False,
    result_type=None,
    args=(),
    **kwargs,
):
    """
    Apply a function along an axis of the DataFrame.
    Objects passed to the function are Series objects whose index is
    either the DataFrame's index (``axis=0``) or the DataFrame's columns
    (``axis=1``). By default (``result_type=None``), the final return type
    is inferred from the return type of the applied function. Otherwise,
    it depends on the `result_type` argument.

    Parameters
    ----------
    func : function
        Function to apply to each column or row.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis along which the function is applied:
        * 0 or 'index': apply function to each column.
        * 1 or 'columns': apply function to each row.
    raw : bool, default False
        Determines if row or column is passed as a Series or ndarray object:
        * ``False`` : passes each row or column as a Series to the
          function.
        * ``True`` : the passed function will receive ndarray objects
          instead.
          If you are just applying a NumPy reduction function this will
          achieve much better performance.
    result_type : {'expand', 'reduce', 'broadcast', None}, default None
        These only act when ``axis=1`` (columns):
        * 'expand' : list-like results will be turned into columns.
        * 'reduce' : returns a Series if possible rather than expanding
          list-like results. This is the opposite of 'expand'.
        * 'broadcast' : results will be broadcast to the original shape
          of the DataFrame, the original index and columns will be
          retained.
        The default behaviour (None) depends on the return value of the
        applied function: list-like results will be returned as a Series
        of those. However if the apply function returns a Series these
        are expanded to columns.
    args : tuple
        Positional arguments to pass to `func` in addition to the
        array/series.
    **kwargs
        Additional keyword arguments to pass as keywords arguments to
        `func`.

    Returns
    -------
    Series or DataFrame
        Result of applying ``func`` along the given axis of the
        DataFrame.

    See Also
    --------
    DataFrame.applymap: For elementwise operations.
    DataFrame.aggregate: Only perform aggregating type operations.
    DataFrame.transform: Only perform transforming type operations.

    Notes
    -----
    Functions that mutate the passed object can produce unexpected
    behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
    for more details.

    Examples
    --------
    >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
    >>> df
       A  B
    0  4  9
    1  4  9
    2  4  9
    Using a numpy universal function (in this case the same as
    ``np.sqrt(df)``):
    >>> df.apply(np.sqrt)
         A    B
    0  2.0  3.0
    1  2.0  3.0
    2  2.0  3.0
    Using a reducing function on either axis
    >>> df.apply(np.sum, axis=0)
    A    12
    B    27
    dtype: int64
    >>> df.apply(np.sum, axis=1)
    0    13
    1    13
    2    13
    dtype: int64
    Returning a list-like will result in a Series
    >>> df.apply(lambda x: [1, 2], axis=1)
    0    [1, 2]
    1    [1, 2]
    2    [1, 2]
    dtype: object
    Passing ``result_type='expand'`` will expand list-like results
    to columns of a Dataframe
    >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
       0  1
    0  1  2
    1  1  2
    2  1  2
    Returning a Series inside the function is similar to passing
    ``result_type='expand'``. The resulting column names
    will be the Series index.
    >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
       foo  bar
    0    1    2
    1    1    2
    2    1    2
    Passing ``result_type='broadcast'`` will ensure the same shape
    result, whether list-like or scalar is returned by the function,
    and broadcast it along the axis. The resulting column names will
    be the originals.
    >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
       A  B
    0  1  2
    1  1  2
    2  1  2
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.apply import frame_apply

    # All apply strategies (raw/axis/result_type dispatch) live in
    # frame_apply; this method is a thin entry point.
    op = frame_apply(
        self,
        func=func,
        axis=axis,
        raw=raw,
        result_type=result_type,
        args=args,
        kwargs=kwargs,
    )
    return op.apply().__finalize__(self, method="apply")
def applymap(
    self, func: PythonFuncType, na_action: str | None = None, **kwargs
) -> DataFrame:
    """
    Apply a function to a Dataframe elementwise.
    This method applies a function that accepts and returns a scalar
    to every element of a DataFrame.

    Parameters
    ----------
    func : callable
        Python function, returns a single value from a single value.
    na_action : {None, 'ignore'}, default None
        If ‘ignore’, propagate NaN values, without passing them to func.
        .. versionadded:: 1.2
    **kwargs
        Additional keyword arguments to pass as keywords arguments to
        `func`.
        .. versionadded:: 1.3.0

    Returns
    -------
    DataFrame
        Transformed DataFrame.

    See Also
    --------
    DataFrame.apply : Apply a function along input axis of DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
    >>> df
           0      1
    0  1.000  2.120
    1  3.356  4.567
    >>> df.applymap(lambda x: len(str(x)))
       0  1
    0  3  4
    1  5  5
    Like Series.map, NA values can be ignored:
    >>> df_copy = df.copy()
    >>> df_copy.iloc[0, 0] = pd.NA
    >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
         0  1
    0  NaN  4
    1  5.0  5
    Note that a vectorized version of `func` often exists, which will
    be much faster. You could square each number elementwise.
    >>> df.applymap(lambda x: x**2)
               0          1
    0   1.000000   4.494400
    1  11.262736  20.857489
    But it's better to avoid applymap in that case.
    >>> df ** 2
               0          1
    0   1.000000   4.494400
    1  11.262736  20.857489
    """
    # Validate eagerly so a typo'd na_action fails before any work is done.
    if na_action not in {"ignore", None}:
        raise ValueError(
            f"na_action must be 'ignore' or None. Got {repr(na_action)}"
        )
    ignore_na = na_action == "ignore"
    # Bind extra keyword arguments once, up front.
    func = functools.partial(func, **kwargs)

    # if we have a dtype == 'M8[ns]', provide boxed values
    def infer(x):
        # Per-column mapper: `x` is one column (a Series) handed to us by
        # self.apply below. Empty columns skip the object cast.
        if x.empty:
            return lib.map_infer(x, func, ignore_na=ignore_na)
        # astype(object) boxes e.g. datetime64 values as Timestamps before
        # passing them to `func`.
        return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)

    return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
    self,
    other,
    ignore_index: bool = False,
    verify_integrity: bool = False,
    sort: bool = False,
) -> DataFrame:
    """
    Append rows of `other` to the end of caller, returning a new object.

    .. deprecated:: 1.4.0
        Use :func:`concat` instead. For further details see
        :ref:`whatsnew_140.deprecations.frame_series_append`

    Columns in `other` that are not in the caller are added as new columns.

    Parameters
    ----------
    other : DataFrame or Series/dict-like object, or list of these
        The data to append.
    ignore_index : bool, default False
        If True, the resulting axis will be labeled 0, 1, …, n - 1.
    verify_integrity : bool, default False
        If True, raise ValueError on creating index with duplicates.
    sort : bool, default False
        Sort columns if the columns of `self` and `other` are not aligned.
        .. versionchanged:: 1.0.0
            Changed to not sort by default.

    Returns
    -------
    DataFrame
        A new DataFrame consisting of the rows of caller and the rows of `other`.

    See Also
    --------
    concat : General function to concatenate DataFrame or Series objects.

    Notes
    -----
    If a list of dict/series is passed and the keys are all contained in
    the DataFrame's index, the order of the columns in the resulting
    DataFrame will be unchanged.
    Iteratively appending rows to a DataFrame can be more computationally
    intensive than a single concatenate. A better solution is to append
    those rows to a list and then concatenate the list with the original
    DataFrame all at once.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=['x', 'y'])
    >>> df
       A  B
    x  1  2
    y  3  4
    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'), index=['x', 'y'])
    >>> df.append(df2)
       A  B
    x  1  2
    y  3  4
    x  5  6
    y  7  8
    With `ignore_index` set to True:
    >>> df.append(df2, ignore_index=True)
       A  B
    0  1  2
    1  3  4
    2  5  6
    3  7  8
    The following, while not recommended methods for generating DataFrames,
    show two ways to generate a DataFrame from multiple data sources.
    Less efficient:
    >>> df = pd.DataFrame(columns=['A'])
    >>> for i in range(5):
    ...     df = df.append({'A': i}, ignore_index=True)
    >>> df
       A
    0  0
    1  1
    2  2
    3  3
    4  4
    More efficient:
    >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
    ...           ignore_index=True)
       A
    0  0
    1  1
    2  2
    3  3
    4  4
    """
    # Deprecated public entry point: warn, then delegate to the private
    # implementation so internal callers can skip the warning.
    warnings.warn(
        "The frame.append method is deprecated "
        "and will be removed from pandas in a future version. "
        "Use pandas.concat instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )

    return self._append(other, ignore_index, verify_integrity, sort)
def _append(
    self,
    other,
    ignore_index: bool = False,
    verify_integrity: bool = False,
    sort: bool = False,
) -> DataFrame:
    """
    Append rows of `other` to `self` via concat, without the deprecation
    warning emitted by the public ``append``.
    """
    # Set when appending a Series/dict; used below to restore the
    # pre-concat column order if concat changed it.
    combined_columns = None
    if isinstance(other, (Series, dict)):
        # A dict becomes a single-row Series; requires ignore_index
        # because it carries no meaningful row label.
        if isinstance(other, dict):
            if not ignore_index:
                raise TypeError("Can only append a dict if ignore_index=True")
            other = Series(other)
        if other.name is None and not ignore_index:
            raise TypeError(
                "Can only append a Series if ignore_index=True "
                "or if the Series has a name"
            )

        # The Series becomes one row: its name is the row label, its index
        # entries become columns (new ones appended after the existing).
        index = Index([other.name], name=self.index.name)
        idx_diff = other.index.difference(self.columns)
        combined_columns = self.columns.append(idx_diff)
        row_df = other.to_frame().T
        # infer_objects is needed for
        # test_append_empty_frame_to_series_with_dateutil_tz
        other = row_df.infer_objects().rename_axis(index.names, copy=False)
    elif isinstance(other, list):
        if not other:
            pass
        elif not isinstance(other[0], DataFrame):
            # A list of non-DataFrames (e.g. dicts) is coerced to a single
            # DataFrame before concat.
            other = DataFrame(other)
            if self.index.name is not None and not ignore_index:
                other.index.name = self.index.name

    # Imported locally to avoid a circular import at module load time.
    from pandas.core.reshape.concat import concat

    if isinstance(other, (list, tuple)):
        to_concat = [self, *other]
    else:
        to_concat = [self, other]
    result = concat(
        to_concat,
        ignore_index=ignore_index,
        verify_integrity=verify_integrity,
        sort=sort,
    )
    if (
        combined_columns is not None
        and not sort
        and not combined_columns.equals(result.columns)
    ):
        # TODO: reindexing here is a kludge bc union_indexes does not
        #  pass sort to index.union, xref #43375
        #  combined_columns.equals check is necessary for preserving dtype
        #  in test_crosstab_normalize
        result = result.reindex(combined_columns, axis=1)
    return result.__finalize__(self, method="append")
def join(
self,
other: DataFrame | Series,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it.
lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
Using non-unique key values shows how they are matched.
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K1', 'K3', 'K0', 'K1'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K1 A2
3 K3 A3
4 K0 A4
5 K1 A5
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K1 A2 B1
3 K3 A3 NaN
4 K0 A4 B0
5 K1 A5 B1
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self,
other: DataFrame | Series,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
    @Substitution("")
    @Appender(_merge_doc, indents=2)
    def merge(
        self,
        right: DataFrame | Series,
        how: str = "inner",
        on: IndexLabel | None = None,
        left_on: IndexLabel | None = None,
        right_on: IndexLabel | None = None,
        left_index: bool = False,
        right_index: bool = False,
        sort: bool = False,
        suffixes: Suffixes = ("_x", "_y"),
        copy: bool = True,
        indicator: bool = False,
        validate: str | None = None,
    ) -> DataFrame:
        # The public docstring is injected by @Appender(_merge_doc) above,
        # so this method intentionally carries no literal docstring.
        from pandas.core.reshape.merge import merge
        # Thin wrapper: delegate to the top-level merge() with ``self`` as
        # the left operand, forwarding every keyword unchanged.
        return merge(
            self,
            right,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            sort=sort,
            suffixes=suffixes,
            copy=copy,
            indicator=indicator,
            validate=validate,
        )
def round(
self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs
) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df: DataFrame, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(ser: Series, decimals: int):
if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype):
return ser.round(decimals)
return ser
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series) and not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
if is_dict_like(decimals) and not all(
is_integer(value) for _, value in decimals.items()
):
raise TypeError("Values in decimals must be integers")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
).__finalize__(self, method="round")
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
    def corr(
        self,
        method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
        min_periods: int = 1,
    ) -> DataFrame:
        """
        Compute pairwise correlation of columns, excluding NA/null values.

        Parameters
        ----------
        method : {'pearson', 'kendall', 'spearman'} or callable
            Method of correlation:

            * pearson : standard correlation coefficient
            * kendall : Kendall Tau correlation coefficient
            * spearman : Spearman rank correlation
            * callable: callable with input two 1d ndarrays
              and returning a float. Note that the returned matrix from corr
              will have 1 along the diagonals and will be symmetric
              regardless of the callable's behavior.
        min_periods : int, optional
            Minimum number of observations required per pair of columns
            to have a valid result. Currently only available for Pearson
            and Spearman correlation.

        Returns
        -------
        DataFrame
            Correlation matrix.

        See Also
        --------
        DataFrame.corrwith : Compute pairwise correlation with another
            DataFrame or Series.
        Series.corr : Compute the correlation between two Series.

        Notes
        -----
        Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.

        * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
        * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
        * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_

        Examples
        --------
        >>> def histogram_intersection(a, b):
        ...     v = np.minimum(a, b).sum().round(decimals=1)
        ...     return v
        >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df.corr(method=histogram_intersection)
              dogs  cats
        dogs   1.0   0.3
        cats   0.3   1.0
        >>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
        ...                   columns=['dogs', 'cats'])
        >>> df.corr(min_periods=3)
              dogs  cats
        dogs   1.0   NaN
        cats   NaN   1.0
        """  # noqa:E501
        numeric_df = self._get_numeric_data()
        cols = numeric_df.columns
        idx = cols.copy()
        # Work on a plain float ndarray with NaN marking missing values.
        mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
        if method == "pearson":
            # C-level routine with pairwise-complete masking.
            correl = libalgos.nancorr(mat, minp=min_periods)
        elif method == "spearman":
            correl = libalgos.nancorr_spearman(mat, minp=min_periods)
        elif method == "kendall" or callable(method):
            if min_periods is None:
                min_periods = 1
            # Transpose so each row of `mat` is one column's values.
            mat = mat.T
            corrf = nanops.get_corr_func(method)
            K = len(cols)
            correl = np.empty((K, K), dtype=float)
            mask = np.isfinite(mat)
            for i, ac in enumerate(mat):
                for j, bc in enumerate(mat):
                    if i > j:
                        # Only the upper triangle is computed; the mirror
                        # write below fills the lower half.
                        continue
                    valid = mask[i] & mask[j]
                    if valid.sum() < min_periods:
                        c = np.nan
                    elif i == j:
                        # Diagonal forced to 1 regardless of the callable.
                        c = 1.0
                    elif not valid.all():
                        c = corrf(ac[valid], bc[valid])
                    else:
                        c = corrf(ac, bc)
                    correl[i, j] = c
                    correl[j, i] = c
        else:
            raise ValueError(
                "method must be either 'pearson', "
                "'spearman', 'kendall', or a callable, "
                f"'{method}' was supplied"
            )
        return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods: int | None = None, ddof: int | None = 1) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
    def corrwith(self, other, axis: Axis = 0, drop=False, method="pearson") -> Series:
        """
        Compute pairwise correlation.

        Pairwise correlation is computed between rows or columns of
        DataFrame with rows or columns of Series or DataFrame. DataFrames
        are first aligned along both axes before computing the
        correlations.

        Parameters
        ----------
        other : DataFrame, Series
            Object with which to compute correlations.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
            row-wise.
        drop : bool, default False
            Drop missing indices from result.
        method : {'pearson', 'kendall', 'spearman'} or callable
            Method of correlation:

            * pearson : standard correlation coefficient
            * kendall : Kendall Tau correlation coefficient
            * spearman : Spearman rank correlation
            * callable: callable with input two 1d ndarrays
              and returning a float.

        Returns
        -------
        Series
            Pairwise correlations.

        See Also
        --------
        DataFrame.corr : Compute pairwise correlation of columns.

        Examples
        --------
        >>> index = ["a", "b", "c", "d", "e"]
        >>> columns = ["one", "two", "three", "four"]
        >>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4), index=index, columns=columns)
        >>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4), index=index[:4], columns=columns)
        >>> df1.corrwith(df2)
        one      1.0
        two      1.0
        three    1.0
        four     1.0
        dtype: float64
        >>> df2.corrwith(df1, axis=1)
        a    1.0
        b    1.0
        c    1.0
        d    1.0
        e    NaN
        dtype: float64
        """  # noqa:E501
        axis = self._get_axis_number(axis)
        this = self._get_numeric_data()
        # GH46174: when other is a Series object and axis=0, we achieve a speedup over
        # passing .corr() to .apply() by taking the columns as ndarrays and iterating
        # over the transposition row-wise. Then we delegate the correlation coefficient
        # computation and null-masking to np.corrcoef and np.isnan respectively,
        # which are much faster. We exploit the fact that the Spearman correlation
        # of two vectors is equal to the Pearson correlation of their ranks to use
        # substantially the same method for Pearson and Spearman,
        # just with intermediate argsorts on the latter.
        if isinstance(other, Series):
            if axis == 0 and method in ["pearson", "spearman"]:
                # NOTE(review): this fast path pairs each column with
                # ``other.values`` positionally and does no index alignment,
                # unlike the DataFrame path below — presumably it assumes
                # `other` is already aligned with self's rows; confirm.
                corrs = {}
                numeric_cols = self.select_dtypes(include=np.number).columns
                ndf = self[numeric_cols].values.transpose()
                k = other.values
                if method == "pearson":
                    for i, r in enumerate(ndf):
                        # Keep only positions where both vectors are non-NaN.
                        nonnull_mask = ~np.isnan(r) & ~np.isnan(k)
                        corrs[numeric_cols[i]] = np.corrcoef(
                            r[nonnull_mask], k[nonnull_mask]
                        )[0, 1]
                else:
                    # Spearman = Pearson on ranks; double argsort ranks them.
                    for i, r in enumerate(ndf):
                        nonnull_mask = ~np.isnan(r) & ~np.isnan(k)
                        corrs[numeric_cols[i]] = np.corrcoef(
                            r[nonnull_mask].argsort().argsort(),
                            k[nonnull_mask].argsort().argsort(),
                        )[0, 1]
                return Series(corrs)
            else:
                # General Series path: defer to Series.corr per column/row.
                return this.apply(lambda x: other.corr(x, method=method), axis=axis)
        other = other._get_numeric_data()
        left, right = this.align(other, join="inner", copy=False)
        if axis == 1:
            left = left.T
            right = right.T
        if method == "pearson":
            # mask missing values
            left = left + right * 0
            right = right + left * 0
            # demeaned data
            ldem = left - left.mean()
            rdem = right - right.mean()
            num = (ldem * rdem).sum()
            dom = (left.count() - 1) * left.std() * right.std()
            correl = num / dom
        elif method in ["kendall", "spearman"] or callable(method):
            def c(x):
                return nanops.nancorr(x[0], x[1], method=method)
            correl = self._constructor_sliced(
                map(c, zip(left.values.T, right.values.T)), index=left.columns
            )
        else:
            raise ValueError(
                f"Invalid method {method} was passed, "
                "valid methods are: 'pearson', 'kendall', "
                "'spearman', or callable"
            )
        if not drop:
            # Find non-matching labels along the given axis
            # and append missing correlations (GH 22375)
            raxis = 1 if axis == 0 else 0
            result_index = this._get_axis(raxis).union(other._get_axis(raxis))
            idx_diff = result_index.difference(correl.index)
            if len(idx_diff) > 0:
                correl = correl._append(
                    Series([np.nan] * len(idx_diff), index=idx_diff)
                )
        return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(
self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False
):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
"""
axis = self._get_axis_number(axis)
if level is not None:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
"deprecated and will be removed in a future version. Use groupby "
"instead. df.count(level=1) should use df.groupby(level=1).count().",
FutureWarning,
stacklevel=find_stack_level(),
)
res = self._count_level(level, axis=axis, numeric_only=numeric_only)
return res.__finalize__(self, method="count")
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64").__finalize__(self, method="count")
    def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
        """
        Count non-NA cells grouped by one level of a MultiIndex axis
        (backend for the deprecated ``count(level=...)``).

        Parameters
        ----------
        level : int or str
            Level of the hierarchical axis to collapse on.
        axis : int, default 0
            Axis whose index must be a MultiIndex.
        numeric_only : bool, default False
            Restrict the count to numeric columns first.

        Returns
        -------
        DataFrame
            Counts indexed by the level's values (columns for ``axis=1``).

        Raises
        ------
        TypeError
            If the counted axis is not a MultiIndex.
        """
        if numeric_only:
            frame = self._get_numeric_data()
        else:
            frame = self
        count_axis = frame._get_axis(axis)
        agg_axis = frame._get_agg_axis(axis)
        if not isinstance(count_axis, MultiIndex):
            raise TypeError(
                f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
            )
        # Mask NaNs: Mask rows or columns where the index level is NaN, and all
        # values in the DataFrame that are NaN
        if frame._is_mixed_type:
            # Since we have mixed types, calling notna(frame.values) might
            # upcast everything to object
            values_mask = notna(frame).values
        else:
            # But use the speedup when we have homogeneous dtypes
            values_mask = notna(frame.values)
        index_mask = notna(count_axis.get_level_values(level=level))
        if axis == 1:
            mask = index_mask & values_mask
        else:
            # Broadcast the 1D label mask across all columns.
            mask = index_mask.reshape(-1, 1) & values_mask
        if isinstance(level, str):
            # Resolve a level name to its positional number.
            level = count_axis._get_level_number(level)
        level_name = count_axis._names[level]
        level_index = count_axis.levels[level]._rename(name=level_name)
        level_codes = ensure_platform_int(count_axis.codes[level])
        # C-level bucketed count of valid cells per level value.
        counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
        if axis == 1:
            result = self._constructor(counts, index=agg_axis, columns=level_index)
        else:
            result = self._constructor(counts, index=level_index, columns=agg_axis)
        return result
    def _reduce(
        self,
        op,
        name: str,
        *,
        axis: Axis = 0,
        skipna: bool = True,
        numeric_only: bool | None = None,
        filter_type=None,
        **kwds,
    ):
        """
        Shared backend for DataFrame reductions (sum, mean, any, ...).

        Parameters
        ----------
        op : callable
            nanops-style reduction applied to raw ndarrays.
        name : str
            Name of the reduction; used for ExtensionArray dispatch and
            for deprecation-warning wording.
        axis : Axis, default 0
            Axis to reduce along.
        skipna : bool, default True
            Forwarded to ``op``.
        numeric_only : bool or None, default None
            ``None`` selects the legacy "try everything, silently drop
            failing columns" behavior (with deprecation warnings).
        filter_type : {None, "bool"}
            Chooses numeric vs boolean data when restricting columns.
        **kwds
            Extra keyword arguments forwarded to ``op``.
        """
        assert filter_type is None or filter_type == "bool", filter_type
        out_dtype = "bool" if filter_type == "bool" else None
        if numeric_only is None and name in ["mean", "median"]:
            # Deprecation: with numeric_only=None, mean/median currently
            # drop datetime columns; warn and subset them out here.
            own_dtypes = [arr.dtype for arr in self._mgr.arrays]
            dtype_is_dt = np.array(
                [is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
                dtype=bool,
            )
            if dtype_is_dt.any():
                warnings.warn(
                    "DataFrame.mean and DataFrame.median with numeric_only=None "
                    "will include datetime64 and datetime64tz columns in a "
                    "future version.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                # Non-copy equivalent to
                #   dt64_cols = self.dtypes.apply(is_datetime64_any_dtype)
                #   cols = self.columns[~dt64_cols]
                #   self = self[cols]
                predicate = lambda x: not is_datetime64_any_dtype(x.dtype)
                mgr = self._mgr._get_data_subset(predicate)
                self = type(self)(mgr)
        # TODO: Make other agg func handle axis=None properly GH#21597
        axis = self._get_axis_number(axis)
        labels = self._get_agg_axis(axis)
        assert axis in [0, 1]
        def func(values: np.ndarray):
            # We only use this in the case that operates on self.values
            return op(values, axis=axis, skipna=skipna, **kwds)
        def blk_func(values, axis=1):
            # Per-block reduction; ExtensionArrays dispatch to their own
            # _reduce implementation instead of ``op``.
            if isinstance(values, ExtensionArray):
                if not is_1d_only_ea_dtype(values.dtype) and not isinstance(
                    self._mgr, ArrayManager
                ):
                    return values._reduce(name, axis=1, skipna=skipna, **kwds)
                return values._reduce(name, skipna=skipna, **kwds)
            else:
                return op(values, axis=axis, skipna=skipna, **kwds)
        def _get_data() -> DataFrame:
            # Column subset matching filter_type (numeric vs boolean).
            if filter_type is None:
                data = self._get_numeric_data()
            else:
                # GH#25101, GH#24434
                assert filter_type == "bool"
                data = self._get_bool_data()
            return data
        if numeric_only is not None or axis == 0:
            # For numeric_only non-None and axis non-None, we know
            # which blocks to use and no try/except is needed.
            # For numeric_only=None only the case with axis==0 and no object
            # dtypes are unambiguous can be handled with BlockManager.reduce
            # Case with EAs see GH#35881
            df = self
            if numeric_only is True:
                df = _get_data()
            if axis == 1:
                df = df.T
                axis = 0
            ignore_failures = numeric_only is None
            # After possibly _get_data and transposing, we are now in the
            # simple case where we can use BlockManager.reduce
            res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
            out = df._constructor(res).iloc[0]
            if out_dtype is not None:
                out = out.astype(out_dtype)
            if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
                # Even if we are object dtype, follow numpy and return
                # float64, see test_apply_funcs_over_empty
                out = out.astype(np.float64)
            if numeric_only is None and out.shape[0] != df.shape[1]:
                # columns have been dropped GH#41480
                arg_name = "numeric_only"
                if name in ["all", "any"]:
                    arg_name = "bool_only"
                warnings.warn(
                    "Dropping of nuisance columns in DataFrame reductions "
                    f"(with '{arg_name}=None') is deprecated; in a future "
                    "version this will raise TypeError. Select only valid "
                    "columns before calling the reduction.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            return out
        assert numeric_only is None
        # Legacy path (numeric_only=None, axis=1): operate on the full 2D
        # values, retrying on the filtered subset if ``op`` raises.
        data = self
        values = data.values
        try:
            result = func(values)
        except TypeError:
            # e.g. in nanops trying to convert strs to float
            data = _get_data()
            labels = data._get_agg_axis(axis)
            values = data.values
            with np.errstate(all="ignore"):
                result = func(values)
            # columns have been dropped GH#41480
            arg_name = "numeric_only"
            if name in ["all", "any"]:
                arg_name = "bool_only"
            warnings.warn(
                "Dropping of nuisance columns in DataFrame reductions "
                f"(with '{arg_name}=None') is deprecated; in a future "
                "version this will raise TypeError. Select only valid "
                "columns before calling the reduction.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        if hasattr(result, "dtype"):
            if filter_type == "bool" and notna(result).all():
                result = result.astype(np.bool_)
            elif filter_type is None and is_object_dtype(result.dtype):
                try:
                    result = result.astype(np.float64)
                except (ValueError, TypeError):
                    # try to coerce to the original dtypes item by item if we can
                    pass
        result = self._constructor_sliced(result, index=labels)
        return result
def _reduce_axis1(self, name: str, func, skipna: bool) -> Series:
"""
Special case for _reduce to try to avoid a potentially-expensive transpose.
Apply the reduction block-wise along axis=1 and then reduce the resulting
1D arrays.
"""
if name == "all":
result = np.ones(len(self), dtype=bool)
ufunc = np.logical_and
elif name == "any":
result = np.zeros(len(self), dtype=bool)
# error: Incompatible types in assignment
# (expression has type "_UFunc_Nin2_Nout1[Literal['logical_or'],
# Literal[20], Literal[False]]", variable has type
# "_UFunc_Nin2_Nout1[Literal['logical_and'], Literal[20],
# Literal[True]]")
ufunc = np.logical_or # type: ignore[assignment]
else:
raise NotImplementedError(name)
for arr in self._mgr.arrays:
middle = func(arr, axis=0, skipna=skipna)
result = ufunc(result, middle)
res_ser = self._constructor_sliced(result, index=self.index)
return res_ser
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count number of distinct elements in specified axis.
Return Series with number of distinct elements. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]})
>>> df.nunique()
A 3
B 2
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:
    """Return index of first occurrence of maximum over requested axis.

    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
        column-wise.
    skipna : bool, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.

    Returns
    -------
    Series
        Indexes of maxima along the specified axis.

    Raises
    ------
    ValueError
        If the row/column is empty.

    See Also
    --------
    Series.idxmax : Return index of the maximum element.

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmax``.
    """
    axis = self._get_axis_number(axis)
    # nanargmax produces positional indices; -1 marks an all-NA row/column.
    reduced = self._reduce(
        nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
    )
    positions = reduced._values
    # axis is resolved and DataFrame values are 2D, so the reduction is
    # always an ndarray here (assert narrows the type for mypy).
    assert isinstance(positions, np.ndarray)  # for mypy
    labels = self._get_axis(axis)
    translated = [labels[pos] if pos >= 0 else np.nan for pos in positions]
    return self._constructor_sliced(translated, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
    """Return the labels that remain after aggregating along ``axis_num``.

    Aggregating over rows (0) yields the columns and vice versa.
    """
    if axis_num == 1:
        return self.index
    if axis_num == 0:
        return self.columns
    raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(
    self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
    """Get the mode(s) of each element along the selected axis.

    The mode of a set of values is the value that appears most often.
    It can be multiple values.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to iterate over while searching for the mode:

        * 0 or 'index' : get mode of each column
        * 1 or 'columns' : get mode of each row.
    numeric_only : bool, default False
        If True, only apply to numeric columns.
    dropna : bool, default True
        Don't consider counts of NaN/NaT.

    Returns
    -------
    DataFrame
        The modes of each column or row.

    See Also
    --------
    Series.mode : Return the highest frequency value in a Series.
    Series.value_counts : Return the counts of values in a Series.
    """
    source = self._get_numeric_data() if numeric_only else self
    result = source.apply(lambda s: s.mode(dropna=dropna), axis=axis)
    # Ensure index is type stable (should always use int index)
    if result.empty:
        result.index = default_index(0)
    return result
def quantile(
    self,
    q=0.5,
    axis: Axis = 0,
    numeric_only: bool = True,
    interpolation: str = "linear",
):
    """Return values at the given quantile over requested axis.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        Value between 0 <= q <= 1, the quantile(s) to compute.
    axis : {0, 1, 'index', 'columns'}, default 0
        Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
    numeric_only : bool, default True
        If False, the quantile of datetime and timedelta data will be
        computed as well.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`.

    Returns
    -------
    Series or DataFrame

        If ``q`` is an array, a DataFrame will be returned where the
        index is ``q``, the columns are the columns of self, and the
        values are the quantiles.
        If ``q`` is a float, a Series will be returned where the
        index is the columns of self and the values are the quantiles.

    See Also
    --------
    core.window.Rolling.quantile: Rolling quantile.
    numpy.percentile: Numpy function to compute the percentile.
    """
    validate_percentile(q)
    axis = self._get_axis_number(axis)

    if not is_list_like(q):
        # BlockManager.quantile expects listlike, so we wrap and unwrap here
        res_df = self.quantile(
            [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation
        )
        # unwrap the single-quantile row back into a Series
        res = res_df.iloc[0]
        if axis == 1 and len(self) == 0:
            # GH#41544 try to get an appropriate dtype
            dtype = find_common_type(list(self.dtypes))
            if needs_i8_conversion(dtype):
                return res.astype(dtype)
        return res

    q = Index(q, dtype=np.float64)
    data = self._get_numeric_data() if numeric_only else self

    # transpose so the manager-level quantile always runs along axis=1
    if axis == 1:
        data = data.T

    if len(data.columns) == 0:
        # GH#23925 _get_numeric_data may have dropped all columns
        cols = Index([], name=self.columns.name)

        dtype = np.float64
        if axis == 1:
            # GH#41544 try to get an appropriate dtype
            cdtype = find_common_type(list(self.dtypes))
            if needs_i8_conversion(cdtype):
                dtype = cdtype

        if is_list_like(q):
            res = self._constructor([], index=q, columns=cols, dtype=dtype)
            return res.__finalize__(self, method="quantile")
        return self._constructor_sliced([], index=cols, name=q, dtype=dtype)

    res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation)

    result = self._constructor(res)
    return result.__finalize__(self, method="quantile")
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
def asfreq(
    self,
    freq: Frequency,
    method=None,
    how: str | None = None,
    normalize: bool = False,
    fill_value=None,
) -> DataFrame:
    # Thin wrapper: the docstring comes from the @doc template on
    # NDFrame.asfreq; all work is delegated to the shared implementation.
    return super().asfreq(
        freq=freq,
        method=method,
        how=how,
        normalize=normalize,
        fill_value=fill_value,
    )
@doc(NDFrame.resample, **_shared_doc_kwargs)
def resample(
    self,
    rule,
    axis=0,
    closed: str | None = None,
    label: str | None = None,
    convention: str = "start",
    kind: str | None = None,
    loffset=None,
    base: int | None = None,
    on=None,
    level=None,
    origin: str | TimestampConvertibleTypes = "start_day",
    offset: TimedeltaConvertibleTypes | None = None,
) -> Resampler:
    # Thin wrapper: the docstring comes from the @doc template on
    # NDFrame.resample; all work is delegated to the shared implementation.
    return super().resample(
        rule=rule,
        axis=axis,
        closed=closed,
        label=label,
        convention=convention,
        kind=kind,
        loffset=loffset,
        base=base,
        on=on,
        level=level,
        origin=origin,
        offset=offset,
    )
def to_timestamp(
    self,
    freq: Frequency | None = None,
    how: str = "start",
    axis: Axis = 0,
    copy: bool = True,
) -> DataFrame:
    """Cast to DatetimeIndex of timestamps, at *beginning* of period.

    Parameters
    ----------
    freq : str, default frequency of PeriodIndex
        Desired frequency.
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default).
    copy : bool, default True
        If False then underlying input data is not copied.

    Returns
    -------
    DataFrame with DatetimeIndex
    """
    result = self.copy(deep=copy)
    axis_name = self._get_axis_name(axis)
    current_ax = getattr(self, axis_name)
    if not isinstance(current_ax, PeriodIndex):
        raise TypeError(f"unsupported Type {type(current_ax).__name__}")
    setattr(result, axis_name, current_ax.to_timestamp(freq=freq, how=how))
    return result
def to_period(
    self, freq: Frequency | None = None, axis: Axis = 0, copy: bool = True
) -> DataFrame:
    """Convert DataFrame from DatetimeIndex to PeriodIndex.

    Convert DataFrame from DatetimeIndex to PeriodIndex with desired
    frequency (inferred from index if not passed).

    Parameters
    ----------
    freq : str, default
        Frequency of the PeriodIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default).
    copy : bool, default True
        If False then underlying input data is not copied.

    Returns
    -------
    DataFrame with PeriodIndex
    """
    result = self.copy(deep=copy)
    axis_name = self._get_axis_name(axis)
    current_ax = getattr(self, axis_name)
    if not isinstance(current_ax, DatetimeIndex):
        raise TypeError(f"unsupported Type {type(current_ax).__name__}")
    setattr(result, axis_name, current_ax.to_period(freq=freq))
    return result
def isin(self, values) -> DataFrame:
    """Whether each element in the DataFrame is contained in values.

    Parameters
    ----------
    values : iterable, Series, DataFrame or dict
        The result will only be true at a location if all the
        labels match. If `values` is a Series, that's the index. If
        `values` is a dict, the keys must be the column names,
        which must match. If `values` is a DataFrame,
        then both the index and column labels must match.

    Returns
    -------
    DataFrame
        DataFrame of booleans showing whether each element in the DataFrame
        is contained in values.

    See Also
    --------
    DataFrame.eq: Equality test for DataFrame.
    Series.isin: Equivalent method on Series.
    Series.str.contains: Test if pattern or regex is contained within a
        string of a Series or Index.
    """
    if isinstance(values, dict):
        from pandas.core.reshape.concat import concat

        # defaultdict: columns absent from `values` compare against [] and
        # therefore come back all-False.
        values = collections.defaultdict(list, values)
        result = concat(
            (
                self.iloc[:, [i]].isin(values[col])
                for i, col in enumerate(self.columns)
            ),
            axis=1,
        )
    elif isinstance(values, Series):
        if not values.index.is_unique:
            raise ValueError("cannot compute isin with a duplicate axis.")
        # row-wise comparison against the aligned Series
        result = self.eq(values.reindex_like(self), axis="index")
    elif isinstance(values, DataFrame):
        if not (values.columns.is_unique and values.index.is_unique):
            raise ValueError("cannot compute isin with a duplicate axis.")
        result = self.eq(values.reindex_like(self))
    else:
        if not is_list_like(values):
            raise TypeError(
                "only list-like or dict-like objects are allowed "
                "to be passed to DataFrame.isin(), "
                f"you passed a '{type(values).__name__}'"
            )
        # flat membership test over the raveled 2D array, then reshape back
        result = self._constructor(
            algorithms.isin(self.values.ravel(), values).reshape(self.shape),
            self.index,
            self.columns,
        )
    return result.__finalize__(self, method="isin")
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
# Extend the base-class mapping with the DataFrame-only column-axis aliases.
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {
    **NDFrame._AXIS_TO_AXIS_NUMBER,
    1: 1,
    "columns": 1,
}
_AXIS_LEN = len(_AXIS_ORDERS)
# The "info" axis (the one holding the per-item labels) is the columns.
_info_axis_number = 1
_info_axis_name = "columns"
# NOTE(review): the ``axis`` argument of AxisProperty appears to refer to the
# internal manager axis order, which looks reversed relative to the
# user-facing axis numbers -- confirm before "fixing" the 1/0 swap below.
index: Index = properties.AxisProperty(
    axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
    axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> dict[str, int]:
    """.. deprecated:: 1.1.0"""
    # Access the base-class property for its side effect (presumably the
    # deprecation warning -- confirm); the value itself is discarded.
    super()._AXIS_NUMBERS
    return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> dict[int, str]:
    """.. deprecated:: 1.1.0"""
    # Access the base-class property for its side effect (presumably the
    # deprecation warning -- confirm); the value itself is discarded.
    super()._AXIS_NAMES
    return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
# `plot`/`sparse` are lazily-instantiated accessor namespaces; `hist` and
# `boxplot` bind the module-level plotting helpers directly as methods.
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
# ----------------------------------------------------------------------
# Internal Interface Methods

def _to_dict_of_blocks(self, copy: bool = True):
    """
    Return a dict of dtype -> Constructor Types that
    each is a homogeneous dtype.

    Internal ONLY - only works for BlockManager

    Parameters
    ----------
    copy : bool, default True
        Whether the per-dtype frames should own copies of the data.
    """
    mgr = self._mgr
    # convert to BlockManager if needed -> this way support ArrayManager as well
    mgr = mgr_to_mgr(mgr, "block")
    mgr = cast(BlockManager, mgr)
    return {
        # fixed: stray trailing comma in the loop target (`for k, v, in`)
        k: self._constructor(v).__finalize__(self)
        for k, v in mgr.to_dict(copy=copy).items()
    }
@property
def values(self) -> np.ndarray:
    """Return a Numpy representation of the DataFrame.

    .. warning::

       We recommend using :meth:`DataFrame.to_numpy` instead.

    Only the values in the DataFrame will be returned, the axes labels
    will be removed.

    Returns
    -------
    numpy.ndarray
        The values of the DataFrame.

    See Also
    --------
    DataFrame.to_numpy : Recommended alternative to this method.
    DataFrame.index : Retrieve the index labels.
    DataFrame.columns : Retrieving the column names.

    Notes
    -----
    The dtype will be a lower-common-denominator dtype (implicit
    upcasting); that is to say if the dtypes (even of numeric types)
    are mixed, the one that accommodates all will be chosen. Use this
    with care if you are not dealing with the blocks.

    e.g. If the dtypes are float16 and float32, dtype will be upcast to
    float32. If dtypes are int32 and uint8, dtype will be upcast to
    int32. By :func:`numpy.find_common_type` convention, mixing int64
    and uint64 will result in a float64 dtype.
    """
    # Consolidate blocks first so as_array() operates on a merged manager.
    self._consolidate_inplace()
    return self._mgr.as_array()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def ffill(
    self: DataFrame,
    axis: None | Axis = None,
    inplace: bool = False,
    limit: None | int = None,
    downcast=None,
) -> DataFrame | None:
    """Forward-fill missing values; delegates to the shared NDFrame.ffill."""
    return super().ffill(
        axis=axis, inplace=inplace, limit=limit, downcast=downcast
    )
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def bfill(
    self: DataFrame,
    axis: None | Axis = None,
    inplace: bool = False,
    limit: None | int = None,
    downcast=None,
) -> DataFrame | None:
    """Backward-fill missing values; delegates to the shared NDFrame.bfill."""
    return super().bfill(
        axis=axis, inplace=inplace, limit=limit, downcast=downcast
    )
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "lower", "upper"]
)
def clip(
    self: DataFrame,
    lower=None,
    upper=None,
    axis: Axis | None = None,
    inplace: bool = False,
    *args,
    **kwargs,
) -> DataFrame | None:
    # Delegates to NDFrame.clip. The positional pass-through must stay
    # positional because *args has to follow the named arguments.
    return super().clip(lower, upper, axis, inplace, *args, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
def interpolate(
    self: DataFrame,
    method: str = "linear",
    axis: Axis = 0,
    limit: int | None = None,
    inplace: bool = False,
    limit_direction: str | None = None,
    limit_area: str | None = None,
    downcast: str | None = None,
    **kwargs,
) -> DataFrame | None:
    """Fill NaN values via interpolation; delegates to NDFrame.interpolate."""
    return super().interpolate(
        method=method,
        axis=axis,
        limit=limit,
        inplace=inplace,
        limit_direction=limit_direction,
        limit_area=limit_area,
        downcast=downcast,
        **kwargs,
    )
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "cond", "other"]
)
def where(
    self,
    cond,
    other=lib.no_default,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
    try_cast=lib.no_default,
):
    """Keep values where ``cond`` is True; delegates to NDFrame.where."""
    return super().where(
        cond,
        other,
        inplace=inplace,
        axis=axis,
        level=level,
        errors=errors,
        try_cast=try_cast,
    )
@deprecate_nonkeyword_arguments(
    version=None, allowed_args=["self", "cond", "other"]
)
def mask(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
    try_cast=lib.no_default,
):
    """Replace values where ``cond`` is True; delegates to NDFrame.mask."""
    return super().mask(
        cond,
        other,
        inplace=inplace,
        axis=axis,
        level=level,
        errors=errors,
        try_cast=try_cast,
    )
# Attach the generated reduction methods (sum/mean/min/...) to DataFrame.
DataFrame._add_numeric_operations()

# Attach the flex arithmetic methods (add/sub/mul/... with axis/level
# support) to DataFrame.
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
    """Transpose a nested ``{index: {column: value}}`` mapping.

    Returns a ``defaultdict`` shaped ``{column: {index: value}}``, i.e.
    column-major, which is the orientation DataFrame construction expects.
    """
    transposed: collections.defaultdict = collections.defaultdict(dict)
    for row_key, row in data.items():
        for col_key, value in row.items():
            transposed[col_key][row_key] = value
    return transposed
def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike:
    """Align ``value`` with ``index`` and return the underlying array.

    Used when a Series/DataFrame is inserted as a column into a frame
    whose index may differ from the value's.
    """
    # reindex if necessary
    if value.index.equals(index) or not len(index):
        # copy so the caller never aliases the source's buffer
        return value._values.copy()

    # GH#4107
    try:
        reindexed_value = value.reindex(index)._values
    except ValueError as err:
        # raised in MultiIndex.from_tuples, see test_insert_error_msmgs
        if not value.index.is_unique:
            # duplicate axis
            raise err
        # a unique index that still fails to reindex means the shapes are
        # fundamentally incompatible -> surface a clearer TypeError
        raise TypeError(
            "incompatible index of inserted column with frame index"
        ) from err
    return reindexed_value
| pandas-dev/pandas | pandas/core/frame.py | Python | bsd-3-clause | 384,508 | [
"Elk"
] | 422256e12d3b5bbae1a89e2b422f1f37b8e4e11688f8b09ade12bb2dd36d9ac9 |
"""Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose, set_config
# Default cycle of plotting colors: matplotlib's single-letter codes first,
# then extra hex colors so long channel lists repeat less quickly.
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
          '#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax):
    """Aux function to handle vmin and vmax parameters.

    Each bound may be None (derived from the data), a callable (applied
    to the data), or a number (used as-is). When both are None the
    limits are made symmetric around zero.
    """
    if vmax is None and vmin is None:
        vmax = np.abs(data).max()
        vmin = -vmax
    else:
        if callable(vmin):
            vmin = vmin(data)
        elif vmin is None:
            vmin = np.min(data)
        if callable(vmax):
            vmax = vmax(data)
        elif vmax is None:
            # BUG FIX: this branch previously tested `vmin is None`, so a
            # caller supplying only vmin would get vmax left as None.
            vmax = np.max(data)
    return vmin, vmax
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
    """Adjust subplot parameters to give specified padding.

    Note. For plotting please use this function instead of
    ``plt.tight_layout``.

    Parameters
    ----------
    pad : float
        padding between the figure edge and the edges of subplots, as a
        fraction of the font-size.
    h_pad : float
        Padding height between edges of adjacent subplots.
        Defaults to `pad_inches`.
    w_pad : float
        Padding width between edges of adjacent subplots.
        Defaults to `pad_inches`.
    fig : instance of Figure
        Figure to apply changes to.
    """
    import matplotlib.pyplot as plt
    if fig is None:
        fig = plt.gcf()

    # Draw first so axes geometry is up to date before adjusting.
    fig.canvas.draw()
    try:  # see https://github.com/matplotlib/matplotlib/issues/2654
        fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
    except Exception:
        warn('Matplotlib function \'tight_layout\' is not supported.'
             ' Skipping subplot adjusment.')
    else:
        # Remember the settings so later draws keep the layout; ignore
        # failures on matplotlib versions without set_tight_layout.
        try:
            fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
        except Exception:
            pass
def _check_delayed_ssp(container):
    """Verify that ``container`` still has unapplied SSP projectors.

    Used by interactive SSP selection, which needs at least one projector
    that has not yet been applied to the data.
    """
    projs = container.info['projs']
    if container.proj is True or all(p['active'] for p in projs):
        raise RuntimeError('Projs are already applied. Please initialize'
                           ' the data with proj set to False.')
    elif len(projs) < 1:
        raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=(5, 10, 15), format='mayavi'):
    """Return a colormap similar to that used by mne_analyze.

    Parameters
    ----------
    limits : list (or array) of length 3 or 6
        Bounds for the colormap, which will be mirrored across zero if length
        3, or completely specified (and potentially asymmetric) if length 6.
    format : str
        Type of colormap to return. If 'matplotlib', will return a
        matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
        return an RGBA array of shape (256, 4).

    Returns
    -------
    cmap : instance of matplotlib.pyplot.colormap | array
        A teal->blue->gray->red->yellow colormap.

    Notes
    -----
    For this will return a colormap that will display correctly for data
    that are scaled by the plotting function to span [-fmax, fmax].
    """
    # NOTE: the default was a mutable list ([5, 10, 15]); it is now an
    # immutable tuple, which np.asarray handles identically.
    # Ensure limits is an array
    limits = np.asarray(limits, dtype='float')

    if len(limits) != 3 and len(limits) != 6:
        raise ValueError('limits must have 3 or 6 elements')
    if len(limits) == 3 and any(limits < 0.):
        raise ValueError('if 3 elements, limits must all be non-negative')
    if any(np.diff(limits) <= 0):
        raise ValueError('limits must be monotonically increasing')

    if format == 'matplotlib':
        from matplotlib import colors
        # Map the (possibly mirrored) limits onto [0, 1] color anchors.
        if len(limits) == 3:
            limits = (np.concatenate((-np.flipud(limits), limits)) +
                      limits[-1]) / (2 * limits[-1])
        else:
            limits = (limits - np.min(limits)) / np.max(limits -
                                                        np.min(limits))

        cdict = {'red': ((limits[0], 0.0, 0.0),
                         (limits[1], 0.0, 0.0),
                         (limits[2], 0.5, 0.5),
                         (limits[3], 0.5, 0.5),
                         (limits[4], 1.0, 1.0),
                         (limits[5], 1.0, 1.0)),
                 'green': ((limits[0], 1.0, 1.0),
                           (limits[1], 0.0, 0.0),
                           (limits[2], 0.5, 0.5),
                           (limits[3], 0.5, 0.5),
                           (limits[4], 0.0, 0.0),
                           (limits[5], 1.0, 1.0)),
                 'blue': ((limits[0], 1.0, 1.0),
                          (limits[1], 1.0, 1.0),
                          (limits[2], 0.5, 0.5),
                          (limits[3], 0.5, 0.5),
                          (limits[4], 0.0, 0.0),
                          (limits[5], 0.0, 0.0))}
        return colors.LinearSegmentedColormap('mne_analyze', cdict)
    elif format == 'mayavi':
        # Normalize limits to [-1, 1] with an explicit zero anchor, then
        # interpolate per-channel control points onto a 256-entry table.
        if len(limits) == 3:
            limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
                limits[-1]
        else:
            limits = np.concatenate((limits[:3], [0], limits[3:]))
            limits /= np.max(np.abs(limits))
        r = np.array([0, 0, 0, 0, 1, 1, 1])
        g = np.array([1, 0, 0, 0, 0, 0, 1])
        b = np.array([1, 1, 1, 0, 0, 0, 0])
        a = np.array([1, 1, 0, 0, 0, 1, 1])
        xp = (np.arange(256) - 128) / 128.0
        colormap = np.r_[[np.interp(xp, limits, 255 * c)
                          for c in [r, g, b, a]]].T
        return colormap
    else:
        raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
    """Open the projectors dialog if closed, close it if open."""
    import matplotlib.pyplot as plt
    if len(params['projs']) == 0:
        return  # nothing to toggle without projectors
    if params['fig_proj'] is None:
        _draw_proj_checkbox(event, params, draw_current_state=False)
    else:
        # turn off options dialog
        plt.close(params['fig_proj'])
        del params['proj_checks']
        params['fig_proj'] = None
def _toggle_proj(event, params):
    """Operation to perform when proj boxes clicked."""
    # read options if possible
    if 'proj_checks' in params:
        bools = [line[0].get_visible()
                 for line in params['proj_checks'].lines]
        for idx, (shown, proj) in enumerate(zip(bools, params['projs'])):
            # an already-applied projector cannot be deactivated
            if proj['active'] and not shown:
                bools[idx] = True
    else:
        bools = [True] * len(params['projs'])

    # only redraw when the selection actually changed (or never computed)
    if ('proj_bools' not in params or
            not np.array_equal(bools, params['proj_bools'])):
        params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
    """Aux function for customizing help dialogs text."""
    # `text` holds the key/control labels, `text2` the matching descriptions;
    # the insert() indices below keep the two lists positionally aligned.
    text, text2 = list(), list()

    text.append(u'\u2190 : \n')
    text.append(u'\u2192 : \n')
    text.append(u'\u2193 : \n')
    text.append(u'\u2191 : \n')
    text.append(u'- : \n')
    text.append(u'+ or = : \n')
    text.append(u'Home : \n')
    text.append(u'End : \n')
    text.append(u'Page down : \n')
    text.append(u'Page up : \n')
    text.append(u'F11 : \n')
    text.append(u'? : \n')
    text.append(u'Esc : \n\n')
    text.append(u'Mouse controls\n')
    text.append(u'click on data :\n')

    text2.append('Navigate left\n')
    text2.append('Navigate right\n')
    text2.append('Scale down\n')
    text2.append('Scale up\n')
    text2.append('Toggle full screen mode\n')
    text2.append('Open help box\n')
    text2.append('Quit\n\n\n')

    # NOTE(review): nesting below reconstructed from stripped indentation;
    # the elif pairs with `if 'raw' in params` -- confirm against upstream.
    if 'raw' in params:
        text2.insert(4, 'Reduce the time shown per view\n')
        text2.insert(5, 'Increase the time shown per view\n')
        text.append(u'click elsewhere in the plot :\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text2.append('Mark bad channel\n')
            text2.append('Vertical line at a time instant\n')
            # NOTE(review): 'Mark bad channel' appears twice in this branch;
            # looks like one entry should differ -- confirm against upstream.
            text2.append('Mark bad channel\n')
    elif 'epochs' in params:
        text.append(u'right click :\n')
        text2.insert(4, 'Reduce the number of epochs per view\n')
        text2.insert(5, 'Increase the number of epochs per view\n')
        if 'ica' in params:
            text.append(u'click component name :\n')
            text2.insert(2, 'Navigate components down\n')
            text2.insert(3, 'Navigate components up\n')
            text2.insert(8, 'Reduce the number of components per view\n')
            text2.insert(9, 'Increase the number of components per view\n')
            text2.append('Mark component for exclusion\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Show topography for the component\n')
        else:
            text.append(u'click channel name :\n')
            text2.insert(2, 'Navigate channels down\n')
            text2.insert(3, 'Navigate channels up\n')
            text2.insert(8, 'Reduce the number of channels per view\n')
            text2.insert(9, 'Increase the number of channels per view\n')
            text.insert(10, u'b : \n')
            text2.insert(10, 'Toggle butterfly plot on/off\n')
            text.insert(11, u'h : \n')
            text2.insert(11, 'Show histogram of peak-to-peak values\n')
            text2.append('Mark bad epoch\n')
            text2.append('Vertical line at a time instant\n')
            text2.append('Mark bad channel\n')
        text.append(u'middle click :\n')
        text2.append('Show channel name (butterfly plot)\n')
        text.insert(11, u'o : \n')
        text2.insert(11, 'View settings (orig. view only)\n')

    return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
    """Create a subplot grid large enough for ``n_cells`` panels.

    Rows are added once ``max_col`` columns are filled; surplus axes in the
    final row are hidden. Returns the figure and a flat list of axes.
    """
    import matplotlib.pyplot as plt
    if n_cells == 1:
        nrow, ncol = 1, 1
    elif n_cells <= max_col:
        nrow, ncol = 1, n_cells
    else:
        ncol = max_col
        nrow = int(math.ceil(n_cells / float(max_col)))

    fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
    if nrow == ncol == 1:
        axes = [axes]
    else:
        axes = axes.flatten()
    for unused_ax in axes[n_cells:]:  # hide unused axes
        unused_ax.set_visible(False)
    return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
    """Toggle options (projectors) dialog.

    Opens a toolbar-less figure with one check button per projector;
    clicking a box triggers ``_toggle_proj``. References to the dialog and
    its widget are stored in ``params`` so they survive garbage collection.
    """
    from matplotlib import widgets
    projs = params['projs']
    # turn on options dialog

    labels = [p['desc'] for p in projs]
    actives = ([p['active'] for p in projs] if draw_current_state else
               [True] * len(params['projs']))

    # size the dialog to the longest description / number of projectors
    width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
    height = len(projs) / 6.0 + 0.5
    fig_proj = figure_nobar(figsize=(width, height))
    fig_proj.canvas.set_window_title('SSP projection vectors')
    params['fig_proj'] = fig_proj  # necessary for proper toggling
    ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)

    proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
    # change already-applied projectors to red
    for ii, p in enumerate(projs):
        if p['active'] is True:
            for x in proj_checks.lines[ii]:
                x.set_color('r')
    # make minimal size
    # pass key presses from option dialog over
    proj_checks.on_clicked(partial(_toggle_proj, params=params))
    params['proj_checks'] = proj_checks

    # this should work for non-test cases
    try:
        fig_proj.canvas.draw()
        fig_proj.show()
    except Exception:
        pass
def _layout_figure(params):
    """Function for setting figure layout. Shared with raw and epoch plots.

    Positions the main trace axes, the vertical (channel) and horizontal
    (time) scrollbars, and the option/help buttons, converting fixed pixel
    margins into figure-relative coordinates.
    """
    size = params['fig'].get_size_inches() * params['fig'].dpi

    # fixed pixel sizes for the scrollbars and margins
    scroll_width = 25
    hscroll_dist = 25
    vscroll_dist = 10
    l_border = 100
    r_border = 10
    t_border = 35
    b_border = 40

    # only bother trying to reset layout if it's reasonable to do so
    if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
        return

    # convert to relative units
    scroll_width_x = scroll_width / size[0]
    scroll_width_y = scroll_width / size[1]
    vscroll_dist /= size[0]
    hscroll_dist /= size[1]
    l_border /= size[0]
    r_border /= size[0]
    t_border /= size[1]
    b_border /= size[1]

    # main axis (traces)
    ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
    ax_y = hscroll_dist + scroll_width_y + b_border
    ax_height = 1.0 - ax_y - t_border
    pos = [l_border, ax_y, ax_width, ax_height]
    params['ax'].set_position(pos)
    if 'ax2' in params:
        params['ax2'].set_position(pos)
    # (a redundant duplicate params['ax'].set_position(pos) was removed here)

    # vscroll (channels)
    pos = [ax_width + l_border + vscroll_dist, ax_y,
           scroll_width_x, ax_height]
    params['ax_vscroll'].set_position(pos)

    # hscroll (time)
    pos = [l_border, b_border, ax_width, scroll_width_y]
    params['ax_hscroll'].set_position(pos)
    if 'ax_button' in params:
        # options button
        pos = [l_border + ax_width + vscroll_dist, b_border,
               scroll_width_x, scroll_width_y]
        params['ax_button'].set_position(pos)
    if 'ax_help_button' in params:
        pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
               scroll_width_x * 2, scroll_width_y]
        params['ax_help_button'].set_position(pos)
    params['fig'].canvas.draw()
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent='    ',
                 read_limit=np.inf, max_str=30, verbose=None):
    """Compare the contents of two fiff files using diff and show_fiff.

    Parameters
    ----------
    fname_1 : str
        First file to compare.
    fname_2 : str
        Second file to compare.
    fname_out : str | None
        Filename to store the resulting diff. If None, a temporary
        file will be created.
    show : bool
        If True, show the resulting diff in a new tab in a web browser.
    indent : str
        How to indent the lines.
    read_limit : int
        Max number of bytes of data to read from a tag. Can be np.inf
        to always read all data (helps test read completion).
    max_str : int
        Max number of characters of string representation to print for
        each tag's data.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fname_out : str
        The filename used for storing the diff. Could be useful for
        when a temporary file is used.
    """
    # Render each file as a text listing, then diff the two listings.
    file_1 = show_fiff(fname_1, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    file_2 = show_fiff(fname_2, output=list, indent=indent,
                       read_limit=read_limit, max_str=max_str)
    diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
    if fname_out is not None:
        f = open(fname_out, 'w')
    else:
        # delete=False so the browser can read the file after we close it
        f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
        fname_out = f.name
    with f as fid:
        fid.write(diff)
    if show is True:
        webbrowser.open_new_tab(fname_out)
    return fname_out
def figure_nobar(*args, **kwargs):
    """Make a matplotlib figure with no toolbar.

    Temporarily sets ``rcParams['toolbar'] = 'none'`` while creating the
    figure (restoring the user's setting afterwards) and disconnects any
    key-press callbacks the canvas registered for the toolbar.

    Fix: the original ``except Exception as ex: raise ex`` clause was a
    no-op re-raise that discarded the original traceback (under Python 2);
    ``try``/``finally`` alone gives the same cleanup guarantee.
    """
    from matplotlib import rcParams, pyplot as plt
    old_val = rcParams['toolbar']
    try:
        rcParams['toolbar'] = 'none'
        fig = plt.figure(*args, **kwargs)
        # remove button press catchers (for toolbar)
        cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
        for key in cbs:
            fig.canvas.callbacks.disconnect(key)
    finally:
        # always restore the user's original toolbar setting
        rcParams['toolbar'] = old_val
    return fig
def _helper_raw_resize(event, params):
    """Resize callback: remember the new size and re-layout the figure."""
    inches = params['fig'].get_size_inches()
    # persist "W,H" so the next browser session opens at the same size
    set_config('MNE_BROWSE_RAW_SIZE', ','.join(str(dim) for dim in inches))
    _layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
    """Scroll the channel viewport one page per mouse-wheel event."""
    if len_channels is None:
        len_channels = len(params['info']['ch_names'])
    previous = params['ch_start']
    page = params['n_channels']
    if event.step < 0:
        # wheel down: advance, but never past the last full page
        params['ch_start'] = min(previous + page, len_channels - page)
    else:
        # wheel up: go back, but never before the first channel
        params['ch_start'] = max(previous - page, 0)
    if previous != params['ch_start']:
        _channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
    """Interpret key presses.

    Keyboard shortcuts for the raw browser: escape closes, up/down page
    through channels, left/right step through time, +/-/= rescale,
    pageup/pagedown change the channel count, home/end change the window
    duration, '?' opens help, f11 toggles fullscreen.
    """
    import matplotlib.pyplot as plt
    if event.key == 'escape':
        plt.close(params['fig'])
    elif event.key == 'down':
        # page the channel view down
        params['ch_start'] += params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'up':
        params['ch_start'] -= params['n_channels']
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'right':
        # step forward one full window width
        value = params['t_start'] + params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'left':
        value = params['t_start'] - params['duration']
        _plot_raw_time(value, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.key in ['+', '=']:
        # scale traces up by 10%
        params['scale_factor'] *= 1.1
        params['plot_fun']()
    elif event.key == '-':
        params['scale_factor'] /= 1.1
        params['plot_fun']()
    elif event.key == 'pageup':
        # show one more channel: recompute per-channel vertical offsets
        n_channels = params['n_channels'] + 1
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'pagedown':
        n_channels = params['n_channels'] - 1
        if n_channels == 0:
            # never show fewer than one channel
            return
        offset = params['ax'].get_ylim()[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].set_yticks(params['offsets'])
        params['vsel_patch'].set_height(n_channels)
        if len(params['lines']) > n_channels:  # remove line from view
            params['lines'][n_channels].set_xdata([])
            params['lines'][n_channels].set_ydata([])
        _channels_changed(params, len(params['info']['ch_names']))
    elif event.key == 'home':
        # shrink the time window by one second (minimum > 0)
        duration = params['duration'] - 1.0
        if duration <= 0:
            return
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == 'end':
        # grow the time window by one second, capped at the recording end
        duration = params['duration'] + 1.0
        if duration > params['raw'].times[-1]:
            duration = params['raw'].times[-1]
        params['duration'] = duration
        params['hsel_patch'].set_width(params['duration'])
        params['update_fun']()
        params['plot_fun']()
    elif event.key == '?':
        _onclick_help(event, params)
    elif event.key == 'f11':
        mng = plt.get_current_fig_manager()
        mng.full_screen_toggle()
def _mouse_click(event, params):
    """Vertical select callback.

    Routes left-button clicks by location: outside all axes -> channel
    label click; on the vertical scrollbar -> jump the channel view; on
    the horizontal scrollbar -> jump in time; on the main axes -> toggle
    bad-channel status.
    """
    if event.button != 1:
        # only react to the left mouse button
        return
    if event.inaxes is None:
        if params['n_channels'] > 100:
            # too many channels for label clicks to be useful
            return
        ax = params['ax']
        ylim = ax.get_ylim()
        # convert the pixel click position into data coordinates
        pos = ax.transData.inverted().transform((event.x, event.y))
        if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
            # not within the y-label margin left of the traces
            return
        params['label_click_fun'](pos)
    # vertical scrollbar changed
    if event.inaxes == params['ax_vscroll']:
        # center the channel window on the clicked channel
        ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
        if params['ch_start'] != ch_start:
            params['ch_start'] = ch_start
            params['plot_fun']()
    # horizontal scrollbar changed
    elif event.inaxes == params['ax_hscroll']:
        # center the time window on the clicked time
        _plot_raw_time(event.xdata - params['duration'] / 2, params)
        params['update_fun']()
        params['plot_fun']()
    elif event.inaxes == params['ax']:
        params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
    """Helper for selecting bad channels onpick. Returns updated bads list."""
    # trade-off, avoid selecting more than one channel when drifts are present
    # however for clean data don't click on peaks but on flat segments
    def f(x, y):
        # y is np.subtract or np.add: gives mean(x) -/+ 2*std(x)
        return y(np.mean(x), x.std() * 2)
    lines = event.inaxes.lines
    for line in lines:
        ydata = line.get_ydata()
        if not isinstance(ydata, list) and not np.isnan(ydata).any():
            # band of +/- 2 standard deviations around this trace's mean
            ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
            if ymin <= event.ydata <= ymax:
                this_chan = vars(line)['ch_name']
                if this_chan in params['info']['ch_names']:
                    ch_idx = params['ch_start'] + lines.index(line)
                    if this_chan not in bads:
                        # mark as bad: recolor and push behind other traces
                        bads.append(this_chan)
                        color = params['bad_color']
                        line.set_zorder(-1)
                    else:
                        # unmark: remove every occurrence, restore color
                        while this_chan in bads:
                            bads.remove(this_chan)
                        color = vars(line)['def_color']
                        line.set_zorder(0)
                    line.set_color(color)
                    params['ax_vscroll'].patches[ch_idx].set_color(color)
                    break
    else:
        # for-else: no trace was hit -> drop a vertical time marker instead
        x = np.array([event.xdata] * 2)
        params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
        params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
        params['vertline_t'].set_text('%0.3f' % x[0])
    return bads
def _onclick_help(event, params):
    """Function for drawing help window.

    Opens a small toolbar-less figure listing the keyboard shortcuts:
    a title row, a bold right-aligned column of keys, and a column of
    descriptions.
    """
    import matplotlib.pyplot as plt
    text, text2 = _get_help_text(params)
    width = 6
    height = 5
    # toolbar-less figure so its own shortcuts don't interfere
    fig_help = figure_nobar(figsize=(width, height), dpi=80)
    fig_help.canvas.set_window_title('Help')
    ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
    ax.set_title('Keyboard shortcuts')
    plt.axis('off')
    # left column: key names (bold, right-aligned)
    ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
    ax1.set_yticklabels(list())
    plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
             ha='right')
    plt.axis('off')
    # right column: descriptions
    ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
    ax2.set_yticklabels(list())
    plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
    plt.axis('off')
    tight_layout(fig=fig_help)
    # this should work for non-test cases
    try:
        fig_help.canvas.draw()
        fig_help.show()
    except Exception:
        # draw/show may fail with non-interactive backends (e.g. tests)
        pass
class ClickableImage(object):
    """Display an image and record the x/y position of every click.

    The image (anything ``imshow`` accepts) is displayed immediately and
    each pick event appends the cursor's ``(x, y)`` data coordinates to
    ``self.coords``, so something can later be superimposed on the image.

    Parameters
    ----------
    imdata: ndarray
        The image that you wish to click on for 2-d points.
    **kwargs : dict
        Keyword arguments. Passed to ax.imshow.

    Notes
    -----
    .. versionadded:: 0.9.0
    """

    def __init__(self, imdata, **kwargs):
        """Show the image and hook up the pick-event handler."""
        from matplotlib.pyplot import figure, show
        self.coords = []
        self.imdata = imdata
        self.fig = figure()
        self.ax = self.fig.add_subplot(111)
        # image extent is (0, width) x (0, height) in data coordinates
        self.ymax = self.imdata.shape[0]
        self.xmax = self.imdata.shape[1]
        self.im = self.ax.imshow(imdata, aspect='auto',
                                 extent=(0, self.xmax, 0, self.ymax),
                                 picker=True, **kwargs)
        self.ax.axis('off')
        self.fig.canvas.mpl_connect('pick_event', self.onclick)
        show()

    def onclick(self, event):
        """Mouse click handler.

        Parameters
        ----------
        event: matplotlib event object
            The matplotlib object that we use to get x/y position.
        """
        click = event.mouseevent
        self.coords.append((click.xdata, click.ydata))

    def plot_clicks(self, **kwargs):
        """Plot the clicked positions on top of the background image.

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to imshow in displaying the bg image.
        """
        from matplotlib.pyplot import subplots, show
        fig, ax = subplots()
        ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
        # remember the limits so scatter/annotate don't rescale the view
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        xcoords, ycoords = zip(*self.coords)
        ax.scatter(xcoords, ycoords, c='r')
        for idx, coord in enumerate(self.coords):
            ax.annotate(str(idx), coord, fontsize=20, color='r')
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        show()

    def to_layout(self, **kwargs):
        """Turn the clicked coordinates into an MNE Layout object.

        Normalizes by the image you used to generate clicks.

        Parameters
        ----------
        **kwargs : dict
            Arguments are passed to generate_2d_layout.
        """
        from mne.channels.layout import generate_2d_layout
        coords = np.array(self.coords)
        return generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
def _fake_click(fig, ax, point, xform='ax'):
"""Helper to fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
raise ValueError('unknown transform')
try:
fig.canvas.button_press_event(x, y, 1, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, 1, False)
def add_background_image(fig, im, set_ratios=None):
    """Add a background image to a plot.

    Draws ``im`` on a full-figure axes placed behind everything else in
    ``fig``. Generally meant for topo plots, though it works for any
    figure. Note: this modifies the figure and/or axes in place.

    Parameters
    ----------
    fig: plt.figure
        The figure you wish to add a bg image to.
    im: ndarray
        A numpy array that works with a call to plt.imshow(im). This
        will be plotted as the background of the figure.
    set_ratios: None | str
        Set the aspect ratio of any axes in fig to the value in
        set_ratios. Defaults to None, which does nothing to axes.

    Returns
    -------
    ax_im: instance of the created matplotlib axis object
        corresponding to the image you added.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if set_ratios is not None:
        # force the requested aspect ratio on every existing axes
        for axes in fig.axes:
            axes.set_aspect(set_ratios)
    # full-figure axes, pushed behind all other artists
    ax_im = fig.add_axes([0, 0, 1, 1])
    ax_im.imshow(im, aspect='auto')
    ax_im.set_zorder(-1)
    return ax_im
| trachelr/mne-python | mne/viz/utils.py | Python | bsd-3-clause | 29,921 | [
"Mayavi"
] | 9088610e0574fa84df278d8f232b2edd0a333058eaabaac4135ab31ae7126d48 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
from datetime import datetime
from PyQt4 import QtCore, QtGui
from openlp.core.lib import Plugin, Registry, Settings, StringContent, build_icon, translate
from openlp.core.lib.db import Manager
from openlp.core.lib.ui import create_action
from openlp.core.utils.actions import ActionList
from openlp.plugins.songusage.forms import SongUsageDetailForm, SongUsageDeleteForm
from openlp.plugins.songusage.lib import upgrade
from openlp.plugins.songusage.lib.db import init_schema, SongUsageItem
log = logging.getLogger(__name__)
# The song-usage "year" runs from 1 September to 31 August. Before
# September we are still inside the period that started the previous
# calendar year, so shift back by one.
YEAR = QtCore.QDate().currentDate().year()
if QtCore.QDate().currentDate().month() < 9:
    YEAR -= 1
# Default settings for this plugin's section of the settings store; the
# default reporting range covers the current tracking year.
__default_settings__ = {
    'songusage/db type': 'sqlite',
    'songusage/active': False,
    'songusage/to date': QtCore.QDate(YEAR, 8, 31),
    'songusage/from date': QtCore.QDate(YEAR - 1, 9, 1),
    'songusage/last directory export': ''
}
class SongUsagePlugin(Plugin):
    """Track which songs are displayed or printed during services.

    Adds a "Song Usage Tracking" submenu to the **Tools** menu and a
    toggle button to the status bar, and writes a usage record to its
    own database each time a song goes live or is printed.
    """
    log.info('SongUsage Plugin loaded')

    def __init__(self):
        """Create the plugin, its database manager and its icons."""
        super(SongUsagePlugin, self).__init__('songusage', __default_settings__)
        self.manager = Manager('songusage', init_schema, upgrade_mod=upgrade)
        self.weight = -4
        self.icon = build_icon(':/plugins/plugin_songusage.png')
        self.active_icon = build_icon(':/songusage/song_usage_active.png')
        self.inactive_icon = build_icon(':/songusage/song_usage_inactive.png')
        # real state is restored from settings in initialise()
        self.song_usage_active = False

    def check_pre_conditions(self):
        """
        Check the plugin can run.
        """
        # the Manager only has a session when the database opened OK
        return self.manager.session is not None

    def add_tools_menu_item(self, tools_menu):
        """
        Give the SongUsage plugin the opportunity to add items to the **Tools** menu.

        ``tools_menu``
            The actual **Tools** menu item, so that your actions can use it as their parent.
        """
        log.info('add tools menu')
        self.toolsMenu = tools_menu
        self.song_usage_menu = QtGui.QMenu(tools_menu)
        self.song_usage_menu.setObjectName('song_usage_menu')
        self.song_usage_menu.setTitle(translate('SongUsagePlugin', '&Song Usage Tracking'))
        # SongUsage Delete
        self.song_usage_delete = create_action(tools_menu, 'songUsageDelete',
            text=translate('SongUsagePlugin', '&Delete Tracking Data'),
            statustip=translate('SongUsagePlugin', 'Delete song usage data up to a specified date.'),
            triggers=self.on_song_usage_delete)
        # SongUsage Report
        self.song_usage_report = create_action(tools_menu, 'songUsageReport',
            text=translate('SongUsagePlugin', '&Extract Tracking Data'),
            statustip=translate('SongUsagePlugin', 'Generate a report on song usage.'),
            triggers=self.on_song_usage_report)
        # SongUsage activation
        self.song_usage_status = create_action(tools_menu, 'songUsageStatus',
            text=translate('SongUsagePlugin', 'Toggle Tracking'),
            statustip=translate('SongUsagePlugin', 'Toggle the tracking of song usage.'), checked=False,
            can_shortcuts=True, triggers=self.toggle_song_usage_state)
        # Add Menus together
        self.toolsMenu.addAction(self.song_usage_menu.menuAction())
        self.song_usage_menu.addAction(self.song_usage_status)
        self.song_usage_menu.addSeparator()
        self.song_usage_menu.addAction(self.song_usage_report)
        self.song_usage_menu.addAction(self.song_usage_delete)
        # status-bar button mirroring the tracking state
        self.song_usage_active_button = QtGui.QToolButton(self.main_window.status_bar)
        self.song_usage_active_button.setCheckable(True)
        self.song_usage_active_button.setAutoRaise(True)
        self.song_usage_active_button.setStatusTip(translate('SongUsagePlugin', 'Toggle the tracking of song usage.'))
        self.song_usage_active_button.setObjectName('song_usage_active_button')
        self.main_window.status_bar.insertPermanentWidget(1, self.song_usage_active_button)
        self.song_usage_active_button.hide()
        # Signals and slots
        QtCore.QObject.connect(self.song_usage_status, QtCore.SIGNAL('visibilityChanged(bool)'),
            self.song_usage_status.setChecked)
        self.song_usage_active_button.toggled.connect(self.toggle_song_usage_state)
        # hidden until initialise() runs
        self.song_usage_menu.menuAction().setVisible(False)

    def initialise(self):
        """Wire up the plugin: register live/print hooks, restore the saved
        tracking state, build the forms and show the menu/button."""
        log.info('SongUsage Initialising')
        super(SongUsagePlugin, self).initialise()
        Registry().register_function('slidecontroller_live_started', self.display_song_usage)
        Registry().register_function('print_service_started', self.print_song_usage)
        self.song_usage_active = Settings().value(self.settings_section + '/active')
        # Set the button and checkbox state
        self.set_button_state()
        action_list = ActionList.get_instance()
        action_list.add_action(self.song_usage_status, translate('SongUsagePlugin', 'Song Usage'))
        action_list.add_action(self.song_usage_delete, translate('SongUsagePlugin', 'Song Usage'))
        action_list.add_action(self.song_usage_report, translate('SongUsagePlugin', 'Song Usage'))
        self.song_usage_delete_form = SongUsageDeleteForm(self.manager, self.main_window)
        self.song_usage_detail_form = SongUsageDetailForm(self, self.main_window)
        self.song_usage_menu.menuAction().setVisible(True)
        self.song_usage_active_button.show()

    def finalise(self):
        """
        Tidy up on exit
        """
        log.info('Plugin Finalise')
        self.manager.finalise()
        super(SongUsagePlugin, self).finalise()
        self.song_usage_menu.menuAction().setVisible(False)
        action_list = ActionList.get_instance()
        action_list.remove_action(self.song_usage_status, translate('SongUsagePlugin', 'Song Usage'))
        action_list.remove_action(self.song_usage_delete, translate('SongUsagePlugin', 'Song Usage'))
        action_list.remove_action(self.song_usage_report, translate('SongUsagePlugin', 'Song Usage'))
        self.song_usage_active_button.hide()
        # stop any events being processed
        self.song_usage_active = False

    def toggle_song_usage_state(self):
        """
        Manage the state of the audit collection and amend
        the UI when necessary,
        """
        self.song_usage_active = not self.song_usage_active
        Settings().setValue(self.settings_section + '/active', self.song_usage_active)
        self.set_button_state()

    def set_button_state(self):
        """
        Keep buttons in line. Turn off signals to stop a feedback loop, but we
        need the button and check box set correctly.
        """
        self.song_usage_active_button.blockSignals(True)
        self.song_usage_status.blockSignals(True)
        if self.song_usage_active:
            self.song_usage_active_button.setIcon(self.active_icon)
            self.song_usage_status.setChecked(True)
            self.song_usage_active_button.setChecked(True)
            self.song_usage_active_button.setToolTip(translate('SongUsagePlugin', 'Song usage tracking is active.'))
        else:
            self.song_usage_active_button.setIcon(self.inactive_icon)
            self.song_usage_status.setChecked(False)
            self.song_usage_active_button.setChecked(False)
            self.song_usage_active_button.setToolTip(translate('SongUsagePlugin', 'Song usage tracking is inactive.'))
        self.song_usage_active_button.blockSignals(False)
        self.song_usage_status.blockSignals(False)

    def display_song_usage(self, item):
        """
        Song Usage for which has been displayed
        """
        self._add_song_usage(translate('SongUsagePlugin', 'display'), item)

    def print_song_usage(self, item):
        """
        Song Usage for which has been printed
        """
        self._add_song_usage(translate('SongUsagePlugin', 'printed'), item)

    def _add_song_usage(self, source, item):
        """Store one usage record for ``item`` if tracking is active and the
        item carries audit data (title, authors, copyright, CCL number)."""
        audit = item[0].audit
        if self.song_usage_active and audit:
            song_usage_item = SongUsageItem()
            song_usage_item.usagedate = datetime.today()
            song_usage_item.usagetime = datetime.now().time()
            song_usage_item.title = audit[0]
            song_usage_item.copyright = audit[2]
            song_usage_item.ccl_number = audit[3]
            song_usage_item.authors = ' '.join(audit[1])
            song_usage_item.plugin_name = item[0].name
            song_usage_item.source = source
            self.manager.save_object(song_usage_item)

    def on_song_usage_delete(self):
        """Show the delete-tracking-data dialog."""
        self.song_usage_delete_form.exec_()

    def on_song_usage_report(self):
        """Show the extract-tracking-data dialog."""
        self.song_usage_detail_form.initialise()
        self.song_usage_detail_form.exec_()

    def about(self):
        """Return the rich-text "about" blurb for the plugin list."""
        about_text = translate('SongUsagePlugin',
            '<strong>SongUsage Plugin</strong><br />This plugin tracks the usage of songs in services.')
        return about_text

    def set_plugin_text_strings(self):
        """
        Called to define all translatable texts of the plugin
        """
        ## Name PluginList ##
        self.text_strings[StringContent.Name] = {
            'singular': translate('SongUsagePlugin', 'SongUsage', 'name singular'),
            'plural': translate('SongUsagePlugin', 'SongUsage', 'name plural')
        }
        ## Name for MediaDockManager, SettingsManager ##
        self.text_strings[StringContent.VisibleName] = {
            'title': translate('SongUsagePlugin', 'SongUsage', 'container title')
        }
| marmyshev/item_title | openlp/plugins/songusage/songusageplugin.py | Python | gpl-2.0 | 11,613 | [
"Brian"
] | b3001d5ea5d5c009b94255a3a5632cf0e29af3a8f15ad78be1351e2fbcec205b |
"""Pygame-based media controller for MPF, based on the Backbox Control Protocol
(BCP) v1.0"""
# media_controller.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# The Backbox Control Protocol was conceived and developed by:
# Quinn Capen
# Kevin Kelm
# Gabe Knuth
# Brian Madden
# Mike ORourke
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
import socket
import sys
import time
import threading
from distutils.version import LooseVersion
import Queue
import traceback
import pygame
from mpf.media_controller.core import *
from mpf.system.config import Config, CaseInsensitiveDict
from mpf.system.events import EventManager
from mpf.system.timing import Timing
from mpf.system.tasks import Task, DelayManager
from mpf.game.player import Player
import mpf.system.bcp as bcp
import version
class MediaController(object):
def __init__(self, options):
    """Set up the media controller.

    Args:
        options: Dict of launcher options; must contain 'mcconfigfile'
            (media controller defaults), 'machinepath' (machine folder)
            and 'configfile' (machine config, relative or absolute).
    """
    self.options = options
    self.log = logging.getLogger("MediaController")
    self.log.info("Media Controller Version %s", version.__version__)
    self.log.info("Backbox Control Protocol Version %s",
                  version.__bcp_version__)
    self.log.info("Config File Version %s",
                  version.__config_version__)
    python_version = sys.version_info
    self.log.info("Python version: %s.%s.%s", python_version[0],
                  python_version[1], python_version[2])
    self.log.info("Platform: %s", sys.platform)
    self.log.info("Python executable location: %s", sys.executable)
    self.log.info("32-bit Python? %s", sys.maxsize < 2**32)
    self.config = dict()
    self.done = False  # todo
    self.machine_path = None
    self.asset_managers = dict()
    self.num_assets_to_load = 0
    self.window = None
    self.window_manager = None
    # pygame state: initialized / requested by a module
    self.pygame = False
    self.pygame_requested = False
    self.registered_pygame_handlers = dict()
    self.pygame_allowed_events = list()
    # queues shared with the BCP socket thread
    self.socket_thread = None
    self.receive_queue = Queue.Queue()
    self.sending_queue = Queue.Queue()
    self.crash_queue = Queue.Queue()
    self.game_modes = CaseInsensitiveDict()
    self.player_list = list()
    self.player = None
    self.HZ = 0
    self.next_tick_time = 0
    self.secs_per_tick = 0
    # periodic task that shuts us down if a child thread crashes
    Task.Create(self._check_crash_queue)
    # map of incoming BCP command name -> handler method
    self.bcp_commands = {'hello': self.bcp_hello,
                         'goodbye': self.bcp_goodbye,
                         'reset': self.reset,
                         'mode_start': self.bcp_mode_start,
                         'mode_stop': self.bcp_mode_stop,
                         'error': self.bcp_error,
                         'ball_start': self.bcp_ball_start,
                         'ball_end': self.bcp_ball_end,
                         'game_start': self.bcp_game_start,
                         'game_end': self.bcp_game_end,
                         'player_added': self.bcp_player_add,
                         'player_variable': self.bcp_player_variable,
                         'player_score': self.bcp_player_score,
                         'player_turn_start': self.bcp_player_turn_start,
                         'attract_start': self.bcp_attract_start,
                         'attract_stop': self.bcp_attract_stop,
                         'trigger': self.bcp_trigger,
                         'switch': self.bcp_switch,
                         'get': self.bcp_get,
                         'set': self.bcp_set,
                         'config': self.bcp_config,
                         'timer': self.bcp_timer
                         }
    # load the MPF config & machine defaults
    self.config = (
        Config.load_config_yaml(config=self.config,
                                yaml_file=self.options['mcconfigfile']))
    # Find the machine_files location. If it starts with a forward or
    # backward slash, then we assume it's from the mpf root. Otherwise we
    # assume it's from the subfolder location specified in the
    # mpfconfigfile location
    if (options['machinepath'].startswith('/') or
            options['machinepath'].startswith('\\')):
        machine_path = options['machinepath']
    else:
        machine_path = os.path.join(self.config['mediacontroller']['paths']
                                    ['machine_files'],
                                    options['machinepath'])
    self.machine_path = os.path.abspath(machine_path)
    # Add the machine folder to our path so we can import modules from it
    sys.path.append(self.machine_path)
    self.log.info("Machine folder: %s", machine_path)
    # Now find the config file location. Same as machine_file with the
    # slash uses to specify an absolute path
    if (options['configfile'].startswith('/') or
            options['configfile'].startswith('\\')):
        config_file = options['configfile']
    else:
        if not options['configfile'].endswith('.yaml'):
            options['configfile'] += '.yaml'
        config_file = os.path.join(self.machine_path,
                                   self.config['mediacontroller']['paths']
                                   ['config'],
                                   options['configfile'])
    self.log.info("Base machine config file: %s", config_file)
    # Load the machine-specific config
    self.config = Config.load_config_yaml(config=self.config,
                                          yaml_file=config_file)
    mediacontroller_config_spec = '''
exit_on_disconnect: boolean|True
port: int|5050
'''
    self.config['mediacontroller'] = (
        Config.process_config(mediacontroller_config_spec,
                              self.config['mediacontroller']))
    self.events = EventManager(self)
    self.timing = Timing(self)
    # Load the media controller modules
    self.config['mediacontroller']['modules'] = (
        self.config['mediacontroller']['modules'].split(' '))
    for module in self.config['mediacontroller']['modules']:
        self.log.info("Loading module: %s", module)
        module_parts = module.split('.')
        exec('self.' + module_parts[0] + '=' + module + '(self)')
        # todo there's probably a more pythonic way to do this, and I know
        # exec() is supposedly unsafe, but meh, if you have access to put
        # malicious files in the system folder then you have access to this
        # code too.
    self.start_socket_thread()
    self.events.post("init_phase_1")
    self.events.post("init_phase_2")
    self.events.post("init_phase_3")
    self.events.post("init_phase_4")
    self.events.post("init_phase_5")
    self.reset()
def _check_crash_queue(self):
    # Task generator, scheduled via Task.Create in __init__: polls the
    # crash queue that child threads write to when they die.
    try:
        crash = self.crash_queue.get(block=False)
    except Queue.Empty:
        # no crash reported; yield to reschedule this check in 1000 ticks
        yield 1000
    else:
        # a child thread crashed: log details and stop the main run loop
        self.log.critical("MPF Shutting down due to child thread crash")
        self.log.critical("Crash details: %s", crash)
        self.done = True
def reset(self, **kwargs):
    """Processes an incoming BCP 'reset' command.

    Clears the current player state and lets every registered module
    reset itself via the three mc_reset phase events.
    """
    self.player = None
    self.player_list = list()
    for phase in range(1, 4):
        self.events.post('mc_reset_phase_%s' % phase)
def get_window(self):
    """Return the onscreen display window, creating it on first use.

    Window creation is centralized here because several modules and
    plugins may want a window in unknown combinations, and we only
    ever want a single one.
    """
    if not self.window:
        # lazily create the one shared window via the window manager
        self.window_manager = window.WindowManager(self)
        self.window = self.window_manager.window
    return self.window
def request_pygame(self):
    """Register a module's request to use Pygame.

    Requests are centralized (instead of each module calling
    pygame.init() itself) so initialization happens once, in order,
    during init_phase_3.

    Returns: True if pygame is available, False otherwise.
    """
    if not pygame or self.pygame_requested:
        return False
    self.events.add_handler('init_phase_3', self._pygame_init)
    self.pygame_requested = True
    return True
def _pygame_init(self):
    """Perform the actual (one-time) Pygame initialization."""
    if not pygame:
        msg = ("Pygame is needed but not available. Please install"
               " Pygame and try again.")
        self.log.critical(msg)
        raise Exception(msg)
    if not self.pygame:
        self.log.debug("Initializing Pygame, version %s",
                       pygame.version.ver)
        pygame.init()
        self.pygame = True
        # poll the Pygame event queue every machine loop
        self.events.add_handler('timer_tick', self.get_pygame_events,
                                priority=1000)
        self.events.post('pygame_initialized')
def register_pygame_handler(self, event, handler):
    """Registers a method to be a handler for a certain type of Pygame
    event.

    Args:
        event: A string of the Pygame event name you're registering this
            handler for.
        handler: A method that will be called when this Pygame event is
            posted.
    """
    handlers = self.registered_pygame_handlers.setdefault(event, set())
    handlers.add(handler)
    self.pygame_allowed_events.append(event)
    self.log.debug("Adding Window event handler. Event:%s, Handler:%s",
                   event, handler)
    # tell Pygame which event types we actually care about
    pygame.event.set_allowed(self.pygame_allowed_events)
def get_pygame_events(self):
    """Fetch pending Pygame events and dispatch them to their registered
    handlers. Automatically called every machine loop via timer_tick.
    """
    for event in pygame.event.get():
        handlers = self.registered_pygame_handlers.get(event.type)
        if not handlers:
            continue
        # key events also carry the key and modifier values
        is_key_event = event.type in (pygame.KEYDOWN, pygame.KEYUP)
        for handler in handlers:
            if is_key_event:
                handler(event.key, event.mod)
            else:
                handler()
def _process_command(self, bcp_command, **kwargs):
    """Dispatch one decoded BCP command to its handler method.

    Fix: the original wrapped the handler *call* in ``try/except
    KeyError``, so a KeyError raised inside a perfectly valid handler
    was misreported as "invalid command". The lookup is now separated
    from the call.
    """
    self.log.debug("Processing command: %s %s", bcp_command, kwargs)
    handler = self.bcp_commands.get(bcp_command)
    if handler is None:
        self.log.warning("Received invalid BCP command: %s", bcp_command)
        self.send('error', message='invalid command', command=bcp_command)
    else:
        handler(**kwargs)
def send(self, bcp_command, callback=None, **kwargs):
    """Sends a BCP command to the connected pinball controller.

    Args:
        bcp_command: String of the BCP command name.
        callback: Optional callback method that will be called when the
            command is sent.
        **kwargs: Optional additional kwargs will be added to the BCP
            command string.
    """
    encoded = bcp.encode_command_string(bcp_command, **kwargs)
    self.sending_queue.put(encoded)
    if callback:
        callback()
def send_dmd_frame(self, data):
    """Sends a DMD frame to the BCP client.

    Args:
        data: A 4096-length raw byte string.
    """
    # dmd_frame is sent raw (not via encode_command_string) for speed
    self.sending_queue.put('dmd_frame?' + data)
def _timer_init(self):
    """Configure the 30 Hz main-loop timing values."""
    self.HZ = 30
    self.secs_per_tick = 1.0 / self.HZ
    self.next_tick_time = time.time()
def timer_tick(self):
    """Called by the platform each machine tick based on self.HZ"""
    # order matters: timing first, then the system event, then tasks
    self.timing.timer_tick()  # notifies the timing module
    self.events.post('timer_tick')  # sends the timer_tick system event
    Task.timer_tick()  # notifies tasks
    DelayManager.timer_tick()  # notifies pending delays
def run(self):
    """Main run loop.

    Sleeps ~1 ms per pass, drains incoming BCP commands, and fires
    timer_tick whenever the next scheduled tick time has passed.
    Exits when self.done becomes True (set via socket_thread_stopped).
    """
    self._timer_init()
    self.log.info("Starting the run loop at %sHz", self.HZ)
    start_time = time.time()
    loops = 0
    secs_per_tick = self.secs_per_tick
    self.next_tick_time = time.time()
    try:
        while self.done is False:
            time.sleep(0.001)  # keeps CPU usage reasonable between ticks
            self.get_from_queue()
            if self.next_tick_time <= time.time():  # todo change this
                self.timer_tick()
                # Advance by a fixed step (not to "now") so missed ticks
                # are caught up on subsequent passes.
                self.next_tick_time += secs_per_tick
                loops += 1
        self._do_shutdown()
        self.log.info("Target loop rate: %s Hz", self.HZ)
        self.log.info("Actual loop rate: %s Hz",
                      loops / (time.time() - start_time))
    except KeyboardInterrupt:
        self.shutdown()
def shutdown(self):
    """Shuts down and exits the media controller.

    This method will also send the BCP 'goodbye' command to any connected
    clients.
    """
    # Stopping the socket thread queues 'goodbye' and eventually causes
    # socket_thread_stopped() to set self.done, ending the run loop.
    self.socket_thread.stop()
def _do_shutdown(self):
    """Final cleanup once the run loop exits: quits pygame if it was
    initialized."""
    if self.pygame:
        pygame.quit()
def socket_thread_stopped(self):
    """Notifies the media controller that the socket thread has stopped."""
    # Setting this flag ends the main run() loop.
    self.done = True
def start_socket_thread(self):
    """Starts the BCPServer socket thread."""
    # Daemon thread so it does not keep the process alive on exit.
    self.socket_thread = BCPServer(self, self.receive_queue,
                                   self.sending_queue)
    self.socket_thread.daemon = True
    self.socket_thread.start()
def get_from_queue(self):
    """Gets and processes all queued up incoming BCP commands."""
    # Drain the queue without blocking; each raw string is decoded into a
    # (command, kwargs) pair and dispatched.
    while not self.receive_queue.empty():
        cmd, kwargs = bcp.decode_command_string(
            self.receive_queue.get(False))
        self._process_command(cmd, **kwargs)
def bcp_hello(self, **kwargs):
    """Processes an incoming BCP 'hello' command.

    Replies with our BCP version when the client's protocol version
    matches, otherwise replies that the protocol version is unknown.
    Logs a warning if the 'version' parameter is missing or unparseable.
    """
    # BUGFIX: was a bare `except:` wrapped around the whole body, which
    # also swallowed unrelated errors raised by self.send(). Only the
    # version lookup/parse is fallible in the intended way.
    try:
        client_version = LooseVersion(kwargs['version'])
    except (KeyError, ValueError):
        self.log.warning("Received invalid 'version' parameter with "
                         "'hello'")
        return
    if client_version == LooseVersion(version.__bcp_version__):
        self.send('hello', version=version.__bcp_version__)
    else:
        self.send('hello', version='unknown protocol version')
def bcp_goodbye(self, **kwargs):
    """Processes an incoming BCP 'goodbye' command.

    If configured to do so, stops the sending thread and exits the
    whole process.
    """
    if self.config['mediacontroller']['exit_on_disconnect']:
        self.socket_thread.sending_thread.stop()
        sys.exit()
def bcp_mode_start(self, name=None, priority=0, **kwargs):
    """Processes an incoming BCP 'mode_start' command.

    Starts the named game mode (if we know about it) at the given
    priority. A missing name is silently ignored for now.
    """
    if not name:
        # todo raise error
        return
    mode = self.game_modes.get(name)
    if mode is not None:
        mode.start(priority=priority)
def bcp_mode_stop(self, name, **kwargs):
    """Processes an incoming BCP 'mode_stop' command.

    Stops the named game mode if it is known. A falsy name is silently
    ignored for now.
    """
    if not name:
        # todo raise error
        return
    mode = self.game_modes.get(name)
    if mode is not None:
        mode.stop()
def bcp_error(self, **kwargs):
    """Processes an incoming BCP 'error' command."""
    # Nothing to recover here; just record that the client complained.
    self.log.warning('Received error command from client')
def bcp_ball_start(self, **kwargs):
    """Processes an incoming BCP 'ball_start' command by re-posting it as
    the local 'ball_started' event."""
    self.events.post('ball_started', **kwargs)
def bcp_ball_end(self, **kwargs):
    """Processes an incoming BCP 'ball_end' command by re-posting it as
    the local 'ball_ended' event."""
    self.events.post('ball_ended', **kwargs)
def bcp_game_start(self, **kwargs):
    """Processes an incoming BCP 'game_start' command.

    Sets up player 1, makes them the active player, then posts the
    local 'game_started' event.
    """
    # FIX: parameter was misspelled `**kargs`; renamed to `**kwargs` for
    # consistency with every other BCP handler (callers pass keywords, so
    # the dict name is internal and this is interface-compatible).
    self.bcp_player_add(number=1)
    self.bcp_player_turn_start(player=1)
    self.events.post('game_started', **kwargs)
def bcp_game_end(self, **kwargs):
    """Processes an incoming BCP 'game_end' command."""
    # Clear the active-player reference before announcing the event.
    self.player = None
    self.events.post('game_ended', **kwargs)
def bcp_player_add(self, number, **kwargs):
    """Processes an incoming BCP 'player_add' command.

    Only creates a new Player when `number` exceeds the current player
    count, so repeated adds for an existing player are no-ops (though
    the success event is posted either way).
    """
    if number > len(self.player_list):
        new_player = Player(self)
        self.player_list.append(new_player)
        new_player.score = 0
    self.events.post('player_add_success', num=number)
def bcp_player_variable(self, name, value, prev_value, change, **kwargs):
"""Processes an incoming BCP 'player_variable' command."""
if self.player:
self.player[name] = value
def bcp_player_score(self, value, prev_value, change, **kwargs):
    """Processes an incoming BCP 'player_score' command.

    Stores the (string-encoded) score as an int on the active player;
    ignored when no player is active.
    """
    if not self.player:
        return
    self.player['score'] = int(value)
def bcp_attract_start(self, **kwargs):
    """Processes an incoming BCP 'attract_start' command."""
    self.events.post('machineflow_Attract_start')
def bcp_attract_stop(self, **kwargs):
    """Processes an incoming BCP 'attract_stop' command."""
    # FIX: the docstring was previously placed *after* this statement,
    # making it a no-op string expression instead of a docstring.
    self.events.post('machineflow_Attract_stop')
def bcp_player_turn_start(self, player, **kwargs):
    """Processes an incoming BCP 'player_turn_start' command.

    Switches the active player reference unless the named player is
    already active. `player` is a 1-based player number.
    """
    # Equivalent to the original `(A and B) or not A` condition via
    # De Morgan-style simplification.
    if not self.player or self.player.number != player:
        self.player = self.player_list[int(player) - 1]
def bcp_trigger(self, name, **kwargs):
    """Processes an incoming BCP 'trigger' command.

    Re-posts the trigger as a local event unless it is blocked: events
    with certain prefixes or names are handled by dedicated BCP commands
    already, and re-posting them here would duplicate them.
    """
    blocked_event_prefixes = ('player_',
                              'machinemode_',
                              )
    blocked_events = ('ball_started',
                      'ball_ended',
                      'game_started',
                      'game_ended',
                      )
    # BUGFIX: this previously used `and`, which could never be true
    # (none of the blocked event names carries a blocked prefix), so the
    # block lists were dead code and every trigger was re-posted. An
    # event is blocked if it matches a prefix OR an explicit name.
    if not (name.startswith(blocked_event_prefixes) or
            name in blocked_events):
        self.events.post(name, **kwargs)
def bcp_switch(self, name, state, **kwargs):
    """Processes an incoming BCP 'switch' command.

    Posts 'switch_<name>_active' for a nonzero state and
    'switch_<name>_inactive' otherwise. `state` may be a numeric string.
    """
    suffix = '_active' if int(state) else '_inactive'
    self.events.post('switch_' + name + suffix)
def bcp_get(self, **kwargs):
    """Processes an incoming BCP 'get' command.

    Note that this media controller doesn't implement the 'get' command at
    this time, but it's included here for completeness since the 'get'
    command is part of the BCP 1.0 specification so we don't want to return
    an error if we receive an incoming 'get' command.
    """
    pass
def bcp_set(self, **kwargs):
    """Processes an incoming BCP 'set' command.

    Note that this media controller doesn't implement the 'set' command at
    this time, but it's included here for completeness since the 'set'
    command is part of the BCP 1.0 specification so we don't want to return
    an error if we receive an incoming 'set' command.
    """
    pass
def bcp_config(self, **kwargs):
    """Processes an incoming BCP 'config' command.

    Currently only 'volume_<track>' settings are acted on; all other
    config keys are ignored.
    """
    # FIX: `iteritems()` is Python-2-only; `items()` behaves identically
    # on Python 2 (returns a list) and also works on Python 3.
    for k, v in kwargs.items():
        if k.startswith('volume_'):
            self.bcp_set_volume(track=k.split('volume_')[1], value=v)
def bcp_timer(self, name, action, **kwargs):
    """Processes an incoming BCP 'timer' command by posting it as a
    'timer_<name>_<action>' event."""
    self.events.post('timer_' + name + '_' + action, **kwargs)
def bcp_set_volume(self, track, value):
    """Sets the volume based on an incoming BCP 'config' command.

    Args:
        track: String name of the track the volume will set.
        value: Float between 0 and 1 which represents the volume level to
            set.

    Note: At this time only the master volume can be set with this method.
    """
    if track == 'master':
        self.sound.set_volume(value)
    # Per-track volume is not implemented yet:
    #if track in self.sound.tracks:
        #self.sound.tracks[track]
        # todo add per-track volume support to sound system
class BCPServer(threading.Thread):
    """Parent class for the BCP Server thread.

    Args:
        mc: A reference to the main MediaController instance.
        receiving_queue: A shared Queue() object which holds incoming BCP
            commands.
        sending_queue: A shared Queue() object which holds outgoing BCP
            commands.
    """

    def __init__(self, mc, receiving_queue, sending_queue):
        threading.Thread.__init__(self)
        self.mc = mc
        self.log = logging.getLogger('BCP')
        self.receive_queue = receiving_queue
        self.sending_queue = sending_queue
        self.connection = None  # active client socket; set in run()
        self.socket = None      # listening socket; set in setup_server_socket()
        self.done = False
        self.setup_server_socket()
        # Outgoing messages are transmitted by a separate daemon thread so
        # sends never block the accept/receive loop.
        self.sending_thread = threading.Thread(target=self.sending_loop)
        self.sending_thread.daemon = True
        self.sending_thread.start()

    def setup_server_socket(self, interface='localhost', port=5050):
        """Sets up the socket listener.

        Args:
            interface: String name of which interface this socket will listen
                on.
            port: Integer TCP port number the socket will listen on.
        """
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets us rebind quickly after a restart.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.log.info('Starting up on %s port %s', interface, port)
        try:
            self.socket.bind((interface, port))
        except IOError:
            self.log.critical('Socket bind IOError')
            raise
        # Only one pending BCP client connection at a time.
        self.socket.listen(1)

    def run(self):
        """The socket thread's run loop.

        Accepts one client at a time and feeds newline-separated BCP
        commands into the receive queue. Any unexpected exception is
        forwarded to the main thread via the crash queue.
        """
        try:
            while True:
                self.log.info("Waiting for a connection...")
                self.mc.events.post('client_disconnected')
                self.connection, client_address = self.socket.accept()
                self.log.info("Received connection from: %s:%s",
                              client_address[0], client_address[1])
                self.mc.events.post('client_connected',
                                    address=client_address[0],
                                    port=client_address[1])

                # Receive the data in small chunks and retransmit it
                while True:
                    try:
                        data = self.connection.recv(4096)
                        if data:
                            # NOTE(review): assumes recv() never splits a
                            # command across chunk boundaries -- TODO confirm
                            # against the BCP client framing.
                            commands = data.split("\n")
                            for cmd in commands:
                                if cmd:
                                    self.process_received_message(cmd)
                        else:
                            # no more data
                            break
                    except:
                        # NOTE(review): bare except swallows *all* errors
                        # from recv/processing, not just disconnects --
                        # consider narrowing to socket.error.
                        if self.mc.config['mediacontroller']['exit_on_disconnect']:
                            self.mc.shutdown()
                        else:
                            break
        except Exception:
            # Make the crash visible outside this thread.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            msg = ''.join(line for line in lines)
            self.mc.crash_queue.put(msg)

    def stop(self):
        """ Stops and shuts down the BCP server."""
        if not self.done:
            self.log.info("Socket thread stopping.")
            self.sending_queue.put('goodbye')
            time.sleep(1)  # give it a chance to send goodbye before quitting
            self.done = True
            self.mc.done = True

    def sending_loop(self):
        """Sending loop which transmits data from the sending queue to the
        remote socket.

        This method is run as a thread.
        """
        try:
            while not self.done:
                msg = self.sending_queue.get()  # blocks until a message arrives
                if not msg.startswith('dmd_frame'):
                    # DMD frames are raw binary and far too chatty to log.
                    self.log.debug('Sending "%s"', msg)

                try:
                    self.connection.sendall(msg + '\n')
                except (AttributeError, socket.error):
                    # AttributeError: no client connected yet
                    # (self.connection is None); socket.error: client went
                    # away. The message is silently dropped either way.
                    pass
                    # Do we just keep on trying, waiting until a new client
                    # connects?

            self.socket.close()
            self.socket = None

            self.mc.socket_thread_stopped()

        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            msg = ''.join(line for line in lines)
            self.mc.crash_queue.put(msg)

    def process_received_message(self, message):
        """Puts a received BCP message into the receiving queue.

        Args:
            message: The incoming BCP message
        """
        self.log.debug('Received "%s"', message)
        self.receive_queue.put(message)
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| jabdoa2/mpf | mpf/media_controller/core/media_controller.py | Python | mit | 26,941 | [
"Brian"
] | 6e2a8ec23597ef10cd5368bfda89470dd5e26c07975ab2f795b46a17696e7b2a |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import numpy as np
import unittest
from pymatgen.analysis.eos import EOS, NumericalEOS
from pymatgen.util.testing import PymatgenTest
class EOSTest(PymatgenTest):
    """Tests for the EOS (equation of state) fitting wrappers.

    All tests share one numerical-EOS fit of a Si energy-volume curve
    prepared in setUp().
    """

    def setUp(self):
        # Si data from Cormac
        # E(V) samples spanning compression through expansion around the
        # equilibrium volume (~40.86 A^3).
        self.volumes = [25.987454833, 26.9045702104, 27.8430241908,
                        28.8029649591, 29.7848370694, 30.7887887064,
                        31.814968055, 32.8638196693, 33.9353435494,
                        35.0299842495, 36.1477417695, 37.2892088485,
                        38.4543854865, 39.6437162376, 40.857201102,
                        42.095136449, 43.3579668329, 44.6456922537,
                        45.9587572656, 47.2973100535, 48.6614988019,
                        50.0517680652, 51.4682660281, 52.9112890601,
                        54.3808371612, 55.8775030703, 57.4014349722,
                        58.9526328669]
        self.energies = [-7.63622156576, -8.16831294894, -8.63871612686,
                         -9.05181213218, -9.41170988374, -9.72238224345,
                         -9.98744832526, -10.210309552, -10.3943401353,
                         -10.5427238068, -10.6584266073, -10.7442240979,
                         -10.8027285713, -10.8363890521, -10.8474912964,
                         -10.838157792, -10.8103477586, -10.7659387815,
                         -10.7066179666, -10.6339907853, -10.5495538639,
                         -10.4546677714, -10.3506386542, -10.2386366017,
                         -10.1197772808, -9.99504030111, -9.86535084973,
                         -9.73155247952]
        num_eos = EOS(eos_name="numerical_eos")
        # Shared fit used by most tests below.
        self.num_eos_fit = num_eos.fit(self.volumes, self.energies)

    def test_run_all_models(self):
        # Smoke test: every registered EOS model must fit without raising.
        for eos_name in EOS.MODELS:
            eos = EOS(eos_name=eos_name)
            _ = eos.fit(self.volumes, self.energies)

    def test_numerical_eoswrapper(self):
        # using numerical eos directly vs via EOS wrapper
        numerical_eos = NumericalEOS(self.volumes, self.energies)
        numerical_eos.fit()
        self.assertGreater(len(numerical_eos.eos_params), 3)
        self.assertAlmostEqual(float(numerical_eos.e0), self.num_eos_fit.e0, 3)
        self.assertAlmostEqual(float(numerical_eos.v0), self.num_eos_fit.v0, 3)
        self.assertAlmostEqual(float(numerical_eos.b0), self.num_eos_fit.b0, 3)
        self.assertAlmostEqual(float(numerical_eos.b1), self.num_eos_fit.b1, 3)
        self.assertArrayAlmostEqual(numerical_eos.eos_params, self.num_eos_fit.eos_params)

    def test_numerical_eos_values(self):
        # Fitted equilibrium energy/volume/bulk modulus against reference.
        np.testing.assert_almost_equal(self.num_eos_fit.e0, -10.84749, decimal=3)
        np.testing.assert_almost_equal(self.num_eos_fit.v0, 40.857201, decimal=1)
        np.testing.assert_almost_equal(self.num_eos_fit.b0, 0.55, decimal=2)
        #np.testing.assert_almost_equal(self.num_eos_fit.b0_GPa, 89.0370727, decimal=1)
        #np.testing.assert_almost_equal(self.num_eos_fit.b1, 4.344039, decimal=2)

    def test_eos_func(self):
        # list vs np.array arguments
        np.testing.assert_almost_equal(self.num_eos_fit.func([0, 1, 2]),
                                       self.num_eos_fit.func(np.array([0, 1, 2])),
                                       decimal=10)
        # func vs _func
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit._func(
                                           0., self.num_eos_fit.eos_params),
                                       decimal=10)
        # test the eos function: energy = f(volume)
        # numerical eos evaluated at volume=0 == a0 of the fit polynomial
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit.eos_params[-1], decimal=6)
        birch_eos = EOS(eos_name="birch")
        birch_eos_fit = birch_eos.fit(self.volumes, self.energies)
        # birch eos evaluated at v0 == e0
        np.testing.assert_almost_equal(birch_eos_fit.func(birch_eos_fit.v0),
                                       birch_eos_fit.e0, decimal=6)

    def test_eos_func_call(self):
        # eos_fit_obj.func(volume) == eos_fit_obj(volume)
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit(0.), decimal=10)

    def test_summary_dict(self):
        # results property must expose exactly the four fitted scalars.
        d = {"e0": self.num_eos_fit.e0, "b0": self.num_eos_fit.b0,
             "b1": self.num_eos_fit.b1, "v0": self.num_eos_fit.v0}
        self.assertDictEqual(self.num_eos_fit.results, d)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| matk86/pymatgen | pymatgen/analysis/tests/test_eos.py | Python | mit | 4,777 | [
"pymatgen"
] | a2365e0aa789bef6e4d4a5d8d4c35df7d13b332d70e17102dc04f9787c6aa8fd |
import pathlib
import h5py
import numpy as np
import pytest
from scipy.ndimage.filters import gaussian_filter
import dclab
from dclab.rtdc_dataset.feat_anc_plugin.plugin_feature import (
PlugInFeature, import_plugin_feature_script,
remove_plugin_feature, remove_all_plugin_features,
PluginImportError)
from dclab.rtdc_dataset.feat_anc_core.ancillary_feature import (
BadFeatureSizeWarning)
from helper_methods import retrieve_data
data_dir = pathlib.Path(__file__).parent / "data"
@pytest.fixture(autouse=True)
def cleanup_plugin_features():
    """Fixture used to cleanup plugin feature tests"""
    # code run before the test
    pass
    # then the test is run
    yield
    # code run after the test
    # remove our test plugin examples so registrations don't leak
    # between tests (autouse=True applies this to every test here)
    remove_all_plugin_features()
def compute_single_plugin_feature(rtdc_ds):
    """Basic plugin method"""
    # Element-wise ratio of circularity to area for every event.
    return rtdc_ds["circ"] / rtdc_ds["area_um"]
def compute_multiple_plugin_features(rtdc_ds):
    """Basic plugin method with dictionary returned"""
    circ = rtdc_ds["circ"]
    area = rtdc_ds["area_um"]
    # One entry per feature name declared in the plugin info.
    return {"circ_per_area": circ / area,
            "circ_times_area": circ * area}
def compute_non_scalar_plugin_feature(rtdc_ds):
    """Basic non-scalar plugin method"""
    # Blur each image along its two spatial axes only (axis 0 is the
    # event index, hence sigma=0 there).
    blurred = gaussian_filter(rtdc_ds["image"], sigma=(0, 1, 1))
    return {"image_gauss_filter": blurred}
def example_plugin_info_single_feature():
    """plugin info for a single feature

    Returns a fully-populated info dict (all optional keys present) for
    the "circ_per_area" feature, used as a template by many tests.
    """
    info = {
        "method": compute_single_plugin_feature,
        "description": "This plugin will compute a feature",
        "long description": "Even longer description that "
                            "can span multiple lines",
        "feature names": ["circ_per_area"],
        "feature labels": ["Circularity per Area"],
        "features required": ["circ", "area_um"],
        "config required": [],
        "method check required": lambda x: True,
        "scalar feature": [True],
        "version": "0.1.0",
    }
    return info
def example_plugin_info_multiple_feature():
    """plugin info for multiple features

    Like example_plugin_info_single_feature(), but the single method
    returns a dict with two feature names.
    """
    info = {
        "method": compute_multiple_plugin_features,
        "description": "This plugin will compute some features",
        "long description": "Even longer description that "
                            "can span multiple lines",
        "feature names": ["circ_per_area", "circ_times_area"],
        "feature labels": ["Circularity per Area", "Circularity times Area"],
        "features required": ["circ", "area_um"],
        "config required": [],
        "method check required": lambda x: True,
        "scalar feature": [True, True],
        "version": "0.1.0",
    }
    return info
def example_plugin_info_non_scalar_feature():
    """plugin info for non-scalar feature

    Note "scalar feature" is [False] here; tests that need a fixed event
    shape add a "feature shapes" key on top of this dict.
    """
    info = {
        "method": compute_non_scalar_plugin_feature,
        "description": "This plugin will compute a non-scalar feature",
        "long description": "This non-scalar feature is a Gaussian filter of "
                            "the image",
        "feature names": ["image_gauss_filter"],
        "feature labels": ["Gaussian Filtered Image"],
        "features required": ["image"],
        "config required": [],
        "method check required": lambda x: True,
        "scalar feature": [False],
        "version": "0.1.0",
    }
    return info
def compute_with_user_section(rtdc_ds):
    """setup a plugin method that uses user config section

    The "user:n_constrictions" metadata must be set
    """
    num = rtdc_ds.config["user"]["n_constrictions"]
    assert isinstance(num, int), (
        '"n_constrictions" should be an integer value.')
    return {"area_of_region": rtdc_ds["area_um"] * num}
def test_pf_attribute_ancill_info():
    """Check the plugin feature attribute input to AncillaryFeature"""
    info = example_plugin_info_single_feature()
    pf = PlugInFeature("circ_per_area", info)

    # plugin_feature_info normalizes the plural "feature names"/"features
    # required" keys into per-feature singular entries.
    assert pf.plugin_feature_info["feature name"] == "circ_per_area"
    assert pf.plugin_feature_info["method"] is compute_single_plugin_feature
    assert pf.plugin_feature_info["config required"] == []
    assert pf.plugin_feature_info["features required"] == ["circ", "area_um"]
def test_pf_attribute_plugin_feature_info():
    """Check the plugin feature info attribute"""
    info = example_plugin_info_single_feature()
    # comparing lambda functions fails due to differing memory locations
    info.pop("method check required")
    pf = PlugInFeature("circ_per_area", info)
    pf.plugin_feature_info.pop("method check required")

    # Expected fully-normalized info dict, including defaults the class
    # fills in ("feature shape", "plugin path", "identifier").
    plugin_feature_info = {
        "method": compute_single_plugin_feature,
        "description": "This plugin will compute a feature",
        "long description": "Even longer description that "
                            "can span multiple lines",
        "feature name": "circ_per_area",
        "feature label": "Circularity per Area",
        "feature shape": (1,),
        "features required": ["circ", "area_um"],
        "config required": [],
        "scalar feature": True,
        "version": "0.1.0",
        "plugin path": None,
        "identifier": "3a3e72c4cb015424ebbe6d4af63f2170",
    }
    assert pf.plugin_feature_info == plugin_feature_info
def test_pf_attributes():
    """Check the plugin feature attributes"""
    plugin_path = data_dir / "feat_anc_plugin_creative.py"
    plugin_list = dclab.load_plugin_feature(plugin_path)
    pf1, pf2 = plugin_list

    plugin_file_info = import_plugin_feature_script(plugin_path)
    # BUGFIX: these chained asserts previously compared
    # `pf.feature_name == pf.feature_name` (a tautology); the intent —
    # mirroring test_pf_attribute_ancill_info — is to check the
    # normalized info entry as well.
    assert pf1.feature_name == pf1.plugin_feature_info["feature name"] == \
        plugin_file_info["feature names"][0]
    assert pf2.feature_name == pf2.plugin_feature_info["feature name"] == \
        plugin_file_info["feature names"][1]

    assert plugin_path.samefile(pf1.plugin_path)
    assert plugin_path.samefile(pf1.plugin_feature_info["plugin path"])
    assert plugin_path.samefile(pf2.plugin_path)
    assert plugin_path.samefile(pf2.plugin_feature_info["plugin path"])

    assert pf1._original_info == plugin_file_info
    assert pf2._original_info == plugin_file_info
def test_pf_attributes_af_inherited():
"""Check the plugin feature attributes inherited from AncillaryFeature"""
plugin_path = data_dir / "feat_anc_plugin_creative.py"
plugin_list = dclab.load_plugin_feature(plugin_path)
pf, _ = plugin_list
plugin_file_info = import_plugin_feature_script(plugin_path)
assert pf.feature_name == plugin_file_info["feature names"][0]
assert pf.method == plugin_file_info["method"]
assert pf.req_config == plugin_file_info["config required"]
assert pf.req_features == plugin_file_info["features required"]
assert pf.req_func == plugin_file_info["method check required"]
assert pf.priority == 0
def test_pf_bad_plugin_feature_name_list():
    """Basic test of a bad feature name for PlugInFeature"""
    info = example_plugin_info_single_feature()
    # "feature names" must be a list even for a single feature.
    info["feature names"] = "Peter-Pan's Best Friend!"
    with pytest.raises(ValueError, match="must be a list, got"):
        PlugInFeature("Peter-Pan's Best Friend!", info)
def test_pf_bad_plugin_feature_name():
    """Basic test of a bad feature name for PlugInFeature"""
    info = example_plugin_info_single_feature()
    # Feature names must be valid dclab identifiers (lower-case etc.).
    info["feature names"] = ["Peter-Pan's Best Friend!"]
    with pytest.raises(ValueError, match="only contain lower-case characters"):
        PlugInFeature("Peter-Pan's Best Friend!", info)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_exists_in_hierarchy():
    """Test that RTDCHierarchy works with PlugInFeature"""
    info = example_plugin_info_single_feature()
    pf = PlugInFeature("circ_per_area", info)

    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        assert pf.feature_name in ds
        assert dclab.dfn.feature_exists(pf.feature_name)
        # Ancillary plugin features must also be visible in child
        # (hierarchy) datasets derived from ds.
        child = dclab.new_dataset(ds)
        assert pf.feature_name in child
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_export_and_load():
    """Check that exported and loaded hdf5 file will keep a plugin feature"""
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")

    # initialize PlugInFeature instance
    info = example_plugin_info_single_feature()
    pf = PlugInFeature("circ_per_area", info)

    with dclab.new_dataset(h5path) as ds:
        # extract the feature information from the dataset
        assert pf in PlugInFeature.features
        circ_per_area = ds[pf.feature_name]
        # export the data to a new file (plugin feature plus all innate
        # features so the copy is self-contained)
        expath = h5path.with_name("exported.rtdc")
        ds.export.hdf5(expath, features=ds.features_innate + [pf.feature_name])

    # make sure that worked
    with h5py.File(expath, "r") as h5:
        assert pf.feature_name in h5["events"]
        assert np.allclose(h5["events"][pf.feature_name], circ_per_area)

    # now check again with dclab
    with dclab.new_dataset(expath) as ds2:
        assert pf in PlugInFeature.features
        assert pf.feature_name in ds2
        # After export the values are stored in the file, i.e. "innate".
        assert pf.feature_name in ds2.features_innate
        assert np.allclose(ds2[pf.feature_name], circ_per_area)

        # and a control check: once the plugin is removed, the name is no
        # longer a valid feature, even though the data is in the file.
        remove_plugin_feature(pf)
        assert pf.feature_name not in ds2
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_export_non_scalar():
    """Exporting a non-scalar plugin feature without a declared shape
    works but emits a warning."""
    h5path = retrieve_data("fmt-hdf5_image-bg_2020.zip")

    # initialize PlugInFeature instance
    info = example_plugin_info_non_scalar_feature()
    pf = PlugInFeature("image_gauss_filter", info)

    with dclab.new_dataset(h5path) as ds:
        # extract the feature information from the dataset
        assert pf in PlugInFeature.features
        image_gauss_filter = ds[pf.feature_name]
        # export the data to a new file; no "feature shapes" was declared,
        # so dclab warns that it has to guess the event shape
        expath = h5path.with_name("exported.rtdc")
        with pytest.warns(UserWarning, match="out on a limb"):
            ds.export.hdf5(expath,
                           features=[pf.feature_name])

    # make sure that worked
    with h5py.File(expath, "r") as h5:
        assert pf.feature_name in h5["events"]
        assert np.allclose(h5["events"][pf.feature_name], image_gauss_filter)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_export_non_scalar_single_event():
    """A declared "feature shapes" allows appending single events with
    RTDCWriter after the export."""
    h5path = retrieve_data("fmt-hdf5_image-bg_2020.zip")

    # initialize PlugInFeature instance
    info = example_plugin_info_non_scalar_feature()
    info["feature shapes"] = [(80, 250)]
    pf = PlugInFeature("image_gauss_filter", info)

    with dclab.new_dataset(h5path) as ds:
        # extract the feature information from the dataset
        assert pf in PlugInFeature.features
        image_gauss_filter = ds[pf.feature_name]
        # export the data to a new file
        expath = h5path.with_name("exported.rtdc")
        ds.export.hdf5(expath, features=["image", pf.feature_name])
        # write another single event (one image, one filtered image)
        with dclab.RTDCWriter(expath) as hw:
            hw.store_feature(pf.feature_name, ds["image"][0])
            hw.store_feature("image", ds["image"][0])

    # make sure that worked: all original events plus the appended one
    with h5py.File(expath, "r") as h5:
        assert pf.feature_name in h5["events"]
        assert np.allclose(h5["events"][pf.feature_name][:-1],
                           image_gauss_filter)
        assert np.allclose(h5["events"][pf.feature_name][-1],
                           h5["events/image"][0])
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_export_non_scalar_no_warning():
    """With a correct "feature shapes" declared, the export proceeds
    without the shape-guessing warning."""
    h5path = retrieve_data("fmt-hdf5_image-bg_2020.zip")

    # initialize PlugInFeature instance
    info = example_plugin_info_non_scalar_feature()
    info["feature shapes"] = [(80, 250)]
    pf = PlugInFeature("image_gauss_filter", info)

    with dclab.new_dataset(h5path) as ds:
        # extract the feature information from the dataset
        assert pf in PlugInFeature.features
        image_gauss_filter = ds[pf.feature_name]
        # export the data to a new file
        expath = h5path.with_name("exported.rtdc")
        ds.export.hdf5(expath, features=[pf.feature_name])

    # make sure that worked
    with h5py.File(expath, "r") as h5:
        assert pf.feature_name in h5["events"]
        assert np.allclose(h5["events"][pf.feature_name], image_gauss_filter)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_export_non_scalar_bad_shape():
    """A wrong "feature shapes" declaration must make the export fail."""
    h5path = retrieve_data("fmt-hdf5_image-bg_2020.zip")

    # initialize PlugInFeature instance with a shape that does not match
    # the actual (80, 250) image data
    info = example_plugin_info_non_scalar_feature()
    info["feature shapes"] = [(42, 27)]
    pf = PlugInFeature("image_gauss_filter", info)

    with dclab.new_dataset(h5path) as ds:
        # extract the feature information from the dataset
        assert pf in PlugInFeature.features
        # export the data to a new file
        expath = h5path.with_name("exported.rtdc")
        with pytest.raises(ValueError, match="Bad shape"):
            ds.export.hdf5(expath, features=[pf.feature_name])
def test_pf_feature_exists():
    """Basic check that the plugin feature name exists in definitions"""
    plugin_path = data_dir / "feat_anc_plugin_creative.py"
    plugin_list = dclab.load_plugin_feature(plugin_path)
    # Loading the script registers both feature names with dclab.dfn.
    assert dclab.dfn.feature_exists(plugin_list[0].feature_name)
    assert dclab.dfn.feature_exists(plugin_list[1].feature_name)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_filtering_with_plugin_feature():
    """Filtering with plugin feature"""
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        info = example_plugin_info_single_feature()
        pf = PlugInFeature("circ_per_area", info)

        # A min/max box filter on the plugin feature behaves like one on
        # any built-in feature.
        ds.config["filtering"][f"{pf.feature_name} min"] = 0.030
        ds.config["filtering"][f"{pf.feature_name} max"] = 0.031
        ds.apply_filter()
        # Exactly one event (index 4) falls within the window.
        assert np.sum(ds.filter.all) == 1
        assert ds.filter.all[4]
def test_pf_import_plugin_info():
    """Check the plugin test example info is a dict"""
    plugin_path = data_dir / "feat_anc_plugin_creative.py"
    info = import_plugin_feature_script(plugin_path)
    assert isinstance(info, dict)
def test_pf_import_plugin_info_bad_path():
    """Raise error when a bad pathname is given"""
    bad_plugin_path = "not/a/real/path/plugin.py"
    with pytest.raises(PluginImportError, match="could be not be found"):
        import_plugin_feature_script(bad_plugin_path)
def test_pf_incorrect_input_info():
    """Raise error when info is not a dictionary"""
    info = ["this", "is", "not", "a", "dict"]
    with pytest.raises(ValueError, match="must be a dict"):
        PlugInFeature("feature_1", info)
def test_pf_incorrect_input_feature_name():
    """Raise error when the feature_name doesn't match info feature name"""
    info = example_plugin_info_single_feature()
    # `feature_name` is "circ_per_area" in info
    with pytest.raises(ValueError, match="is not defined"):
        PlugInFeature("not_the_correct_name", info)
def test_pf_incorrect_input_method():
    """Raise error when method is not callable"""
    info = example_plugin_info_single_feature()
    # set `info["method"]` to something that isn't callable
    info["method"] = "this_is_a_string"
    with pytest.raises(ValueError, match="is not callable"):
        PlugInFeature("circ_per_area", info)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_initialize_plugin_after_loading():
    """plugin feature loads correctly after feature added to hdf5 file"""
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        circ_per_area = compute_single_plugin_feature(ds)
    # Write the computed values directly into the HDF5 file; without a
    # registered plugin the name is not recognized as a feature.
    with h5py.File(h5path, "a") as h5:
        h5["events"]["circ_per_area"] = circ_per_area
    with dclab.new_dataset(h5path) as ds:
        assert "circ_per_area" not in ds
        info = example_plugin_info_single_feature()
        PlugInFeature("circ_per_area", info)
        # Once registered, the stored data is picked up as an innate
        # feature (no re-computation needed).
        assert "circ_per_area" in ds
        assert "circ_per_area" in ds.features_innate
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_initialize_plugin_feature_single():
    """Check that single plugin feature exists independant of loaded dataset"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    info = example_plugin_info_single_feature()
    PlugInFeature("circ_per_area", info)

    assert "circ_per_area" in ds
    circ_per_area = ds["circ_per_area"]
    assert np.allclose(circ_per_area, ds["circ"] / ds["area_um"])

    # check that PlugInFeature exists independent of loaded ds
    ds2 = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    assert "circ_per_area" in ds2
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_initialize_plugin_feature_non_scalar():
    """Check that the non-scalar plugin feature works"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    info = example_plugin_info_non_scalar_feature()
    PlugInFeature("image_gauss_filter", info)

    assert "image_gauss_filter" in ds
    image_gauss_filter = ds["image_gauss_filter"]
    # Values must match computing the filter directly on the image data.
    assert np.allclose(image_gauss_filter,
                       gaussian_filter(ds["image"], sigma=(0, 1, 1)))
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_initialize_plugin_features_multiple():
    """Check multiple plugin features exist independant of loaded dataset"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    assert "circ_per_area" not in ds.features_innate
    assert "circ_times_area" not in ds.features_innate
    # One info dict defining two features; each still needs its own
    # PlugInFeature instance.
    info = example_plugin_info_multiple_feature()
    PlugInFeature("circ_per_area", info)
    PlugInFeature("circ_times_area", info)

    assert "circ_per_area" in ds
    assert "circ_times_area" in ds
    assert dclab.dfn.feature_exists("circ_per_area")
    assert dclab.dfn.feature_exists("circ_times_area")

    circ_per_area = ds["circ_per_area"]
    circ_times_area = ds["circ_times_area"]
    assert np.allclose(circ_per_area, ds["circ"] / ds["area_um"])
    assert np.allclose(circ_times_area, ds["circ"] * ds["area_um"])
def test_pf_input_no_feature_labels():
    """Check that feature labels are populated even if not given"""
    info = example_plugin_info_single_feature()
    info.pop("feature labels")
    feature_name = "circ_per_area"
    pf = PlugInFeature(feature_name, info)
    assert dclab.dfn.feature_exists(feature_name)
    # A default label "Plugin feature <name>" is generated and registered.
    label = dclab.dfn.get_feature_label(feature_name)
    assert label == "Plugin feature {}".format(feature_name)
    assert label == pf.plugin_feature_info["feature label"]
def test_pf_input_no_scalar_feature():
    """Check that scalar feature bools are populated even if not given"""
    info = example_plugin_info_single_feature()
    info.pop("scalar feature")
    pf = PlugInFeature("circ_per_area", info)
    # Features default to scalar when not specified.
    assert pf.plugin_feature_info["scalar feature"]
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_load_plugin():
    """Basic check for loading a plugin feature via a script"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    assert "circ_per_area" not in ds.features_innate
    assert "circ_times_area" not in ds.features_innate
    # loading the script registers the features globally
    plugin_list = dclab.load_plugin_feature(
        data_dir / "feat_anc_plugin_creative.py")
    assert isinstance(plugin_list[0], PlugInFeature)
    assert isinstance(plugin_list[1], PlugInFeature)
    for feat in ("circ_per_area", "circ_times_area"):
        assert feat in ds
    assert np.allclose(ds["circ_per_area"], ds["circ"] / ds["area_um"])
    assert np.allclose(ds["circ_times_area"], ds["circ"] * ds["area_um"])
def test_pf_minimum_info_input():
    """Only method and feature names are required to create PlugInFeature"""
    info = {"method": compute_single_plugin_feature,
            "feature names": ["circ_per_area"]}
    pf = PlugInFeature("circ_per_area", info)
    assert callable(pf.plugin_feature_info["method"])
    # every other key must be populated with a default value
    expected_keys = ["method", "description", "long description",
                     "feature name", "feature label", "features required",
                     "config required", "method check required",
                     "scalar feature", "version", "plugin path"]
    for key in expected_keys:
        assert key in pf.plugin_feature_info, key
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_remove_all_plugin_features():
    """Remove all plugin features at once"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    feats = ("circ_per_area", "circ_times_area")
    for feat in feats:
        assert feat not in ds.features_innate
    dclab.load_plugin_feature(data_dir / "feat_anc_plugin_creative.py")
    for feat in feats:
        assert feat in ds
        assert dclab.dfn.feature_exists(feat)
    # a single call unregisters every loaded plugin feature
    remove_all_plugin_features()
    for feat in feats:
        assert feat not in ds
        assert not dclab.dfn.feature_exists(feat)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_remove_plugin_feature():
    """Remove individual plugin features"""
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    feats = ("circ_per_area", "circ_times_area")
    for feat in feats:
        assert feat not in ds
    plugin_list = dclab.load_plugin_feature(
        data_dir / "feat_anc_plugin_creative.py")
    assert len(plugin_list) == 2
    for feat in feats:
        assert feat in ds
        assert feat not in ds.features_innate
        assert dclab.dfn.feature_exists(feat)
    # features can be unregistered one at a time
    for plugin in plugin_list:
        remove_plugin_feature(plugin)
    for feat in feats:
        assert feat not in ds
        assert not dclab.dfn.feature_exists(feat)
    # anything that is not a PlugInFeature instance must be rejected
    with pytest.raises(TypeError,
                       match="hould be an instance of PlugInFeature"):
        remove_plugin_feature([4, 6, 5])
def test_pf_try_existing_feature_fails():
    """An existing feature name is not allowed"""
    feature_name = "deform"  # an innate dclab feature
    info = example_plugin_info_single_feature()
    info["feature names"] = [feature_name]
    with pytest.raises(ValueError, match="Feature 'deform' already exists"):
        PlugInFeature(feature_name, info)
def test_pf_with_empty_feature_label_string():
    """An empty string is replaced with a real feature label

    Show that an empty `feature_label` will still give a descriptive
    feature label. See `dclab.dfn._add_feature_to_definitions` for details.
    """
    feature_name = "circ_per_area"
    info = example_plugin_info_single_feature()
    info["feature labels"] = [""]
    PlugInFeature(feature_name, info)
    assert dclab.dfn.feature_exists(feature_name)
    label = dclab.dfn.get_feature_label(feature_name)
    assert label != ""
    assert label == "Plugin feature {}".format(feature_name)
def test_pf_with_feature_label():
    """Check that a plugin feature label is added to definitions"""
    feature_name = "circ_per_area"
    expected_label = "Circ / Area [1/µm²]"
    info = example_plugin_info_single_feature()
    info["feature labels"] = [expected_label]
    PlugInFeature(feature_name, info)
    assert dclab.dfn.feature_exists(feature_name)
    assert dclab.dfn.get_feature_label(feature_name) == expected_label
def test_pf_with_no_feature_label():
    """A feature label of None is replaced with a real feature label

    Show that `feature_label=None` will still give a descriptive
    feature label. See `dclab.dfn._add_feature_to_definitions` for details.
    """
    feature_name = "circ_per_area"
    info = example_plugin_info_single_feature()
    info["feature labels"] = [None]
    PlugInFeature(feature_name, info)
    assert dclab.dfn.feature_exists(feature_name)
    label = dclab.dfn.get_feature_label(feature_name)
    assert label is not None
    assert label == "Plugin feature {}".format(feature_name)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_with_user_config_section():
    """Use a plugin feature with the user defined config section"""
    info = {"method": compute_with_user_section,
            "feature names": ["area_of_region"],
            "config required": [["user", ["n_constrictions"]]]}
    PlugInFeature("area_of_region", info)
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    assert "area_of_region" not in ds, "not available b/c missing metadata"
    # supplying the required key in [user] makes the feature available
    metadata = {"channel": True,
                "n_constrictions": 3}
    ds.config["user"].update(metadata)
    assert ds.config["user"] == metadata
    assert "area_of_region" in ds, "available b/c metadata is set"
    expected = ds["area_um"] * ds.config["user"]["n_constrictions"]
    assert np.allclose(ds["area_of_region"], expected)
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_with_user_config_section_fails():
    """Use a plugin feature with the user defined config section"""
    info = {"method": compute_with_user_section,
            "feature names": ["area_of_region"],
            "config required": [["user", ["n_constrictions"]]]}
    PlugInFeature("area_of_region", info)
    ds = dclab.new_dataset(retrieve_data("fmt-hdf5_fl_2018.zip"))
    # without the required user metadata the feature must not exist
    ds.config["user"].clear()
    with pytest.raises(KeyError,
                       match=r"Feature \'area_of_region\' does not exist"):
        ds["area_of_region"]
    # the plugin method itself rejects a non-integer value
    ds.config["user"]["n_constrictions"] = 4.99
    with pytest.raises(AssertionError, match="should be an integer value"):
        ds["area_of_region"]
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_wrong_data_shape_1():
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        info = example_plugin_info_single_feature()
        # lie about the shape: the method actually returns a scalar feature
        info["scalar feature"] = [False]
        plugin_feat = PlugInFeature("circ_per_area", info)
        with pytest.raises(ValueError, match="is not a scalar feature"):
            ds[plugin_feat.feature_name]
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_wrong_data_shape_2():
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        info = example_plugin_info_single_feature()
        # lie about the shape: the method actually returns a 2D array
        info["scalar feature"] = [True]
        info["method"] = lambda x: np.arange(len(ds) * 2).reshape(-1, 2)
        plugin_feat = PlugInFeature("circ_per_area", info)
        with pytest.raises(ValueError, match="is a scalar feature"):
            ds[plugin_feat.feature_name]
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_wrong_length_1():
    """plugin feature should have same length"""
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        info = example_plugin_info_single_feature()
        # return only half as many values as there are events
        info["method"] = lambda x: np.arange(len(ds) // 2)
        plugin_feat = PlugInFeature("circ_per_area", info)
        with pytest.warns(BadFeatureSizeWarning,
                          match="to match event number"):
            ds[plugin_feat.feature_name]
@pytest.mark.filterwarnings(
    "ignore::dclab.rtdc_dataset.config.WrongConfigurationTypeWarning")
def test_pf_wrong_length_2():
    """plugin feature should have same length"""
    h5path = retrieve_data("fmt-hdf5_fl_2018.zip")
    with dclab.new_dataset(h5path) as ds:
        info = example_plugin_info_single_feature()
        # return twice as many values as there are events
        info["method"] = lambda x: np.arange(len(ds) * 2)
        plugin_feat = PlugInFeature("circ_per_area", info)
        with pytest.warns(BadFeatureSizeWarning,
                          match="to match event number"):
            ds[plugin_feat.feature_name]
if __name__ == "__main__":
    # Run all tests, unregistering plugin features after each test so the
    # global registry does not leak state between tests.
    loc = locals()
    for key in list(loc.keys()):
        # idiom fix: use callable() instead of hasattr(x, "__call__")
        if key.startswith("test_") and callable(loc[key]):
            loc[key]()
        remove_all_plugin_features()
| ZellMechanik-Dresden/dclab | tests/test_rtdc_feat_anc_plugin.py | Python | gpl-2.0 | 29,760 | [
"Gaussian"
] | a573fde4c0963a69bc28d4e9add61f7e13b615a500046643eaaefc7644990877 |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
from setuptools import setup
def get_static_files(path):
    """Return glob patterns for all static asset types below *path*.

    For every directory found under *path*, one pattern per asset
    extension is emitted; the ``luigi/`` prefix is stripped so the
    patterns are relative to the package directory (``package_data``).
    """
    asset_patterns = ["*.html", "*.js", "*.css", "*.png",
                      "*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]
    collected = []
    for dirpath, _dirnames, _filenames in os.walk(path):
        relative_dir = dirpath.replace("luigi/", "")
        for pattern in asset_patterns:
            collected.append(os.path.join(relative_dir, pattern))
    return collected
# Glob patterns for all bundled static assets (web UI + templates),
# flattened into a single list for package_data below.
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
# Prepended to the README so PyPI readers find the canonical repository.
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
    long_description = readme_note + fobj.read()
install_requires = [
    'tornado>=4.0,<5',
    'python-daemon<3.0',
]
# On Read the Docs, swap runtime-only deps for doc-build deps.
if os.environ.get('READTHEDOCS', None) == 'True':
    # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
    install_requires.append('sqlalchemy')
    # readthedocs don't like python-daemon, see #1342
    # NOTE: the string must match the list entry above exactly.
    install_requires.remove('python-daemon<3.0')
    install_requires.append('sphinx>=1.4.4')  # Value mirrored in doc/conf.py
# enum backport for Python < 3.4 (classifiers still list 2.7/3.3)
if sys.version_info < (3, 4):
    install_requires.append('enum34>1.1.0')
# Package metadata and entry points for this luigi fork.
setup(
    name='luigi',
    # NOTE(review): '+fs9' appears to be a local fork build tag — confirm.
    version='2.7.8+fs9',
    # NOTE(review): "mgmgt" typo in the upstream description string is a
    # runtime value and is intentionally left unchanged here.
    description='Workflow mgmgt + task scheduling + dependency resolution',
    long_description=long_description,
    author='The Luigi Authors',
    url='https://github.com/spotify/luigi',
    license='Apache License 2.0',
    packages=[
        'luigi',
        'luigi.configuration',
        'luigi.contrib',
        'luigi.contrib.hdfs',
        'luigi.tools'
    ],
    # static web assets collected by get_static_files() above
    package_data={
        'luigi': luigi_package_data
    },
    # console scripts exposed on the user's PATH
    entry_points={
        'console_scripts': [
            'luigi = luigi.cmdline:luigi_run',
            'luigid = luigi.cmdline:luigid',
            'luigi-grep = luigi.tools.luigi_grep:main',
            'luigi-deps = luigi.tools.deps:main',
            'luigi-deps-tree = luigi.tools.deps_tree:main'
        ]
    },
    install_requires=install_requires,
    extras_require={
        'toml': ['toml<2.0.0'],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: System :: Monitoring',
    ],
)
| foursquare/luigi | setup.py | Python | apache-2.0 | 3,217 | [
"VisIt"
] | c42a976ecf349f70a4cdcc9e5fef88ee24e034b66eefa1bbb92894a4ef6c3805 |
"""
Automatically find examples of a Brian object or function.
"""
import os, re
from collections import defaultdict
from .generate_examples import GlobDirectoryWalker
from brian2.utils.stringtools import get_identifiers
__all__ = ['auto_find_examples']
# Lazily populated caches mapping identifier name -> list of
# (short file name, example/tutorial name) tuples; filled by get_map().
the_examples_map = defaultdict(list)
the_tutorials_map = defaultdict(list)
def get_map(environ_var, relrootdir, pattern, the_map, path_exclusions=()):
    """Populate and return a mapping identifier -> [(shortfname, exname)].

    Scans all files matching ``*pattern`` below the root directory (taken
    from environment variable ``environ_var`` if set, otherwise
    ``relrootdir`` relative to this module) and records, for each Python
    identifier occurring in a file, that file's short name and example
    name.  If ``the_map`` is already non-empty it is returned unchanged
    (acts as a cache).

    Parameters
    ----------
    environ_var : str
        Environment variable that may override the root directory.
    relrootdir : str
        Fallback root directory, relative to this module's location.
    pattern : str
        File suffix to match, e.g. ``'.py'`` or ``'.ipynb'``.
    the_map : defaultdict(list)
        Module-level cache to fill and return.
    path_exclusions : sequence of str, optional
        Path fragments; files whose path contains any of them are skipped.
        Fixed: default is now an immutable tuple instead of a mutable list.
    """
    if the_map:
        return the_map
    if environ_var in os.environ:
        rootdir = os.environ[environ_var]
    else:
        rootdir, _ = os.path.split(__file__)
        rootdir = os.path.normpath(os.path.join(rootdir, relrootdir))
    fnames = [fname for fname in GlobDirectoryWalker(rootdir, f"*{pattern}")]
    for exclude in path_exclusions:
        fnames = [fname for fname in fnames if exclude not in fname]
    shortfnames = [os.path.relpath(fname, rootdir) for fname in fnames]
    exnames = [
        fname.replace('/', '.').replace('\\', '.').replace(pattern, '')
        for fname in shortfnames
    ]
    for fname, shortfname, exname in zip(fnames, shortfnames, exnames):
        with open(fname, 'r') as f:
            ex = f.read()
        # fixed: do not shadow the builtin ``id``
        for ident in get_identifiers(ex):
            the_map[ident].append((shortfname.replace('\\', '/'), exname))
    return the_map
def get_examples_map():
    """Fill (if needed) and return the identifier -> examples mapping."""
    filled = get_map('BRIAN2_DOCS_EXAMPLE_DIR', '../../examples', '.py',
                     the_examples_map)
    return filled
def get_tutorials_map():
    """Fill (if needed) and return the identifier -> tutorials mapping."""
    filled = get_map('BRIAN2_DOCS_TUTORIALS_DIR', '../../tutorials', '.ipynb',
                     the_tutorials_map,
                     path_exclusions=['.ipynb_checkpoints'])
    return filled
def auto_find_examples(obj, headersymbol='='):
    """
    Returns a restructured text section listing all the examples and
    tutorials making use of the specified object (as determined by
    the name being in the list of identifiers, which may occasionally
    make mistakes but is usually going to be correct).

    Parameters
    ----------
    obj : object
        Any object with a ``__name__`` (class, function, ...).
    headersymbol : str, optional
        ReST underline character for the generated section header.

    Returns
    -------
    str
        A ReST snippet, or the empty string when nothing references obj.
    """
    name = obj.__name__
    # Fixed: use the maps returned by the getters instead of assigning
    # them to unused locals and then reading the module globals directly.
    examples = sorted(get_examples_map()[name])
    tutorials = sorted(get_tutorials_map()[name])
    if not (examples or tutorials):
        return ''
    txt = 'Tutorials and examples using this'
    txt = f"{txt}\n{headersymbol * len(txt)}\n\n"
    for tutname, tutloc in tutorials:
        tutname = tutname.replace('.ipynb', '')
        txt += f'* Tutorial :doc:`{tutname} </resources/tutorials/{tutloc}>`\n'
    for exname, exloc in examples:
        exname = exname.replace('.py', '')
        txt += f'* Example :doc:`{exname} </examples/{exloc}>`\n'
    return f"{txt}\n"
if __name__ == '__main__':
    # Manual smoke test: print the generated section for NeuronGroup.
    # NOTE(review): SpatialNeuron is imported but unused here.
    from brian2 import NeuronGroup, SpatialNeuron
    print(auto_find_examples(NeuronGroup))
| brian-team/brian2cuda | brian2cuda/sphinxext/examplefinder.py | Python | gpl-2.0 | 2,770 | [
"Brian"
] | bd0e0592cadda3f05d4d8c41bd438b0c35b3b2450baf3accef652649f9af5cd0 |
"""
Test functions for stats module
"""
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import IntegrationWarning, quad, trapezoid
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy, polygamma, entr
from scipy.stats._distr_params import distcont, invdistcont
from .test_discrete_basic import distdiscrete, invdistdiscrete
from scipy.stats._continuous_distns import FitDataError
from scipy.optimize import root, fmin
# python -OO strips docstrings
# python -OO strips docstrings
# True when running under -OO; used to skip docstring-dependent tests.
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# distributions to skip while testing the fix for the support method
# introduced in gh-13294. These distributions are skipped as they
# always return a non-nan support for every parametrization.
skip_test_support_gh13294_regression = ['tukeylambda', 'pearson3']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # the distribution generator class ``f_gen`` must remain importable
    # https://github.com/scipy/scipy/issues/3802
    _assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, L, s, x):
    # the pdf must be invariant under reduction of x modulo the
    # period 2*pi*scale
    dist = stats.vonmises(k, loc=L, scale=s)
    reduced = x % (2 * numpy.pi * s)
    assert_almost_equal(dist.pdf(x), dist.pdf(reduced))
def check_vonmises_cdf_periodic(k, L, s, x):
    # the cdf grows by an integer across periods, so compare modulo 1
    dist = stats.vonmises(k, loc=L, scale=s)
    reduced = x % (2 * numpy.pi * s)
    assert_almost_equal(dist.cdf(x) % 1, dist.cdf(reduced) % 1)
def test_distributions_submodule():
    actual = set(scipy.stats.distributions.__all__)
    continuous = [dist[0] for dist in distcont]  # continuous dist names
    discrete = [dist[0] for dist in distdiscrete]  # discrete dist names
    other = ['rv_discrete', 'rv_continuous', 'rv_histogram',
             'entropy', 'trapz']
    # drop accidental generator reprs such as
    # <scipy.stats._continuous_distns.trapezoid_gen at 0x1df83bbc688>
    expected = {name for name in continuous + discrete + other
                if not str(name).startswith('<')}
    assert actual == expected
def test_vonmises_pdf_periodic():
    # exercise several concentrations, evaluation points, and
    # (loc, scale) combinations for both pdf and cdf periodicity
    loc_scale_pairs = ((0, 1), (1, 1), (0, 10))
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            for L, s in loc_scale_pairs:
                check_vonmises_pdf_periodic(k, L, s, x)
            for L, s in loc_scale_pairs:
                check_vonmises_cdf_periodic(k, L, s, x)
def test_vonmises_line_support():
    # support of vonmises_line is exactly the interval [-pi, pi]
    assert_equal((stats.vonmises_line.a, stats.vonmises_line.b),
                 (-np.pi, np.pi))
def test_vonmises_numerical():
    # even for a very large concentration the cdf at the center is 1/2
    frozen = stats.vonmises(800)
    assert_almost_equal(frozen.cdf(0), 0.5)
# Expected values of the vonmises PDF were computed using
# mpmath with 50 digits of precision:
#
# def vmpdf_mp(x, kappa):
# x = mpmath.mpf(x)
# kappa = mpmath.mpf(kappa)
# num = mpmath.exp(kappa*mpmath.cos(x))
# den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
# return num/den
#
@pytest.mark.parametrize('x, kappa, expected_pdf',
                         [(0.1, 0.01, 0.16074242744907072),
                          (0.1, 25.0, 1.7515464099118245),
                          (0.1, 800, 0.2073272544458798),
                          (2.0, 0.01, 0.15849003875385817),
                          (2.0, 25.0, 8.356882934278192e-16),
                          (2.0, 800, 0.0)])
def test_vonmises_pdf(x, kappa, expected_pdf):
    # compare against the mpmath reference values listed above
    computed = stats.vonmises.pdf(x, kappa)
    assert_allclose(computed, expected_pdf, rtol=1e-15)
def _assert_less_or_close_loglike(dist, data, func, **kwds):
"""
This utility function checks that the log-likelihood (computed by
func) of the result computed using dist.fit() is less than or equal
to the result computed using the generic fit method. Because of
normal numerical imprecision, the "equality" check is made using
`np.allclose` with a relative tolerance of 1e-15.
"""
mle_analytical = dist.fit(data, **kwds)
numerical_opt = super(type(dist), dist).fit(data, **kwds)
ll_mle_analytical = func(mle_analytical, data)
ll_numerical_opt = func(numerical_opt, data)
assert (ll_mle_analytical <= ll_numerical_opt or
np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
def assert_fit_warnings(dist):
    """Verify the standard error conditions raised by ``dist.fit``."""
    fixed_names = ['floc', 'fscale']
    if dist.shapes:
        n_shapes = len(dist.shapes.split(","))
        fixed_names += ['f0', 'f1', 'f2'][:n_shapes]
    all_fixed = dict(zip(fixed_names, np.arange(len(fixed_names))))
    data = [1, 2, 3]
    # fixing every parameter leaves nothing to optimize
    with pytest.raises(RuntimeError,
                       match="All parameters fixed. There is nothing "
                             "to optimize."):
        dist.fit(data, **all_fixed)
    # nan and inf in the data are rejected alike
    for bad_data in ([np.nan], [np.inf]):
        with pytest.raises(RuntimeError,
                           match="The data contains non-finite values"):
            dist.fit(bad_data)
    with pytest.raises(TypeError, match="Unknown keyword arguments:"):
        dist.fit(data, extra_keyword=2)
    with pytest.raises(TypeError, match="Too many positional arguments."):
        dist.fit(data, *[1]*(len(fixed_names) - 1))
@pytest.mark.parametrize('dist',
                         ['alpha', 'betaprime',
                          'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
                          'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
                          'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
    """gh-6235"""
    args = dict(distcont)[dist]
    dist = getattr(stats, dist)
    # the density must vanish at both endpoints of the support
    for endpoint in (dist.a, dist.b):
        assert_almost_equal(dist.pdf(endpoint, *args), 0)
        assert_equal(dist.logpdf(endpoint, *args), -np.inf)
class TestRandInt:
    """Sampling, pmf and cdf checks for the discrete uniform distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(samples < 30) & numpy.all(samples >= 5))
        assert_(len(samples) == 100)
        samples = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        # a scalar draw stays inside the half-open interval [15, 46)
        single = stats.randint.rvs(15, 46)
        assert_((single >= 15) & (single < 46))
        assert_(isinstance(single, numpy.ScalarType), msg=repr(type(single)))
        frozen_draws = stats.randint(15, 46).rvs(3)
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_pdf(self):
        support = numpy.r_[0:36]
        # uniform mass 1/25 on [5, 30), zero elsewhere
        expected = numpy.where((support >= 5) & (support < 30),
                               1.0 / (30 - 5), 0)
        assert_array_almost_equal(stats.randint.pmf(support, 5, 30), expected)

    def test_cdf(self):
        x = np.linspace(0, 36, 100)
        k = numpy.floor(x)
        expected = numpy.select([k >= 30, k >= 5],
                                [1.0, (k - 5.0 + 1) / (30 - 5.0)], 0)
        assert_array_almost_equal(stats.randint.cdf(x, 5, 30), expected,
                                  decimal=12)
class TestBinom:
    """Sampling, pmf, entropy and warning checks for stats.binom."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        draws = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(draws >= 0) & numpy.all(draws <= 10))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        single = stats.binom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_draws = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # regression test for Ticket #1842: degenerate p=1 and p=0
        assert_allclose(stats.binom.pmf(100, 100, 1), 1.0,
                        rtol=1e-15, atol=0)
        assert_allclose(stats.binom.pmf(0, 100, 0), 1.0,
                        rtol=1e-15, atol=0)

    def test_entropy(self):
        # Basic entropy tests.
        probs = np.array([0.25, 0.5, 0.25])
        expected = -sum(xlogy(probs, probs))
        assert_allclose(stats.binom(2, 0.5).entropy(), expected)
        # degenerate distributions carry no entropy
        assert_equal(stats.binom(2, 0.0).entropy(), 0.0)
        assert_equal(stats.binom(2, 1.0).entropy(), 0.0)

    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestArcsine:
    def test_endpoints(self):
        # Regression test for gh-13697: evaluating the pdf at the support
        # endpoints must yield inf without emitting a warning.
        endpoint_pdf = stats.arcsine.pdf([0, 1])
        assert_equal(endpoint_pdf, [np.inf, np.inf])
class TestBernoulli:
    """Sampling and entropy checks for stats.bernoulli."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        draws = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(draws >= 0) & numpy.all(draws <= 1))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        single = stats.bernoulli.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_draws = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_entropy(self):
        # Simple tests of entropy.
        p = 0.25
        expected = -p * np.log(p) - (1 - p) * np.log(1 - p)
        assert_allclose(stats.bernoulli(p).entropy(), expected)
        # degenerate cases have zero entropy
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestBradford:
    # gh-6216: ppf must accurately invert cdf for very small quantiles
    def test_cdf_ppf(self):
        c = 0.1
        x = np.logspace(-20, -4)
        roundtrip = stats.bradford.ppf(stats.bradford.cdf(x, c), c)
        assert_allclose(x, roundtrip)
class TestChi:
    # "Exact" value of chi.sf(10, 4), as computed by Wolfram Alpha with
    # 1 - CDF[ChiDistribution[4], 10]
    CHI_SF_10_4 = 9.83662422461598e-21

    def test_sf(self):
        assert_allclose(stats.chi.sf(10, 4), self.CHI_SF_10_4, rtol=1e-15)

    def test_isf(self):
        # isf must invert sf at this extreme tail value
        assert_allclose(stats.chi.isf(self.CHI_SF_10_4, 4), 10, rtol=1e-15)
class TestNBinom:
    """Sampling and pmf/logpmf consistency checks for stats.nbinom."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        draws = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(draws >= 0))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        single = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_draws = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_draws, numpy.ndarray))
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # regression test for ticket 1779: exp(logpmf) must match pmf
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        assert_equal(stats.nbinom.logpmf(0, 1, 1), 0)
class TestGenInvGauss:
    """Tests for the generalized inverse Gaussian distribution.

    Covers the different sampling branches (ratio-of-uniforms with and
    without mode shift, the Hoermann/Leydold algorithm, boundary p=0,
    negative p), the invgauss special case, and pdf reference values.
    """
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.slow
    def test_rvs_with_mode_shift(self):
        # ratio_unif w/ mode shift
        gig = stats.geninvgauss(2.3, 1.5)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_without_mode_shift(self):
        # ratio_unif w/o mode shift
        gig = stats.geninvgauss(0.9, 0.75)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_new_method(self):
        # new algorithm of Hoermann / Leydold
        gig = stats.geninvgauss(0.1, 0.2)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_p_zero(self):
        # helper: KS test of samples vs cdf for given (p, b)
        def my_ks_check(p, b):
            gig = stats.geninvgauss(p, b)
            rvs = gig.rvs(size=1500, random_state=1234)
            return stats.kstest(rvs, gig.cdf)[1] > 0.05
        # boundary cases when p = 0
        assert_equal(my_ks_check(0, 0.2), True)  # new algo
        assert_equal(my_ks_check(0, 0.9), True)  # ratio_unif w/o shift
        assert_equal(my_ks_check(0, 1.5), True)  # ratio_unif with shift
    def test_rvs_negative_p(self):
        # if p negative, return inverse
        assert_equal(
                stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
                1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
    def test_invgauss(self):
        # test that invgauss is special case (p = -1/2)
        ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
        assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
        # test pdf and cdf
        mu, x = 100, np.linspace(0.01, 1, 10)
        pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
        cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
    def test_pdf_R(self):
        # test against R package GIGrvg
        # x <- seq(0.01, 5, length.out = 10)
        # GIGrvg::dgig(x, 0.5, 1, 1)
        vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
                           2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
                           9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
                           3.602084467e-02])
        x = np.linspace(0.01, 5, 10)
        assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
    def test_pdf_zero(self):
        # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
        assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
        # if x is large and p is moderate, make sure that pdf does not
        # overflow because of x**(p-1); exp(-b*x) forces pdf to zero
        assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestGenHyperbolic:
    """Tests for the generalized hyperbolic distribution.

    Reference values come from the R packages GeneralizedHyperbolic and
    from known special/limiting cases (Student t, Cauchy, Laplace, NIG).
    Note the scipy parametrization: shape args are (lmbda, alpha*delta,
    beta*delta) with loc=mu and scale=delta.
    """
    def setup_method(self):
        np.random.seed(1234)
    def test_pdf_r(self):
        # test against R package GeneralizedHyperbolic
        # x <- seq(-10, 10, length.out = 10)
        # GeneralizedHyperbolic::dghyp(
        #    x = x, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
        # )
        vals_R = np.array([
            2.94895678275316e-13, 1.75746848647696e-10, 9.48149804073045e-08,
            4.17862521692026e-05, 0.0103947630463822, 0.240864958986839,
            0.162833527161649, 0.0374609592899472, 0.00634894847327781,
            0.000941920705790324
            ])
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(-10, 10, 10)
        assert_allclose(gh.pdf(x), vals_R, atol=0, rtol=1e-13)
    def test_cdf_r(self):
        # test against R package GeneralizedHyperbolic
        # q <- seq(-10, 10, length.out = 10)
        # GeneralizedHyperbolic::pghyp(
        #   q = q, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
        # )
        vals_R = np.array([
            1.01881590921421e-13, 6.13697274983578e-11, 3.37504977637992e-08,
            1.55258698166181e-05, 0.00447005453832497, 0.228935323956347,
            0.755759458895243, 0.953061062884484, 0.992598013917513,
            0.998942646586662
            ])
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(-10, 10, 10)
        # cdf is computed by numerical integration, hence looser rtol
        assert_allclose(gh.cdf(x), vals_R, atol=0, rtol=1e-6)
    def test_moments_r(self):
        # test against R package GeneralizedHyperbolic
        # sapply(1:4,
        #    function(x) GeneralizedHyperbolic::ghypMom(
        #        order = x, lambda = 2, alpha = 2,
        #        beta = 1, delta = 1.5,  mu = 0.5,
        #        momType = 'raw')
        #    )
        vals_R = [2.36848366948115, 8.4739346779246,
                  37.8870502710066, 205.76608511485]
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        # raw moments of order 1..4
        vals_us = [
            stats.genhyperbolic(*args, loc=mu, scale=delta).moment(i)
            for i in range(1, 5)
            ]
        assert_allclose(vals_us, vals_R, atol=0, rtol=1e-13)
    def test_rvs(self):
        # Kolmogorov-Smirnov test to ensure alignment
        # of analytical and empirical cdfs
        lmbda, alpha, beta = 2, 2, 1
        mu, delta = 0.5, 1.5
        args = (lmbda, alpha*delta, beta*delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        _, p = stats.kstest(gh.rvs(size=1500, random_state=1234), gh.cdf)
        assert_equal(p > 0.05, True)
    def test_pdf_t(self):
        # Test Against T-Student with 1 - 30 df
        df = np.linspace(1, 30, 10)
        # in principle alpha should be zero in practice for big lmbdas
        # alpha cannot be too small else pdf does not integrate
        alpha, beta = np.float_power(df, 2)*np.finfo(np.float32).eps, 0
        mu, delta = 0, np.sqrt(df)
        args = (-df/2, alpha, beta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.t.pdf(x, df),
            atol=0, rtol=1e-6
            )
    def test_pdf_cauchy(self):
        # Test Against Cauchy distribution (lmbda = -1/2, beta = 0)
        # in principle alpha should be zero in practice for big lmbdas
        # alpha cannot be too small else pdf does not integrate
        lmbda, alpha, beta = -0.5, np.finfo(np.float32).eps, 0
        mu, delta = 0, 1
        args = (lmbda, alpha, beta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.cauchy.pdf(x),
            atol=0, rtol=1e-6
            )
    def test_pdf_laplace(self):
        # Test Against Laplace with location param [-10, 10]
        loc = np.linspace(-10, 10, 10)
        # in principle delta should be zero in practice for big loc delta
        # cannot be too small else pdf does not integrate
        delta = np.finfo(np.float32).eps
        lmbda, alpha, beta = 1, 1, 0
        args = (lmbda, alpha*delta, beta*delta)
        # ppf does not integrate for scale < 5e-4
        # therefore using simple linspace to define the support
        gh = stats.genhyperbolic(*args, loc=loc, scale=delta)
        x = np.linspace(-20, 20, 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.laplace.pdf(x, loc=loc, scale=1),
            atol=0, rtol=1e-11
            )
    def test_pdf_norminvgauss(self):
        # Test Against NIG with varying alpha/beta/delta/mu
        alpha, beta, delta, mu = (
                np.linspace(1, 20, 10),
                np.linspace(0, 19, 10)*np.float_power(-1, range(10)),
                np.linspace(1, 1, 10),
                np.linspace(-100, 100, 10)
                )
        # NIG is the lmbda = -1/2 special case
        lmbda = - 0.5
        args = (lmbda, alpha * delta, beta * delta)
        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
        assert_allclose(
            gh.pdf(x), stats.norminvgauss.pdf(
                x, a=alpha, b=beta, loc=mu, scale=delta),
            atol=0, rtol=1e-13
            )
class TestNormInvGauss:
    """Tests for the normal inverse Gaussian distribution.

    Reference values for pdf/cdf come from the R package
    GeneralizedHyperbolic; moments are checked against closed forms.
    """
    def setup_method(self):
        np.random.seed(1234)
    def test_cdf_R(self):
        # test pdf and cdf vals against R
        # require("GeneralizedHyperbolic")
        # x_test <- c(-7, -5, 0, 8, 15)
        # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
        # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
        r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
                          9.988650664e-01, 9.999848769e-01])
        x_test = np.array([-7, -5, 0, 8, 15])
        vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
        assert_allclose(vals_cdf, r_cdf, atol=1e-9)
    def test_pdf_R(self):
        # values from R as defined in test_cdf_R
        r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
                          7.450485342e-04, 8.917889931e-06])
        x_test = np.array([-7, -5, 0, 8, 15])
        vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
        assert_allclose(vals_pdf, r_pdf, atol=1e-9)
    def test_stats(self):
        # closed-form mean/var/skew/kurtosis with gamma = sqrt(a^2 - b^2)
        a, b = 1, 0.5
        gamma = np.sqrt(a**2 - b**2)
        v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
                   3.0 * (1 + 4 * b**2 / a**2) / gamma)
        assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
    def test_ppf(self):
        # ppf must invert cdf at interior quantiles
        a, b = 1, 0.5
        x_test = np.array([0.001, 0.5, 0.999])
        vals = stats.norminvgauss.ppf(x_test, a, b)
        assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom:
    """Tests for stats.geom, the geometric distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        # Samples are non-negative integers with the requested shape.
        sample = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.geom.rvs(0.75)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.geom(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # P(X=k) = p * (1-p)**(k-1) with p = 0.5.
        assert_array_almost_equal(stats.geom.pmf([1, 2, 3], 0.5),
                                  [0.5, 0.25, 0.125])

    def test_logpmf(self):
        # regression test for ticket 1793
        direct = np.log(stats.geom.pmf([1, 2, 3], 0.5))
        via_logpmf = stats.geom.logpmf([1, 2, 3], 0.5)
        assert_allclose(direct, via_logpmf, rtol=1e-15, atol=0)
        # regression test for gh-4028: p == 1 must give logpmf(1) == 0
        assert_equal(stats.geom.logpmf(1, 1), 0.0)

    def test_cdf_sf(self):
        # sf must be the exact complement of cdf.
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.cdf([1, 2, 3], 0.5), expected)
        assert_array_almost_equal(stats.geom.sf([1, 2, 3], 0.5),
                                  1 - expected)

    def test_logcdf_logsf(self):
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.logcdf([1, 2, 3], 0.5),
                                  np.log(expected))
        assert_array_almost_equal(stats.geom.logsf([1, 2, 3], 0.5),
                                  np.log1p(-expected))

    def test_ppf(self):
        assert_array_almost_equal(stats.geom.ppf([0.5, 0.75, 0.875], 0.5),
                                  array([1.0, 2.0, 3.0]))

    def test_ppf_underflow(self):
        # this should not underflow
        assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck:
    """Tests for stats.planck, the discrete exponential distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_sf(self):
        # Spot-check the survival function at lambda = 5.
        computed = stats.planck.sf([1, 2, 3], 5.)
        expected = array([4.5399929762484854e-05,
                          3.0590232050182579e-07,
                          2.0611536224385579e-09])
        assert_array_almost_equal(computed, expected)

    def test_logsf(self):
        # Huge arguments must not overflow: logsf is linear in k here.
        computed = stats.planck.logsf([1000., 2000., 3000.], 1000.)
        expected = array([-1001000., -2001000., -3001000.])
        assert_array_almost_equal(computed, expected)
class TestGennorm:
    """Special-case reductions of the generalized normal distribution."""

    def test_laplace(self):
        # beta = 1 is exactly the Laplace distribution.
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 1),
                            stats.laplace.pdf(pts))

    def test_norm(self):
        # beta = 2 is a normal with scale 1/sqrt(2).
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 2),
                            stats.norm.pdf(pts, scale=2**-.5))
class TestHalfgennorm:
    """Special-case reductions of the half generalized normal."""

    def test_expon(self):
        # beta = 1 is exactly the exponential distribution.
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 1),
                            stats.expon.pdf(pts))

    def test_halfnorm(self):
        # beta = 2 is a half-normal with scale 1/sqrt(2).
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 2),
                            stats.halfnorm.pdf(pts, scale=2**-.5))

    def test_gennorm(self):
        # halfgennorm is twice the symmetric gennorm pdf on x >= 0.
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, .497324),
                            2 * stats.gennorm.pdf(pts, .497324))
class TestLaplaceasymmetric:
    """Tests for stats.laplace_asymmetric."""

    def test_laplace(self):
        # kappa = 1 reduces to the symmetric Laplace distribution.
        pts = np.array([1, 2, 3])
        assert_allclose(stats.laplace_asymmetric.pdf(pts, 1),
                        stats.laplace.pdf(pts))

    def test_asymmetric_laplace_pdf(self):
        # pdf obeys the kappa <-> 1/kappa reflection identity:
        # pdf(x; kappa) == pdf(x * kappa**2; 1/kappa).
        pts = np.array([1, 2, 3])
        kappa = 2
        assert_allclose(
            stats.laplace_asymmetric.pdf(pts, kappa),
            stats.laplace_asymmetric.pdf(pts * (kappa**2), 1 / kappa))

    def test_asymmetric_laplace_log_10_16(self):
        # Spot-check pdf/cdf/sf/ppf/isf against hand-computed values
        # at x = -log(16) and x = log(10) with kappa = 2.
        pts = np.array([-np.log(16), np.log(10)])
        kappa = 2
        pdf_expected = np.array([1/10, 1/250])
        cdf_expected = np.array([1/5, 1 - 1/500])
        sf_expected = np.array([4/5, 1/500])
        computed = np.concatenate((
            stats.laplace_asymmetric.pdf(pts, kappa),
            stats.laplace_asymmetric.cdf(pts, kappa),
            stats.laplace_asymmetric.sf(pts, kappa),
            stats.laplace_asymmetric.ppf(cdf_expected, kappa),
            stats.laplace_asymmetric.isf(sf_expected, kappa)))
        expected = np.concatenate((pdf_expected, cdf_expected,
                                   sf_expected, pts, pts))
        assert_allclose(computed, expected)
class TestTruncnorm:
    """Tests for stats.truncnorm, the truncated normal distribution.

    Covers ppf/isf edge probabilities, sampling in extreme tails,
    cdf/sf/pdf symmetry between mirrored intervals, and moments.
    """

    def setup_method(self):
        # Fixed seed so the rvs-based checks are reproducible.
        np.random.seed(1234)

    def test_ppf_ticket1131(self):
        # ppf must return nan for p outside [0, 1] and the interval
        # endpoints (after loc/scale transform) at p = 0 and p = 1.
        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
        assert_array_almost_equal(vals, expected)

    def test_isf_ticket1131(self):
        # isf mirrors ppf: same inputs, reversed quantiles.
        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
        assert_array_almost_equal(vals, expected)

    def test_gh_2477_small_values(self):
        # Check a case that worked in the original issue.
        low, high = -11, -10
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        # Check a case that failed in the original issue.
        low, high = 10, 11
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)

    def test_gh_2477_large_values(self):
        # Check a case that used to fail because of extreme tailness.
        low, high = 100, 101
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low <= x.min() <= x.max() <= high), str([low, high, x])
        # Check some additional extreme tails
        low, high = 1000, 1001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        low, high = 10000, 10001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        low, high = -10001, -10000
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)

    def test_gh_9403_nontail_values(self):
        # cdf/sf/pdf/ppf spot checks on moderate (non-tail) intervals;
        # the two intervals are mirror images, so the expected pdf
        # values swap when low < 0.
        for low, high in [[3, 4], [-4, -3]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
            if low < 0:
                expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            # log of the pdf ratio at the endpoints equals low + 0.5
            # (difference of the normal log-pdf at the two endpoints).
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
                                low + 0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
            assert_almost_equal(ppfs, expected_ppfs)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.8475544278436675)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.1524455721563326)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.8475544278436675)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.1524455721563326)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)

    def test_gh_9403_medium_tail_values(self):
        # Same structure as the non-tail test, but on intervals far in
        # the tails where naive cdf subtraction would lose precision.
        for low, high in [[39, 40], [-40, -39]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
            if low < 0:
                expected_pdfs = np.array([0, 2.73349092e-16,
                                          3.90256074e+01, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
                                low + 0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
            assert_almost_equal(ppfs, expected_ppfs)
            cdfs = stats.truncnorm.cdf(ppfs, low, high)
            assert_almost_equal(cdfs, pvals)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    0.9999999970389126)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    2.961048103554866e-09)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
                                    0.9999999970389126)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
                                    2.961048103554866e-09)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
            # cdf/sf/pdf of [low, high] must agree with the reflected
            # interval [-high, -low] evaluated at the reflected points.
            xvals = np.linspace(low, high, 11)
            xvals2 = -xvals[::-1]
            assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),
                                stats.truncnorm.sf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.sf(xvals, low, high),
                                stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),
                                stats.truncnorm.pdf(xvals2, -high, -low)[::-1])

    def _test_moments_one_range(self, a, b, expected, decimal_s=7):
        # Helper: compare mvsk moments on [a, b] against reference
        # values; skewness tolerance is configurable via decimal_s.
        m0, v0, s0, k0 = expected[:4]
        m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
        assert_almost_equal(m, m0)
        assert_almost_equal(v, v0)
        assert_almost_equal(s, s0, decimal=decimal_s)
        assert_almost_equal(k, k0)

    @pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
    def test_moments(self):
        # Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
        # using both the _norm_XXX() and _norm_logXXX() functions, and by
        # removing the _stats and _munp methods in truncnorm tp force
        # numerical quadrature.
        # For m,v,s,k expect k to have the largest error as it is
        # constructed from powers of lower moments
        self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
        self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
        self._test_moments_one_range(-3, 3, [0.0, 0.9733369246625415,
                                             0.0, -0.1711144363977444])
        self._test_moments_one_range(-2, 2, [0.0, 0.7737413035499232,
                                             0.0, -0.6344632828703505])
        self._test_moments_one_range(0, np.inf, [0.7978845608028654,
                                                 0.3633802276324186,
                                                 0.9952717464311565,
                                                 0.8691773036059725])
        self._test_moments_one_range(-np.inf, 0, [-0.7978845608028654,
                                                  0.3633802276324186,
                                                  -0.9952717464311565,
                                                  0.8691773036059725])
        self._test_moments_one_range(-1, 3, [0.2827861107271540,
                                             0.6161417353578292,
                                             0.5393018494027878,
                                             -0.2058206513527461])
        self._test_moments_one_range(-3, 1, [-0.2827861107271540,
                                             0.6161417353578292,
                                             -0.5393018494027878,
                                             -0.2058206513527461])
        self._test_moments_one_range(-10, -9, [-9.1084562880124764,
                                               0.0114488058210104,
                                               -1.8985607337519652,
                                               5.0733457094223553])
        self._test_moments_one_range(-20, -19, [-19.0523439459766628,
                                                0.0027250730180314,
                                                -1.9838694022629291,
                                                5.8717850028287586])
        self._test_moments_one_range(-30, -29, [-29.0344012377394698,
                                                0.0011806603928891,
                                                -1.9930304534611458,
                                                5.8854062968996566],
                                     decimal_s=6)
        self._test_moments_one_range(-40, -39, [-39.0256074199326264,
                                                0.0006548826719649,
                                                -1.9963146354109957,
                                                5.6167758371700494])
        self._test_moments_one_range(39, 40, [39.0256074199326264,
                                              0.0006548826719649,
                                              1.9963146354109957,
                                              5.6167758371700494])

    def test_9902_moments(self):
        # Regression test for gh-9902: half-line truncation moments.
        m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
        assert_almost_equal(m, 0.79788456)
        assert_almost_equal(v, 0.36338023)

    def test_gh_1489_trac_962_rvs(self):
        # Check the original example.
        low, high = 10, 15
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)

    def test_gh_11299_rvs(self):
        # Arose from investigating gh-11299
        # Test multiple shape parameters simultaneously.
        low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]
        high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]
        x = stats.truncnorm.rvs(low, high, size=(5, len(low)))
        assert np.shape(x) == (5, len(low))
        assert_(np.all(low <= x.min(axis=0)))
        assert_(np.all(x.max(axis=0) <= high))

    def test_rvs_Generator(self):
        # check that rvs can use a Generator
        if hasattr(np.random, "default_rng"):
            stats.truncnorm.rvs(-10, -5, size=5,
                                random_state=np.random.default_rng())
class TestGenLogistic:
    """logpdf accuracy for stats.genlogistic, deep into both tails."""

    # Expected values computed with mpmath with 50 digits of precision.
    @pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),
                                             (-125, -187.09453489189184),
                                             (0, -1.3274028432916989),
                                             (100, -99.59453489189184),
                                             (1000, -999.5945348918918)])
    def test_logpdf(self, x, expected):
        shape = 1.5
        computed = stats.genlogistic.logpdf(x, shape)
        assert_allclose(computed, expected, rtol=1e-13)
class TestHypergeom:
    """Tests for stats.hypergeom, the hypergeometric distribution.

    Includes precision checks for very large population sizes and
    regression tests for logsf/logcdf in extreme tails.
    """

    def setup_method(self):
        # Fixed seed so the rvs-based checks are reproducible.
        np.random.seed(1234)

    def test_rvs(self):
        # Samples lie in [0, N] and have the requested shape/dtype.
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)

    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        # NOTE(review): this assertion repeats the second one verbatim;
        # presumably pmf(1, 2, 0, 2) was intended -- verify upstream.
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)

    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)

    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
               for eaten in fruits_eaten]
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)

    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        # Two-point pmf {0.75, 0.25}: entropy is -sum(p * log(p)).
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # Degenerate (single-outcome) distribution has zero entropy.
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)

    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
        k = 1
        M = 1600
        n = 600
        N = 300
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2.566567e-68  # From R
        assert_almost_equal(result, expected, decimal=15)

    def test_logcdf(self):
        # Test logcdf for very large numbers. See issue #8692
        # Results compare with those from R (v3.3.2):
        # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
        # -5273.335
        k = 1
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -5273.335  # From R
        assert_almost_equal(result, expected, decimal=3)
        # Same example as in issue #8692
        k = 40
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -7.565148879229e-23  # From R
        assert_almost_equal(result, expected, decimal=15)
        k = 125
        M = 1600
        n = 250
        N = 500
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -4.242688e-12  # From R
        assert_almost_equal(result, expected, decimal=15)
        # test broadcasting robustness based on reviewer
        # concerns in PR 9603; using an array version of
        # the example from issue #8692
        k = np.array([40, 40, 40])
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = np.full(3, -7.565148879229e-23)  # filled from R result
        assert_almost_equal(result, expected, decimal=15)
class TestLoggamma:
    """Tests for stats.loggamma, the log-gamma distribution."""

    # Expected sf values were computed with mpmath. For given x and c,
    #     x = mpmath.mpf(x)
    #     c = mpmath.mpf(c)
    #     sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf,
    #                          regularized=True)
    @pytest.mark.parametrize('x, c, sf', [(4, 1.5, 1.6341528919488565e-23),
                                          (6, 100, 8.23836829202024e-74)])
    def test_sf_isf(self, x, c, sf):
        # sf must be accurate far in the right tail, and isf must
        # invert it back to x.
        s = stats.loggamma.sf(x, c)
        assert_allclose(s, sf, rtol=1e-12)
        y = stats.loggamma.isf(s, c)
        assert_allclose(y, x, rtol=1e-12)

    def test_logpdf(self):
        # Test logpdf with x=-500, c=2. ln(gamma(2)) = 0, and
        # exp(-500) ~= 7e-218, which is far smaller than the ULP
        # of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2))
        # should give -1000.0.
        lp = stats.loggamma.logpdf(-500, 2)
        assert_allclose(lp, -1000.0, rtol=1e-14)

    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c, mean, var, skew, exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
        ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # BUG FIX: the moments string must be 'mvsk' so the returned
            # order (mean, variance, skewness, kurtosis) matches the
            # reference columns; the previous 'msvk' compared skewness
            # against variance and vice versa.
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogistic:
    """Tests for stats.logistic: cdf/ppf round trips and MLE fitting."""

    # gh-6226
    def test_cdf_ppf(self):
        # ppf must invert cdf over a wide range.
        x = np.linspace(-20, 20)
        y = stats.logistic.cdf(x)
        xx = stats.logistic.ppf(y)
        assert_allclose(x, xx)

    def test_sf_isf(self):
        # isf must invert sf over a wide range.
        x = np.linspace(-20, 20)
        y = stats.logistic.sf(x)
        xx = stats.logistic.isf(y)
        assert_allclose(x, xx)

    def test_extreme_values(self):
        # p is chosen so that 1 - (1 - p) == p in double precision
        p = 9.992007221626409e-16
        desired = 34.53957599234088
        assert_allclose(stats.logistic.ppf(1 - p), desired)
        assert_allclose(stats.logistic.isf(p), desired)

    def test_logpdf_basic(self):
        logp = stats.logistic.logpdf([-15, 0, 10])
        # Expected values computed with mpmath with 50 digits of precision.
        expected = [-15.000000611804547,
                    -1.3862943611198906,
                    -10.000090797798434]
        assert_allclose(logp, expected, rtol=1e-13)

    def test_logpdf_extreme_values(self):
        logp = stats.logistic.logpdf([800, -800])
        # For such large arguments, logpdf(x) = -abs(x) when computed
        # with 64 bit floating point.
        assert_equal(logp, [-800, -800])

    @pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
    def test_fit(self, loc_rvs, scale_rvs):
        data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)

        # test that result of fit method is the same as optimization
        def func(input, data):
            # Likelihood score equations for the logistic distribution:
            # x1 is the loc equation, x2 the scale equation; both are
            # zero at the MLE.
            a, b = input
            n = len(data)
            x1 = np.sum(np.exp((data - a) / b) /
                        (1 + np.exp((data - a) / b))) - n / 2
            x2 = np.sum(((data - a) / b) *
                        ((np.exp((data - a) / b) - 1) /
                         (np.exp((data - a) / b) + 1))) - n
            return x1, x2

        expected_solution = root(func, stats.logistic._fitstart(data), args=(
            data,)).x
        fit_method = stats.logistic.fit(data)

        # other than computational variances, the fit method and the solution
        # to this system of equations are equal
        assert_allclose(fit_method, expected_solution, atol=1e-30)

    @pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
    def test_fit_comp_optimizer(self, loc_rvs, scale_rvs):
        data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)

        # obtain objective function to compare results of the fit methods
        args = [data, (stats.logistic._fitstart(data),)]
        func = stats.logistic._reduce_func(args, {})[1]

        # analytical fit must be at least as good as the generic optimizer
        _assert_less_or_close_loglike(stats.logistic, data, func)
class TestLogser:
    """Tests for stats.logser, the logarithmic series distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        # Samples are integers >= 1 with the requested shape/dtype.
        sample = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.logser.rvs(0.75)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.logser(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf_small_p(self):
        computed = stats.logser.pmf(4, 1e-20)
        # The expected value was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.mp.dps = 64
        #   >>> k = 4
        #   >>> p = mpmath.mpf('1e-20')
        #   >>> float(-(p**k)/k/mpmath.log(1-p))
        #   2.5e-61
        # It is also clear from noticing that for very small p,
        # log(1-p) is approximately -p, and the formula becomes
        # p**(k-1) / k.
        assert_allclose(computed, 2.5e-61)

    def test_mean_small_p(self):
        computed = stats.logser.mean(1e-8)
        # The expected mean was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.dps = 60
        #   >>> p = mpmath.mpf('1e-8')
        #   >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
        #   1.000000005
        assert_allclose(computed, 1.000000005)
class TestGumbel_r_l:
    """Fit tests shared by stats.gumbel_r and stats.gumbel_l."""

    def setup_method(self):
        # Fixed seed so the rvs-based checks are reproducible.
        np.random.seed(1234)

    @pytest.mark.parametrize("dist", [stats.gumbel_r, stats.gumbel_l])
    @pytest.mark.parametrize("loc_rvs,scale_rvs", ([np.random.rand(2)]))
    def test_fit_comp_optimizer(self, dist, loc_rvs, scale_rvs):
        data = dist.rvs(size=100, loc=loc_rvs, scale=scale_rvs)

        # obtain objective function to compare results of the fit methods
        args = [data, (dist._fitstart(data),)]
        func = dist._reduce_func(args, {})[1]

        # test that the gumbel_* fit method is better than super method
        _assert_less_or_close_loglike(dist, data, func)

    @pytest.mark.parametrize("dist, sgn", [(stats.gumbel_r, 1),
                                           (stats.gumbel_l, -1)])
    def test_fit(self, dist, sgn):
        # Nearly-degenerate data (all values almost equal): the fit must
        # stay finite and accurate; sgn mirrors the data for gumbel_l.
        z = sgn*np.array([3, 3, 3, 3, 3, 3, 3, 3.00000001])
        loc, scale = dist.fit(z)
        # The expected values were computed with mpmath with 60 digits
        # of precision.
        assert_allclose(loc, sgn*3.0000000001667906)
        assert_allclose(scale, 1.2495222465145514e-09, rtol=1e-6)
class TestPareto:
    """Tests for stats.pareto: moments across shape thresholds and MLE fit.

    Moments of the Pareto distribution exist only above shape-dependent
    thresholds (mean for b>1, variance for b>2, skewness for b>3,
    kurtosis for b>4); test_stats walks through those regimes.
    """

    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))

    def test_sf(self):
        # For Pareto, sf(x) = (scale/x)**b exactly; check a far-tail value.
        x = 1e9
        b = 2
        scale = 1.5
        p = stats.pareto.sf(x, b, loc=0, scale=scale)
        expected = (scale/x)**b  # 2.25e-18
        assert_allclose(p, expected)

    @pytest.mark.filterwarnings("ignore:invalid value encountered in "
                                "double_scalars")
    @pytest.mark.parametrize("rvs_shape", [1, 2])
    @pytest.mark.parametrize("rvs_loc", [0, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 5])
    def test_fit(self, rvs_shape, rvs_loc, rvs_scale):
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=rvs_loc)

        # shape can still be fixed with multiple names
        shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]
        shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]
        shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]
        assert (shape_mle_analytical1 == shape_mle_analytical2 ==
                shape_mle_analytical3 == 1.04)

        # data can be shifted with changes to `loc`
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=(rvs_loc + 2))
        shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)
        # With loc fixed, the MLE of scale is min(data) - loc and the MLE
        # of the shape is the reciprocal mean log-ratio to that minimum.
        assert_equal(scale_mle_a + 2, data.min())
        # NOTE(review): len(data - 2) == len(data); the subtraction inside
        # len() is redundant (element-wise, length-preserving).
        assert_equal(shape_mle_a, 1/((1/len(data - 2)) *
                                     np.sum(np.log((data
                                                    - 2)/(data.min() - 2)))))
        assert_equal(loc_mle_a, 2)

    @pytest.mark.filterwarnings("ignore:invalid value encountered in "
                                "double_scalars")
    @pytest.mark.parametrize("rvs_shape", [1, 2])
    @pytest.mark.parametrize("rvs_loc", [0, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 5])
    def test_fit_MLE_comp_optimzer(self, rvs_shape, rvs_loc, rvs_scale):
        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
                                loc=rvs_loc)
        args = [data, (stats.pareto._fitstart(data), )]
        func = stats.pareto._reduce_func(args, {})[1]

        # fixed `floc` to actual location provides a better fit than the
        # super method
        _assert_less_or_close_loglike(stats.pareto, data, func, floc=rvs_loc)

        # fixing `floc` to an arbitrary number, 0, still provides a better
        # fit than the super method
        _assert_less_or_close_loglike(stats.pareto, data, func, floc=0)

        # fixed shape still uses MLE formula and provides a better fit than
        # the super method
        _assert_less_or_close_loglike(stats.pareto, data, func, floc=0, f0=4)

        # valid fixed fscale still uses MLE formulas and provides a better
        # fit than the super method
        _assert_less_or_close_loglike(stats.pareto, data, func, floc=0,
                                      fscale=rvs_scale/2)

    def test_fit_warnings(self):
        assert_fit_warnings(stats.pareto)
        # `floc` that causes invalid negative data
        assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)
        # `floc` and `fscale` combination causes invalid data
        assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,
                      fscale=3)
class TestGenpareto:
    """Tests for stats.genpareto, the generalized Pareto distribution."""

    def test_ab(self):
        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            a, b = stats.genpareto._get_support(c)
            assert_equal(a, 0.)
            assert_(np.isposinf(b))

        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])

    def test_c0(self):
        # with c=0, genpareto reduces to the exponential distribution
        rv = stats.genpareto(c=0.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.expon.pdf(x))
        assert_allclose(rv.cdf(x), stats.expon.cdf(x))
        assert_allclose(rv.sf(x), stats.expon.sf(x))

        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.expon.ppf(q))

    def test_cm1(self):
        # with c=-1, genpareto reduces to the uniform distr on [0, 1]
        rv = stats.genpareto(c=-1.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
        assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
        assert_allclose(rv.sf(x), stats.uniform.sf(x))

        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.uniform.ppf(q))

        # logpdf(1., c=-1) should be zero
        assert_allclose(rv.logpdf(1), 0)

    def test_x_inf(self):
        # make sure x=inf is handled gracefully
        rv = stats.genpareto(c=0.1)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

        rv = stats.genpareto(c=0.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

        rv = stats.genpareto(c=-1.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))

    def test_c_continuity(self):
        # pdf and cdf are continuous at the special points c=0 and c=-1
        x = np.linspace(0, 10, 30)
        for c in [0, -1]:
            pdf0 = stats.genpareto.pdf(x, c)
            for dc in [1e-14, -1e-14]:
                pdfc = stats.genpareto.pdf(x, c + dc)
                assert_allclose(pdf0, pdfc, atol=1e-12)

            cdf0 = stats.genpareto.cdf(x, c)
            # BUG FIX: perturb c from both sides; the original list
            # repeated +1e-14 twice, so the limit from below was
            # never exercised for the cdf.
            for dc in [1e-14, -1e-14]:
                cdfc = stats.genpareto.cdf(x, c + dc)
                assert_allclose(cdf0, cdfc, atol=1e-12)

    def test_c_continuity_ppf(self):
        # ppf is continuous at c=0 and c=-1, including extreme quantiles.
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            ppf0 = stats.genpareto.ppf(q, c)
            for dc in [1e-14, -1e-14]:
                ppfc = stats.genpareto.ppf(q, c + dc)
                assert_allclose(ppf0, ppfc, atol=1e-12)

    def test_c_continuity_isf(self):
        # isf is continuous at c=0 and c=-1, including extreme quantiles.
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            isf0 = stats.genpareto.isf(q, c)
            for dc in [1e-14, -1e-14]:
                isfc = stats.genpareto.isf(q, c + dc)
                assert_allclose(isf0, isfc, atol=1e-12)

    def test_cdf_ppf_roundtrip(self):
        # this should pass with machine precision. hat tip @pbrod
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
                            q, atol=1e-15)

    def test_logsf(self):
        # logsf must not lose accuracy far in the tail.
        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
        assert_allclose(logp, -1842.0680753952365)

    # Values in 'expected_stats' are
    # [mean, variance, skewness, excess kurtosis].
    @pytest.mark.parametrize(
        'c, expected_stats',
        [(0, [1, 1, 2, 6]),
         (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
         (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
         (-1, [1/2, 1/12, 0, -6/5])])
    def test_stats(self, c, expected_stats):
        result = stats.genpareto.stats(c, moments='mvsk')
        assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)

    def test_var(self):
        # Regression test for gh-11168.
        v = stats.genpareto.var(1e-8)
        assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3:
    """Tests for the Pearson type III distribution (stats.pearson3)."""

    def setup_method(self):
        # Fixed seed so the rvs-based checks below are reproducible.
        np.random.seed(1234)

    def test_rvs(self):
        """Sampling returns the requested shape, dtype, and scalar types."""
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)

    def test_pdf(self):
        """PDF spot-checks against precomputed reference values."""
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)

    def test_cdf(self):
        """CDF spot-checks against precomputed reference values."""
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)

    def test_negative_cdf_bug_11186(self):
        """CDF agrees with the numerically integrated PDF for several skews."""
        # incorrect CDFs for negative skews in gh-11186; fixed in gh-12640
        # Also check vectorization w/ negative, zero, and positive skews
        skews = [-3, -1, 0, 0.5]
        x_eval = 0.5
        neg_inf = -30  # avoid RuntimeWarning caused by np.log(0)
        cdfs = stats.pearson3.cdf(x_eval, skews)
        int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
                    for skew in skews]
        assert_allclose(cdfs, int_pdfs)

    def test_return_array_bug_11746(self):
        """moment() returns a Python float, not a size-0/1 array."""
        # pearson3.moment was returning size 0 or 1 array instead of float
        # The first moment is equal to the loc, which defaults to zero
        moment = stats.pearson3.moment(1, 2)
        assert_equal(moment, 0)
        assert_equal(type(moment), float)
        moment = stats.pearson3.moment(1, 0.000001)
        assert_equal(moment, 0)
        assert_equal(type(moment), float)
class TestKappa4:
    """kappa4 collapses to well-known distributions at special (h, k)."""

    def test_cdf_genpareto(self):
        # h = 1 and k != 0 is generalized Pareto
        x = [0.0, 0.1, 0.2, 0.5]
        h = 1.0
        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0, 1.9]:
            computed = stats.kappa4.cdf(x, h, k)
            # shape parameter is opposite what is expected
            reference = stats.genpareto.cdf(x, -k)
            assert_allclose(computed, reference)

    def test_cdf_genextreme(self):
        # h = 0 and k != 0 is generalized extreme value
        x = np.linspace(-5, 5, 10)
        k = np.linspace(-3, 3, 10)
        assert_allclose(stats.kappa4.cdf(x, 0.0, k),
                        stats.genextreme.cdf(x, k))

    def test_cdf_expon(self):
        # h = 1 and k = 0 is exponential
        x = np.linspace(0, 10, 10)
        assert_allclose(stats.kappa4.cdf(x, 1.0, 0.0), stats.expon.cdf(x))

    def test_cdf_gumbel_r(self):
        # h = 0 and k = 0 is gumbel_r
        x = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(x, 0.0, 0.0), stats.gumbel_r.cdf(x))

    def test_cdf_logistic(self):
        # h = -1 and k = 0 is logistic
        x = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(x, -1.0, 0.0), stats.logistic.cdf(x))

    def test_cdf_uniform(self):
        # h = 1 and k = 1 is uniform
        x = np.linspace(-5, 5, 10)
        assert_allclose(stats.kappa4.cdf(x, 1.0, 1.0), stats.uniform.cdf(x))

    def test_integers_ctor(self):
        # regression test for gh-7416: _argcheck fails for integer h and k
        # in numpy 1.12
        stats.kappa4(1, 2)
class TestPoisson:
    """Tests for the Poisson discrete distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_pmf_basic(self):
        # Basic case: pmf at 0, 1, 2 with rate log(2)
        ln2 = np.log(2)
        computed = stats.poisson.pmf([0, 1, 2], ln2)
        assert_allclose(computed, [0.5, ln2/2, ln2**2/4])

    def test_mu0(self):
        # Edge case: mu=0 puts all probability mass on k=0
        assert_array_equal(stats.poisson.pmf([0, 1, 2], 0), [1, 0, 0])
        assert_equal(stats.poisson.interval(0.95, 0), (0, 0))

    def test_rvs(self):
        sample = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.poisson.rvs(0.5)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.poisson(0.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_stats(self):
        # scalar mu: mean=var=mu, skew=1/sqrt(mu), kurt=1/mu
        mu = 16.0
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'),
                        [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
        # array mu, including the degenerate mu=0 entry
        mu = np.array([0.0, 1.0, 2.0])
        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'), expected)
class TestKSTwo:
    """Tests of the two-sided Kolmogorov-Smirnov distribution (kstwo)."""

    def setup_method(self):
        np.random.seed(1234)

    def test_cdf(self):
        """CDF at analytically-known points, for several sample sizes."""
        for n in [1, 2, 3, 10, 100, 1000]:
            # Test x-values:
            # 0, 1/2n, where the cdf should be 0
            # 1/n, where the cdf should be n!/n^n
            # 0.5, where the cdf should match ksone.cdf
            # 1-1/n, where cdf = 1-2/n^n
            # 1, where cdf == 1
            # (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([0, 0, v1 * elg,
                                 1 - 2*stats.ksone.sf(0.5, n),
                                 max(1 - 2*v1, 0.0),
                                 1.0])
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected)

    def test_sf(self):
        """SF at the same analytically-known points, via sf = 1 - cdf."""
        # NOTE: a dead `x = np.linspace(0, 1, 11)` initializer was removed
        # here; x is recomputed from n on every loop iteration below.
        for n in [1, 2, 3, 10, 100, 1000]:
            # Same x values as in test_cdf, and use sf = 1 - cdf
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([1.0, 1.0,
                                 1 - v1 * elg,
                                 2*stats.ksone.sf(0.5, n),
                                 min(2*v1, 1.0), 0])
            vals_sf = stats.kstwo.sf(x, n)
            assert_allclose(vals_sf, expected)

    def test_cdf_sqrtn(self):
        # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
        # cdf(a/sqrt(n), n) is an increasing function of n (and a)
        # Check that the function is indeed increasing (allowing for some
        # small floating point and algorithm differences.)
        x = np.linspace(0, 2, 11)[1:]
        ns = [50, 100, 200, 400, 1000, 2000]
        for _x in x:
            xn = _x / np.sqrt(ns)
            probs = stats.kstwo.cdf(xn, ns)
            diffs = np.diff(probs)
            assert_array_less(diffs, 1e-8)

    def test_cdf_sf(self):
        """cdf and sf are complementary across the support."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            vals_cdf = stats.kstwo.cdf(x, n)
            vals_sf = stats.kstwo.sf(x, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)

    def test_cdf_sf_sqrtn(self):
        """cdf/sf complementarity also holds at x scaled by 1/sqrt(n)."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x / np.sqrt(n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            vals_sf = stats.kstwo.sf(xn, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)

    def test_ppf_of_cdf(self):
        """ppf inverts cdf (away from the saturated right end)."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            # CDFs close to 1 are better dealt with using the SF
            cond = (0 < vals_cdf) & (vals_cdf < 0.99)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)

    def test_isf_of_sf(self):
        """sf inverts isf on the interior of the support."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x[x > 0.5/n]
            vals_isf = stats.kstwo.isf(xn, n)
            cond = (0 < vals_isf) & (vals_isf < 1.0)
            vals = stats.kstwo.sf(vals_isf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)

    def test_ppf_of_cdf_sqrtn(self):
        """ppf inverts cdf at x scaled by 1/sqrt(n)."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            cond = (0 < vals_cdf) & (vals_cdf < 1.0)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond])

    def test_isf_of_sf_sqrtn(self):
        """isf inverts sf at x scaled by 1/sqrt(n)."""
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_sf = stats.kstwo.sf(xn, n)
            # SFs close to 1 are better dealt with using the CDF
            cond = (0 < vals_sf) & (vals_sf < 0.95)
            vals = stats.kstwo.isf(vals_sf, n)
            assert_allclose(vals[cond], xn[cond])

    def test_ppf(self):
        """cdf(ppf(p)) == p across a grid of probabilities."""
        probs = np.linspace(0, 1, 11)[1:]
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = stats.kstwo.ppf(probs, n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            assert_allclose(vals_cdf, probs)

    def test_simard_lecuyer_table1(self):
        # Compute the cdf for values near the mean of the distribution.
        # The mean u ~ log(2)*sqrt(pi/(2n))
        # Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
        # This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
        # "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
        # Except that the values below are not from the published table, but
        # were generated using an independent SageMath implementation of
        # Durbin's algorithm (with the exponentiation and scaling of
        # Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
        # Some of the values in the published table have relative
        # errors greater than 1e-4.
        ns = [10, 50, 100, 200, 500, 1000]
        ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
        expected = np.array([
            [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,
             9.97685592e-01, 9.99999942e-01],
            [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,
             9.96177701e-01, 9.99998662e-01],
            [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,
             9.95866877e-01, 9.99998240e-01],
            [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,
             9.95661824e-01, 9.99997964e-01],
            [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,
             9.95491207e-01, 9.99997750e-01],
            [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,
             9.95409545e-01, 9.99997657e-01]
        ])
        for idx, n in enumerate(ns):
            x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf:
    """Tests for the Zipf discrete distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        sample = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.zipf.rvs(1.5)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.zipf(1.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        mean, variance = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(mean))
        assert_equal(variance, np.inf)
        skewness, kurtosis = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([skewness, kurtosis]).all())
class TestDLaplace:
    """Tests for the discrete Laplace distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        sample = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        scalar = stats.dlaplace.rvs(1.5)
        assert_(isinstance(scalar, int))
        frozen_sample = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)

    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        shape = 1.
        dist = stats.dlaplace(shape)
        mean, variance, skewness, kurtosis = dist.stats('mvsk')
        half_width = 37
        support = np.arange(-half_width, half_width+1)
        weights = dist.pmf(support)
        m2 = np.sum(weights*support**2)
        m4 = np.sum(weights*support**4)
        assert_equal((mean, skewness), (0, 0))
        assert_allclose((variance, kurtosis), (m2, m4/m2**2 - 3.),
                        atol=1e-14, rtol=1e-8)

    def test_stats2(self):
        dist = stats.dlaplace(np.log(2.))
        mean, variance, skewness, kurtosis = dist.stats('mvsk')
        assert_equal((mean, skewness), (0., 0.))
        assert_allclose((variance, kurtosis), (4., 3.25))
class TestInvgauss:
    """Tests for the inverse Gaussian (Wald) distribution."""

    def setup_method(self):
        # Fixed seed so the rvs-based fit checks are reproducible.
        np.random.seed(1234)

    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
                             [(2, 0, 1), (np.random.rand(3)*10)])
    def test_fit(self, rvs_mu, rvs_loc, rvs_scale):
        """fit() with `floc` fixed reproduces the analytical MLE formulas."""
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)
        # Analytical MLEs are calculated with formula when `floc` is fixed
        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)
        data = data - rvs_loc
        mu_temp = np.mean(data)
        scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1)))
        mu_mle = mu_temp/scale_mle
        # `mu` and `scale` match analytical formula
        assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)
        assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)
        assert_equal(loc, rvs_loc)
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)
        # fixed parameters are returned
        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,
                                            fscale=rvs_scale + 1)
        assert_equal(rvs_scale + 1, scale)
        assert_equal(rvs_loc - 1, loc)
        # shape can still be fixed with multiple names
        shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]
        shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]
        shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]
        assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04

    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
                             [(2, 0, 1), (np.random.rand(3)*10)])
    def test_fit_MLE_comp_optimzer(self, rvs_mu, rvs_loc, rvs_scale):
        """Analytical MLEs give a log-likelihood at least as good as the
        numerical optimizer of the superclass fit method."""
        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
                                  loc=rvs_loc, scale=rvs_scale)
        super_fit = super(type(stats.invgauss), stats.invgauss).fit
        # fitting without `floc` uses superclass fit method
        super_fitted = super_fit(data)
        invgauss_fit = stats.invgauss.fit(data)
        assert_equal(super_fitted, invgauss_fit)
        # fitting with `fmu` uses the superclass fit method
        super_fitted = super_fit(data, floc=0, fmu=2)
        invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)
        assert_equal(super_fitted, invgauss_fit)
        # obtain log-likelihood objective function to compare results
        args = [data, (stats.invgauss._fitstart(data), )]
        func = stats.invgauss._reduce_func(args, {})[1]
        # fixed `floc` uses analytical formula and provides better fit than
        # super method
        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc)
        # fixed `floc` not resulting in invalid data < 0 uses analytical
        # formulas and provides a better fit than the super method
        assert np.all((data - (rvs_loc - 1)) > 0)
        _assert_less_or_close_loglike(stats.invgauss, data, func,
                                      floc=rvs_loc - 1)
        # fixed `floc` to an arbitrary number, 0, still provides a better fit
        # than the super method
        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=0)
        # fixed `fscale` to an arbitrary number still provides a better fit
        # than the super method
        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc,
                                      fscale=np.random.rand(1)[0])

    def test_fit_raise_errors(self):
        """fit() emits the standard warnings and rejects invalid data."""
        assert_fit_warnings(stats.invgauss)
        # FitDataError is raised when negative invalid data
        with pytest.raises(FitDataError):
            stats.invgauss.fit([1, 2, 3], floc=2)

    def test_cdf_sf(self):
        """cdf/sf stay finite and accurate for extreme mu (gh-13614)."""
        # Regression tests for gh-13614.
        # Ground truth from R's statmod library (pinvgauss), e.g.
        # library(statmod)
        # options(digits=15)
        # mu = c(4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
        #        3.02332573e-03, 1.46755891e-03)
        # print(pinvgauss(5, mu, 1))
        # make sure a finite value is returned when mu is very small. see
        # GH-13614
        mu = [4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
              3.02332573e-03, 1.46755891e-03]
        expected = [1, 1, 1, 1, 1]
        actual = stats.invgauss.cdf(0.4, mu=mu)
        assert_equal(expected, actual)
        # test if the function can distinguish small left/right tail
        # probabilities from zero.
        cdf_actual = stats.invgauss.cdf(0.001, mu=1.05)
        assert_allclose(cdf_actual, 4.65246506892667e-219)
        sf_actual = stats.invgauss.sf(110, mu=1.05)
        assert_allclose(sf_actual, 4.12851625944048e-25)
        # test if x does not cause numerical issues when mu is very small
        # and x is close to mu in value.
        # slightly smaller than mu
        actual = stats.invgauss.cdf(0.00009, 0.0001)
        assert_allclose(actual, 2.9458022894924e-26)
        # slightly bigger than mu
        actual = stats.invgauss.cdf(0.000102, 0.0001)
        assert_allclose(actual, 0.976445540507925)

    def test_logcdf_logsf(self):
        """logcdf/logsf handle values below the unlogged range (gh-13616)."""
        # Regression tests for improvements made in gh-13616.
        # Ground truth from R's statmod library (pinvgauss), e.g.
        # library(statmod)
        # options(digits=15)
        # print(pinvgauss(0.001, 1.05, 1, log.p=TRUE, lower.tail=FALSE))
        # test if logcdf and logsf can compute values too small to
        # be represented on the unlogged scale. See: gh-13616
        logcdf = stats.invgauss.logcdf(0.0001, mu=1.05)
        assert_allclose(logcdf, -5003.87872590367)
        logcdf = stats.invgauss.logcdf(110, 1.05)
        assert_allclose(logcdf, -4.12851625944087e-25)
        logsf = stats.invgauss.logsf(0.001, mu=1.05)
        assert_allclose(logsf, -4.65246506892676e-219)
        logsf = stats.invgauss.logsf(110, 1.05)
        assert_allclose(logsf, -56.1467092416426)
class TestLaplace:
    """Tests for the Laplace distribution, mainly its analytical MLE fit."""

    @pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2])
    @pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10])
    def test_fit(self, rvs_loc, rvs_scale):
        """fit() matches the analytical MLEs (median, mean |deviation|)."""
        # tests that various inputs follow expected behavior
        # for a variety of `loc` and `scale`.
        data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)
        # MLE estimates are given by
        loc_mle = np.median(data)
        scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)
        # standard outputs should match analytical MLE formulas
        loc, scale = stats.laplace.fit(data)
        assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)
        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
        # fixed parameter should use analytical formula for other
        loc, scale = stats.laplace.fit(data, floc=loc_mle)
        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
        assert_allclose(loc, loc_mle)
        # test with non-mle fixed parameter
        # create scale with non-median loc
        loc = rvs_loc * 2
        scale_mle = np.sum(np.abs(data - loc)) / len(data)
        # fixed loc to non median, scale should match
        # scale calculation with modified loc
        loc, scale = stats.laplace.fit(data, floc=loc)
        assert_equal(scale_mle, scale)
        # fixed scale created with non median loc,
        # loc output should still be the data median.
        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
        assert_equal(loc_mle, loc)
        # error raised when both `floc` and `fscale` are fixed
        assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,
                      fscale=scale_mle)
        # error is raised with non-finite values
        assert_raises(RuntimeError, stats.laplace.fit, [np.nan])
        assert_raises(RuntimeError, stats.laplace.fit, [np.inf])

    @pytest.mark.parametrize("rvs_scale,rvs_loc", [(10, -5),
                                                   (5, 10),
                                                   (.2, .5)])
    def test_fit_MLE_comp_optimzer(self, rvs_loc, rvs_scale):
        """Analytical MLE log-likelihood is no worse than the optimizer's."""
        data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)
        # the log-likelihood function for laplace is given by
        def ll(loc, scale, data):
            return -1 * (- (len(data)) * np.log(2*scale) -
                         (1/scale)*np.sum(np.abs(data - loc)))
        # test that the objective function result of the analytical MLEs is
        # less than or equal to that of the numerically optimized estimate
        loc, scale = stats.laplace.fit(data)
        loc_opt, scale_opt = super(type(stats.laplace),
                                   stats.laplace).fit(data)
        ll_mle = ll(loc, scale, data)
        ll_opt = ll(loc_opt, scale_opt, data)
        assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,
                                              atol=1e-15, rtol=1e-15)

    def test_fit_simple_non_random_data(self):
        """Deterministic sanity check of the fixed-parameter formulas."""
        data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])
        # with `floc` fixed to 6, scale should be 4.
        loc, scale = stats.laplace.fit(data, floc=6)
        assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)
        # with `fscale` fixed to 6, loc should be 4.
        loc, scale = stats.laplace.fit(data, fscale=6)
        assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)

    def test_sf_cdf_extremes(self):
        """cdf/sf saturate cleanly (no warnings) at extreme arguments."""
        # These calculations should not generate warnings.
        x = 1000
        p0 = stats.laplace.cdf(-x)
        # The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
        assert p0 == 0.0
        # The closest 64 bit floating point representation of the
        # exact value is 1.0.
        p1 = stats.laplace.cdf(x)
        assert p1 == 1.0
        p0 = stats.laplace.sf(x)
        # The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
        assert p0 == 0.0
        # The closest 64 bit floating point representation of the
        # exact value is 1.0.
        p1 = stats.laplace.sf(-x)
        assert p1 == 1.0

    def test_sf(self):
        """Right tail: sf(x) == exp(-x)/2 for the standard Laplace."""
        x = 200
        p = stats.laplace.sf(x)
        assert_allclose(p, np.exp(-x)/2, rtol=1e-13)

    def test_isf(self):
        """isf inverts the right-tail formula: isf(p) == -log(2p)."""
        p = 1e-25
        x = stats.laplace.isf(p)
        assert_allclose(x, -np.log(2*p), rtol=1e-13)
class TestInvGamma:
    """Tests for the inverted gamma distribution."""

    def test_invgamma_inf_gh_1866(self):
        """Moments are finite only for a > n (regression for gh-1866)."""
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            expected = [0.05461496450, 0.0001723162534, 1.020362676,
                        2.055616582]
            assert_allclose(mvsk, expected)
        a = [1.1, 3.1, 5.6]
        mvsk = stats.invgamma.stats(a=a, moments='mvsk')
        expected = ([10., 0.476190476, 0.2173913043],  # mmm
                    [np.inf, 0.2061430632, 0.01312749422],  # vvv
                    [np.nan, 41.95235392, 2.919025532],  # sss
                    [np.nan, np.nan, 24.51923076])  # kkk
        for x, y in zip(mvsk, expected):
            assert_almost_equal(x, y)

    def test_cdf_ppf(self):
        """ppf round-trips cdf on the left part of the support (gh-6245)."""
        # gh-6245
        x = np.logspace(-2.6, 0)
        y = stats.invgamma.cdf(x, 1)
        xx = stats.invgamma.ppf(y, 1)
        assert_allclose(x, xx)

    def test_sf_isf(self):
        """isf round-trips sf in the right tail (gh-6245)."""
        # gh-6245
        if sys.maxsize > 2**32:
            x = np.logspace(2, 100)
        else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
            x = np.logspace(2, 18)
        y = stats.invgamma.sf(x, 1)
        xx = stats.invgamma.isf(y, 1)
        assert_allclose(x, xx, rtol=1.0)
class TestF:
    """Tests for the F distribution."""

    def test_endpoints(self):
        """PDF at the left support endpoint equals the known value."""
        # Compute the pdf at the left endpoint dst.a.
        # (A redundant for-loop that precomputed `ans` and was immediately
        # discarded by the list comprehension below has been removed.)
        data = [[stats.f, (2, 1), 1.0]]
        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct for _f, _args, _correct in data]
        assert_array_almost_equal(ans, correct)

    def test_f_moments(self):
        """n-th moment of F distributions is only finite for n < dfd / 2."""
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))

    def test_moments_warnings(self):
        """No division-by-zero warnings for dfd in {2, 4, 6, 8}."""
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')

    def test_stats_broadcast(self):
        """stats() broadcasts dfn (2x1) against dfd (2,) and matches the
        closed-form moment formulas."""
        dfn = np.array([[3], [11]])
        dfd = np.array([11, 12])
        m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
        # mean depends only on dfd
        m2 = [dfd / (dfd - 2)]*2
        assert_allclose(m, m2)
        v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
        assert_allclose(v, v2)
        s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
              ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
        assert_allclose(s, s2)
        k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
                      (dfd - 4) * (dfd - 2)**2)
        k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
        k2 = k2num / k2den
        assert_allclose(k, k2)
def test_rvgeneric_std():
    """Vectorized std of Student t (regression test for #1191)."""
    reference = [1.29099445, 1.22474487]
    assert_array_almost_equal(stats.t.std([5, 6]), reference)
def test_moments_t():
    """Student t moments at/near the df thresholds (regression for #8786)."""
    mvsk_cases = [
        (1, (np.inf, np.nan, np.nan, np.nan)),
        (1.01, (0.0, np.inf, np.nan, np.nan)),
        (2, (0.0, np.inf, np.nan, np.nan)),
        (2.01, (0.0, 2.01/(2.01-2.0), np.nan, np.inf)),
    ]
    for df, expected in mvsk_cases:
        assert_equal(stats.t.stats(df=df, moments='mvsk'), expected)
    sk_cases = [
        (3, (np.nan, np.inf)),
        (3.01, (0.0, np.inf)),
        (4, (0.0, np.inf)),
        (4.01, (0.0, 6.0/(4.01 - 4.0))),
    ]
    for df, expected in sk_cases:
        assert_equal(stats.t.stats(df=df, moments='sk'), expected)
def test_t_entropy():
    """Student t entropy matches mpmath-computed reference values."""
    df = [1, 2, 25, 100]
    # Expected values were computed with mpmath.
    reference = [2.5310242469692907, 1.9602792291600821,
                 1.459327578078393, 1.4289633653182439]
    assert_allclose(stats.t.entropy(df), reference, rtol=1e-13)
class TestRvDiscrete:
    """Tests for rv_discrete distributions defined by (xk, pk) tables."""

    def setup_method(self):
        # Fixed seed so the sampling frequency check is reproducible.
        np.random.seed(1234)

    def test_rvs(self):
        """Sample frequencies roughly match the specified probabilities."""
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))
        for s, p in zip(states, probability):
            # empirical frequency within 5 percentage points of p
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
        x = r.rvs()
        assert_(isinstance(x, int))

    def test_entropy(self):
        """Entropy equals -sum(p*log p); a degenerate pmf has entropy 0."""
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)

    def test_pmf(self):
        """pmf is the table value on support points and 0 elsewhere."""
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)

    def test_cdf(self):
        """cdf is a right-continuous step function; scalars work too."""
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.cdf(xx) for xx in x_values],
                        expected, atol=1e-14)

    def test_ppf(self):
        """ppf returns support points; scalar arguments work too."""
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
        expected = [1, 1, 2, 2, 4, 4]
        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.ppf(q) for q in q_values],
                        expected, atol=1e-14)

    def test_cdf_ppf_next(self):
        """ppf of (cdf + eps) advances to the next support point."""
        # copied and special cased from test_discrete_basic
        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
        rv = stats.rv_discrete(values=vals)
        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
                           rv.xk[1:])

    def test_multidimension(self):
        """2-d xk/pk tables of matching shape are accepted."""
        xk = np.arange(12).reshape((3, 4))
        pk = np.array([[0.1, 0.1, 0.15, 0.05],
                       [0.1, 0.1, 0.05, 0.05],
                       [0.1, 0.1, 0.05, 0.05]])
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)

    def test_bad_input(self):
        """Mismatched lengths or invalid probabilities raise ValueError."""
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        xk = [1, 2, 3]
        pk = [0.5, 1.2, -0.7]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        xk = [1, 2, 3, 4, 5]
        pk = [0.3, 0.3, 0.3, 0.3, -0.2]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

    def test_shape_rv_sample(self):
        """Shape validation of 2-d xk/pk inputs (gh-9565)."""
        # tests added for gh-9565
        # mismatch of 2d inputs
        xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        # same number of elements, but shapes not compatible
        xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        # same shapes => no error
        xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
        assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)

    def test_expect1(self):
        """expect() equals the weighted sum over the support."""
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)

    def test_expect2(self):
        """expect() uses the rv_sample override, not generic summation."""
        # rv_sample should override _expect. Bug report from
        # https://stackoverflow.com/questions/63199792
        y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
             1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
             1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
             2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
             3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
             4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]
        py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
              0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
              0.00019999999999997797, 0.0006000000000000449,
              0.024499999999999966, 0.006400000000000072,
              0.0043999999999999595, 0.019499999999999962,
              0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
              0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
              0.0, 0.0041000000000001036, 0.005999999999999894,
              0.0042000000000000925, 0.0050000000000000044,
              0.0041999999999999815, 0.0004999999999999449,
              0.009199999999999986, 0.008200000000000096,
              0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
              0.0006000000000000449, 0.02510000000000001, 0.0,
              0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
              0.008199999999999985, 0.005600000000000049, 0.0]
        rv = stats.rv_discrete(values=(y, py))
        # check the mean
        assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
        assert_allclose(rv.expect(),
                        sum(v * w for v, w in zip(y, py)), atol=1e-14)
        # also check the second moment
        assert_allclose(rv.expect(lambda x: x**2),
                        sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
class TestSkewCauchy:
    """Tests for the skewed Cauchy distribution."""

    def test_cauchy(self):
        """a=0 reduces to the standard Cauchy distribution."""
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewcauchy.pdf(x, a=0),
                                  stats.cauchy.pdf(x))
        assert_array_almost_equal(stats.skewcauchy.cdf(x, a=0),
                                  stats.cauchy.cdf(x))
        assert_array_almost_equal(stats.skewcauchy.ppf(x, a=0),
                                  stats.cauchy.ppf(x))

    def test_skewcauchy_R(self):
        """pdf/cdf/ppf agree with values from R's sgt package (code below)."""
        # options(digits=16)
        # library(sgt)
        # # lmbda, x contain the values generated for a, x below
        # lmbda <- c(0.0976270078546495, 0.430378732744839, 0.2055267521432877,
        #            0.0897663659937937, -0.15269040132219, 0.2917882261333122,
        #            -0.12482557747462, 0.7835460015641595, 0.9273255210020589,
        #            -0.2331169623484446)
        # x <- c(2.917250380826646, 0.2889491975290444, 0.6804456109393229,
        #        4.25596638292661, -4.289639418021131, -4.1287070029845925,
        #        -4.797816025596743, 3.32619845547938, 2.7815675094985046,
        #        3.700121482468191)
        # pdf = dsgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #            var.adj = sqrt(2))
        # cdf = psgt(x, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #            var.adj = sqrt(2))
        # qsgt(cdf, mu=0, lambda=lambda, sigma=1, q=1/2, mean.cent=FALSE,
        #      var.adj = sqrt(2))
        # Seeded so a and x regenerate exactly the values listed above.
        np.random.seed(0)
        a = np.random.rand(10) * 2 - 1
        x = np.random.rand(10) * 10 - 5
        pdf = [0.039473975217333909, 0.305829714049903223, 0.24140158118994162,
               0.019585772402693054, 0.021436553695989482, 0.00909817103867518,
               0.01658423410016873, 0.071083288030394126, 0.103250045941454524,
               0.013110230778426242]
        cdf = [0.87426677718213752, 0.37556468910780882, 0.59442096496538066,
               0.91304659850890202, 0.09631964100300605, 0.03829624330921733,
               0.08245240578402535, 0.72057062945510386, 0.62826415852515449,
               0.95011308463898292]
        assert_allclose(stats.skewcauchy.pdf(x, a), pdf)
        assert_allclose(stats.skewcauchy.cdf(x, a), cdf)
        assert_allclose(stats.skewcauchy.ppf(cdf, a), x)
class TestSkewNorm:
    """Tests for the skew-normal distribution."""

    def setup_method(self):
        # Shared reproducible RNG for the sampling-based checks.
        self.rng = check_random_state(1234)

    def test_normal(self):
        # When the skewness is 0 the distribution is normal
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))

    def test_rvs(self):
        """rvs honors the requested output shape for positive/negative a."""
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)
        x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)

    def test_moments(self):
        """mvsk from stats() match sample moments of a large sample."""
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)
        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)

    def test_cdf_large_x(self):
        # Regression test for gh-7746.
        # The x values are large enough that the closest 64 bit floating
        # point representation of the exact CDF is 1.0.
        p = stats.skewnorm.cdf([10, 20, 30], -1)
        assert_allclose(p, np.ones(3), rtol=1e-14)
        p = stats.skewnorm.cdf(25, 2.5)
        assert_allclose(p, 1.0, rtol=1e-14)

    def test_cdf_sf_small_values(self):
        """Tiny tail probabilities and the sf(-x,-a) == cdf(x,a) symmetry."""
        # Triples are [x, a, cdf(x, a)]. These values were computed
        # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
        cdfvals = [
            [-8, 1, 3.870035046664392611e-31],
            [-4, 2, 8.1298399188811398e-21],
            [-2, 5, 1.55326826787106273e-26],
            [-9, -1, 2.257176811907681295e-19],
            [-10, -4, 1.523970604832105213e-23],
        ]
        for x, a, cdfval in cdfvals:
            p = stats.skewnorm.cdf(x, a)
            assert_allclose(p, cdfval, rtol=1e-8)
            # For the skew normal distribution, sf(-x, -a) = cdf(x, a).
            p = stats.skewnorm.sf(-x, -a)
            assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon:
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm:
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform:
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm:
    """Tests for the exponentially modified Gaussian (``stats.exponnorm``).

    The shape parameter is K = 1/(lam*sig), where lam is the rate of the
    exponential component and sig the std dev of the Gaussian component.
    """

    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            # where it is listed as an exponentially modified gaussian
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]

        # Check several (mu, sig, lam) combinations against the closed form.
        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))

    def test_nan_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)

    def test_inf_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)

    def test_extremes_x(self):
        # Test for extreme values against overflows
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)

    # Expected values for the PDF were computed with mpmath, with
    # the following function, and with mpmath.mp.dps = 50.
    #
    #   def exponnorm_stdpdf(x, K):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       t1 = mpmath.exp(1/(2*K**2) - x/K)
    #       erfcarg = -(x - 1/K)/mpmath.sqrt(2)
    #       t2 = mpmath.erfc(erfcarg)
    #       return t1 * t2 / (2*K)
    #
    @pytest.mark.parametrize('x, K, expected',
                             [(20, 0.01, 6.90010764753618e-88),
                              (1, 0.01, 0.24438994313247364),
                              (-1, 0.01, 0.23955149623472075),
                              (-20, 0.01, 4.6004708690125477e-88),
                              (10, 1, 7.48518298877006e-05),
                              (10, 10000, 9.990005048283775e-05)])
    def test_std_pdf(self, x, K, expected):
        assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=1e-12)

    # Expected values for the CDF were computed with mpmath using
    # the following function and with mpmath.mp.dps = 60:
    #
    #   def mp_exponnorm_cdf(x, K, loc=0, scale=1):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       loc = mpmath.mpf(loc)
    #       scale = mpmath.mpf(scale)
    #       z = (x - loc)/scale
    #       return (mpmath.ncdf(z)
    #               - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
    #
    @pytest.mark.parametrize('x, K, scale, expected',
                             [[0, 0.01, 1, 0.4960109760186432],
                              [-5, 0.005, 1, 2.7939945412195734e-07],
                              [-1e4, 0.01, 100, 0.0],
                              [-1e4, 0.01, 1000, 6.920401854427357e-24],
                              [5, 0.001, 1, 0.9999997118542392]])
    def test_cdf_small_K(self, x, K, scale, expected):
        p = stats.exponnorm.cdf(x, K, scale=scale)
        # An exact zero is expected to be reproduced exactly (underflow).
        if expected == 0.0:
            assert p == 0.0
        else:
            assert_allclose(p, expected, rtol=1e-13)

    # Expected values for the SF were computed with mpmath using
    # the following function and with mpmath.mp.dps = 60:
    #
    #   def mp_exponnorm_sf(x, K, loc=0, scale=1):
    #       x = mpmath.mpf(x)
    #       K = mpmath.mpf(K)
    #       loc = mpmath.mpf(loc)
    #       scale = mpmath.mpf(scale)
    #       z = (x - loc)/scale
    #       return (mpmath.ncdf(-z)
    #               + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
    #
    @pytest.mark.parametrize('x, K, scale, expected',
                             [[10, 0.01, 1, 8.474702916146657e-24],
                              [2, 0.005, 1, 0.02302280664231312],
                              [5, 0.005, 0.5, 8.024820681931086e-24],
                              [10, 0.005, 0.5, 3.0603340062892486e-89],
                              [20, 0.005, 0.5, 0.0],
                              [-3, 0.001, 1, 0.9986545205566117]])
    def test_sf_small_K(self, x, K, scale, expected):
        p = stats.exponnorm.sf(x, K, scale=scale)
        # An exact zero is expected to be reproduced exactly (underflow).
        if expected == 0.0:
            assert p == 0.0
        else:
            assert_allclose(p, expected, rtol=5e-13)
class TestGenExpon:
    """Tests for the generalized exponential distribution
    (``stats.genexpon``)."""

    def test_pdf_unity_area(self):
        # ``scipy.integrate.simps`` was renamed to ``simpson`` and the old
        # alias was removed in SciPy 1.14; prefer the new name but fall
        # back to the old one on older SciPy versions.
        try:
            from scipy.integrate import simpson
        except ImportError:  # SciPy < 1.6
            from scipy.integrate import simps as simpson
        # PDF should integrate to one
        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_almost_equal(simpson(p, dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # CDF should always be positive
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))

    def test_sf_tail(self):
        # Expected value computed with mpmath. This script
        #     import mpmath
        #     mpmath.mp.dps = 80
        #     x = mpmath.mpf('15.0')
        #     a = mpmath.mpf('1.0')
        #     b = mpmath.mpf('2.0')
        #     c = mpmath.mpf('1.5')
        #     print(float(mpmath.exp((-a-b)*x + (b/c)*-mpmath.expm1(-c*x))))
        # prints
        #     1.0859444834514553e-19
        s = stats.genexpon.sf(15, 1, 2, 1.5)
        assert_allclose(s, 1.0859444834514553e-19, rtol=1e-13)
class TestExponpow:
    """Regression tests for the exponential power distribution
    (``stats.exponpow``)."""

    def test_tail(self):
        # Deep lower tail: cdf(x, b) ~ x**b as x -> 0.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        # isf must invert sf.
        tail_prob = stats.exponpow.sf(5, .8)
        assert_almost_equal(stats.exponpow.isf(tail_prob, .8), 5)
class TestSkellam:
    """Tests for the Skellam distribution (difference of two Poissons),
    checked against reference values computed with R."""

    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
            [4.2254582961926893e-005, 1.1404838449648488e-004,
             2.8979625801752660e-004, 6.9177078182101231e-004,
             1.5480716105844708e-003, 3.2412274963433889e-003,
             6.3373707175123292e-003, 1.1552351566696643e-002,
             1.9606152375042644e-002, 3.0947164083410337e-002,
             4.5401737566767360e-002, 6.1894328166820688e-002,
             7.8424609500170578e-002, 9.2418812533573133e-002,
             1.0139793148019728e-001, 1.0371927988298846e-001,
             9.9076583077406091e-002, 8.8546660073089561e-002,
             7.4187842052486810e-002, 5.8392772862200251e-002,
             4.3268692953013159e-002, 3.0248159818374226e-002,
             1.9991434305603021e-002, 1.2516877303301180e-002,
             7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
            [6.4061475386192104e-005, 1.7810985988267694e-004,
             4.6790611790020336e-004, 1.1596768997212152e-003,
             2.7077485103056847e-003, 5.9489760066490718e-003,
             1.2286346724161398e-002, 2.3838698290858034e-002,
             4.3444850665900668e-002, 7.4392014749310995e-002,
             1.1979375231607835e-001, 1.8168808048289900e-001,
             2.6011268998306952e-001, 3.5253150251664261e-001,
             4.5392943399683988e-001, 5.5764871387982828e-001,
             6.5672529695723436e-001, 7.4527195703032389e-001,
             8.1945979908281064e-001, 8.7785257194501087e-001,
             9.2112126489802404e-001, 9.5136942471639818e-001,
             9.7136085902200120e-001, 9.8387773632530240e-001,
             9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm:
    """Tests for the log-normal distribution (``stats.lognorm``)."""

    def test_pdf(self):
        # Regression test for Ticket #1471: avoid nan with 0/0 situation
        # Also make sure there are no warnings at x=0, cf gh-5202
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            values = stats.lognorm.pdf([0, 0.5, 1], 1)
        assert_array_almost_equal(values, [0.0, 0.62749608, 0.39894228])

    def test_logcdf(self):
        # Regression test for gh-5940: sf et al would underflow too early
        x2, mu, sigma = 201.68, 195, 0.149
        z = np.log(x2 - mu) / sigma
        # lognorm tail functions must agree with the equivalent normal ones.
        assert_allclose(stats.lognorm.sf(x2 - mu, s=sigma),
                        stats.norm.sf(z))
        assert_allclose(stats.lognorm.logsf(x2 - mu, s=sigma),
                        stats.norm.logsf(z))
class TestBeta:
    """Tests for the beta distribution, including regression tests for the
    Boost-backed implementation (gh-12635, gh-12794, gh-12796)."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
        logpdf = stats.beta.logpdf(0, 1, 0.5)
        assert_almost_equal(logpdf, -0.69314718056)
        # With a < 1 the density diverges at x=0, so logpdf is +inf there.
        logpdf = stats.beta.logpdf(0, 0.5, 1)
        assert_almost_equal(logpdf, np.inf)

    def test_logpdf_ticket_1866(self):
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.beta(alpha, beta)
        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
        # pdf and exp(logpdf) must agree.
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_fit_bad_keyword_args(self):
        # Unknown keyword arguments to fit() must be rejected.
        x = [0.1, 0.5, 0.6]
        assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
                      plate="shrimp")

    def test_fit_duplicated_fixed_parameter(self):
        # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
        # More than one raises a ValueError.
        x = [0.1, 0.5, 0.6]
        assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)

    def test_issue_12635(self):
        # Confirm that Boost's beta distribution resolves gh-12635.
        # Check against R:
        # options(digits=16)
        # p = 0.9999999999997369
        # a = 75.0
        # b = 66334470.0
        # print(qbeta(p, a, b))
        p, a, b = 0.9999999999997369, 75.0, 66334470.0
        assert_allclose(stats.beta.ppf(p, a, b), 2.343620802982393e-06)

    def test_issue_12794(self):
        # Confirm that Boost's beta distribution resolves gh-12794.
        # Check against R.
        # options(digits=16)
        # p = 1e-11
        # count_list = c(10,100,1000)
        # print(qbeta(1-p, count_list + 1, 100000 - count_list))
        inv_R = np.array([0.0004944464889611935,
                          0.0018360586912635726,
                          0.0122663919942518351])
        count_list = np.array([10, 100, 1000])
        p = 1e-11
        inv = stats.beta.isf(p, count_list + 1, 100000 - count_list)
        assert_allclose(inv, inv_R)
        # isf and sf must round-trip.
        res = stats.beta.sf(inv, count_list + 1, 100000 - count_list)
        assert_allclose(res, p)

    def test_issue_12796(self):
        # Confirm that Boost's beta distribution succeeds in the case
        # of gh-12796
        alpha_2 = 5e-6
        count_ = np.arange(1, 20)
        nobs = 100000
        q, a, b = 1 - alpha_2, count_ + 1, nobs - count_
        inv = stats.beta.ppf(q, a, b)
        # ppf and cdf must round-trip.
        res = stats.beta.cdf(inv, a, b)
        assert_allclose(res, 1 - alpha_2)

    def test_endpoints(self):
        # Confirm that boost's beta distribution returns inf at x=1
        # when b<1
        a, b = 1, 0.5
        assert_equal(stats.beta.pdf(1, a, b), np.inf)

        # Confirm that boost's beta distribution returns inf at x=0
        # when a<1
        a, b = 0.2, 3
        assert_equal(stats.beta.pdf(0, a, b), np.inf)
class TestBetaPrime:
    """Tests for the beta prime distribution (``stats.betaprime``)."""

    def test_logpdf(self):
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        # logpdf must stay finite for these large shape parameters.
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)

        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())

        # check the new cdf implementation vs generic one:
        # (calls the unbound private `_cdf_single` with the distribution
        # instance passed explicitly as `self`)
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma:
    """Tests for the gamma distribution (``stats.gamma``)."""

    def test_pdf(self):
        # a few test cases to compare with R
        for x, shape, expected in [(90, 394, 0.002312341),
                                   (3, 10, 0.1620358)]:
            assert_almost_equal(stats.gamma.pdf(x, shape, scale=1./5),
                                expected)

    def test_logpdf(self):
        # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
        # situation
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)

    def test_fit_bad_keyword_args(self):
        # Unknown keyword arguments to fit() must raise TypeError.
        sample = [0.1, 0.5, 0.6]
        assert_raises(TypeError, stats.gamma.fit, sample, floc=0,
                      plate="shrimp")

    def test_isf(self):
        # Test cases for when the probability is very small. See gh-13664.
        # The expected values can be checked with mpmath. With mpmath,
        # the survival function sf(x, k) can be computed as
        #
        #     mpmath.gammainc(k, x, mpmath.inf, regularized=True)
        #
        # Here we have (with mpmath.mp.dps = 60):
        #
        #     gammainc(1, 39.14394658089878, inf)   -> 9.99999999999999e-18
        #     gammainc(100, 330.6557590436547, inf) -> 1.000000000000028e-50
        #
        cases = [(1e-17, 1, 39.14394658089878, 1e-14),
                 (1e-50, 100, 330.6557590436547, 1e-13)]
        for prob, shape, expected, tol in cases:
            assert np.isclose(stats.gamma.isf(prob, shape), expected,
                              atol=tol)
class TestChi2:
    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        cases = [(1000, 1000, 8.919133934753128e-003),
                 (100, 100, 0.028162503162596778)]
        for x, df, expected in cases:
            assert_almost_equal(stats.chi2.pdf(x, df), expected, decimal=14)

    def test_ppf(self):
        # Expected values computed with mpmath.
        cases = [(2e-47, 4.8, 1.098472479575179840604902808e-19),
                 (0.5, 4.8, 4.15231407598589358660093156),
                 (2e-77, 13, 1.0106330688195199050507943e-11),
                 (0.1, 13, 7.041504580095461859307179763)]
        for q, df, expected in cases:
            assert_allclose(stats.chi2.ppf(q, df), expected, rtol=1e-10)
class TestGumbelL:
    # gh-6228
    def test_cdf_ppf(self):
        # ppf must invert cdf deep into the left tail.
        pts = np.linspace(-100, -4)
        roundtrip = stats.gumbel_l.ppf(stats.gumbel_l.cdf(pts))
        assert_allclose(pts, roundtrip)

    def test_logcdf_logsf(self):
        # exp(logcdf) and -expm1(logsf) are two routes to the same CDF.
        pts = np.linspace(-100, -4)
        via_cdf = np.exp(stats.gumbel_l.logcdf(pts))
        via_sf = -special.expm1(stats.gumbel_l.logsf(pts))
        assert_allclose(via_cdf, via_sf)

    def test_sf_isf(self):
        # isf must invert sf.
        pts = np.linspace(-20, 5)
        roundtrip = stats.gumbel_l.isf(stats.gumbel_l.sf(pts))
        assert_allclose(pts, roundtrip)
class TestGumbelR:
    """Tail accuracy tests for the right-skewed Gumbel distribution."""

    def test_sf(self):
        # Expected value computed with mpmath (mpmath.mp.dps = 40):
        #   float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
        expected = 1.9287498479639178e-22
        assert_allclose(stats.gumbel_r.sf(50), expected, rtol=1e-14)

    def test_isf(self):
        # Expected value computed with mpmath (mpmath.mp.dps = 40):
        #   float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
        expected = 39.14394658089878
        assert_allclose(stats.gumbel_r.isf(1e-17), expected, rtol=1e-14)
class TestLevyStable:
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [-.05413, -.05413,
0., 0., 0., 0.,
.00533, .00533, .00533, .00533, .00533,
.03354, .03354, .03354, .03354, .03354,
.05309, .05309, .05309, .05309, .05309]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
# to 2 dps due to rounding error in McCulloch86
assert_almost_equal(loc1, 0.00233, 2)
# cover alpha=2 scenario
x2 = x + [.05309, .05309, .05309, .05309, .05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.slow
def test_pdf_nolan_samples(self):
""" Test pdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
fn = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-pdf-sample-data.npy'))
data = np.load(fn)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
# support numpy 1.8.2 for travis
npisin = np.isin if hasattr(np, "isin") else np.in1d
tests = [
# best selects
['best', None, 8, None],
# quadrature is accurate for most alpha except 0.25; perhaps
# limitation of Nolan stablec?
# we reduce size of x to speed up computation as numerical
# integration slow.
['quadrature', None, 8,
lambda r: ((r['alpha'] > 0.25) &
(npisin(r['x'], [-10, -5, 0, 5, 10])))],
# zolatarev is accurate except at alpha==1, beta != 0
['zolotarev', None, 8, lambda r: r['alpha'] != 1],
['zolotarev', None, 8,
lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
['zolotarev', None, 1,
lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
# fft accuracy reduces as alpha decreases, fails at low values of
# alpha and x=0
['fft', 0, 4, lambda r: r['alpha'] > 1],
['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
# not useful here
['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)],
]
for ix, (default_method, fft_min_points,
decimal_places, filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = (data[filter_func(data)] if filter_func is not None else
data)
with suppress_warnings() as sup:
sup.record(RuntimeWarning,
"Density calculation unstable for alpha=1 "
"and beta!=0.*")
sup.record(RuntimeWarning,
"Density calculations experimental for FFT "
"method.*")
p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'],
subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
padiff = np.abs(p-subdata['p'])
failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |
np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places,
("pdf test %s failed with method '%s'\n%s"
% (ix, default_method, failures)),
verbose=False)
@pytest.mark.slow
def test_cdf_nolan_samples(self):
""" Test cdf values against Nolan's stablec.exe output
see - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
Repeat following with beta = -1, -.5, 0, .5 and 1
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
fn = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/stable-cdf-sample-data.npy'))
data = np.load(fn)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
tests = [
# zolatarev is accurate for all values
['zolotarev', None, 8, None],
# fft accuracy poor, very poor alpha < 1
['fft', 0, 2, lambda r: r['alpha'] > 1],
]
for ix, (default_method, fft_min_points, decimal_places,
filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
subdata = (data[filter_func(data)] if filter_func is not None else
data)
with suppress_warnings() as sup:
sup.record(RuntimeWarning, 'FFT method is considered ' +
'experimental for cumulative distribution ' +
'function evaluations.*')
p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'],
subdata['beta'], scale=1, loc=0)
subdata2 = rec_append_fields(subdata, 'calc', p)
padiff = np.abs(p - subdata['p'])
failures = subdata2[(padiff >= 1.5*10.**(-decimal_places)) |
np.isnan(p)]
assert_almost_equal(p, subdata['p'], decimal_places,
("cdf test %s failed with method '%s'\n%s"
% (ix, default_method, failures)),
verbose=False)
def test_pdf_alpha_equals_one_beta_non_zero(self):
"""
sample points extracted from Tables and Graphs of Stable Probability
Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array([0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4])
density = np.array([.3183, .3096, .2925, .2622,
.1591, .1587, .1599, .1635,
.0637, .0729, .0812, .0955,
.0318, .0390, .0458, .0586,
.0187, .0236, .0285, .0384])
betas = np.array([0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1,
0, .25, .5, 1])
tests = [
['quadrature', None, 4],
['zolotarev', None, 1],
]
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(category=RuntimeWarning,
message="Density calculation unstable.*")
for default_method, fft_min_points, decimal_places in tests:
stats.levy_stable.pdf_default_method = default_method
stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(pdf, density, decimal_places,
default_method)
def test_stats(self):
param_sets = [
[(1.48, -.22, 0, 1), (0, np.inf, np.NaN, np.NaN)],
[(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
for args, exp_stats in param_sets:
calc_stats = stats.levy_stable.stats(args[0], args[1],
loc=args[2], scale=args[3],
moments='mvsk')
assert_almost_equal(calc_stats, exp_stats)
@pytest.mark.slow
@pytest.mark.parametrize('beta', [0.5, 1])
def test_rvs_alpha1(self, beta):
np.random.seed(987654321)
alpha = 1.0
loc = 0.5
scale = 1.5
x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale,
size=5000)
stat, p = stats.kstest(x, 'levy_stable',
args=(alpha, beta, loc, scale))
assert p > 0.01
class TestArrayArgument:  # test for ticket:992
    def setup_method(self):
        # Fixed seed so rvs() is reproducible across runs.
        np.random.seed(1234)

    def test_noexception(self):
        # Broadcasting array-valued loc/scale against a larger size
        # must work and produce the requested output shape.
        sample = stats.norm.rvs(loc=np.arange(5), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring:
    def test_docstrings(self):
        # See ticket #761
        for dist, word in ((stats.rayleigh, "rayleigh"),
                           (stats.bernoulli, "bernoulli")):
            # Docstrings may be stripped under `python -OO`, hence the guard.
            if dist.__doc__ is not None:
                assert_(word in dist.__doc__.lower())

    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
def TestArgsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod:
    """Tests for the generic and distribution-specific ``fit`` methods,
    with emphasis on fixed-parameter (floc/fscale/f0/fa/...) handling."""

    # Distributions whose fit is skipped by generic tests elsewhere.
    skip = ['ncf', 'ksone', 'kstwo']

    def setup_method(self):
        np.random.seed(1234)

    # skip these b/c deprecated, or only loc and scale arguments
    fitSkipNonFinite = ['expon', 'norm', 'uniform']

    @pytest.mark.parametrize('dist,args', distcont)
    def test_fit_w_non_finite_data_values(self, dist, args):
        """gh-10300"""
        if dist in self.fitSkipNonFinite:
            pytest.skip("%s fit known to fail or deprecated" % dist)
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        distfunc = getattr(stats, dist)
        assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
        assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            # With loc and scale fixed, the MLE of the shape is the RMS of
            # log(x) - log(scale).
            expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [expected_shape, 0, 20], atol=1e-8)

    def test_fix_fit_norm(self):
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # MLE condition for the gamma shape: log(a) - digamma(a) = s.
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # With loc and scale fixed, the shape MLE satisfies digamma(a) = c.
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_expon_fit(self):
        x = np.array([2, 2, 4, 4, 4, 4, 4, 8])

        loc, scale = stats.expon.fit(x)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 2)  # x.mean() - x.min()

        loc, scale = stats.expon.fit(x, fscale=3)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 3)  # fscale

        loc, scale = stats.expon.fit(x, floc=0)
        assert_equal(loc, 0)    # floc
        assert_equal(scale, 4)  # x.mean() - loc

    def test_lognorm_fit(self):
        x = np.array([1.5, 3, 10, 15, 23, 59])
        lnxm1 = np.log(x - 1)

        shape, loc, scale = stats.lognorm.fit(x, floc=1)
        assert_allclose(shape, lnxm1.std(), rtol=1e-12)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)

        shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
        assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
                        rtol=1e-12)
        assert_equal(loc, 1)
        assert_equal(scale, 6)

        shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
        assert_equal(shape, 0.75)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)

    def test_uniform_fit(self):
        x = np.array([1.0, 1.1, 1.2, 9.0])

        loc, scale = stats.uniform.fit(x)
        assert_equal(loc, x.min())
        assert_equal(scale, x.ptp())

        loc, scale = stats.uniform.fit(x, floc=0)
        assert_equal(loc, 0)
        assert_equal(scale, x.max())

        loc, scale = stats.uniform.fit(x, fscale=10)
        assert_equal(loc, 0)
        assert_equal(scale, 10)

        # Data outside the fixed support must be rejected.
        assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
        assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)

    @pytest.mark.parametrize("method", ["MLE", "MM"])
    def test_fshapes(self, method):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3., method=method)
        res_2 = stats.beta.fit(x, fa=3., method=method)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_2 = stats.beta.fit(x, fix_a=3., method=method)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4., method=method)
        res_4 = stats.beta.fit(x, fb=4., method=method)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        res_4 = stats.beta.fit(x, fix_b=4., method=method)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3, method=method)

        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])

        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a, method=method)
        assert_equal(aa, a)

    @pytest.mark.parametrize("method", ["MLE", "MM"])
    def test_extra_params(self, method):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct, method=method)
class TestFrozen:
    """Tests for frozen distribution objects."""

    def setup_method(self):
        np.random.seed(1234)

    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        # Each public method of the frozen object must match the unfrozen
        # distribution called with the same loc/scale.
        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])

        c = 0.1
        stats.genpareto.pdf(0, c=c)
        assert_equal(rv.dist._get_support(c), [0, np.inf])

        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])

        c = 0.1
        stats.genpareto.pdf(0, c)  # this should NOT change genpareto.b
        assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))

        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            rv = stats.genpareto(c=c)
            a, b = rv.a, rv.b
            assert_equal(a, 0.)
            assert_(np.isposinf(b))

        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)

            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            # The unpickled copy must reproduce the same random stream.
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect:
    """Tests for the expect() method of continuous and discrete
    distributions."""
    # Test for expect method.
    #
    # Uses normal distribution and beta distribution for finite bounds, and
    # hypergeom for discrete distribution with finite support
    def test_norm(self):
        """Moments and interval probabilities of a shifted/scaled normal."""
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)
        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)
        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)
        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)
    def test_beta(self):
        """Same checks for beta, which has a finite support interval."""
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)
        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)
        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                                   scale=2., lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)
        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)
    def test_hypergeom(self):
        """Discrete expect with finite support: bounds and conditioning."""
        # test case with finite bounds
        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)
        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)
        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
                                          args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)
        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)
        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)
        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)
    def test_poisson(self):
        """Lower-bound-only sums for a discrete distribution."""
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2, 2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)
    def test_genhalflogistic(self):
        """expect() must be repeatable when _argcheck changes the support."""
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)
    def test_rice_overflow(self):
        """i0 overflow in the rice pdf must not leak into expect()."""
        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
    def test_logser(self):
        """Infinite-support discrete expect, with and without loc."""
        # test a discrete distribution with infinite support and loc
        p, loc = 0.3, 3
        res_0 = stats.logser.expect(lambda k: k, args=(p,))
        # check against the correct answer (sum of a geom series)
        assert_allclose(res_0,
                        p / (p - 1.) / np.log(1. - p), atol=1e-15)
        # now check it with `loc`
        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
        assert_allclose(res_l, res_0 + loc, atol=1e-15)
    def test_skellam(self):
        """Bi-infinite-support expect vs the known first two moments."""
        # Use a discrete distribution w/ bi-infinite support. Compute two first
        # moments and compare to known values (cf skellam.stats)
        p1, p2 = 18, 22
        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
        assert_allclose(m1, p1 - p2, atol=1e-12)
        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
    def test_randint(self):
        """Parameter-dependent support larger than the default chunksize."""
        # Use a discrete distribution w/ parameter-dependent support, which
        # is larger than the default chunksize
        lo, hi = 0, 113
        res = stats.randint.expect(lambda x: x, (lo, hi))
        assert_allclose(res,
                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
    def test_zipf(self):
        """A divergent sum must warn instead of looping forever."""
        # Test that there is no infinite loop even if the sum diverges
        assert_warns(RuntimeWarning, stats.zipf.expect,
                     lambda x: x**2, (2,))
    def test_discrete_kwds(self):
        """Keywords that control the summation must be accepted."""
        # check that discrete expect accepts keywords to control the summation
        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
                                  maxcount=1001, chunksize=32, tolerance=1e-8)
        assert_almost_equal(n0, n1, decimal=14)
    def test_moment(self):
        """.moment() against the closed-form fifth Poisson moment."""
        # test the .moment() method: compute a higher moment and compare to
        # a known value
        def poiss_moment5(mu):
            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
        for mu in [5, 7]:
            m5 = stats.poisson.moment(5, mu)
            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct:
    """Tests for the noncentral t distribution (stats.nct)."""
    def test_nc_parameter(self):
        """cdf must be defined for nc <= 0 (regression for gh-2402)."""
        # nc = 0: symmetric, half the mass lies below zero
        assert_equal(stats.nct(5, 0).cdf(0), 0.5)
        # negative noncentrality shifts mass above zero
        assert_almost_equal(stats.nct(5, -1).cdf(0), 0.841344746069,
                            decimal=10)
    def test_broadcasting(self):
        """pdf must broadcast a column of df against a row of nc."""
        df = np.arange(4, 7)[:, None]
        nc = np.linspace(0.1, 1, 4)
        got = stats.nct.pdf(5, df, nc)
        want = np.array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
                         [0.00217142, 0.00395366, 0.00683888, 0.01126276],
                         [0.00153078, 0.00291093, 0.00525206, 0.00900815]])
        assert_allclose(got, want, rtol=1e-5)
    def test_variance_gh_issue_2401(self):
        """var() used to raise TypeError ('isinf' casting error, gh-2401)."""
        assert_equal(stats.nct(4, 0).var(), 2.0)
    def test_nct_inf_moments(self):
        """The n-th moment exists only for df > n; the rest must be nan."""
        # df < 1: no finite moments at all
        m, v, s, k = stats.nct.stats(df=0.9, nc=0.3, moments='mvsk')
        assert_equal([m, v, s, k], [np.nan, np.nan, np.nan, np.nan])
        # 1 < df < 2: only the mean is finite
        m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
        assert_(np.isfinite(m))
        assert_equal([v, s, k], [np.nan, np.nan, np.nan])
        # 3 < df < 4: everything but the kurtosis is finite
        m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
        assert_(np.isfinite([m, v, s]).all())
        assert_equal(k, np.nan)
    def test_nct_stats_large_df_values(self):
        """Large-df mean/variance vs mpmath references (gh-12919: the old
        gamma-based formula lost precision around df=345)."""
        # References computed with mpmath; also verified with Wolfram Alpha:
        #   Mean[NoncentralStudentTDistribution[1000, 2]]
        #   Var[NoncentralStudentTDistribution[1000, 2]]
        expected_1000 = [2.0015015641422464, 1.0040115288163005]
        assert_allclose(stats.nct.mean(1000, 2), expected_1000[0],
                        rtol=1e-10)
        assert_allclose(stats.nct.stats(1000, 2), expected_1000,
                        rtol=1e-10)
        # and an even bigger df value (mpmath references)
        expected_100000 = [2.0000150001562518, 1.0000400011500288]
        assert_allclose(stats.nct.mean(100000, 2), expected_100000[0],
                        rtol=1e-10)
        assert_allclose(stats.nct.stats(100000, 2), expected_100000,
                        rtol=1e-9)
class TestRice:
    """Tests for the Rice distribution."""
    def test_rice_zero_b(self):
        """All methods must work at b=0 and the pdf must be continuous as
        b -> 0 (gh-2164)."""
        x = [0.2, 1., 5.]
        for method in (stats.rice.pdf, stats.rice.logpdf,
                       stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(method(x, b=0.)).all())
        q = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
        assert_(np.isfinite(stats.rice.stats(0, moments='mvsk')).all())
        # continuity: rice.pdf(x, b) = x*exp(-x**2/2) + O(b**2) as b -> 0
        # (see e.g. Abramowitz & Stegun 9.6.7 and 9.6.10)
        b = 1e-8
        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
                        atol=b, rtol=0)
    def test_rice_rvs(self):
        """rvs must honour the default and explicit size arguments."""
        assert_equal(stats.rice.rvs(b=3.).size, 1)
        assert_equal(stats.rice.rvs(b=3., size=(3, 5)).shape, (3, 5))
    def test_rice_gh9836(self):
        """cdf/ppf far from the origin against R's VGAM references
        (gh-9836: the cdf used to jump to 1 at the end of the range)."""
        # R: library(VGAM); options(digits=16)
        #    x <- seq(10, 150, 10); price(x, sigma=1, vee=x)
        grid = np.arange(10, 160, 10)
        cdf = stats.rice.cdf(grid, grid)
        cdf_exp = [0.4800278103504522, 0.4900233218590353, 0.4933500379379548,
                   0.4950128317658719, 0.4960103776798502, 0.4966753655438764,
                   0.4971503395812474, 0.4975065620443196, 0.4977836197921638,
                   0.4980052636649550, 0.4981866072661382, 0.4983377260666599,
                   0.4984655952615694, 0.4985751970541413, 0.4986701850071265]
        assert_allclose(cdf, cdf_exp)
        # R: qrice(seq(0.1, 0.9, by=0.1), vee = 500, sigma = 4)
        probabilities = np.arange(0.1, 1, 0.1)
        ppf = stats.rice.ppf(probabilities, 500/4, scale=4)
        ppf_exp = [494.8898762347361, 496.6495690858350, 497.9184315188069,
                   499.0026277378915, 500.0159999146250, 501.0293721352668,
                   502.1135684981884, 503.3824312270405, 505.1421247157822]
        assert_allclose(ppf, ppf_exp)
        # R: qrice(0.5, vee = seq(10, 140, 10), sigma = 1)
        ppf = stats.rice.ppf(0.5, np.arange(10, 150, 10))
        ppf_exp = [10.04995862522287, 20.02499480078302, 30.01666512465732,
                   40.01249934924363, 50.00999966676032, 60.00833314046875,
                   70.00714273568241, 80.00624991862573, 90.00555549840364,
                   100.00499995833597, 110.00454542324384, 120.00416664255323,
                   130.00384613488120, 140.00357141338748]
        assert_allclose(ppf, ppf_exp)
class TestErlang:
    """Tests for the Erlang distribution (integer-shape gamma)."""
    def setup_method(self):
        # Fixed seed so the rvs-based checks are deterministic.
        np.random.seed(1234)
    def test_erlang_runtimewarning(self):
        """A non-integer shape must warn; fit with integer f0 must not."""
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            # Turn the warning into an error so both directions are checked.
            warnings.simplefilter("error", RuntimeWarning)
            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning. It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
    def test_gh_pr_10949_argcheck(self):
        """Invalid shapes must give the same (nan) pdf as gamma (gh-10949)."""
        assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
                     stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh:
    """Tests for the Rayleigh distribution, focused on fit()."""
    def setup_method(self):
        # NOTE(review): the parametrize decorators below call
        # np.random.rand at collection time, before this seed is applied.
        np.random.seed(987654321)
    # gh-6227
    def test_logpdf(self):
        """Deep-tail logpdf value (regression for gh-6227)."""
        y = stats.rayleigh.logpdf(50)
        assert_allclose(y, -1246.0879769945718)
    def test_logsf(self):
        """Deep-tail logsf value."""
        y = stats.rayleigh.logsf(50)
        assert_allclose(y, -1250)
    @pytest.mark.parametrize("rvs_loc,rvs_scale", [np.random.rand(2)])
    def test_fit(self, rvs_loc, rvs_scale):
        """fit() must use the analytical scale MLE when floc is given."""
        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
        def scale_mle(data, floc):
            # analytical MLE of the scale for a known location
            return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5
        # when `floc` is provided, `scale` is found with an analytical formula
        scale_expect = scale_mle(data, rvs_loc)
        loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
        assert_equal(loc, rvs_loc)
        assert_equal(scale, scale_expect)
        # when `fscale` is fixed, superclass fit is used to determine `loc`.
        loc, scale = stats.rayleigh.fit(data, fscale=.6)
        assert_equal(scale, .6)
        # with both parameters free, one dimensional optimization is done
        # over a new function that takes into account the dependent relation
        # of `scale` to `loc`.
        loc, scale = stats.rayleigh.fit(data)
        # test that `scale` is defined by its relation to `loc`
        assert_equal(scale, scale_mle(data, loc))
    @pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01],
                                                   np.random.rand(2)])
    def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
        """The analytical MLE must be at least as good as the generic fit."""
        # test that the objective function result of the analytical MLEs is
        # less than or equal to that of the numerically optimized estimate
        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
        # obtain objective function with same method as `rv_continuous.fit`
        args = [data, (stats.rayleigh._fitstart(data), )]
        func = stats.rayleigh._reduce_func(args, {})[1]
        # NOTE(review): helper defined elsewhere in this test module.
        _assert_less_or_close_loglike(stats.rayleigh, data, func)
    def test_fit_warnings(self):
        """Shared fixed-parameter warning checks (helper defined elsewhere)."""
        assert_fit_warnings(stats.rayleigh)
class TestExponWeib:
    """exponweib pdf/logpdf checks, including the a=1 special cases."""
    def test_pdf_logpdf(self):
        """Tiny densities must stay accurate (regression for gh-3508)."""
        x, a, c = 0.1, 1.0, 100.0
        pdf_val = stats.exponweib.pdf(x, a, c)
        logpdf_val = stats.exponweib.logpdf(x, a, c)
        # Expected values were computed with mpmath.
        assert_allclose([pdf_val, logpdf_val],
                        [1.0000000000000054e-97, -223.35075402042244])
    def test_a_is_1(self):
        """With a=1, exponweib coincides with weibull_min (gh-3508)."""
        x = np.logspace(-4, -1, 4)
        c = 100
        assert_allclose(stats.exponweib.pdf(x, 1, c),
                        stats.weibull_min.pdf(x, c))
        assert_allclose(stats.exponweib.logpdf(x, 1, c),
                        stats.weibull_min.logpdf(x, c))
    def test_a_is_1_c_is_1(self):
        """With a=1 and c=1, exponweib reduces to the exponential."""
        x = np.logspace(-8, 1, 10)
        assert_allclose(stats.exponweib.pdf(x, 1, 1), stats.expon.pdf(x))
        assert_allclose(stats.exponweib.logpdf(x, 1, 1),
                        stats.expon.logpdf(x))
class TestFatigueLife:
    """Deep-tail accuracy checks for stats.fatiguelife."""
    def test_sf_tail(self):
        """sf(800, 2.5) against an 80-digit mpmath reference.

        Reference computed with mpmath (mp.dps = 80):
            x, c = mpf(800.0), mpf(2.5)
            1 - ncdf((sqrt(x) - 1/sqrt(x)) / c)  ->  6.593376447038406e-30
        """
        tail = stats.fatiguelife.sf(800.0, 2.5)
        assert_allclose(tail, 6.593376447038406e-30, rtol=1e-13)
    def test_isf_tail(self):
        """isf must invert the deep-tail sf value (see test_sf_tail)."""
        quantile = stats.fatiguelife.isf(6.593376447038406e-30, 2.5)
        assert_allclose(quantile, 800.0, rtol=1e-13)
class TestWeibull:
    """Tests for weibull_min and weibull_max."""
    def test_logpdf(self):
        """weibull_min.logpdf(0, 1) must be 0, not nan (gh-6217)."""
        assert_equal(stats.weibull_min.logpdf(0, 1), 0)
    def test_with_maxima_distrib(self):
        """Compare weibull_min / weibull_max against reference values from
        the symbolic algebra program 'maxima' (package 'distrib').

        Mapping used for the references:
          weibull_min.pdf/cdf(x, a, scale=b) <-> pdf_weibull/cdf_weibull(x, a, b)
          weibull_min.sf                     <-> 1 - cdf_weibull
          weibull_max.<fn>(x, a, scale=b)    <-> the same with x -> -x and
                                                 cdf/sf exchanged
        (log variants are the logs of the above).
        """
        x, a, b = 1.5, 2.0, 3.0
        # For these values (x/b)**a == 0.25, so everything reduces to
        # exp(-0.25) and expm1(-0.25).
        # --- weibull_min ---
        assert_allclose(stats.weibull_min.pdf(x, a, scale=b),
                        np.exp(-0.25)/3)
        assert_allclose(stats.weibull_min.logpdf(x, a, scale=b),
                        -0.25 - np.log(3))
        assert_allclose(stats.weibull_min.cdf(x, a, scale=b),
                        -special.expm1(-0.25))
        assert_allclose(stats.weibull_min.logcdf(x, a, scale=b),
                        np.log(-special.expm1(-0.25)))
        assert_allclose(stats.weibull_min.sf(x, a, scale=b), np.exp(-0.25))
        assert_allclose(stats.weibull_min.logsf(x, a, scale=b), -0.25)
        # large x: the sf computed via the CDF would underflow to 0
        assert_allclose(stats.weibull_min.sf(30, 2, scale=3), np.exp(-100))
        assert_allclose(stats.weibull_min.logsf(30, 2, scale=3), -100)
        # --- weibull_max (mirror image) ---
        x = -1.5
        assert_allclose(stats.weibull_max.pdf(x, a, scale=b),
                        np.exp(-0.25)/3)
        assert_allclose(stats.weibull_max.logpdf(x, a, scale=b),
                        -0.25 - np.log(3))
        assert_allclose(stats.weibull_max.cdf(x, a, scale=b), np.exp(-0.25))
        assert_allclose(stats.weibull_max.logcdf(x, a, scale=b), -0.25)
        assert_allclose(stats.weibull_max.sf(x, a, scale=b),
                        -special.expm1(-0.25))
        assert_allclose(stats.weibull_max.logsf(x, a, scale=b),
                        np.log(-special.expm1(-0.25)))
        # x near 0: the sf computed via the CDF would round to 0
        assert_allclose(stats.weibull_max.sf(-1e-9, 2, scale=3),
                        -special.expm1(-1/9000000000000000000))
        assert_allclose(stats.weibull_max.logsf(-1e-9, 2, scale=3),
                        np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist:
    """Tests for the R-distribution (stats.rdist)."""
    def test_rdist_cdf_gh1285(self):
        """cdf(ppf(q)) round-trip at a large shape value; exercises the
        workaround in rdist._cdf for gh-1285."""
        shape = 541.0
        quantiles = [0.001, 0.5, 0.999]
        roundtrip = stats.rdist.cdf(stats.rdist.ppf(quantiles, shape), shape)
        assert_almost_equal(roundtrip, quantiles, decimal=5)
    def test_rdist_beta(self):
        """rdist(c) on [-1, 1] is a rescaled beta(c/2, c/2) on [0, 1]."""
        x = np.linspace(-0.99, 0.99, 10)
        c = 2.7
        beta_equiv = 0.5 * stats.beta(c/2, c/2).pdf((x + 1) / 2)
        assert_almost_equal(beta_equiv, stats.rdist(c).pdf(x))
class TestTrapezoid:
    """Tests for the trapezoid distribution (and its `trapz` alias)."""
    def test_reduces_to_triang(self):
        """trapezoid(c, c) collapses to triang(c) for any mode c."""
        for mode in (0, 0.3, 0.5, 1):
            pts = [0, mode, 1]
            assert_almost_equal(stats.trapezoid.pdf(pts, mode, mode),
                                stats.triang.pdf(pts, mode))
            assert_almost_equal(stats.trapezoid.cdf(pts, mode, mode),
                                stats.triang.cdf(pts, mode))
    def test_reduces_to_uniform(self):
        """trapezoid(0, 1) collapses to the standard uniform."""
        grid = np.linspace(0, 1, 10)
        assert_almost_equal(stats.trapezoid.pdf(grid, 0, 1),
                            stats.uniform.pdf(grid))
        assert_almost_equal(stats.trapezoid.cdf(grid, 0, 1),
                            stats.uniform.cdf(grid))
    def test_cases(self):
        """Spot checks of pdf/cdf: degenerate, one-sided and generic shapes."""
        # degenerate corners (triangular limits)
        assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2)
        assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2)
        # one-sided ramps
        assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8),
                            1.11111111111111111)
        assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0),
                            1.11111111111111111)
        # generic trapezoid: pdf ...
        for xv, pv in [(0.1, 0.625), (0.5, 1.25), (0.9, 0.625)]:
            assert_almost_equal(stats.trapezoid.pdf(xv, 0.2, 0.8), pv)
        # ... and cdf
        for xv, cv in [(0.1, 0.03125), (0.2, 0.125), (0.5, 0.5),
                       (0.9, 0.96875), (1.0, 1.0)]:
            assert_almost_equal(stats.trapezoid.cdf(xv, 0.2, 0.8), cv)
    def test_moments_and_entropy(self):
        """mean/var/entropy vs closed-form (Wikipedia) values (gh-11795)."""
        # trapezoid on [a, d] with plateau [b, c], mapped to scipy's
        # (p1, p2, loc, scale) parametrization
        a, b, c, d = -3, -1, 2, 3  # => 1/3, 5/6, -3, 6
        p1, p2 = (b - a) / (d - a), (c - a) / (d - a)
        loc, scale = a, d - a
        height = 2 / (d + c - b - a)
        def raw_moment(n):
            # n-th raw moment from the closed-form antiderivative
            return (height * ((d**(n+2) - c**(n+2)) / (d - c)
                              - (b**(n+2) - a**(n+2)) / (b - a))
                    / (n + 1) / (n + 2))
        mean = raw_moment(1)
        var = raw_moment(2) - mean**2
        entropy = (0.5 * (d - c + b - a) / (d + c - b - a)
                   + np.log(0.5 * (d + c - b - a)))
        assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale),
                            mean, decimal=13)
        assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale),
                            var, decimal=13)
        assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale),
                            entropy, decimal=13)
        # boundary cases where the scipy shape parameters are 0 or 1
        assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13)
        assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13)
        assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13)
    def test_trapezoid_vect(self):
        """Array-valued shape parameters must broadcast like scalars."""
        c = np.array([0.1, 0.2, 0.3])
        d = np.array([0.5, 0.6])[:, None]
        x = np.array([0.15, 0.25, 0.9])
        vectorized = stats.trapezoid.pdf(x, c, d)
        xb, cb, db = np.broadcast_arrays(x, c, d)
        scalar = np.array([stats.trapezoid.pdf(xi, ci, di)
                           for xi, ci, di in zip(xb.ravel(), cb.ravel(),
                                                 db.ravel())])
        assert_allclose(vectorized, scalar.reshape(vectorized.shape),
                        atol=1e-15)
        # the stats() method must broadcast too
        vec_stats = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk"))
        cb, db = np.broadcast_arrays(c, d)
        scalar_stats = np.array([stats.trapezoid.stats(ci, di,
                                                       moments="mvsk")
                                 for ci, di in zip(cb.ravel(), db.ravel())])
        assert_allclose(vec_stats, scalar_stats.T.reshape(vec_stats.shape),
                        atol=1e-15)
    def test_trapz(self):
        """`trapz` must keep working as an alias of `trapezoid`."""
        grid = np.linspace(0, 1, 10)
        assert_almost_equal(stats.trapz.pdf(grid, 0, 1),
                            stats.uniform.pdf(grid))
class TestTriang:
    """Tests for the triangular distribution."""
    def test_edge_cases(self):
        """pdf/cdf at mode c=0 and c=1 must hit the exact closed-form values
        without raising any floating-point errors."""
        with np.errstate(all='raise'):
            # pdf: c = 0 is a decreasing ramp, c = 1 an increasing one
            for x, c, expected_pdf in [(0, 0, 2.), (0.5, 0, 1.), (1, 0, 0.),
                                       (0, 1, 0), (0.5, 1, 1.), (1, 1, 2)]:
                assert_equal(stats.triang.pdf(x, c), expected_pdf)
            # matching cdf values
            for x, c, expected_cdf in [(0., 0., 0.), (0.5, 0., 0.75),
                                       (1.0, 0., 1.0), (0., 1., 0.),
                                       (0.5, 1., 0.25), (1., 1., 1)]:
                assert_equal(stats.triang.cdf(x, c), expected_cdf)
class TestMielke:
    """Tests for Mielke's beta-kappa distribution."""
    def test_moments(self):
        """The n-th moment of mielke(k, s) is finite only for n < s."""
        k = 4.642
        # s <= 1: the first moment diverges
        assert_equal(stats.mielke(k, 0.597).moment(1), np.inf)
        assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
        # s > 1: the first moment exists
        assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
    def test_burr_equivalence(self):
        """mielke(k, s) has the same pdf as burr(s, k/s)."""
        x = np.linspace(0.01, 100, 50)
        k, s = 2.45, 5.32
        assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr:
    """Tests for the Burr family (burr, burr12, fisk)."""
    def test_endpoints_7491(self):
        """pdf/logpdf at the left support endpoint dist.a (gh-7491)."""
        # (distribution, shape args, expected pdf at the left endpoint)
        cases = [
            (stats.fisk, (1,), 1),
            (stats.burr, (0.5, 2), 1),
            (stats.burr, (1, 1), 1),
            (stats.burr, (2, 0.5), 1),
            (stats.burr12, (1, 0.5), 0.5),
            (stats.burr12, (1, 1), 1.0),
            (stats.burr12, (1, 2), 2.0),
        ]
        pdf_at_a = [dist.pdf(dist.a, *shapes) for dist, shapes, _ in cases]
        expected = [value for _, _, value in cases]
        assert_array_almost_equal(pdf_at_a, expected)
        logpdf_at_a = [dist.logpdf(dist.a, *shapes)
                       for dist, shapes, _ in cases]
        assert_array_almost_equal(logpdf_at_a, np.log(expected))
    def test_burr_stats_9544(self):
        """mean/variance vs hand-computed values (gh-9544, test from
        gh-9978)."""
        mean, variance = stats.burr(5.0, 3).stats()
        # mean = sc.beta(3 + 1/5, 1 - 1/5) * 3
        # var  = sc.beta(3 + 2/5, 1 - 2/5) * 3 - mean**2
        assert_allclose(mean, 1.4110263183925857)
        assert_allclose(variance, 0.22879948026191643)
    def test_burr_nan_mean_var_9544(self):
        """Nonexistent moments must come out as nan (gh-9544 / gh-9978).
        With d=3 fixed, the checks below show the n-th moment is finite
        exactly while n < c."""
        d = 3
        # frozen stats(): mean finite for c > 1, variance for c > 2
        mean, variance = stats.burr(0.5, d).stats()
        assert_(np.isnan(mean))
        assert_(np.isnan(variance))
        mean, variance = stats.burr(1.5, d).stats()
        assert_(np.isfinite(mean))
        assert_(np.isnan(variance))
        # raw moments 1..4: the first n_finite are finite, the rest nan
        for c, n_finite in [(0.5, 0), (1.5, 1), (2.5, 2), (3.5, 3),
                            (4.5, 4)]:
            moments = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
            assert_(np.isfinite(moments[:n_finite]).all())
            assert_(np.isnan(moments[n_finite:]).all())
def test_540_567():
    """norm.cdf must not return nan for these inputs (tickets 540, 567)."""
    cases = [
        ((-1.7624320982,), {}, 0.03899815971089126),
        ((-1.7624320983,), {}, 0.038998159702449846),
        ((1.38629436112,), dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for args, kwargs, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwargs), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    """Constructing a distribution generator with the default extradoc=None
    used to crash _construct_default_doc (ticket #1316); simply succeeding
    is the check."""
    stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    """chi2.pdf(0, 2) must be 0.5, not nan from 0*log(0) (ticket #1326)."""
    density_at_zero = stats.chi2.pdf(0.0, 2)
    assert_almost_equal(density_at_zero, 0.5, 14)
def test_regression_tukey_lambda():
    """tukeylambda.pdf must handle non-positive shape parameters."""
    x = np.linspace(-5.0, 5.0, 101)
    with np.errstate(divide='ignore'):
        # scalar and broadcast lambdas <= 0: the pdf is finite and nonzero
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            density = stats.tukeylambda.pdf(x, lam)
            assert_((density != 0.0).all())
            assert_(~np.isnan(density).all())
        # a positive lambda has bounded support, so zeros do appear there
        lam = np.array([[-1.0], [0.0], [2.0]])
        density = stats.tukeylambda.pdf(x, lam)
        assert_(~np.isnan(density).all())
        assert_((density[0] != 0.0).all())
        assert_((density[1] != 0.0).all())
        assert_((density[2] != 0.0).any())
        assert_((density[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
    """Discrete distributions must document pmf, not pdf (ticket #1421)."""
    doc = stats.poisson.__doc__
    assert_('pdf(x, mu, loc=0, scale=1)' not in doc)
    assert_('pmf(x,' in doc)
def test_nan_arguments_gh_issue_1362():
    """A nan quantile/shape must propagate to a nan result in every
    distribution method (gh-1362)."""
    with np.errstate(invalid='ignore'):
        # continuous: nan shape parameter for Student's t
        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pdf', 'logpdf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, method)(1, np.nan)))
        # discrete: nan argument for bernoulli(0.5)
        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pmf', 'logpmf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, method)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    """fit() must honour fixed (f0/floc/fscale) parameters (ticket #1536)."""
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
    with np.errstate(divide='ignore'):
        # every combination of fixing shape/loc/scale must recover `true`
        for fixed in (dict(floc=0.), dict(fscale=0.5, loc=0),
                      dict(f0=0.25, loc=0), dict(f0=0.25, floc=0)):
            params = np.array(stats.lognorm.fit(x, **fixed))
            assert_almost_equal(params, true, decimal=2)
    # with floc fixed, norm.fit's scale is the rms deviation about floc
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x - floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    """Cauchy fit must converge from its default starting value."""
    np.random.seed(654321)
    samples = stats.cauchy.rvs(size=100)
    fitted = stats.cauchy.fit(samples)
    assert_almost_equal(fitted, (0.045, 1.142), decimal=1)
def test_gh_pr_4806():
    """Cauchy fit starting values must cope with large location offsets."""
    np.random.seed(1234)
    base = np.random.randn(42)
    for offset in (10000.0, 1222333444.0):
        loc, scale = stats.cauchy.fit(base + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    """Variance/kurtosis of tukeylambda vs exact and mpmath references
    (see test_tukeylamdba_stats.py for more tests)."""
    # lam = 0 (known exact values)
    assert_almost_equal(stats.tukeylambda.stats(0, moments='mvsk'),
                        [0, np.pi**2/3, 0, 1.2], decimal=10)
    # remaining reference values computed with mpmath
    for lam, expected in [
            (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
            (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456])]:
        assert_almost_equal(stats.tukeylambda.stats(lam, moments='mvsk'),
                            expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    """logpmf far in the tail must stay finite (ticket #1436)."""
    tail_logpmf = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(tail_logpmf))
def test_powerlaw_stats():
    """Check powerlaw mvsk stats against closed forms (regression, #1548).

    For shape a the exact values (derived from E[X**k] = a / (a + k)) are:
        mean            mu      = a / (a + 1)
        variance        sigma^2 = a / ((a + 2) * (a + 1)**2)
        skewness        gamma_1 = -2 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
        excess kurtosis gamma_2 = 6 * (a**3 - a**2 - 6*a + 2)
                                  / (a * (a + 3) * (a + 4))
    See the Wikipedia articles on skewness and kurtosis for the defining
    formulas; the skewness/kurtosis expressions simplify from
        gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
        gamma_2 = mu_4 / sigma**4 - 3,
        mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4)).
    """
    cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
             (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
    for a, exact_mvsk in cases:
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, exact_mvsk)
def test_powerlaw_edge():
    """logpdf at the support edge x=0 with a=1 must be 0 (gh-3986)."""
    edge_logpdf = stats.powerlaw.logpdf(0, 1)
    assert_equal(edge_logpdf, 0.0)
def test_exponpow_edge():
    """pdf/logpdf of exponpow at x=0 for several values of b (gh-3982)."""
    # b = 1: logpdf at the origin is 0
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # b < 1 diverges at the origin, b > 1 vanishes there
    b = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, b), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, b), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    """gh-3985: pdf at x=0; gh-4724: negative-order raw moments."""
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)
    # _munp(-2, a, 1) should equal 1/((a-1)*(a-2)).
    for a, expected in [(200, 1./199/198), (10, 1./9/8)]:
        assert_almost_equal(stats.gengamma._munp(-2, a, 1.), expected)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: fitting ksone to this sample used to
    # freeze (hang in an infinite loop).  The test only asserts that fit()
    # returns; the warnings it emits along the way are expected noise.
    # Fixed sample of 48 points, roughly in [-0.36, 0.44].
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])

    # Suppress the expected numerical-integration and truncation warnings so
    # the test run stays clean.
    with np.errstate(invalid='ignore'):
        with suppress_warnings() as sup:
            sup.filter(IntegrationWarning,
                       "The maximum number of subdivisions .50. has been "
                       "achieved.")
            sup.filter(RuntimeWarning,
                       "floating point number truncated to an integer")
            stats.ksone.fit(d)
def test_norm_logcdf():
    # Test precision of the logcdf of the normal distribution far in the
    # left tail.  This precision was enhanced in ticket 1614.
    x = -np.asarray(list(range(0, 120, 4)))
    # Reference values computed with R.
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)

    # Also exercise the complex-valued code path.
    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)

    # Test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf).
    # The derivative is taken via the complex-step method: the imaginary part
    # of logcdf(x + ih)/h approximates d(logcdf)/dx for small h.
    deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
    deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
    assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
    """Check levy.cdf down to very small arguments, plus the ppf round trip."""
    # Reference values were computed with mpmath, e.g.
    # >>> mpmath.mp.dps = 100
    # >>> x = mpmath.mp.mpf('0.01')
    # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
    points = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    reference = np.array([0.9747728793699604,
                          0.3173105078629141,
                          0.1572992070502851,
                          0.0015654022580025495,
                          1.523970604832105e-23,
                          1.795832784800726e-219])
    assert_allclose(stats.levy.cdf(points), reference, rtol=1e-10)

    # Feeding the reference probabilities to ppf must recover the points.
    recovered = stats.levy.ppf(reference)
    assert_allclose(recovered, points, rtol=1e-13)
def test_levy_sf():
    """Check levy.sf deep in the right tail (reference values from mpmath)."""
    points = np.array([1e15, 1e25, 1e35, 1e50])
    reference = np.array([2.5231325220201597e-08,
                          2.52313252202016e-13,
                          2.52313252202016e-18,
                          7.978845608028653e-26])
    assert_allclose(stats.levy.sf(points), reference, rtol=1e-14)
def test_levy_l_sf():
    """Check levy_l.sf for small arguments (reference values from mpmath)."""
    points = np.array([-0.016, -0.01, -0.005, -0.0015])
    reference = np.array([2.6644463892359302e-15,
                          1.523970604832107e-23,
                          2.0884875837625492e-45,
                          5.302850374626878e-147])
    assert_allclose(stats.levy_l.sf(points), reference, rtol=1e-13)
def test_levy_l_isf():
    """Round trip sf(isf(p)) for levy_l, including a tiny probability."""
    probs = np.array([3.0e-15, 0.25, 0.99])
    round_trip = stats.levy_l.sf(stats.levy_l.isf(probs))
    assert_allclose(round_trip, probs, rtol=5e-14)
def test_hypergeom_interval_1802():
    """Regression test for ticket #1802: interval() used to loop forever."""
    args = (187601, 43192, 757)
    # These two confidence levels previously hung.
    assert_equal(stats.hypergeom.interval(.95, *args), (152.0, 197.0))
    assert_equal(stats.hypergeom.interval(.945, *args), (152.0, 197.0))
    # This one worked before the fix as well.
    assert_equal(stats.hypergeom.interval(.94, *args), (153.0, 196.0))
    # Degenerate support (.a == .b): ppf pins to the single support point.
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    # Regression test for ticket 1815: a TypeError must be raised when too
    # many (or conflicting positional/keyword) args are given to a
    # distribution method.  gamma has one shape parameter, so a second
    # positional arg is loc and a third is scale; anything beyond that, or a
    # duplicate keyword, is an error.
    np.random.seed(1234)
    x = np.linspace(0.1, 0.7, num=5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)

    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)

    # Also for a discrete distribution (no scale parameter, so only loc can
    # follow the shape positionally).
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)

    # And for distributions with 0, 2 and 3 shape args respectively.
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    """Trac #955: ncx2.cdf from special functions vs. the integrated pdf."""
    grid = np.arange(20, 25, 0.2)
    via_special = stats.ncx2.cdf(grid, 2, 1.07458615e+02)
    via_quadrature = stats.ncx2._cdfvec(grid, 2, 1.07458615e+02)
    assert_allclose(via_special, via_quadrature, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    """ncx2.pdf must not produce nans in extreme tails (gh-1577)."""
    # nan_to_num must not be needed inside ncx2.pdf, so promote any
    # RuntimeWarning to an error.
    with warnings.catch_warnings():
        warnings.simplefilter('error', RuntimeWarning)
        dfs = np.arange(340, 350)
        assert_equal(stats.ncx2.pdf(1, dfs, 2), 0)
        logval = stats.ncx2.logpdf(1, dfs, 2)

    assert_(np.isneginf(logval).all())

    # logpdf keeps extended precision where pdf underflows to exactly 0.
    with warnings.catch_warnings():
        warnings.simplefilter('error', RuntimeWarning)
        assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
        assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
@pytest.mark.parametrize('method, expected', [
    ('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
    ('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
    ('logpdf', np.array([-15.90413011, -17.88416331])),
    ('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
    """gh-5441: ncx2 with nc=0 must agree with chi2.

    Reference values from R (v3.5.1):
        > options(digits=10)
        > pchisq(0.1, df=10, ncp=c(0,4))
        > dchisq(0.1, df=10, ncp=c(0,4))
        > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
        > qchisq(0.1, df=10, ncp=c(0,4))
    """
    func = getattr(stats.ncx2, method)
    assert_allclose(func(0.1, nc=[0, 4], df=10), expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
    """gh-5441: sampling ncx2 with nc=0 must match chi2 sampling."""
    got = stats.ncx2.rvs(df=10, nc=0, random_state=1)
    want = stats.chi2.rvs(df=10, random_state=1)
    assert_allclose(got, want, atol=1e-15)
def test_ncx2_gh12731():
    """gh-12731: for huge noncentrality the cdf used to return 0.5."""
    huge_nc = 10**np.arange(5, 10)
    assert_equal(stats.ncx2.cdf(1e4, df=1, nc=huge_nc), 0)
def test_ncx2_gh8665():
    # Regression test for gh-8665: the survival function previously tended
    # to a spurious nonzero value far in the right tail.
    x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01,
                  4.99515382e+01, 1.07617327e+02, 2.31854502e+02,
                  4.99515382e+02, 1.07617327e+03, 2.31854502e+03,
                  4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
                  4.99515382e+04])
    nu, lam = 20, 499.51538166556196

    sf = stats.ncx2.sf(x, df=nu, nc=lam)
    # Reference values computed in R (no survival function implementation
    # was found, so 1 - cdf is used):
    # options(digits=16)
    # x <- c(4.99515382e+00, 1.07617327e+01, 2.31854502e+01, 4.99515382e+01,
    #        1.07617327e+02, 2.31854502e+02, 4.99515382e+02, 1.07617327e+03,
    #        2.31854502e+03, 4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
    #        4.99515382e+04)
    # nu <- 20
    # lam <- 499.51538166556196
    # 1 - pchisq(x, df = nu, ncp = lam)
    sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
                   1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
                   0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
                   0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
                   0.0000000000000000]
    assert_allclose(sf, sf_expected, atol=1e-12)
def test_foldnorm_zero():
    """gh-2399: c=0 is a valid shape; cdf(0) used to come out as nan."""
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)
def test_stats_shapes_argcheck():
    """gh-2678: stats() with vector shapes partially out of the legal range.

    Entries with an illegal shape value must come back as nan while the
    legal entries still match a call without the illegal value.
    """
    # 0 is not a legal `a` for invgamma; that slot must be nan.
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    mv2_augmented = tuple(np.r_[np.nan, moment] for moment in mv2)
    assert_equal(mv2_augmented, mv3)

    # -1 is not a legal shape parameter for lognorm.
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    mv2_augmented = tuple(np.r_[moment, np.nan] for moment in mv2)
    assert_equal(mv2_augmented, mv3)

    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
    # Minimal test distribution with one shape parameter ``a``.
    # NOTE: rv_continuous inspects the _pdf signature to infer shapes, so
    # the parameter name ``a`` is load-bearing.
    def _pdf(self, x, a):
        return 42
class _distr2_gen(stats.rv_continuous):
    # Test distribution defining only _cdf (one shape parameter ``a``);
    # _pdf is then derived numerically by the framework.
    def _cdf(self, x, a):
        return 42 * a + x
class _distr3_gen(stats.rv_continuous):
    # Deliberately broken fixture: _pdf takes two shape parameters while
    # _cdf takes only one.
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # signature inspection catches the inconsistency.
        return 42 * a + x
class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes:
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes:
    # Construct a distribution w/o explicit shapes parameter and test it:
    # the framework must infer shapes by inspecting _pdf/_cdf signatures.

    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        # The inferred shape must appear in the generated method signatures.
        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        assert_raises(TypeError, _distr3_gen, name='dummy')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
    """Distribution docstrings must not contain templating artifacts."""
    bad_patterns = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if not isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            continue
        for regex in bad_patterns:
            assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    """sf/cdf evaluated at infinity must hit their limiting values."""
    assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
    assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
    """Regression test for gh-4033: ppf(cdf(x)) round trip at tiny x."""
    tiny = 1e-100
    round_trip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_gompertz_accuracy():
    """Regression test for gh-4031: ppf(cdf(x)) round trip at tiny x."""
    tiny = 1e-100
    round_trip = stats.gompertz.ppf(stats.gompertz.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_truncexpon_accuracy():
    """Regression test for gh-4035: ppf(cdf(x)) round trip at tiny x."""
    tiny = 1e-100
    round_trip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_rayleigh_accuracy():
    """Regression test for gh-4034: isf(sf(x)) round trip in the tail."""
    round_trip = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(round_trip, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
    """Regression test for gh-6219: these calls must emit no warnings."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        stats.genextreme.cdf(.5, 0)
        stats.genextreme.pdf(.5, 0)
        stats.genextreme.ppf(.5, 0)
        stats.genextreme.logpdf(-np.inf, 0.0)
        assert_equal(len(caught), 0)
def test_genextreme_entropy():
    """Regression test for gh-5181: genextreme entropy for various shapes.

    For shape c, the entropy is (1 - c)*euler_gamma + 1 (+ log(scale)).
    """
    euler_gamma = 0.5772156649015329
    closed_form = [(-1.0, 2*euler_gamma + 1),
                   (0, euler_gamma + 1),
                   (10, -9*euler_gamma + 1),
                   (-10, 11*euler_gamma + 1)]
    for shape, expected in closed_form:
        assert_allclose(stats.genextreme.entropy(shape), expected, rtol=1e-14)

    # c = 1 gives exactly 1.
    assert_equal(stats.genextreme.entropy(1.0), 1)

    # A non-unit scale adds log(scale).
    h = stats.genextreme.entropy(-2.0, scale=10)
    assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
def test_genextreme_sf_isf():
    """sf values in the far tail plus the isf round trip.

    Expected values were computed using mpmath:

        import mpmath

        def mp_genextreme_sf(x, xi, mu=0, sigma=1):
            # Formula from wikipedia, which has a sign convention for xi
            # that is the opposite of scipy's shape parameter.
            if xi != 0:
                t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
            else:
                t = mpmath.exp(-(x - mu)/sigma)
            return 1 - mpmath.exp(-t)

        >>> mpmath.mp.dps = 1000
        >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
        >>> float(s)
        1.6777205262585625e-57
        >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"),
        ...                      mpmath.mp.mpf("-0.125"))
        >>> float(s)
        1.52587890625e-21
        >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
        >>> float(s)
        0.00034218086528426593
    """
    # (x, scipy shape c, expected sf); note the sign flip of the mpmath xi.
    cases = [(1e8, -0.125, 1.6777205262585625e-57),
             (7.98, 0.125, 1.52587890625e-21),
             (7.98, 0, 0.00034218086528426593)]
    for x, c, expected_sf in cases:
        s = stats.genextreme.sf(x, c)
        assert_allclose(s, expected_sf)
        assert_allclose(stats.genextreme.isf(s, c), x)
def test_burr12_ppf_small_arg():
    """burr12.ppf at a tiny probability (reference computed with mpmath)."""
    # >>> import mpmath
    # >>> mpmath.mp.dps = 100
    # >>> prob = mpmath.mpf('1e-16')
    # >>> c = mpmath.mpf(2)
    # >>> d = mpmath.mpf(3)
    # >>> float(((1-prob)**(-1/d) - 1)**(1/c))
    # 5.7735026918962575e-09
    quantile = stats.burr12.ppf(1e-16, 2, 3)
    assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
    """
    Check crystalball pdf/cdf against reference values.

    All values are calculated using the independent implementation of the
    ROOT framework (see https://root.cern.ch/).
    Corresponding ROOT code is given in the comments.
    """
    # 20 evenly spaced points in [-5, 4.5].
    X = np.linspace(-5.0, 5.0, 21)[:-1]

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
    expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
                         0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
                         0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
                         0.013226, 0.00334407, 0.000658486, 0.000100982,
                         1.20606e-05])
    assert_allclose(expected, calculated, rtol=0.001)

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
    expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
                         0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
                         0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
                         0.0530497, 0.0172227, 0.00435458, 0.000857469,
                         0.000131497, 1.57051e-05])
    assert_allclose(expected, calculated, rtol=0.001)

    # Same shape parameters, but shifted and scaled.
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #     std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
    #     std::cout << ", ";
    # }
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
                         0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
                         0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
                         0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
                         0.0265249])
    assert_allclose(expected, calculated, rtol=0.001)

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
    expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
                         0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
                         0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
                         0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
    assert_allclose(expected, calculated, rtol=0.001)

    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
    expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
                         0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
                         0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
                         0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
                         0.999997])
    assert_allclose(expected, calculated, rtol=0.001)

    # Shifted/scaled cdf.
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #     std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
    #     std::cout << ", ";
    # }
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
                         0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
                         0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
                         0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
    assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
    """
    All values are calculated using the pdf formula and the integrate function
    of Mathematica
    """
    # The last two (beta, m) pairs exercise the special case m == beta**2.
    beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
    m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])

    # The distribution should be correctly normalised: 0th moment is 1.
    assert_allclose(np.array([1.0, 1.0, 1.0, 1.0, 1.0]),
                    stats.crystalball._munp(0, beta, m), rtol=0.001)

    # Normalisation constants calculated using wolframalpha.com,
    # e.g. for beta = 2 and m = 3 the norm is:
    #   integrate exp(-x^2/2) from -2 to infinity +
    #   integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
    norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])

    # Unnormalised raw moments of orders 1..5; infinite entries mark
    # parameter combinations whose moment diverges.
    raw_moments = [
        np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174]),
        np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908]),
        np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668]),
        np.array([np.inf, np.inf, np.inf, np.inf, 7.78468]),
        np.array([np.inf, np.inf, np.inf, np.inf, -1.31086]),
    ]
    for order, unnormalised in enumerate(raw_moments, start=1):
        assert_allclose(unnormalised / norm,
                        stats.crystalball._munp(order, beta, m), rtol=0.001)
def test_crystalball_entropy():
    """Regression test for gh-13602: entropy() vs. a numeric integral."""
    cb = stats.crystalball(2, 3)
    analytic = cb.entropy()
    # The limits -20000 and 30 stand in for -inf and +inf respectively.
    grid = np.linspace(-20000, 30, 200000)
    numeric = trapezoid(entr(cb.pdf(grid)), grid)
    assert_allclose(analytic, numeric, rtol=1e-7)
def test_invweibull():
    """
    Test fitting invweibull to data.

    Here is the same calculation in R:

    > library(evd)
    > library(fitdistrplus)
    > x = c(1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99)
    > result = fitdist(x, 'frechet', control=list(reltol=1e-13),
    +                  fix.arg=list(loc=0), start=list(shape=2, scale=3))
    > result
    Fitting of the distribution ' frechet ' by maximum likelihood
    Parameters:
            estimate Std. Error
    shape 1.048482  0.2261815
    scale 3.099456  0.8292887
    Fixed parameters:
        value
    loc     0
    """
    def optimizer(func, x0, args=(), disp=0):
        # fmin with tighter-than-default tolerances, to match R's reltol.
        return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)

    data = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99])
    c, loc, scale = stats.invweibull.fit(data, floc=0, optimizer=optimizer)
    assert_allclose(c, 1.048482, rtol=5e-6)
    assert loc == 0
    assert_allclose(scale, 3.099456, rtol=5e-6)
@pytest.mark.parametrize(
    'df1,df2,x',
    [(2, 2, [-0.5, 0.2, 1.0, 2.3]),
     (4, 11, [-0.5, 0.2, 1.0, 2.3]),
     (7, 17, [1, 2, 3, 4, 5])]
)
def test_ncf_edge_case(df1, df2, x):
    """Edge case from gh-11660: ncf with nc=0 must reduce to plain f."""
    nc = 0
    assert_allclose(stats.f.cdf(x, df1, df2),
                    stats.ncf.cdf(x, df1, df2, nc), rtol=1e-14)

    # When ncf_gen._skip_pdf is used instead of the generic pdf, this
    # additional check will become more useful.
    assert_allclose(stats.f.pdf(x, df1, df2),
                    stats.ncf.pdf(x, df1, df2, nc), rtol=1e-6)
def test_ncf_variance():
    """Regression test for gh-10658 (incorrect variance formula for ncf).

    The correct value of ncf.var(2, 6, 4) is 42.75; it can be verified with,
    for example, Wolfram Alpha via
    Variance[NoncentralFRatioDistribution[2, 6, 4]], or with the noncentral F
    implementation in the C++ library Boost.
    """
    assert_allclose(stats.ncf.var(2, 6, 4), 42.75, rtol=1e-14)
class TestHistogram:
    # Tests for stats.rv_histogram built from fixed histograms.

    def setup_method(self):
        np.random.seed(1234)
        # We have 8 bins
        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
        # But actually np.histogram will put the last 9 also in the [8,9) bin!
        # Therefore there is a slight difference below for the last bin, from
        # what you might have expected.
        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
        self.template = stats.rv_histogram(histogram)

        # A second template built from a normal sample, for smoke-comparison
        # against the exact normal distribution.
        data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
        norm_histogram = np.histogram(data, bins=50)
        self.norm_template = stats.rv_histogram(norm_histogram)

    def test_pdf(self):
        # pdf is piecewise constant: bin count / (total count * bin width).
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
        assert_allclose(self.template.pdf(values), pdf_values)

        # Test explicitly the corner cases:
        # As stated above the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram putted the 9
        # into the [8,9) bin.
        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
        # 9 is outside our defined bins [8,9) hence the pdf is already 0
        # for a continuous distribution this is fine, because a single value
        # does not have a finite probability!
        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)

        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.pdf(x),
                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_cdf_ppf(self):
        # cdf is piecewise linear between the bin edges.
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
        assert_allclose(self.template.cdf(values), cdf_values)
        # First three and last two values in cdf_value are not unique, so
        # only the strictly increasing interior can be inverted exactly.
        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])

        # Test of cdf and ppf are inverse functions
        x = np.linspace(1.0, 9.0, 100)
        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
        x = np.linspace(0.0, 1.0, 100)
        assert_allclose(self.template.cdf(self.template.ppf(x)), x)

        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.cdf(x),
                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_rvs(self):
        # Empirical sample proportions should track the cdf at the bin
        # midpoints and edges.
        N = 10000
        sample = self.template.rvs(size=N, random_state=123)
        assert_equal(np.sum(sample < 1.0), 0.0)
        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_equal(np.sum(sample > 9.0), 0.0)

    def test_munp(self):
        # Raw moments of the histogram template should roughly match the
        # underlying normal distribution's moments.
        for n in range(4):
            assert_allclose(self.norm_template._munp(n),
                            stats.norm(1.0, 2.5).moment(n), rtol=0.05)

    def test_entropy(self):
        assert_allclose(self.norm_template.entropy(),
                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
    """The "loguniform" alias must indeed be uniform in log10 space."""
    rv = stats.loguniform(10 ** -3, 10 ** 0)
    sample = rv.rvs(size=10000, random_state=42)
    counts, _ = np.histogram(np.log10(sample), bins=10)
    # Each decade-bin should get roughly 1000 of the 10000 draws.
    assert 900 <= counts.min() <= counts.max() <= 1100
    assert np.abs(np.median(counts) - 1000) <= 10
class TestArgus:
    # Tests for the argus distribution: sampling for large chi and
    # moment accuracy against mpmath reference values.

    def test_argus_rvs_large_chi(self):
        # test that the algorithm can handle large values of chi
        x = stats.argus.rvs(50, size=500, random_state=325)
        assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)

    def test_argus_rvs_ratio_uniforms(self):
        # test that the ratio of uniforms algorithms works for chi > 2.611
        x = stats.argus.rvs(3.5, size=1500, random_state=1535)
        assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)
        assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)

    # Expected values were computed with mpmath.
    @pytest.mark.parametrize('chi, expected_mean',
                             [(1, 0.6187026683551835),
                              (10, 0.984805536783744),
                              (40, 0.9990617659702923),
                              (60, 0.9995831885165300),
                              (99, 0.9998469348663028)])
    def test_mean(self, chi, expected_mean):
        m = stats.argus.mean(chi, scale=1)
        assert_allclose(m, expected_mean, rtol=1e-13)

    # Expected values were computed with mpmath.
    # Tolerances loosen as chi grows and the variance shrinks.
    @pytest.mark.parametrize('chi, expected_var, rtol',
                             [(1, 0.05215651254197807, 1e-13),
                              (10, 0.00015805472008165595, 1e-11),
                              (40, 5.877763210262901e-07, 1e-8),
                              (60, 1.1590179389611416e-07, 1e-8),
                              (99, 1.5623277006064666e-08, 1e-8)])
    def test_var(self, chi, expected_var, rtol):
        v = stats.argus.var(chi, scale=1)
        assert_allclose(v, expected_var, rtol=rtol)
class TestNakagami:
    # Tests for the Nakagami distribution (stats.nakagami).

    def test_logpdf(self):
        # Test nakagami logpdf for an input where the PDF is smaller
        # than can be represented with 64 bit floating point.
        # The expected value of logpdf was computed with mpmath:
        #
        #   def logpdf(x, nu):
        #       x = mpmath.mpf(x)
        #       nu = mpmath.mpf(nu)
        #       return (mpmath.log(2) + nu*mpmath.log(nu) -
        #               mpmath.loggamma(nu) + (2*nu - 1)*mpmath.log(x) -
        #               nu*x**2)
        #
        nu = 2.5
        x = 25
        logp = stats.nakagami.logpdf(x, nu)
        assert_allclose(logp, -1546.9253055607549)

    def test_sf_isf(self):
        # Test nakagami sf and isf when the survival function
        # value is very small.
        # The expected value of the survival function was computed
        # with mpmath:
        #
        #   def sf(x, nu):
        #       x = mpmath.mpf(x)
        #       nu = mpmath.mpf(nu)
        #       return mpmath.gammainc(nu, nu*x*x, regularized=True)
        #
        nu = 2.5
        x0 = 5.0
        sf = stats.nakagami.sf(x0, nu)
        assert_allclose(sf, 2.736273158588307e-25, rtol=1e-13)
        # Check round trip back to x0.
        x1 = stats.nakagami.isf(sf, nu)
        assert_allclose(x1, x0, rtol=1e-13)

    @pytest.mark.parametrize('nu', [1.6, 2.5, 3.9])
    @pytest.mark.parametrize('loc', [25.0, 10, 35])
    @pytest.mark.parametrize('scale', [13, 5, 20])
    def test_fit(self, nu, loc, scale):
        # Regression test for gh-13396 (21/27 cases failed previously)
        # The first tuple of the parameters' values is discussed in gh-10908
        N = 100
        samples = stats.nakagami.rvs(size=N, nu=nu, loc=loc,
                                     scale=scale, random_state=1337)
        nu_est, loc_est, scale_est = stats.nakagami.fit(samples)
        assert_allclose(nu_est, nu, rtol=0.2)
        assert_allclose(loc_est, loc, rtol=0.2)
        assert_allclose(scale_est, scale, rtol=0.2)

        # Partial derivatives of the log-likelihood; all three should
        # vanish (to within tolerance) at the fitted MLE.
        # NOTE(review): the bodies of dlogl_dnu and dlogl_dloc appear to
        # be swapped relative to their names (the dnu body matches the
        # loc-derivative and vice versa) — harmless here since both are
        # only asserted to be ~0, but confirm before reusing elsewhere.
        def dlogl_dnu(nu, loc, scale):
            return ((-2*nu + 1) * np.sum(1/(samples - loc))
                    + 2*nu/scale**2 * np.sum(samples - loc))

        def dlogl_dloc(nu, loc, scale):
            return (N * (1 + np.log(nu) - polygamma(0, nu)) +
                    2 * np.sum(np.log((samples - loc) / scale))
                    - np.sum(((samples - loc) / scale)**2))

        def dlogl_dscale(nu, loc, scale):
            return (- 2 * N * nu / scale
                    + 2 * nu / scale ** 3 * np.sum((samples - loc) ** 2))

        assert_allclose(dlogl_dnu(nu_est, loc_est, scale_est), 0, atol=1e-3)
        assert_allclose(dlogl_dloc(nu_est, loc_est, scale_est), 0, atol=1e-3)
        assert_allclose(dlogl_dscale(nu_est, loc_est, scale_est), 0, atol=1e-3)

    @pytest.mark.parametrize('loc', [25.0, 10, 35])
    @pytest.mark.parametrize('scale', [13, 5, 20])
    def test_fit_nu(self, loc, scale):
        # For nu = 0.5, we have analytical values for
        # the MLE of the loc and the scale
        nu = 0.5
        n = 100
        samples = stats.nakagami.rvs(size=n, nu=nu, loc=loc,
                                     scale=scale, random_state=1337)
        nu_est, loc_est, scale_est = stats.nakagami.fit(samples, f0=nu)

        # Analytical values
        loc_theo = np.min(samples)
        scale_theo = np.sqrt(np.mean((samples - loc_est) ** 2))

        assert_allclose(nu_est, nu, rtol=1e-7)
        assert_allclose(loc_est, loc_theo, rtol=1e-7)
        assert_allclose(scale_est, scale_theo, rtol=1e-7)
class TestWrapCauchy:
    """Tests for the wrapped Cauchy distribution (stats.wrapcauchy)."""

    def test_cdf_shape_broadcasting(self):
        # Regression test for gh-13791: the shape parameter must be
        # broadcast against x, and every broadcast element must equal the
        # corresponding scalar evaluation.
        shapes = np.array([[0.03, 0.25], [0.5, 0.75]])
        xvals = np.array([[1.0], [4.0]])
        cdf_grid = stats.wrapcauchy.cdf(xvals, shapes)
        assert cdf_grid.shape == (2, 2)
        expected = [stats.wrapcauchy.cdf(xi, ci)
                    for (xi, ci) in np.nditer((xvals, shapes))]
        assert_allclose(cdf_grid.ravel(), expected, rtol=1e-13)

    def test_cdf_center(self):
        # Exactly half of the mass lies below pi.
        assert_allclose(stats.wrapcauchy.cdf(np.pi, 0.03), 0.5, rtol=1e-14)

    def test_cdf(self):
        # Compare against the closed-form CDF on both sides of pi.
        below_pi = 1.0
        above_pi = 4.0
        c = 0.75
        p = stats.wrapcauchy.cdf([below_pi, above_pi], c)
        ratio = (1 + c)/(1 - c)
        assert_allclose(p[0], np.arctan(ratio*np.tan(below_pi/2))/np.pi)
        assert_allclose(
            p[1], 1 - np.arctan(ratio*np.tan(np.pi - above_pi/2))/np.pi)
def test_rvs_no_size_warning():
    # An _rvs implementation whose signature takes no `size` argument is
    # deprecated: calling rvs() on such a distribution should emit a
    # VisibleDeprecationWarning.
    # NOTE(review): np.VisibleDeprecationWarning moved to
    # numpy.exceptions in newer numpy releases — confirm against the
    # numpy version this suite pins.
    class rvs_no_size_gen(stats.rv_continuous):
        def _rvs(self):
            return 1

    rvs_no_size = rvs_no_size_gen(name='rvs_no_size')

    with assert_warns(np.VisibleDeprecationWarning):
        rvs_no_size.rvs()
@pytest.mark.parametrize('distname, args', invdistdiscrete + invdistcont)
def test_support_gh13294_regression(distname, args):
    # With invalid shape parameters (and/or invalid scale), support()
    # must return (nan, nan) instead of raising (regression, gh-13294).
    if distname in skip_test_support_gh13294_regression:
        pytest.skip(f"skipping test for the support method for "
                    f"distribution {distname}.")
    dist = getattr(stats, distname)
    # test support method with invalid arguments
    if isinstance(dist, stats.rv_continuous):
        # test with valid scale
        if len(args) != 0:
            a0, b0 = dist.support(*args)
            assert_equal(a0, np.nan)
            assert_equal(b0, np.nan)
        # test with invalid scale
        # For some distributions, that take no parameters,
        # the case of only invalid scale occurs and hence,
        # it is implicitly tested in this test case.
        loc1, scale1 = 0, -1
        a1, b1 = dist.support(*args, loc1, scale1)
        assert_equal(a1, np.nan)
        assert_equal(b1, np.nan)
    else:
        a, b = dist.support(*args)
        assert_equal(a, np.nan)
        assert_equal(b, np.nan)
def test_support_broadcasting_gh13294_regression():
    """support() must broadcast loc/scale, returning NaN exactly where the
    scale is invalid and preserving the broadcast shape (gh-13294).
    """
    # Mixed valid/invalid scales: only the last entry is invalid.
    lower, upper = stats.norm.support([0, 0, 0, 1], [1, 1, 1, -1])
    want_lower = np.array([-np.inf, -np.inf, -np.inf, np.nan])
    want_upper = np.array([np.inf, np.inf, np.inf, np.nan])
    assert_equal(lower, want_lower)
    assert_equal(upper, want_upper)
    assert lower.shape == want_lower.shape
    assert upper.shape == want_upper.shape

    # Empty inputs yield empty outputs of matching shape.
    lower, upper = stats.norm.support([], [])
    want_lower, want_upper = np.array([]), np.array([])
    assert_equal(lower, want_lower)
    assert_equal(upper, want_upper)
    assert lower.shape == want_lower.shape
    assert upper.shape == want_upper.shape

    # A single invalid scale broadcasts across all four locations.
    lower, upper = stats.norm.support([0, 0, 0, 1], [-1])
    want_lower = np.array(4*[np.nan])
    want_upper = np.array(4*[np.nan])
    assert_equal(lower, want_lower)
    assert_equal(upper, want_upper)
    assert lower.shape == want_lower.shape
    assert upper.shape == want_upper.shape
# Check a few values of the cosine distribution's cdf, sf, ppf and
# isf methods. Expected values were computed with mpmath.
@pytest.mark.parametrize('x, expected',
                         [(-3.14159, 4.956444476505336e-19),
                          (3.14, 0.9999999998928399)])
def test_cosine_cdf_sf(x, expected):
    # The cosine distribution is symmetric about 0, so cdf(x) == sf(-x);
    # both are checked against the same mpmath reference value.
    assert_allclose(stats.cosine.cdf(x), expected)
    assert_allclose(stats.cosine.sf(-x), expected)
@pytest.mark.parametrize('p, expected',
                         [(1e-6, -3.1080612413765905),
                          (1e-17, -3.141585429601399),
                          (0.975, 2.1447547020964923)])
def test_cosine_ppf_isf(p, expected):
    # By symmetry about 0, ppf(p) == -isf(p); both are checked against
    # the same mpmath reference value.
    assert_allclose(stats.cosine.ppf(p), expected)
    assert_allclose(stats.cosine.isf(p), -expected)
def test_distr_params_lists():
    """The invalid-parameter case lists must name exactly the same
    distributions as the valid-parameter lists."""
    # distribution objects are extra distributions added in
    # test_discrete_basic. All other distributions are strings (names)
    # and so we only choose those to compare whether both lists match.
    named_discrete = {name for name, _ in distdiscrete
                      if isinstance(name, str)}
    assert named_discrete == {name for name, _ in invdistdiscrete}

    assert ({name for name, _ in distcont}
            == {name for name, _ in invdistcont})
| WarrenWeckesser/scipy | scipy/stats/tests/test_distributions.py | Python | bsd-3-clause | 230,495 | [
"Gaussian"
] | 1bad3db6dc8845d43a5174f737768a2f635e460beeee0db9fb01165c515a940e |
# -*- coding: utf-8 -*-
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
import json
# related third party imports
import webapp2
import httpagentparser
from webapp2_extras import security
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from webapp2_extras.appengine.auth.models import Unique
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api.datastore_errors import BadValueError
from google.appengine.runtime import apiproxy_errors
from github import github
from linkedin import linkedin
# local application/library specific imports
import models
import forms as forms
from lib import utils, captcha, twitter
from lib.basehandler import BaseHandler
from lib.decorators import user_required
from lib.decorators import taskqueue_method
from lib import facebook
class LoginRequiredHandler(BaseHandler):
    """Redirect to the Google login page, preserving the 'continue' URL."""

    def get(self):
        # Single-element unpack: expects exactly one 'continue' query
        # parameter and raises ValueError otherwise — presumably this
        # handler is only reached via GAE's login-required redirect,
        # which always supplies it. TODO confirm.
        continue_url, = self.request.get('continue', allow_multiple=True)
        self.redirect(users.create_login_url(dest_url=continue_url))
class RegisterBaseHandler(BaseHandler):
    """
    Base class for handlers with registration and login forms.
    """

    @webapp2.cached_property
    def form(self):
        # Built lazily, once per handler instance.
        return forms.RegisterForm(self)
class SendEmailHandler(BaseHandler):
"""
Core Handler for sending Emails
Use with TaskQueue
"""
@taskqueue_method
def post(self):
from google.appengine.api import mail, app_identity
to = self.request.get("to")
subject = self.request.get("subject")
body = self.request.get("body")
sender = self.request.get("sender")
if sender != '' or not utils.is_email_valid(sender):
if utils.is_email_valid(self.app.config.get('contact_sender')):
sender = self.app.config.get('contact_sender')
else:
app_id = app_identity.get_application_id()
sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender=sender,
to=to,
subject=subject,
body=body,
when=utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
try:
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
except Exception, e:
logging.error("Error sending email: %s" % e)
class LoginHandler(BaseHandler):
    """
    Handler for authentication
    """

    def get(self):
        """ Returns a simple HTML form for login """
        if self.user:
            self.redirect_to('home')
        params = {}
        return self.render_template('login.html', **params)

    def post(self):
        """
        username: Get the username from POST dict
        password: Get the password from POST dict
        """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        continue_url = self.request.get('continue_url').encode('ascii', 'ignore')

        try:
            # The login id may be either an email address or a username.
            if utils.is_email_valid(username):
                user = models.User.get_by_email(username)
                if user:
                    auth_id = user.auth_ids[0]
                else:
                    raise InvalidAuthIdError
            else:
                auth_id = "own:%s" % username
                user = models.User.get_by_auth_id(auth_id)

            password = self.form.password.data.strip()
            remember_me = True if str(self.request.POST.get('remember_me')) == 'on' else False

            # Password to SHA512
            password = utils.hashing(password, self.app.config.get('salt'))

            # Try to login user with password
            # Raises InvalidAuthIdError if user is not found
            # Raises InvalidPasswordError if provided password
            # doesn't match with specified user
            self.auth.get_user_by_password(
                auth_id, password, remember=remember_me)

            # if user account is not activated, logout and redirect to home
            if (user.activated == False):
                # logout
                self.auth.unset_session()

                # redirect to home with error message
                resend_email_uri = self.uri_for('resend-account-activation', user_id=user.get_id(),
                                                token=models.User.create_resend_token(user.get_id()))
                message = _('Your account has not yet been activated. Please check your email to activate it or') + \
                          ' <a href="' + resend_email_uri + '">' + _('click here') + '</a> ' + _('to resend the email.')
                self.add_message(message, 'error')
                return self.redirect_to('home')

            # check twitter association in session
            twitter_helper = twitter.TwitterAuth(self)
            twitter_association_data = twitter_helper.get_association_data()
            if twitter_association_data is not None:
                if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='twitter',
                        uid=str(twitter_association_data['id']),
                        extra_data=twitter_association_data
                    )
                    social_user.put()

            # check facebook association
            fb_data = None
            try:
                # the social-login callback stores this as a JSON string;
                # a missing or malformed entry is simply ignored.
                fb_data = json.loads(self.session['facebook'])
            except:
                pass

            if fb_data is not None:
                if models.SocialUser.check_unique(user.key, 'facebook', str(fb_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='facebook',
                        uid=str(fb_data['id']),
                        extra_data=fb_data
                    )
                    social_user.put()

            # check linkedin association
            li_data = None
            try:
                li_data = json.loads(self.session['linkedin'])
            except:
                pass
            if li_data is not None:
                if models.SocialUser.check_unique(user.key, 'linkedin', str(li_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='linkedin',
                        uid=str(li_data['id']),
                        extra_data=li_data
                    )
                    social_user.put()
            # end linkedin

            # best-effort visit logging; quota errors must not break login
            if self.app.config['log_visit']:
                try:
                    logVisit = models.LogVisit(
                        user=user.key,
                        uastring=self.request.user_agent,
                        ip=self.request.remote_addr,
                        timestamp=utils.get_date_time()
                    )
                    logVisit.put()
                except (apiproxy_errors.OverQuotaError, BadValueError):
                    logging.error("Error saving Visit Log in datastore")

            if continue_url:
                self.redirect(continue_url)
            else:
                self.redirect_to('home')
        except (InvalidAuthIdError, InvalidPasswordError), e:
            # Returns error message to self.response.write in
            # the BaseHandler.dispatcher
            message = _("Your username or password is incorrect. "
                        "Please try again (make sure your caps lock is off)")
            self.add_message(message, 'error')
            self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        # Built lazily, once per handler instance.
        return forms.LoginForm(self)
class SocialLoginHandler(BaseHandler):
    """
    Handler for Social authentication
    """

    def get(self, provider_name):
        """Kick off the OAuth/OpenID flow for the given provider by
        redirecting the browser to the provider's authorization URL."""
        provider = self.provider_info[provider_name]

        if not self.app.config.get('enable_federated_login'):
            message = _('Federated login is disabled.')
            self.add_message(message, 'warning')
            return self.redirect_to('login')
        callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
        if provider_name == "twitter":
            twitter_helper = twitter.TwitterAuth(self, redirect_uri=callback_url)
            self.redirect(twitter_helper.auth_url())
        elif provider_name == "facebook":
            # clears the other provider's session entry — presumably so a
            # stale linkedin payload cannot be associated later; confirm.
            self.session['linkedin'] = None
            perms = ['email', 'publish_stream']
            self.redirect(facebook.auth_url(self.app.config.get('fb_api_key'), callback_url, perms))
        elif provider_name == 'linkedin':
            # symmetric reset of the facebook session entry
            self.session['facebook'] = None
            authentication = linkedin.LinkedInAuthentication(
                self.app.config.get('linkedin_api'),
                self.app.config.get('linkedin_secret'),
                callback_url,
                [linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
            self.redirect(authentication.authorization_url)
        elif provider_name == "github":
            scope = 'gist'
            github_helper = github.GithubAuth(self.app.config.get('github_server'),
                                              self.app.config.get('github_client_id'), \
                                              self.app.config.get('github_client_secret'),
                                              self.app.config.get('github_redirect_uri'), scope)
            self.redirect(github_helper.get_authorize_url())
        elif provider_name in models.SocialUser.open_id_providers():
            continue_url = self.request.get('continue_url')
            if continue_url:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
            else:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name)
            try:
                login_url = users.create_login_url(federated_identity=provider['uri'], dest_url=dest_url)
                self.redirect(login_url)
            except users.NotAllowedError:
                self.add_message('You must enable Federated Login Before for this application.<br> '
                                 '<a href="http://appengine.google.com" target="_blank">Google App Engine Control Panel</a> -> '
                                 'Administration -> Application Settings -> Authentication Options', 'error')
                self.redirect_to('login')
        else:
            message = _('%s authentication is not yet implemented.' % provider.get('label'))
            self.add_message(message, 'warning')
            self.redirect_to('login')
class CallbackSocialLoginHandler(BaseHandler):
    """
    Callback (Save Information) for Social Authentication
    """

    def get(self, provider_name):
        """Complete a social login for `provider_name`.

        Three outcomes per provider branch:
        - user already logged in: attach the provider account to them;
        - provider account already known: authenticate that user;
        - otherwise: create a new account via
          create_account_from_social_provider().
        """
        if not self.app.config.get('enable_federated_login'):
            message = _('Federated login is disabled.')
            self.add_message(message, 'warning')
            return self.redirect_to('login')
        continue_url = self.request.get('continue_url')
        if provider_name == "twitter":
            oauth_token = self.request.get('oauth_token')
            oauth_verifier = self.request.get('oauth_verifier')
            twitter_helper = twitter.TwitterAuth(self)
            user_data = twitter_helper.auth_complete(oauth_token,
                                                     oauth_verifier)
            logging.info('twitter user_data: ' + str(user_data))
            if self.user:
                # new association with twitter
                user_info = models.User.get_by_id(long(self.user_id))
                if models.SocialUser.check_unique(user_info.key, 'twitter', str(user_data['user_id'])):
                    social_user = models.SocialUser(
                        user=user_info.key,
                        provider='twitter',
                        uid=str(user_data['user_id']),
                        extra_data=user_data
                    )
                    social_user.put()

                    message = _('Twitter association added.')
                    self.add_message(message, 'success')
                else:
                    message = _('This Twitter account is already in use.')
                    self.add_message(message, 'error')
                if continue_url:
                    self.redirect(continue_url)
                else:
                    self.redirect_to('edit-profile')
            else:
                # login with twitter
                social_user = models.SocialUser.get_by_provider_and_uid('twitter',
                                                                        str(user_data['user_id']))
                if social_user:
                    # Social user exists. Need authenticate related site account
                    user = social_user.user.get()
                    self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                    if self.app.config['log_visit']:
                        try:
                            logVisit = models.LogVisit(
                                user=user.key,
                                uastring=self.request.user_agent,
                                ip=self.request.remote_addr,
                                timestamp=utils.get_date_time()
                            )
                            logVisit.put()
                        except (apiproxy_errors.OverQuotaError, BadValueError):
                            logging.error("Error saving Visit Log in datastore")
                    if continue_url:
                        self.redirect(continue_url)
                    else:
                        self.redirect_to('home')
                else:
                    uid = str(user_data['user_id'])
                    email = str(user_data.get('email'))
                    self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
        # github association
        elif provider_name == "github":
            # get our request code back from the social login handler above
            code = self.request.get('code')

            # create our github auth object
            scope = 'gist'
            github_helper = github.GithubAuth(self.app.config.get('github_server'),
                                              self.app.config.get('github_client_id'), \
                                              self.app.config.get('github_client_secret'),
                                              self.app.config.get('github_redirect_uri'), scope)

            # retrieve the access token using the code and auth object
            access_token = github_helper.get_access_token(code)
            user_data = github_helper.get_user_info(access_token)
            logging.info('github user_data: ' + str(user_data))
            if self.user:
                # user is already logged in so we set a new association with github
                user_info = models.User.get_by_id(long(self.user_id))
                if models.SocialUser.check_unique(user_info.key, 'github', str(user_data['login'])):
                    social_user = models.SocialUser(
                        user=user_info.key,
                        provider='github',
                        uid=str(user_data['login']),
                        extra_data=user_data
                    )
                    social_user.put()

                    message = _('Github association added.')
                    self.add_message(message, 'success')
                else:
                    message = _('This Github account is already in use.')
                    self.add_message(message, 'error')
                self.redirect_to('edit-profile')
            else:
                # user is not logged in, but is trying to log in via github
                # NOTE(review): association above keys on user_data['login']
                # but new accounts below use user_data['id'] as uid — the two
                # identifiers differ; confirm which one is canonical.
                social_user = models.SocialUser.get_by_provider_and_uid('github', str(user_data['login']))
                if social_user:
                    # Social user exists. Need authenticate related site account
                    user = social_user.user.get()
                    self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                    if self.app.config['log_visit']:
                        try:
                            logVisit = models.LogVisit(
                                user=user.key,
                                uastring=self.request.user_agent,
                                ip=self.request.remote_addr,
                                timestamp=utils.get_date_time()
                            )
                            logVisit.put()
                        except (apiproxy_errors.OverQuotaError, BadValueError):
                            logging.error("Error saving Visit Log in datastore")
                    self.redirect_to('home')
                else:
                    uid = str(user_data['id'])
                    email = str(user_data.get('email'))
                    self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
        #end github
        # facebook association
        elif provider_name == "facebook":
            code = self.request.get('code')
            callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
            token = facebook.get_access_token_from_code(code, callback_url, self.app.config.get('fb_api_key'),
                                                        self.app.config.get('fb_secret'))
            access_token = token['access_token']
            fb = facebook.GraphAPI(access_token)
            user_data = fb.get_object('me')
            logging.info('facebook user_data: ' + str(user_data))
            if self.user:
                # new association with facebook
                user_info = models.User.get_by_id(long(self.user_id))
                if models.SocialUser.check_unique(user_info.key, 'facebook', str(user_data['id'])):
                    social_user = models.SocialUser(
                        user=user_info.key,
                        provider='facebook',
                        uid=str(user_data['id']),
                        extra_data=user_data
                    )
                    social_user.put()

                    message = _('Facebook association added!')
                    self.add_message(message, 'success')
                else:
                    message = _('This Facebook account is already in use!')
                    self.add_message(message, 'error')
                if continue_url:
                    self.redirect(continue_url)
                else:
                    self.redirect_to('edit-profile')
            else:
                # login with Facebook
                social_user = models.SocialUser.get_by_provider_and_uid('facebook',
                                                                        str(user_data['id']))
                if social_user:
                    # Social user exists. Need authenticate related site account
                    user = social_user.user.get()
                    self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                    if self.app.config['log_visit']:
                        try:
                            logVisit = models.LogVisit(
                                user=user.key,
                                uastring=self.request.user_agent,
                                ip=self.request.remote_addr,
                                timestamp=utils.get_date_time()
                            )
                            logVisit.put()
                        except (apiproxy_errors.OverQuotaError, BadValueError):
                            logging.error("Error saving Visit Log in datastore")
                    if continue_url:
                        self.redirect(continue_url)
                    else:
                        self.redirect_to('home')
                else:
                    uid = str(user_data['id'])
                    email = str(user_data.get('email'))
                    self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
        # end facebook
        # association with linkedin
        elif provider_name == "linkedin":
            callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
            authentication = linkedin.LinkedInAuthentication(
                self.app.config.get('linkedin_api'),
                self.app.config.get('linkedin_secret'),
                callback_url,
                [linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
            authentication.authorization_code = self.request.get('code')
            access_token = authentication.get_access_token()
            link = linkedin.LinkedInApplication(authentication)
            u_data = link.get_profile(selectors=['id', 'first-name', 'last-name', 'email-address'])
            # normalize LinkedIn's camelCase fields to the local schema
            user_data = {
                'first_name': u_data.get('firstName'),
                'last_name': u_data.get('lastName'),
                'id': u_data.get('id'),
                'email': u_data.get('emailAddress')}
            # stash for later pickup by LoginHandler/RegisterHandler
            self.session['linkedin'] = json.dumps(user_data)
            logging.info('linkedin user_data: ' + str(user_data))
            if self.user:
                # new association with linkedin
                user_info = models.User.get_by_id(long(self.user_id))
                if models.SocialUser.check_unique(user_info.key, 'linkedin', str(user_data['id'])):
                    social_user = models.SocialUser(
                        user=user_info.key,
                        provider='linkedin',
                        uid=str(user_data['id']),
                        extra_data=user_data
                    )
                    social_user.put()

                    message = _('Linkedin association added!')
                    self.add_message(message, 'success')
                else:
                    message = _('This Linkedin account is already in use!')
                    self.add_message(message, 'error')
                if continue_url:
                    self.redirect(continue_url)
                else:
                    self.redirect_to('edit-profile')
            else:
                # login with Linkedin
                social_user = models.SocialUser.get_by_provider_and_uid('linkedin',
                                                                        str(user_data['id']))
                if social_user:
                    # Social user exists. Need authenticate related site account
                    user = social_user.user.get()
                    self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                    if self.app.config['log_visit']:
                        try:
                            logVisit = models.LogVisit(
                                user=user.key,
                                uastring=self.request.user_agent,
                                ip=self.request.remote_addr,
                                timestamp=utils.get_date_time()
                            )
                            logVisit.put()
                        except (apiproxy_errors.OverQuotaError, BadValueError):
                            logging.error("Error saving Visit Log in datastore")
                    if continue_url:
                        self.redirect(continue_url)
                    else:
                        self.redirect_to('home')
                else:
                    uid = str(user_data['id'])
                    email = str(user_data.get('email'))
                    self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
        #end linkedin
        # google, myopenid, yahoo OpenID Providers
        elif provider_name in models.SocialUser.open_id_providers():
            provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']

            # get info passed from OpenId Provider
            from google.appengine.api import users

            current_user = users.get_current_user()
            if current_user:
                # prefer the federated identity; fall back to the GAE user id
                if current_user.federated_identity():
                    uid = current_user.federated_identity()
                else:
                    uid = current_user.user_id()
                email = current_user.email()
            else:
                message = _('No user authentication information received from %s. '
                            'Please ensure you are logging in from an authorized OpenID Provider (OP).'
                            % provider_display_name)
                self.add_message(message, 'error')
                return self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to(
                    'login')
            if self.user:
                # add social account to user
                user_info = models.User.get_by_id(long(self.user_id))
                if models.SocialUser.check_unique(user_info.key, provider_name, uid):
                    social_user = models.SocialUser(
                        user=user_info.key,
                        provider=provider_name,
                        uid=uid
                    )
                    social_user.put()

                    message = _('%s association successfully added.' % provider_display_name)
                    self.add_message(message, 'success')
                else:
                    message = _('This %s account is already in use.' % provider_display_name)
                    self.add_message(message, 'error')
                if continue_url:
                    self.redirect(continue_url)
                else:
                    self.redirect_to('edit-profile')
            else:
                # login with OpenId Provider
                social_user = models.SocialUser.get_by_provider_and_uid(provider_name, uid)
                if social_user:
                    # Social user found. Authenticate the user
                    user = social_user.user.get()
                    self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
                    if self.app.config['log_visit']:
                        try:
                            logVisit = models.LogVisit(
                                user=user.key,
                                uastring=self.request.user_agent,
                                ip=self.request.remote_addr,
                                timestamp=utils.get_date_time()
                            )
                            logVisit.put()
                        except (apiproxy_errors.OverQuotaError, BadValueError):
                            logging.error("Error saving Visit Log in datastore")
                    if continue_url:
                        self.redirect(continue_url)
                    else:
                        self.redirect_to('home')
                else:
                    self.create_account_from_social_provider(provider_name, uid, email, continue_url)
        else:
            message = _('This authentication method is not yet implemented.')
            self.add_message(message, 'warning')
            self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')

    def create_account_from_social_provider(self, provider_name, uid, email=None, continue_url=None, user_data=None):
        """Social user does not exist yet so create it with the federated identity provided (uid)
        and create prerequisite user and log the user account in
        """
        provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
        if models.SocialUser.check_unique_uid(provider_name, uid):
            # create user
            # Returns a tuple, where first value is BOOL.
            # If True ok, If False no new user is created
            # Assume provider has already verified email address
            # if email is provided so set activated to True
            auth_id = "%s:%s" % (provider_name, uid)
            if email:
                unique_properties = ['email']
                user_info = self.auth.store.user_model.create_user(
                    auth_id, unique_properties, email=email,
                    activated=True
                )
            else:
                user_info = self.auth.store.user_model.create_user(
                    auth_id, activated=True
                )
            if not user_info[0]:  #user is a tuple
                message = _('The account %s is already in use.' % provider_display_name)
                self.add_message(message, 'error')
                return self.redirect_to('register')

            user = user_info[1]

            # create social user and associate with user
            social_user = models.SocialUser(
                user=user.key,
                provider=provider_name,
                uid=uid,
            )
            if user_data:
                social_user.extra_data = user_data
                self.session[provider_name] = json.dumps(user_data)  # TODO is this needed?
            social_user.put()
            # authenticate user
            self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
            if self.app.config['log_visit']:
                try:
                    logVisit = models.LogVisit(
                        user=user.key,
                        uastring=self.request.user_agent,
                        ip=self.request.remote_addr,
                        timestamp=utils.get_date_time()
                    )
                    logVisit.put()
                except (apiproxy_errors.OverQuotaError, BadValueError):
                    logging.error("Error saving Visit Log in datastore")

            message = _(
                'Welcome! You have been registered as a new user through %s and logged in.' % provider_display_name)
            self.add_message(message, 'success')
        else:
            message = _('This %s account is already in use.' % provider_display_name)
            self.add_message(message, 'error')
        if continue_url:
            self.redirect(continue_url)
        else:
            self.redirect_to('edit-profile')
class DeleteSocialProviderHandler(BaseHandler):
    """
    Delete Social association with an account
    """

    @user_required
    def post(self, provider_name):
        """Disassociate `provider_name` from the logged-in user.

        Refuses when this provider is the user's only way to sign in
        (no password set and no other provider), so the account cannot
        be orphaned.
        """
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            if len(user_info.get_social_providers_info()['used']) > 1 or (user_info.password is not None):
                social_user = models.SocialUser.get_by_user_and_provider(user_info.key, provider_name)
                if social_user:
                    social_user.key.delete()
                    message = _('%s successfully disassociated.' % provider_name)
                    self.add_message(message, 'success')
                else:
                    message = _('Social account on %s not found for this user.' % provider_name)
                    self.add_message(message, 'error')
            else:
                # CONSISTENCY FIX: this was the only user-facing message in
                # the file not wrapped in _() for translation.
                message = _('Social account on %s cannot be deleted for user.'
                            ' Please create a username and password to delete social account.' % provider_name)
                self.add_message(message, 'error')
        self.redirect_to('edit-profile')
class LogoutHandler(BaseHandler):
    """
    Destroy user session and redirect to login
    """

    def get(self):
        """Clear the auth session, then redirect to the configured login
        URL (falling back to 'home' if that config entry is missing)."""
        if self.user:
            message = _("You've signed out successfully. Warning: Please clear all cookies and logout "
                        "of OpenId providers too if you logged in on a public computer.")
            self.add_message(message, 'info')

        self.auth.unset_session()
        # User is logged out, let's try redirecting to login page
        try:
            self.redirect(self.auth_config['login_url'])
        except (AttributeError, KeyError), e:
            logging.error("Error logging out: %s" % e)
            message = _("User is logged out, but there was an error on the redirection.")
            self.add_message(message, 'error')
            return self.redirect_to('home')
class RegisterHandler(BaseHandler):
    """
    Handler for Sign Up Users
    """

    def get(self):
        """ Returns a simple HTML form for create a new user """
        if self.user:
            # BUGFIX: the original fell through and rendered the signup form
            # for already-authenticated users; return the redirect instead.
            return self.redirect_to('home')
        params = {}
        return self.render_template('register.html', **params)

    def post(self):
        """ Create the user from the POSTed registration form.

        Validates the form, creates the user record (enforcing unique
        username/email), then either sends an activation email or logs the
        user in directly, attaching any pending Twitter/Facebook/LinkedIn
        association data found in the session.
        """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        name = self.form.name.data.strip()
        last_name = self.form.last_name.data.strip()
        email = self.form.email.data.lower()
        password = self.form.password.data.strip()
        country = self.form.country.data
        tz = self.form.tz.data

        # Password to SHA512
        password = utils.hashing(password, self.app.config.get('salt'))

        # Passing password_raw=password so password will be hashed
        # Returns a tuple, where first value is BOOL.
        # If True ok, If False no new user is created
        unique_properties = ['username', 'email']
        auth_id = "own:%s" % username
        user = self.auth.store.user_model.create_user(
            auth_id, unique_properties, password_raw=password,
            username=username, name=name, last_name=last_name, email=email,
            ip=self.request.remote_addr, country=country, tz=tz
        )

        if not user[0]:  # user is a tuple; user[1] describes the clash
            if "username" in str(user[1]):
                message = _(
                    'Sorry, The username <strong>{}</strong> is already registered.').format(username)
            elif "email" in str(user[1]):
                message = _('Sorry, The email <strong>{}</strong> is already registered.').format(email)
            else:
                message = _('Sorry, The user is already registered.')
            self.add_message(message, 'error')
            return self.redirect_to('register')
        else:
            # User registered successfully
            # But if the user registered using the form, the user has to check their email to activate the account ???
            try:
                if not user[1].activated:
                    # send email
                    subject = _("%s Account Verification" % self.app.config.get('app_name'))
                    confirmation_url = self.uri_for("account-activation",
                                                    user_id=user[1].get_id(),
                                                    token=models.User.create_auth_token(user[1].get_id()),
                                                    _full=True)

                    # load email's template
                    template_val = {
                        "app_name": self.app.config.get('app_name'),
                        "username": username,
                        "confirmation_url": confirmation_url,
                        "support_url": self.uri_for("contact", _full=True)
                    }
                    body_path = "emails/account_activation.txt"
                    body = self.jinja2.render_template(body_path, **template_val)

                    email_url = self.uri_for('taskqueue-send-email')
                    taskqueue.add(url=email_url, params={
                        'to': str(email),
                        'subject': subject,
                        'body': body,
                    })

                    message = _('You were successfully registered. '
                                'Please check your email to activate your account.')
                    self.add_message(message, 'success')
                    return self.redirect_to('home')

                # If the user didn't register using registration form ???
                # Return value is unused; the call has the side effect of
                # logging the freshly created user in.
                db_user = self.auth.get_user_by_password(user[1].auth_ids[0], password)

                # Check Twitter association in session
                twitter_helper = twitter.TwitterAuth(self)
                twitter_association_data = twitter_helper.get_association_data()
                if twitter_association_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'twitter', str(twitter_association_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='twitter',
                            uid=str(twitter_association_data['id']),
                            extra_data=twitter_association_data
                        )
                        social_user.put()

                # check Facebook association
                # BUGFIX: `user` is a (bool, User) tuple, so the entity is
                # user[1]; the original `user.key` raised AttributeError which
                # the except clause below silently swallowed. Also use
                # session.get() so a missing key no longer aborts the flow
                # with KeyError.
                fb_json = self.session.get('facebook')
                fb_data = json.loads(fb_json) if fb_json else None
                if fb_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'facebook', str(fb_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='facebook',
                            uid=str(fb_data['id']),
                            extra_data=fb_data
                        )
                        social_user.put()

                # check LinkedIn association (same fixes as Facebook above)
                li_json = self.session.get('linkedin')
                li_data = json.loads(li_json) if li_json else None
                if li_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'linkedin', str(li_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='linkedin',
                            uid=str(li_data['id']),
                            extra_data=li_data
                        )
                        social_user.put()

                message = _('Welcome <strong>{}</strong>, you are now logged in.').format(username)
                self.add_message(message, 'success')
                return self.redirect_to('home')
            except (AttributeError, KeyError) as e:
                logging.error('Unexpected error creating the user %s: %s' % (username, e))
                message = _('Unexpected error creating the user %s' % username)
                self.add_message(message, 'error')
                return self.redirect_to('home')

    @webapp2.cached_property
    def form(self):
        # Registration form with country / timezone choices populated from
        # the handler's cached lists.
        f = forms.RegisterForm(self)
        f.country.choices = self.countries_tuple
        f.tz.choices = self.tz
        return f
class AccountActivationHandler(BaseHandler):
    """
    Handler for account activation
    """

    def get(self, user_id, token):
        """Activate the account identified by user_id when token is valid.

        Args:
            user_id: datastore id of the user, as a string from the URL.
            token: one-time auth token that was emailed to the user.
        """
        try:
            if not models.User.validate_auth_token(user_id, token):
                message = _('The link is invalid.')
                self.add_message(message, 'error')
                return self.redirect_to('home')
            user = models.User.get_by_id(long(user_id))
            # activate the user's account
            user.activated = True
            user.put()
            # Login User
            # NOTE(review): user_id is converted with long() above but int()
            # here -- presumably equivalent on this id range; confirm.
            self.auth.get_user_by_token(int(user_id), token)
            # Delete token
            models.User.delete_auth_token(user_id, token)
            message = _('Congratulations, Your account <strong>{}</strong> has been successfully activated.').format(
                user.username)
            self.add_message(message, 'success')
            self.redirect_to('home')
        except (AttributeError, KeyError, InvalidAuthIdError, NameError), e:
            # Any failure (missing user, bad token internals) degrades to a
            # generic error message on the home page.
            logging.error("Error activating an account: %s" % e)
            message = _('Sorry, Some error occurred.')
            self.add_message(message, 'error')
            return self.redirect_to('home')
class ResendActivationEmailHandler(BaseHandler):
    """
    Handler to resend activation email
    """

    def get(self, user_id, token):
        """Resend the activation email when the resend token is valid.

        Args:
            user_id: datastore id of the user, taken from the URL.
            token: one-time "resend" token issued with the original email.
        """
        try:
            if not models.User.validate_resend_token(user_id, token):
                message = _('The link is invalid.')
                self.add_message(message, 'error')
                return self.redirect_to('home')
            user = models.User.get_by_id(long(user_id))
            email = user.email
            if (user.activated == False):
                # send email
                subject = _("%s Account Verification" % self.app.config.get('app_name'))
                confirmation_url = self.uri_for("account-activation",
                                                user_id=user.get_id(),
                                                token=models.User.create_auth_token(user.get_id()),
                                                _full=True)

                # load email's template
                template_val = {
                    "app_name": self.app.config.get('app_name'),
                    "username": user.username,
                    "confirmation_url": confirmation_url,
                    "support_url": self.uri_for("contact", _full=True)
                }
                body_path = "emails/account_activation.txt"
                body = self.jinja2.render_template(body_path, **template_val)

                email_url = self.uri_for('taskqueue-send-email')
                # Queue the mail for asynchronous delivery by the send-email task.
                taskqueue.add(url=email_url, params={
                    'to': str(email),
                    'subject': subject,
                    'body': body,
                })

                # Invalidate the resend token so this link is single-use.
                models.User.delete_resend_token(user_id, token)

                message = _('The verification email has been resent to %s. '
                            'Please check your email to activate your account.' % email)
                self.add_message(message, 'success')
                return self.redirect_to('home')
            else:
                # Account already active: nothing to resend.
                message = _('Your account has been activated. Please <a href="/login/">sign in</a> to your account.')
                self.add_message(message, 'warning')
                return self.redirect_to('home')
        except (KeyError, AttributeError), e:
            logging.error("Error resending activation email: %s" % e)
            message = _('Sorry, Some error occurred.')
            self.add_message(message, 'error')
            return self.redirect_to('home')
class ContactHandler(BaseHandler):
    """
    Handler for Contact Form
    """

    def get(self):
        """ Returns a simple HTML for contact form """
        if self.user:
            # Pre-fill name/email for logged-in users.
            user_info = models.User.get_by_id(long(self.user_id))
            if user_info.name or user_info.last_name:
                self.form.name.data = user_info.name + " " + user_info.last_name
            if user_info.email:
                self.form.email.data = user_info.email
        params = {
            "exception": self.request.get('exception')
        }
        return self.render_template('contact.html', **params)

    def post(self):
        """ Validate the contact form and queue the message email. """
        if not self.form.validate():
            return self.get()
        remoteip = self.request.remote_addr
        user_agent = self.request.user_agent
        # NOTE(review): may be None when the field is absent; the original
        # `exception != ""` test then appends "(Exception error: None)".
        exception = self.request.POST.get('exception')
        name = self.form.name.data.strip()
        email = self.form.email.data.lower()
        message = self.form.message.data.strip()

        # BUGFIX: template_val used to be assigned only inside the try below,
        # so a user-agent parsing failure left it undefined and the email
        # rendering crashed with an uncaught NameError. Start from a minimal
        # template and enrich it with user-agent details when possible.
        template_val = {
            "name": name,
            "email": email,
            "browser": "-",
            "browser_version": "-",
            "operating_system": "-",
            "ip": remoteip,
            "message": message
        }
        try:
            # parsing user_agent and getting which os key to use
            # windows uses 'os' while other os use 'flavor'
            ua = httpagentparser.detect(user_agent)
            _os = 'flavor' if 'flavor' in ua else 'os'

            operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-"
            if 'version' in ua[_os]:
                operating_system += ' ' + str(ua[_os]['version'])
            if 'dist' in ua:
                operating_system += ' ' + str(ua['dist'])

            template_val["operating_system"] = operating_system
            template_val["browser"] = str(ua['browser']['name']) if 'browser' in ua else "-"
            template_val["browser_version"] = str(ua['browser']['version']) if 'browser' in ua else "-"
        except Exception as e:
            # Best effort only: the message is still sent with placeholders.
            logging.error("error getting user agent info: %s" % e)

        try:
            subject = _("Contact")
            # exceptions for error pages that redirect to contact
            if exception != "":
                subject = subject + " (Exception error: %s)" % exception
            body_path = "emails/contact.txt"
            body = self.jinja2.render_template(body_path, **template_val)

            email_url = self.uri_for('taskqueue-send-email')
            taskqueue.add(url=email_url, params={
                'to': self.app.config.get('contact_recipient'),
                'subject': subject,
                'body': body,
                'sender': self.app.config.get('contact_sender'),
            })

            message = _('Your message was sent successfully.')
            self.add_message(message, 'success')
            return self.redirect_to('contact')
        except (AttributeError, KeyError) as e:
            logging.error('Error sending contact form: %s' % e)
            message = _('Error sending the message. Please try again later.')
            self.add_message(message, 'error')
            return self.redirect_to('contact')

    @webapp2.cached_property
    def form(self):
        return forms.ContactForm(self)
class CategoriesHandler(BaseHandler):
    """Handler for the categories page."""

    def get(self):
        """Render the categories listing for the visitor."""
        return self.render_template('categories.html')
class EditProfileHandler(BaseHandler):
    """
    Handler for Edit User Profile
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for edit profile """
        params = {}
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            self.form.username.data = user_info.username
            self.form.name.data = user_info.name
            self.form.last_name.data = user_info.last_name
            self.form.country.data = user_info.country
            self.form.tz.data = user_info.tz
            providers_info = user_info.get_social_providers_info()
            # Accounts created via a social login have no local password.
            params['local_account'] = bool(user_info.password)
            params['used_providers'] = providers_info['used']
            params['unused_providers'] = providers_info['unused']
            params['country'] = user_info.country
            params['tz'] = user_info.tz
        return self.render_template('edit_profile.html', **params)

    def post(self):
        """ Persist profile changes (username, name, country, timezone).

        When the username changed, new uniqueness records are claimed
        before the old ones are released so a taken name is rejected
        without corrupting the existing entries.
        """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        name = self.form.name.data.strip()
        last_name = self.form.last_name.data.strip()
        country = self.form.country.data
        tz = self.form.tz.data

        try:
            user_info = models.User.get_by_id(long(self.user_id))

            try:
                message = ''
                # update username if it has changed and it isn't already taken
                if username != user_info.username:
                    user_info.unique_properties = ['username', 'email']
                    uniques = [
                        'User.username:%s' % username,
                        'User.auth_id:own:%s' % username,
                    ]
                    # Create the unique username and auth_id.
                    success, existing = Unique.create_multi(uniques)
                    if success:
                        # free old uniques
                        Unique.delete_multi(
                            ['User.username:%s' % user_info.username, 'User.auth_id:own:%s' % user_info.username])
                        # The unique values were created, so we can save the user.
                        user_info.username = username
                        user_info.auth_ids[0] = 'own:%s' % username
                        message += _('Your new username is <strong>{}</strong>').format(username)
                    else:
                        message += _(
                            'The username <strong>{}</strong> is already taken. Please choose another.').format(
                            username)
                        # At least one of the values is not unique.
                        self.add_message(message, 'error')
                        return self.get()
                user_info.name = name
                user_info.last_name = last_name
                user_info.country = country
                user_info.tz = tz
                user_info.put()
                message += " " + _('Thanks, your settings have been saved.')
                self.add_message(message, 'success')
                return self.get()
            except (AttributeError, KeyError, ValueError) as e:
                # BUGFIX: the original used 'Error updating profile: ' + e,
                # which itself raises TypeError when concatenating a str and
                # an exception instance; use %-formatting instead.
                logging.error('Error updating profile: %s' % e)
                message = _('Unable to update profile. Please try again later.')
                self.add_message(message, 'error')
                return self.get()
        except (AttributeError, TypeError) as e:
            # self.user_id missing or malformed: not logged in.
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            return self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        f = forms.EditProfileForm(self)
        f.country.choices = self.countries_tuple
        f.tz.choices = self.tz
        return f
class EditPasswordHandler(BaseHandler):
    """
    Handler for Edit User Password
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for editing password """
        params = {}
        return self.render_template('edit_password.html', **params)

    def post(self):
        """ Get fields from POST dict """
        if not self.form.validate():
            # Re-render the form with validation errors.
            return self.get()
        current_password = self.form.current_password.data.strip()
        password = self.form.password.data.strip()
        try:
            user_info = models.User.get_by_id(long(self.user_id))
            auth_id = "own:%s" % user_info.username
            # Password to SHA512
            current_password = utils.hashing(current_password, self.app.config.get('salt'))
            try:
                # Raises InvalidPasswordError when the current password
                # doesn't match, aborting the change below.
                user = models.User.get_by_auth_password(auth_id, current_password)
                # Password to SHA512
                password = utils.hashing(password, self.app.config.get('salt'))
                user.password = security.generate_password_hash(password, length=12)
                user.put()

                # send email
                subject = self.app.config.get('app_name') + " Account Password Changed"

                # load email's template
                template_val = {
                    "app_name": self.app.config.get('app_name'),
                    "first_name": user.name,
                    "username": user.username,
                    "email": user.email,
                    "reset_password_url": self.uri_for("password-reset", _full=True)
                }
                email_body_path = "emails/password_changed.txt"
                email_body = self.jinja2.render_template(email_body_path, **template_val)
                email_url = self.uri_for('taskqueue-send-email')
                # Deliver the notification asynchronously via the task queue.
                taskqueue.add(url=email_url, params={
                    'to': user.email,
                    'subject': subject,
                    'body': email_body,
                    'sender': self.app.config.get('contact_sender'),
                })

                # Login User
                # Re-authenticate so the session reflects the new password.
                self.auth.get_user_by_password(user.auth_ids[0], password)
                self.add_message(_('Password changed successfully.'), 'success')
                return self.redirect_to('edit-profile')
            except (InvalidAuthIdError, InvalidPasswordError), e:
                # Returns error message to self.response.write in
                # the BaseHandler.dispatcher
                message = _("Incorrect password! Please enter your current password to change your account settings.")
                self.add_message(message, 'error')
                return self.redirect_to('edit-password')
        except (AttributeError, TypeError), e:
            # self.user_id missing or malformed: not logged in.
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        # WTForms form bound to this request.
        return forms.EditPasswordForm(self)
class EditEmailHandler(BaseHandler):
    """
    Handler for Edit User's Email
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for edit email """
        params = {}
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            params['current_email'] = user_info.email
        return self.render_template('edit_email.html', **params)

    def post(self):
        """ Get fields from POST dict """
        if not self.form.validate():
            return self.get()
        new_email = self.form.new_email.data.strip()
        password = self.form.password.data.strip()

        try:
            user_info = models.User.get_by_id(long(self.user_id))
            auth_id = "own:%s" % user_info.username
            # Password to SHA512
            password = utils.hashing(password, self.app.config.get('salt'))

            try:
                # authenticate user by its password
                user = models.User.get_by_auth_password(auth_id, password)

                # if the user change his/her email address
                if new_email != user.email:

                    # check whether the new email has been used by another user
                    aUser = models.User.get_by_email(new_email)
                    if aUser is not None:
                        message = _("The email %s is already registered." % new_email)
                        self.add_message(message, 'error')
                        return self.redirect_to("edit-email")

                    # send email
                    subject = _("%s Email Changed Notification" % self.app.config.get('app_name'))
                    user_token = models.User.create_auth_token(self.user_id)
                    confirmation_url = self.uri_for("email-changed-check",
                                                    user_id=user_info.get_id(),
                                                    encoded_email=utils.encode(new_email),
                                                    token=user_token,
                                                    _full=True)

                    # load email's template
                    template_val = {
                        "app_name": self.app.config.get('app_name'),
                        "first_name": user.name,
                        "username": user.username,
                        "new_email": new_email,
                        "confirmation_url": confirmation_url,
                        "support_url": self.uri_for("contact", _full=True)
                    }

                    # Two notifications are queued: one to the OLD address so
                    # the change can be spotted, one with the confirmation
                    # link to the NEW address. The change only takes effect
                    # when the link is opened (EmailChangedCompleteHandler).
                    old_body_path = "emails/email_changed_notification_old.txt"
                    old_body = self.jinja2.render_template(old_body_path, **template_val)
                    new_body_path = "emails/email_changed_notification_new.txt"
                    new_body = self.jinja2.render_template(new_body_path, **template_val)

                    email_url = self.uri_for('taskqueue-send-email')
                    taskqueue.add(url=email_url, params={
                        'to': user.email,
                        'subject': subject,
                        'body': old_body,
                    })
                    taskqueue.add(url=email_url, params={
                        'to': new_email,
                        'subject': subject,
                        'body': new_body,
                    })

                    # display successful message
                    msg = _(
                        "Please check your new email for confirmation. Your email will be updated after confirmation.")
                    self.add_message(msg, 'success')
                    return self.redirect_to('edit-profile')

                else:
                    self.add_message(_("You didn't change your email."), "warning")
                    return self.redirect_to("edit-email")

            except (InvalidAuthIdError, InvalidPasswordError), e:
                # Returns error message to self.response.write in
                # the BaseHandler.dispatcher
                message = _("Incorrect password! Please enter your current password to change your account settings.")
                self.add_message(message, 'error')
                return self.redirect_to('edit-email')
        except (AttributeError, TypeError), e:
            # self.user_id missing or malformed: not logged in.
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        return forms.EditEmailForm(self)
class PasswordResetHandler(BaseHandler):
    """
    Password Reset Handler with Captcha
    """

    def get(self):
        """Render the password-reset form with a reCAPTCHA widget."""
        chtml = captcha.displayhtml(
            public_key=self.app.config.get('captcha_public_key'),
            use_ssl=(self.request.scheme == 'https'),
            error=None)
        # NOTE(review): both sides of this sentinel check compare against the
        # PUBLIC key placeholder string, including the private-key side --
        # confirm that is the intended default in config.
        if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \
                        self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE":
            chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
                    '<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
                    'for API keys</a> in order to use reCAPTCHA.</div>' \
                    '<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
                    '<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
        params = {
            'captchahtml': chtml,
        }
        return self.render_template('password_reset.html', **params)

    def post(self):
        """Verify the captcha, then queue reset instructions when the
        supplied email/username matches an account."""
        # check captcha
        challenge = self.request.POST.get('recaptcha_challenge_field')
        response = self.request.POST.get('recaptcha_response_field')
        remoteip = self.request.remote_addr

        cResponse = captcha.submit(
            challenge,
            response,
            self.app.config.get('captcha_private_key'),
            remoteip)

        if cResponse.is_valid:
            # captcha was valid... carry on..nothing to see here
            pass
        else:
            _message = _('Wrong image verification code. Please try again.')
            self.add_message(_message, 'error')
            return self.redirect_to('password-reset')
        # check if we got an email or username
        email_or_username = str(self.request.POST.get('email_or_username')).lower().strip()
        if utils.is_email_valid(email_or_username):
            user = models.User.get_by_email(email_or_username)
            _message = _("If the email address you entered") + " (<strong>%s</strong>) " % email_or_username
        else:
            auth_id = "own:%s" % email_or_username
            user = models.User.get_by_auth_id(auth_id)
            _message = _("If the username you entered") + " (<strong>%s</strong>) " % email_or_username

        # The same neutral message is shown whether or not the account
        # exists, avoiding disclosure of registered addresses.
        _message = _message + _("is associated with an account in our records, you will receive "
                                "an email from us with instructions for resetting your password. "
                                "<br>If you don't receive instructions within a minute or two, "
                                "check your email's spam and junk filters, or ") + \
                   '<a href="' + self.uri_for('contact') + '">' + _('contact us') + '</a> ' + _(
            "for further assistance.")

        if user is not None:
            user_id = user.get_id()
            token = models.User.create_auth_token(user_id)
            email_url = self.uri_for('taskqueue-send-email')
            reset_url = self.uri_for('password-reset-check', user_id=user_id, token=token, _full=True)
            subject = _("%s Password Assistance" % self.app.config.get('app_name'))
            # load email's template
            template_val = {
                "username": user.username,
                "email": user.email,
                "reset_password_url": reset_url,
                "support_url": self.uri_for("contact", _full=True),
                "app_name": self.app.config.get('app_name'),
            }
            body_path = "emails/reset_password.txt"
            body = self.jinja2.render_template(body_path, **template_val)
            taskqueue.add(url=email_url, params={
                'to': user.email,
                'subject': subject,
                'body': body,
                'sender': self.app.config.get('contact_sender'),
            })
        self.add_message(_message, 'warning')
        return self.redirect_to('login')
class PasswordResetCompleteHandler(BaseHandler):
    """
    Handler to process the link of reset password that received the user
    """

    def get(self, user_id, token):
        """Show the new-password form when the reset token is still valid."""
        verify = models.User.get_by_auth_token(int(user_id), token)
        params = {}
        # verify[0] is the user entity (presumably a (user, timestamp)
        # pair -- confirm against the model); None means a bad/expired token.
        if verify[0] is None:
            message = _('The URL you tried to use is either incorrect or no longer valid. '
                        'Enter your details again below to get a new one.')
            self.add_message(message, 'warning')
            return self.redirect_to('password-reset')
        else:
            return self.render_template('password_reset_complete.html', **params)

    def post(self, user_id, token):
        """Store the new password and log the user in."""
        verify = models.User.get_by_auth_token(int(user_id), token)
        user = verify[0]
        password = self.form.password.data.strip()
        if user and self.form.validate():
            # Password to SHA512
            password = utils.hashing(password, self.app.config.get('salt'))

            user.password = security.generate_password_hash(password, length=12)
            user.put()
            # Delete token
            models.User.delete_auth_token(int(user_id), token)
            # Login User
            self.auth.get_user_by_password(user.auth_ids[0], password)
            self.add_message(_('Password changed successfully.'), 'success')
            return self.redirect_to('home')
        else:
            # Either the token no longer resolves a user or the two password
            # fields failed form validation.
            self.add_message(_('The two passwords must match.'), 'error')
            return self.redirect_to('password-reset-check', user_id=user_id, token=token)

    @webapp2.cached_property
    def form(self):
        return forms.PasswordResetCompleteForm(self)
class EmailChangedCompleteHandler(BaseHandler):
    """
    Handler for completed email change
    Will be called when the user click confirmation link from email
    """

    def get(self, user_id, encoded_email, token):
        """Apply the pending email change once the emailed link is opened.

        Args:
            user_id: datastore id of the user, from the URL.
            encoded_email: the new address, encoded with utils.encode.
            token: auth token proving the link came from our notification.
        """
        verify = models.User.get_by_auth_token(int(user_id), token)
        email = utils.decode(encoded_email)
        if verify[0] is None:
            message = _('The URL you tried to use is either incorrect or no longer valid.')
            self.add_message(message, 'warning')
            self.redirect_to('home')
        else:
            # save new email
            user = verify[0]
            user.email = email
            user.put()
            # delete token
            models.User.delete_auth_token(int(user_id), token)
            # add successful message and redirect
            message = _('Your email has been successfully updated.')
            self.add_message(message, 'success')
            self.redirect_to('edit-profile')
class HomeRequestHandler(RegisterBaseHandler):
    """Handler to show the home page."""

    def get(self):
        """Render the landing page."""
        params = {}
        return self.render_template('home.html', **params)
| skumar07/Air-Share-Real | boilerplate/handlers.py | Python | lgpl-3.0 | 65,606 | [
"VisIt"
] | b457b5458b29f8dc3fb60e45ddc747132caf449f57a6af9badb2ea71623d86a0 |
# NOTE: Do not add any dependencies to this file - it needs to be run in a
# subprocess by a python version that might not have any installed packages,
# including importlab itself.
from __future__ import print_function
import ast
import json
import os
import sys
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] >= 3:
# Note that `import importlib` does not work: accessing `importlib.util`
# will give an attribute error. This is hard to reproduce in a unit test but
# can be seen by installing importlab in a Python 3 environment and running
# `importlab --tree --trim` on a file that imports one of:
# * jsonschema (`pip install jsonschema`)
# * pytype (`pip install pytype`),
# * dotenv (`pip install python-dotenv`)
# * IPython (`pip install ipython`)
# A correct output will look like:
# Reading 1 files
# Source tree:
# + foo.py
# :: jsonschema/__init__.py
# An incorrect output will be missing the line with the import.
import importlib.util
else:
import imp
class ImportFinder(ast.NodeVisitor):
    """Collect every import statement encountered while walking an AST.

    Each import is recorded in `self.imports` as a
    (name, alias, is_from, is_star) tuple, in source order.
    """

    def __init__(self):
        # tuples of (name, alias, is_from, is_star)
        self.imports = []

    def visit_Import(self, node):
        # `import a, b as c` yields one alias entry per imported module.
        self.imports.extend(
            (alias.name, alias.asname, False, False) for alias in node.names)

    def visit_ImportFrom(self, node):
        # Relative imports are encoded as leading dots, one per level.
        prefix = '.' * node.level + (node.module or '')
        for alias in node.names:
            if alias.name == '*':
                self.imports.append((prefix, alias.asname, True, True))
            else:
                if not prefix.endswith('.'):
                    prefix = prefix + '.'
                self.imports.append(
                    (prefix + alias.name, alias.asname or alias.name, True, False))
def _find_package(parts):
"""Helper function for _resolve_import_versioned."""
for i in range(len(parts), 0, -1):
prefix = '.'.join(parts[0:i])
if prefix in sys.modules:
return i, sys.modules[prefix]
return 0, None
def is_builtin(name):
    """Return True for compiled-in modules and the __future__ pseudo-module."""
    if name.startswith("__future__"):
        return True
    return name in sys.builtin_module_names
# Pytype doesn't recognize the `major` attribute:
# https://github.com/google/pytype/issues/127.
if sys.version_info[0] < 3:
    def _resolve_import_versioned(name):
        """Python 2 helper function for resolve_import."""
        # Resolve as much of the dotted name as possible through already
        # imported modules, then walk the remaining parts with imp.find_module.
        parts = name.split('.')
        i, mod = _find_package(parts)
        if mod:
            if hasattr(mod, '__file__'):
                path = [os.path.dirname(mod.__file__)]
            elif hasattr(mod, '__path__'):
                path = mod.__path__
            else:
                path = None
        else:
            path = None
        for part in parts[i:]:
            try:
                if path:
                    # NOTE(review): on the first iteration `path` is already a
                    # list, so [path] passes a nested list to imp.find_module;
                    # on later iterations it is the string spec[1]. Confirm the
                    # first-iteration behavior is intended.
                    spec = imp.find_module(part, [path])
                else:
                    spec = imp.find_module(part)
            except ImportError:
                return None
            # spec is (file, pathname, description); keep the pathname for the
            # next level of the dotted name.
            path = spec[1]
        return path
else:
    def _resolve_import_versioned(name):
        """Python 3 helper function for resolve_import."""
        try:
            spec = importlib.util.find_spec(name)
            # spec is None for unresolvable names; origin is the file path
            # (or a marker such as 'built-in' / 'frozen' for non-file modules).
            return spec and spec.origin
        except Exception:
            # find_spec may re-raise an arbitrary exception encountered while
            # inspecting a module. Since we aren't able to get the file path in
            # this case, we consider the import unresolved.
            return None
def _resolve_import(name):
"""Helper function for resolve_import."""
if name in sys.modules:
return getattr(sys.modules[name], '__file__', name + '.so')
return _resolve_import_versioned(name)
def resolve_import(name, is_from, is_star):
    """Use python to resolve an import.

    Args:
        name: The fully qualified module name.
        is_from: True when the import came from a `from ... import` form.
        is_star: True for `from ... import *`.

    Returns:
        The path to the module source file or None.
    """
    # Don't try to resolve relative imports or builtins here; they will be
    # handled by resolve.Resolver
    if name.startswith('.') or is_builtin(name):
        return None
    path = _resolve_import(name)
    if path is None and is_from and not is_star:
        # `from pkg import symbol` may name an attribute rather than a
        # submodule; retry with the containing package.
        package, _ = name.rsplit('.', 1)
        path = _resolve_import(package)
    return path
def get_imports(filename):
    """Get all the imports in a file.

    Each import is a tuple of:
      (name, alias, is_from, is_star, source_file)
    """
    with open(filename, "rb") as handle:
        source = handle.read()
    collector = ImportFinder()
    collector.visit(ast.parse(source, filename=filename))
    # Append the resolved file path to each collected import tuple.
    return [
        item + (resolve_import(item[0], item[2], item[3]),)
        for item in collector.imports
    ]
def print_imports(filename):
    """Print the imports of *filename* to stdout as a JSON list.

    (The original docstring claimed csv format, but the output has always
    been json.dumps of the import tuples.)
    """
    print(json.dumps(get_imports(filename)))
def read_imports(imports_str):
    """Parse a JSON import list previously produced by print_imports.

    (The original docstring was copy-pasted from print_imports; this
    function reads, it does not print.)
    """
    return json.loads(imports_str)
if __name__ == "__main__":
    # This is used to parse a file with a different python version, launching a
    # subprocess and communicating with it via reading stdout.
    filename = sys.argv[1]  # path of the file whose imports should be printed
    print_imports(filename)
| google/importlab | importlab/import_finder.py | Python | apache-2.0 | 5,424 | [
"VisIt"
] | 05ca81c7fbfea01ff776e78913e03f1ed94a3666f4e2ac44be03b601a5e669d6 |
# proxy module
from __future__ import absolute_import
from mayavi.tools.data_wizards.csv_source_factory import *
| enthought/etsproxy | enthought/mayavi/tools/data_wizards/csv_source_factory.py | Python | bsd-3-clause | 113 | [
"Mayavi"
] | 25de5fde38b6ca8eacf5f2996b126f8e68375a08a5def6df66a01774a458ea7b |
# encoding=utf8
# python plugin framework
# snowdreamist@live.cn
# 2014/1/4
# NOTE
__version__ = '1.0.0.0'
import sys
import os
import os.path
import types
import logging
import time
from ConfigParser import ConfigParser
class PluginSource(object):
    """
    The plugin source which engine absorbs plugins from

    Subclasses implement __getplugins__ to produce plugins; iteration over
    a source lazily loads and caches them.
    """
    def __init__(self):
        # NOTE(review): __metaPlugins is never read in this class -- presumably
        # reserved for subclasses or future use; confirm before removing.
        self.__metaPlugins = None
        self.__loaded = False    # lazy-load guard used by __iter__
        self.__exports = None    # cached list of discovered plugins

    def __iter__(self):
        """
        Iterate all plugins
        """
        # Load lazily on first iteration; subsequent iterations reuse the cache.
        if not self.__loaded:
            self.load()
        if not self.__exports is None:
            for exp in self.__exports:
                yield exp

    def __getplugins__(self):
        """
        Get all plugins in this source
        This method required [ InternalExportedPlugin ] or a generator as return value
        """
        pass

    def __scanforexports__(self, itertypes, deep = 0):
        """
        Scan iterable types for exported type

        Args:
            itertypes: iterable of (name, object) pairs, typically the items
                of a module's __dict__.
            deep: current recursion depth; nested modules are scanned only
                while deep < __max_module_depth__ (a module-level constant
                defined elsewhere in this file).
        """
        exports = []
        for key, pyType in itertypes:
            # check type
            if key == __default_plugin_local_repository_name__:
                # Found one: the local repository maps internal names to plugins.
                exports.extend(pyType.values())
            elif isinstance(pyType, types.ModuleType) and deep < __max_module_depth__:
                # A module, do scan
                exports.extend(self.__scanforexports__(pyType.__dict__.iteritems(), deep + 1))
        return exports

    def load(self):
        """
        Load all plugins
        """
        self.__loaded = True
        exports = list(self.__getplugins__()) # In order to support generator as return value
        if exports is None or len(exports) == 0:
            return
        self.__exports = exports
class FilePluginSource(PluginSource):
    """
    Import plugins from python file
    """
    def __init__(self, filename):
        # NOTE(review): despite the name, `filename` is passed straight to
        # __import__, which expects a dotted module name rather than a
        # filesystem path -- confirm callers pass importable module names.
        self.__filename = filename
        super(FilePluginSource, self).__init__()

    @property
    def filename(self): return self.__filename

    def __getplugins__(self):
        """
        Import file

        Imports the module and scans its namespace for exported plugins.
        """
        module = __import__(self.filename)
        return self.__scanforexports__(module.__dict__.iteritems())
class PackagePluginSource(PluginSource):
    """
    Import plugins from package
    """
    def __init__(self, packageName, pathToAdd = None):
        # packageName: dotted name of the package to import and scan.
        # pathToAdd: optional directory appended to sys.path before importing.
        self.__packageName = packageName
        self.__pathToAdd = pathToAdd
        super(PackagePluginSource, self).__init__()

    @property
    def packageName(self): return self.__packageName

    def __getplugins__(self):
        """
        Import package
        """
        # Make the package importable first if an extra path was supplied.
        if not self.__pathToAdd is None and not self.__pathToAdd in sys.path:
            sys.path.append(self.__pathToAdd)
        # Import the module
        module = __import__(self.packageName, globals(), locals(), [], -1)
        return self.__scanforexports__(module.__dict__.iteritems())
class ImportedPluginSource(PluginSource):
    """Plugin source backed by an already-imported module object."""

    def __init__(self, module):
        self.__module = module
        super(ImportedPluginSource, self).__init__()

    @property
    def module(self):
        """The wrapped module object."""
        return self.__module

    def __getplugins__(self):
        """Scan the wrapped module's namespace for exported plugins."""
        namespace = self.__module.__dict__
        return self.__scanforexports__(namespace.iteritems())
class ConfigParserPluginSource(PluginSource):
    """
    Import plugins by config parser

    Reads the [plugin] section; keys starting with 'path' extend sys.path,
    keys starting with 'file'/'pack' name modules or packages to scan.
    """
    def __init__(self, parser):
        self.__parser = parser
        super(ConfigParserPluginSource, self).__init__()

    def __getplugins__(self):
        """
        Import package

        Yields plugins lazily; PluginSource.load materialises the generator
        with list(). Keys are processed in parser order, so 'path' entries
        placed before 'file'/'pack' entries take effect first.
        """
        # Get plugin info
        if self.__parser.has_section('plugin'):
            # Path to add
            for key, path in self.__parser.items('plugin'):
                if key.startswith('path'):
                    if not path in sys.path:
                        sys.path.append(path)
                elif key.startswith('file'):
                    # Import as file
                    source = FilePluginSource(path)
                    for plugin in source.__getplugins__():
                        yield plugin
                elif key.startswith('pack'):
                    # Import as package
                    source = PackagePluginSource(path)
                    for plugin in source.__getplugins__():
                        yield plugin
class ConfigPluginSource(ConfigParserPluginSource):
    """Plugin source that reads plugin locations from a config file."""

    def __init__(self, filename):
        # Expose the config file's directory to interpolation as
        # %(__configpath__)s so entries can be declared relative to it.
        absolute = os.path.abspath(filename)
        config_dir = absolute[:absolute.rfind(os.path.sep)]
        parser = ConfigParser(defaults={'__configpath__': config_dir})
        parser.read(filename)
        super(ConfigPluginSource, self).__init__(parser)
def export(name, shared = None, **metas):
    """
    Export class or method as a plugin
    Necessary:
        name                            export name. This name somehow looks like a namespace
    Options:
        shared                          shared instance of not, if None means shared or non-shared boths are ok. None by default
        metas                           meta data, stores kv value
    Special meta info:
        __author__                      author name
        __email__                       author email
        __version__                     version info
        __site__                        support site info

    Returns a decorator that registers the decorated object in its defining
    module's local plugin repository and returns the object unchanged.
    """
    def wrapper(pyType):
        # The module that physically contains the exported object; the plugin
        # record is stashed in that module's namespace for later scanning.
        selfMod = sys.modules[pyType.__module__]
        plugin, innerName = None, None # The plugin and the inner name
        if isinstance(pyType, types.TypeType):
            # A new-style class type
            # Create new InteralExportedPlugin
            plugin = InternalExportedPlugin(name, pyType, shared, metas)
            innerName = pyType.__name__
        elif isinstance(pyType, types.MethodType) or isinstance(pyType, types.FunctionType) or isinstance(pyType, types.LambdaType):
            # A method type, need a special plugin wrapper
            plugin = InternalExportedMethodPlugin(name, pyType, shared, metas)
            # id() keeps the inner name unique across same-named callables.
            innerName = '%s_%s' % (pyType.__name__, id(pyType))
        elif isinstance(pyType, object):
            # An object instance
            plugin = InternalExportedInstancePlugin(name, pyType, shared, metas)
            innerName = '%s_%s' % (type(pyType).__name__, id(pyType))
        else:
            raise RuntimeError('Unsupported export type %s' % pyType)
        # The internal name
        internalName = '__pluginpy_%s_%s__' % (name, innerName)
        # Insert plugin type into a special object
        if not hasattr(selfMod, __default_plugin_local_repository_name__):
            # Create one
            localRsp = InternalLocalPluginRepository(selfMod)
            setattr(selfMod, __default_plugin_local_repository_name__, localRsp)
        else:
            localRsp = getattr(selfMod, __default_plugin_local_repository_name__)
        if internalName in localRsp:
            raise ValueError('Conflict plugin export name %s' % pyType.__name__)
        localRsp[internalName] = plugin
        return pyType
    return wrapper
class PluginRepository(object):
    """
    Stores all plugin info in a dotted-namespace tree and performs import lookups.
    An application may create several repositories, each managing different plugins;
    a module-level default repository is used when none is specified.
    NOTE:
        The repository aliases the module-wide default config dict. Assigning a new
        dict (a copy/deepcopy with all keys present) to `config` avoids mutating the
        shared default.
    """
    def __init__(self):
        # Shared default configuration (intentionally the module-level dict)
        self.config = __default_plugin_repository_config__
        # Root node of the namespace tree; all entries hang off this node
        self.__root = InternalPluginRepositoryEntry('-', self, None, None, None)
    def __getitem__(self, namespace):
        """
        Look up a repository entry by dotted namespace (None when absent).
        """
        return self.__root.visit(namespace)
    def iterAllExportedPlugins(self):
        """
        Iterate every exported plugin in the whole namespace tree.
        """
        return self.__root.iterAllExportedPlugins()
    def addPluginSource(self, source):
        """
        Register every plugin yielded by a plugin source.
        Load failures re-raise only when 'source.loadfailed.throw' is enabled.
        """
        try:
            for exportedPlugin in source:
                targetEntry = self.__root.visit(exportedPlugin.name, True)
                if targetEntry.values is None:
                    targetEntry.values = [ exportedPlugin ]
                else:
                    targetEntry.values.append(exportedPlugin)
        except Exception:
            if self.config['source.loadfailed.throw']:
                raise
    def addPluginSources(self, sources):
        """
        Register several plugin sources in order.
        """
        for pluginSource in sources:
            self.addPluginSource(pluginSource)
    def importOne(self, namespace, shared = None, filterFunc = None, argsFunc = None):
        """
        Import exactly one plugin instance from the namespace.
        Returns the instance, or None when nothing matches and
        'import.one.onzero.throw' is disabled.
        """
        entry = self[namespace]
        if entry is None or not entry.values:
            # Nothing exported under this namespace
            if self.config['import.one.onzero.throw']:
                raise ImportError('No plugin found')
            return None
        return entry.importOne(shared, filterFunc, argsFunc)
    def importMany(self, namespace, shared = None, filterFunc = None, argsFunc = None):
        """
        Import every matching plugin under the namespace.
        Returns a list of plugin instances; empty list when none found.
        """
        entry = self[namespace]
        if entry is None or not entry.values:
            return []
        return entry.importMany(shared, filterFunc, argsFunc)
class ImportError(Exception):
    """
    Raised when a plugin import fails (zero or multiple matches).
    NOTE: intentionally shadows the builtin ImportError within this module.
    """
    def __init__(self, message):
        # Chain to Exception so str(e) and e.args carry the message
        # (previously Exception.__init__ was never called, leaving them empty)
        super(ImportError, self).__init__(message)
        self.message = message
# -*- ------------------------------ Internal implementation -------------------------------- -*-
class InternalExportedPlugin(object):
    """
    Holds everything known about one exported plugin: export name, the exported
    type, the shared/non-shared mode, and the metadata dict.
    """
    def __init__(self, name, pluginType, shared, metas):
        self.name = name
        self.pluginType = pluginType
        self.shared = shared
        self.metas = metas
        # Lazily-created cached instance, used when imported in shared mode
        self.__instance = None
    def __repr__(self):
        if self.shared == True:
            sharedLabel = 'shared'
        elif self.shared == False:
            sharedLabel = 'non-shared'
        else:
            sharedLabel = 'any'
        authorLabel = '' if self.author is None else self.author
        versionLabel = '' if self.version is None else self.version
        return '<plugin: %s [%s][%s] author: %s, version: %s>' % (
                self.name, self.pluginType, sharedLabel, authorLabel, versionLabel)
    def __createinstance__(self, argsFunc):
        """
        Instantiate pluginType, optionally with args supplied by argsFunc.
        """
        if argsFunc is None:
            return self.pluginType()
        args, kwargs = argsFunc()
        return self.pluginType(*args, **kwargs)
    @property
    def author(self):
        # '__author__' metadata, or None when absent
        return self.metas.get('__author__')
    @property
    def version(self):
        # '__version__' metadata, or None when absent
        return self.metas.get('__version__')
    def check(self, shared, filterFunc):
        """
        Return True when this plugin satisfies the requested shared mode
        and the optional filter callback.
        """
        # An explicit shared/non-shared mismatch rules the plugin out
        if shared == True and self.shared == False:
            return False
        if shared == False and self.shared == True:
            return False
        if filterFunc is None:
            return True
        return filterFunc(self)
    def instance(self, shared, argsFunc):
        """
        Return a plugin instance: a fresh one for non-shared imports, the
        cached singleton for shared (or unspecified) imports.
        """
        if shared == False:
            # Always build a fresh, private instance
            return self.__createinstance__(argsFunc)
        if shared == True or shared is None:
            # Prefer the shared singleton when either mode is acceptable
            if self.__instance is None:
                self.__instance = self.__createinstance__(argsFunc)
            return self.__instance
        raise ValueError('Unknown shared mode %s' % shared)
class InternalExportedMethodPlugin(InternalExportedPlugin):
    """
    Wrapper for plugins exported from a plain function, bound method or lambda.
    """
    def instance(self, shared, argsFunc):
        """
        Return the wrapped callable itself.
        Callables need no construction step, so both shared and argsFunc
        are ignored.
        """
        return self.pluginType
class InternalExportedInstancePlugin(InternalExportedPlugin):
    """
    Wrapper for plugins exported as an already-built object instance.
    """
    def instance(self, shared, argsFunc):
        """
        Return the wrapped object itself.
        The object already exists, so both shared and argsFunc are ignored.
        """
        return self.pluginType
class InternalLocalPluginRepository(dict):
    """
    Per-module plugin registry: maps internal plugin names to the plugin
    wrappers declared in that module.
    """
    def __init__(self, module):
        # Initialise the dict part, then remember the owning module
        super(InternalLocalPluginRepository, self).__init__()
        self.module = module
class InternalPluginRepositoryEntry(object):
    """
    Represent an entry (node) of the plugin repository namespace tree.
    An entry is a node in namespace, e.g.
        name:   security.auth.namepwdAuth
        entry:  security, auth, namepwdAuth
    Each node keeps its child nodes in a dict and the plugins exported
    directly at its namespace in `values` (a list, or None when empty).
    """
    def __init__(self, name, rsp, parent = None, children = None, values = None):
        # name: this node's single namespace segment
        self.__name = name
        # rsp: owning PluginRepository (used to read its config)
        self.__rsp = rsp
        self.parent = parent
        # children: dict of name -> InternalPluginRepositoryEntry, or None
        self.__children = children
        # values: list of exported plugins at this exact namespace, or None
        self.values = values
    def __repr__(self):
        # NOTE(review): self.parent.name raises AttributeError on the root
        # node (parent is None) — confirm repr is never used on the root
        return '<Entry %s P:%s C:[%s] V:[%s]>' % (self.name, self.parent.name, ','.join([ x.name for x in self ]), ','.join([ x.name for x in self.values ]) if not self.values is None else '')
    @property
    def name(self):
        """
        Entry name (this node's namespace segment)
        """
        return self.__name
    @property
    def hasChildren(self):
        """
        If this node has child node
        """
        return True if not self.__children is None and len(self.__children) > 0 else False
    def __len__(self):
        """
        The children count
        """
        return 0 if self.__children is None else len(self.__children)
    def __contains__(self, name):
        """
        If this entry contains an name node as a child
        """
        if self.hasChildren:
            return name in self.__children
        else:
            return False
    def __getitem__(self, name):
        """
        Get a child by name; raises KeyError when absent
        """
        if self.hasChildren:
            return self.__children[name]
        else:
            raise KeyError(name)
    def __setitem__(self, name, entry):
        """
        Set a child, creating the children dict lazily
        """
        if self.__children is None:
            self.__children = {}
        # NOTE, This will not remove entry from its previous parent
        # And it seems there's no need to do that
        self.__children[name] = entry
        entry.parent = self
    def __iter__(self):
        """
        Iterate child entries (Python 2: dict.itervalues)
        """
        if not self.__children is None:
            for child in self.__children.itervalues():
                yield child
    def visit(self, namespace, autoCreate = False):
        """
        Get an entry by a dotted namespace, walking one segment at a time.
        The namespace's super root is the node you called itself.
        When autoCreate is True, missing intermediate nodes are created;
        otherwise None is returned on the first missing segment.
        """
        names = namespace.split('.')
        # Visit every name
        node = self
        for name in names:
            if not name in node:
                # Node not found
                if autoCreate:
                    node[name] = InternalPluginRepositoryEntry(name, self.__rsp)
                else:
                    return None
            # Next
            node = node[name]
        return node
    def iterAllExportedPlugins(self):
        """
        Iterate all exported plugins: this node's values first, then every
        descendant's values (depth-first recursion).
        """
        if not self.values is None:
            for value in self.values:
                yield value
        for childEntry in self:
            for value in childEntry.iterAllExportedPlugins():
                yield value
    def importOne(self, shared = None, filterFunc = None, argsFunc = None):
        """
        Import one plugin from this node's values.
        Zero or multiple matches either raise ImportError or return None,
        depending on the repository config flags.
        """
        if self.values is None:
            # No plugin found
            if self.__rsp.config['import.one.onzero.throw']:
                raise ImportError('No plugin found')
            else:
                return None
        # Check and get instance
        plugins = []
        for plugin in self.values:
            if plugin.check(shared, filterFunc):
                # A good one
                plugins.append(plugin)
        if len(plugins) != 1:
            # Multiple plugin found (or zero after filtering)
            if self.__rsp.config['import.one.onmulti.throw']:
                raise ImportError('Multiple plugin found')
            else:
                return None
        return plugins[0].instance(shared, argsFunc)
    def importMany(self, shared = None, filterFunc = None, argsFunc = None):
        """
        Import every plugin at this node that passes the shared-mode and
        filter checks; returns a list of instances (possibly empty).
        """
        if self.values is None:
            return []
        # Check and get instance
        plugins = []
        for plugin in self.values:
            if plugin.check(shared, filterFunc):
                # A good one
                plugins.append(plugin.instance(shared, argsFunc))
        return plugins
# -*- ------------------------------------- Definition -------------------------------------- -*-
# Attribute name under which each exporting module stores its InternalLocalPluginRepository
__default_plugin_local_repository_name__ = '__pluginpy_local_repository__'
# Default plugin repository config (shared by every repository unless replaced)
__default_plugin_repository_config__ = {
    # Config if the framework should throw an exception when importOne found zero plugin
    'import.one.onzero.throw' : True,
    # Config if the framework should throw an exception when importOne found multiple plugins
    'import.one.onmulti.throw' : False,
    # Config if the framework should throw an exception when failed to load plugin from source
    'source.loadfailed.throw' : True,
}
# Max module plugin search depth
__max_module_depth__ = 3
# Default plugin repository used by the quick functions below
__default_plugin_repository__ = PluginRepository()
# -*- ----------------------------------- Quick functions ----------------------------------- -*-
def addPluginSource(source, repository = None):
    """
    Register a single plugin source on a repository.
    The module-wide default repository is used when repository is omitted.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    targetRepository.addPluginSource(source)
def addPluginSources(sources, repository = None):
    """
    Register several plugin sources on a repository.
    The module-wide default repository is used when repository is omitted.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    targetRepository.addPluginSources(sources)
def loadConfig(configfile, repository = None):
    """
    Register the plugins declared in a config file.
    May be invoked multiple times, but config files can conflict with
    each other, so be careful.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    targetRepository.addPluginSource(ConfigPluginSource(configfile))
def importOne(namespace, shared = None, filterFunc = None, repository = None, argsFunc = None):
    """
    Import exactly one plugin and return its object instance.
    The module-wide default repository is used when repository is omitted.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    return targetRepository.importOne(namespace, shared, filterFunc, argsFunc)
def importMany(namespace, shared = None, filterFunc = None, repository = None, argsFunc = None):
    """
    Import every matching plugin; returns a list of object instances.
    The module-wide default repository is used when repository is omitted.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    return targetRepository.importMany(namespace, shared, filterFunc, argsFunc)
def importEntry(namespace, repository = None):
    """
    Look up a plugin entry node by namespace (None when absent).
    The module-wide default repository is used when repository is omitted.
    """
    targetRepository = __default_plugin_repository__ if repository is None else repository
    return targetRepository[namespace]
def configDefault(key, value):
    """
    Set one key in the module-wide default repository configuration.
    """
    __default_plugin_repository_config__.update({ key: value })
def defaultRepository():
    """
    Accessor for the module-wide default PluginRepository.
    """
    return __default_plugin_repository__
if __name__ == '__main__':
    # The main method implement a plugin listing tool
    from argparse import ArgumentParser
    def getArguments():
        # Build and parse the command line: source type, verbosity, input paths
        parser = ArgumentParser()
        parser.add_argument('-t', '--type', dest = 'type', choices = [ 'file', 'package', 'config' ], required = True, help = 'Plugin source type')
        parser.add_argument('-a', '--all', dest = 'allInfos', action = 'store_true', default = False, help = 'Show all plugin infos')
        parser.add_argument('inputs', nargs = '+', help = 'Inputs')
        return parser.parse_args()
    def main():
        # Map the --type flag to the matching plugin source class
        args = getArguments()
        sourceType = None
        if args.type == 'file':
            sourceType = FilePluginSource
        elif args.type == 'package':
            sourceType = PackagePluginSource
        elif args.type == 'config':
            sourceType = ConfigPluginSource
        else:
            raise ValueError('Invalid plugin type %s' % args.type)
        # Add plugin sources (one source per input path)
        for inputPath in args.inputs:
            source = sourceType(inputPath)
            addPluginSource(source)
        # List all plugins registered in the default repository
        plugins = list(defaultRepository().iterAllExportedPlugins())
        print 'Totally %d exported plugins' % len(plugins)
        if args.allInfos:
            # Print every plugins' info
            # NOTE(review): `value` is computed but repr(plugin) is recomputed
            # below — harmless duplication, `value` could be reused
            for plugin in plugins:
                value = repr(plugin)
                if isinstance(value, unicode):
                    print repr(plugin).encode('utf8')
                else:
                    print repr(plugin)
    main()
| snowdreamist/wfpy | src/wfpy/pluginpy.py | Python | gpl-3.0 | 22,059 | [
"VisIt"
] | dd4f1c58ee9b992fb7f0df777083ff911d7c65f60bb9b20f10d33c6f7c2c8119 |
#!/usr/bin/env python2
import os, sys, re
# Scripts and programs
# Interpreter and helper script used by build_pdb() below
python_exec = 'python2'
build_pdb_script = '~/opt/script/BuildAllAtomsFromLammps.py'
# Auxillary functions
def build_pdb(dump_file, snapshot_index, structure_index):
path, file_name = os.path.split(dump_file)
print path
print "%s %s %s structure%s %s -seq %s/*.seq" % (python_exec, build_pdb_script, dump_file, structure_index, snapshot_index, path)
os.system("%s %s %s structure%s %s -seq %s/*.seq" % (python_exec, build_pdb_script, dump_file, structure_index, snapshot_index, path))
# Lists
data_array = []        # per-file list of rows, each row a list of floats
files_array = []       # data file names, parallel to data_array
conditions = []        # parsed (column, threshold) string pairs
condition_signs = []   # '+' for greater-than, '-' for less-than
# File names and parameters
dump_file_name = "dump.lammpstrj"
output_file_name = "pick_structures_all.dat"
structure_output_file_name = "pick_structures.dat"
structure_index = 1
structure_stride = 305
max_pdbs_to_build = 20
pdb_index = 0
found_pdb_index = 0
if len(sys.argv) < 4:
    print "Usage: python pick_structures.py metadata output_directory cond1 (cond2 cond3 ...)"
    sys.exit()
# Read command line arguments
metadata_file = sys.argv[1]
output_directory = os.path.abspath(sys.argv[2])
if len(sys.argv) > 3:
    # Each condition looks like "3gt0.5" (column 3 > 0.5) or "3lt0.5"
    # NOTE(review): if an argument contains neither "gt" nor "lt",
    # `condition` is stale (or undefined on the first argument) and the
    # signs/conditions lists fall out of sync — confirm inputs are validated
    for j in range(3,len(sys.argv)):
        if "gt" in sys.argv[j]:
            condition_signs.append("+")
            condition = sys.argv[j].split("gt")
        if "lt" in sys.argv[j]:
            condition_signs.append("-")
            condition = sys.argv[j].split("lt")
        conditions.append(condition)
# Load all data into array
for line in open(metadata_file, "r"):
    line = line.split()
    file_name = line[0]
    if file_name == "#": continue
    files_array.append(file_name)
    data_array.append([])
    line_index = 0
    # Read every numeric row of the data file (skip the header line)
    for data in open(file_name, "r"):
        data = data.split()
        if data[0] == "#timestep": continue
        data_array[files_array.index(file_name)].append([])
        for i in range(len(data)):
            data_array[files_array.index(file_name)][line_index].append(float(data[i]))
        line_index += 1
# loop over data and output those points that satisfy all conditions
if not os.path.exists(output_directory):
    os.mkdir(output_directory)
os.chdir(output_directory)
# NOTE(review): structure_output_file is never closed below — rely on
# interpreter exit to flush, or add a close() call
output_file = open(output_file_name, "w")
structure_output_file = open(structure_output_file_name, "w")
vmd_out = open("vmd.tcl", "w")
for data_file in files_array:
    path_name, data_file_name = os.path.split(data_file)
    file_index = files_array.index(data_file)
    data_index = 0
    for data_point in data_array[file_index]:
        bad_condition = False
        # If all conditions are satisfied, print out the data
        for i in range(len(conditions)):
            if condition_signs[i] == "+":
                if not data_point[int(conditions[i][0])-1] > float(conditions[i][1]): bad_condition = True
            elif condition_signs[i] == "-":
                if not data_point[int(conditions[i][0])-1] < float(conditions[i][1]): bad_condition = True
            else:
                print "Bad condition argument."
                sys.exit()
        if not bad_condition:
            # print int(data_array[file_index].index(data_point)+1), found_pdb_index
            found_pdb_index += 1
            # The dump file lives next to the data file
            dump_file = data_file.replace(data_file_name,dump_file_name)
            output_file.write("%d %s %s\n" % (structure_index, dump_file, str(data_point).replace(',','').replace('[','').replace(']','')))
            # Build a PDB (and VMD loader commands) every structure_stride-th
            # match, up to max_pdbs_to_build structures
            if pdb_index < max_pdbs_to_build and found_pdb_index % structure_stride == 0:
                structure_output_file.write("%d %s %s\n" % (structure_index, dump_file, str(data_point).replace(',','').replace('[','').replace(']','')))
                build_pdb(dump_file, int(data_array[file_index].index(data_point)+1), structure_index)
                vmd_out.write("mol new structure%s.pdb\n" % structure_index)
                vmd_out.write("mol modcolor 0 [molinfo top] Index\n")
                vmd_out.write("mol modstyle 0 [molinfo top] NewCartoon 0.300000 10.000000 4.100000 0\n")
                pdb_index += 1
            structure_index += 1
        data_index += 1
output_file.close()
vmd_out.write("color Display Background white\n")
vmd_out.close()
| luwei0917/awsemmd_script | archive/back_pick_structures.py | Python | mit | 4,192 | [
"VMD"
] | 8f1bea98d7a4cc174dd5cd07499ee51b51af36664ca5299ec678a0647ef67b3d |
# -*- coding: utf-8 -*-
"""
Create an initial ATP Profile for an ECs Mesh and write it out as .vtp.
Thin driver around the shared GenerateATPMapV2 utility, configured for
the c8000 mesh.
"""
import os
import sys
# Run in current directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Import path for the GenerateATPMapV2 script.
importPath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../util'))
if not importPath in sys.path:
    sys.path.insert(1, importPath)
del importPath
import GenerateATPMapV2
# This is for the c8000 mesh: configure the module-level parameters that
# GenerateATPMapV2.buildATPMesh() reads.
GenerateATPMapV2.centrelineFile = "c8000Centreline.vtk"
GenerateATPMapV2.meshFile = "quadMeshFullECc8000.vtp"
GenerateATPMapV2.atpFile = "quadMeshFullATPc8000.vtp"
GenerateATPMapV2.debugAtpFile = "quadMeshFullATPV2Debugc8000.vtp"
GenerateATPMapV2.numBranches = 1
GenerateATPMapV2.numQuads = 8000
GenerateATPMapV2.numAxialQuads = 200
GenerateATPMapV2.numECsPerCol = 4
GenerateATPMapV2.atpGradient = 4.0
GenerateATPMapV2.atpMin = 0.1
GenerateATPMapV2.atpMax = 1.0
def main():
    # Delegate the actual mesh construction to the configured module
    GenerateATPMapV2.buildATPMesh()
if __name__ == '__main__':
    print "Starting", os.path.basename(__file__)
    main()
    print "Exiting", os.path.basename(__file__)
"VTK"
] | 99507936339ecf4c97297feb60d7e830373312bd6fdd47a9d7bd8c2a511f5ffe |
#!/usr/bin/env python
"""
Experimentations with feature tracking. Working towards the following paper:
Kanhere, Neeraj K., and Stanley T. Birchfield. "Real-time incremental segmentation and tracking of vehicles at low camera angles using stable features." IEEE Transactions on Intelligent Transportation Systems 9.1 (2008): 148-160. cecas.clemson.edu/~stb/publications/vehicle_tracking_its2008.pdf.
Notes
======
+ Uses background subtraction to isolate moving objects
+ Identifies corners in the image to seed feature tracker
+ Uses Lucas-Kanade feature tracker to track features across frames
+ Uses background mask to project points to ground and determine stable/unstable features (see Kanhere, et. al)
"""
# NOTE this requires OpenCV 3
# TODO collect all adjustable parameters, work on some calibration techniques/tools
# TODO implement grouping of features to create vehicle hypotheses
import os, sys, time, argparse
import rlcompleter, readline
import multiprocessing
import cv2
import numpy as np
from cvguipy import cvgui, cvgeom
def getFirstRunOfSize(bits, minSize=2):
    """
    Return the start index of the first run of consecutive truthy values in
    ``bits`` whose length is strictly greater than ``minSize``.
    Returns None when no run is long enough.
    """
    flags = np.asarray(bits, dtype=np.int32)
    # Pad with zeros so every run has a detectable rising and falling edge
    padded = np.concatenate(([0], flags, [0]))
    edges = np.diff(padded)
    starts = np.flatnonzero(edges > 0)   # +1 edge marks a run start
    ends = np.flatnonzero(edges < 0)     # -1 edge marks one past a run end
    lengths = ends - starts
    candidates = starts[lengths > minSize]
    if candidates.size:
        return candidates[0]
    return None
class Point(object):
    """
    Simple 2D point/vector with component-wise arithmetic, scalar
    multiply/divide, and L2 norm helpers.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "({}, {})".format(self.x, self.y)
    def __eq__(self, p):
        return self.x == p.x and self.y == p.y
    def __add__(self, p):
        return Point(self.x + p.x, self.y + p.y)
    def __sub__(self, p):
        return Point(self.x - p.x, self.y - p.y)
    def __neg__(self):
        # BUG FIX: __neg__ is a unary operator; the old signature took an
        # extra argument, so evaluating -point raised TypeError
        return Point(-self.x, -self.y)
    def __mul__(self, s):
        return Point(self.x*s, self.y*s)
    def __div__(self, s):
        # Scalar division (Python 2 '/' hook)
        return Point(self.x/s, self.y/s)
    # Python 3 dispatches '/' to __truediv__; alias so division works there too
    __truediv__ = __div__
    def asTuple(self):
        """Return the point as an (x, y) tuple."""
        return (self.x, self.y)
    def norm2(self):
        """Euclidean (L2) norm of the vector."""
        return np.sqrt(self.norm2Squared())
    def norm2Squared(self):
        """Squared L2 norm (avoids the square root)."""
        return self.x**2 + self.y**2
class Track(object):
    """
    A tracked feature: an ordered sequence of Points plus the per-step
    velocity between consecutive positions.
    """
    def __init__(self, trackId, color=None, smoothingWindow=5):
        self.trackId = trackId
        # Pick a random display color when none is supplied
        self.color = cvgui.randomColor() if color is None else color
        self.smoothingWindow = smoothingWindow
        self.points = []
        self.velocity = []
        self.lastVel = None
        self.lastPos = None
        self.smoothedVel = None
    def __repr__(self):
        return "[{}]: {}".format(self.trackId, self.points)
    def numPoints(self):
        """Number of recorded positions."""
        return len(self.points)
    def addPoint(self, x, y):
        """Append a position, updating the instantaneous velocity."""
        newPos = Point(x, y)
        if self.points:
            # Velocity is the displacement from the previous position
            self.lastVel = newPos - self.points[-1]
            self.velocity.append(self.lastVel)
        self.lastPos = newPos
        self.points.append(newPos)
    def removeOldest(self):
        """Drop the earliest recorded position."""
        del self.points[0]
    def pointArray(self, dtype=None):
        """Positions as an (N, 2) numpy array."""
        return np.array([pt.asTuple() for pt in self.points], dtype=dtype)
class featureTrackerPlayer(cvgui.cvPlayer):
    """
    Video player that combines KNN background subtraction with Lucas-Kanade
    optical-flow feature tracking, and classifies tracked features as
    stable/unstable by projecting them onto the foreground mask relative to
    a configured road transverse line (after Kanhere & Birchfield 2008).
    """
    def __init__(self, videoFilename, detectShadows=True, removeShadows=True, detectionInterval=5, **kwargs):
        super(featureTrackerPlayer, self).__init__(videoFilename, fps=15.0, **kwargs)
        self.detectShadows = detectShadows
        self.removeShadows = removeShadows
        self.lastFrameDrawn = -1
        self.fgmask = None
        self.fgframe = None
        self.grayImg = None
        self.times = []
        self.frameQueue = multiprocessing.Queue()
        self.fgmaskQueue = multiprocessing.Queue()
        self.backSubThread = None
        self.trackerThread = None
        self.tStart = time.time()
        # params for feature detector
        self.maxCorners = 1000
        self.qualityLevel = 0.01
        self.minDistance = 5
        self.blockSize = 7
        self.maxTrackLength = np.inf
        self.detectionInterval = detectionInterval # limit detection to keep noise down
        self.lastDetectionFrame = -1
        # params for feature tracker
        self.winSize = (15, 15)
        self.maxLevel = 2
        self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03) # termination criteria
        self.minFeatureTime = 3 # minimum length of trajectory to accept feature for keeping
        self.tracks = []
        # for classifying features
        self.roadAngle = None
        # BUG FIX: initialize here — previously only set inside open() when
        # config objects existed, so getGrayImage() could hit AttributeError
        self.detectionRegion = None
    def open(self):
        """Open the window/video and load geometry (road line, detection region) from config."""
        self.openWindow()
        self.openVideo()
        self.readFrame()
        # get angle of road from line in config file
        self.loadConfig()
        if len(self.objects) > 0:
            # transverse line across road
            lobj = self.objects['transverse'] if 'transverse' in self.objects else None
            if lobj is not None and len(lobj.points) == 2:
                p1, p2 = lobj.points.values()
                d = p2 - p1
                rho, self.roadAngle = cvgeom.cart2pol(d.x, d.y)
            # read detection region mask
            detreg = self.objects['detection_region'] if 'detection_region' in self.objects else None
            self.detectionRegion = None
            if detreg is not None:
                pts = np.array([detreg.points[i].asTuple() for i in sorted(detreg.points.keys())])
                # BUG FIX: removed a leftover `import pdb; pdb.set_trace()`
                # breakpoint that halted execution here on every run
                self.detectionRegion = np.uint8(cv2.fillConvexPoly(np.zeros((self.imgHeight,self.imgWidth)),pts,255))
            # remove objects so they aren't drawn on the image
            self.objects = cvgeom.ObjectCollection()
        # start background subtractor
        #self.backSub = cv2.createBackgroundSubtractorMOG2(detectShadows=self.detectShadows)
        self.backSub = cv2.createBackgroundSubtractorKNN(detectShadows=self.detectShadows)
    def getForegroundMask(self):
        """
        Use the background subtractor to generate a foreground mask, then
        apply a Gaussian filter to remove small patches of background.
        """
        fgmask = self.backSub.apply(self.img)
        return cv2.GaussianBlur(fgmask, (11, 11), 0)
    def getForegroundFrame(self):
        """Mask the current frame to its foreground, optionally dropping shadow pixels."""
        self.fgmask = self.getForegroundMask()
        if self.removeShadows:
            # shadow pixels are labeled 127 by the subtractor
            self.fgmask[self.fgmask==127] = 0
        self.img = cv2.bitwise_and(self.img, self.img, mask=self.fgmask)
    def getGrayImage(self):
        """Update the grayscale working image (restricted to the detection region if set)."""
        if self.grayImg is not None:
            self.lastGrayImage = self.grayImg.copy()
        mimg = self.img if self.detectionRegion is None else cv2.bitwise_and(self.img, self.img, mask=self.detectionRegion)
        self.grayImg = cv2.cvtColor(mimg, cv2.COLOR_BGR2GRAY)
    def resetTracks(self):
        """Clear targets to reset the feature tracker (after jumps and stuff)"""
        self.tracks = []
    def getNewTracks(self):
        """Get new features from the current frame and add them to our targets."""
        corners = cv2.goodFeaturesToTrack(self.grayImg, mask=self.detectionRegion, maxCorners=self.maxCorners, qualityLevel=self.qualityLevel, minDistance=self.minDistance, blockSize=self.blockSize)
        if corners is not None:
            for x, y in np.float32(corners).reshape(-1, 2):
                # make a new track with the next ID number
                tid = len(self.tracks)
                t = Track(tid)
                t.addPoint(x,y)
                #print(t)
                self.tracks.append(t)
            self.lastDetectionFrame = self.posFrames
    def trackFeatures(self):
        """Track features across frames. Most of this is from OpenCV's lk_track.py example."""
        # get a grayscale image for the feature detector/tracker
        self.getGrayImage()
        # if it's the first frame, or if we just jumped backwards, or if we jumped ahead (more than one frame ahead of last frame drawn)
        if self.lastFrameDrawn == -1 or self.lastFrameDrawn > self.posFrames or (self.posFrames-self.lastFrameDrawn) > 1:
            self.resetTracks() # reset the feature tracker
        # if we have any tracks, track them into the new frame (we'll hit this on the 2nd time around)
        if len(self.tracks) > 0:
            p0 = np.float32([tr.points[-1].asTuple() for tr in self.tracks]).reshape(-1, 1, 2)
            #print(p0)
            # track forwards
            p1, st, err = cv2.calcOpticalFlowPyrLK(self.lastGrayImage, self.grayImg, p0, None, winSize=self.winSize, maxLevel=self.maxLevel, criteria=self.criteria)
            # track backwards
            p0r, st, err = cv2.calcOpticalFlowPyrLK(self.grayImg, self.lastGrayImage, p1, None, winSize=self.winSize, maxLevel=self.maxLevel, criteria=self.criteria)
            # compare motion between the two - forward/backward results shouldn't differ much
            d = abs(p0-p0r).reshape(-1, 2).max(-1)
            goodTracks = d < 1
            # add new points to our tracks
            newTracks = []
            for tr, (x, y), goodFlag in zip(self.tracks, p1.reshape(-1, 2), goodTracks):
                if not goodFlag:
                    continue # only keep the good ones
                tr.addPoint(x, y)
                if tr.numPoints() > self.maxTrackLength:
                    tr.removeOldest() # trim tracks that are too long
                newTracks.append(tr)
            self.tracks = newTracks
        # if it's the first frame, or it's been detectionInterval frames since the last detection, detect some new features
        if self.lastDetectionFrame == -1 or (self.posFrames-self.lastDetectionFrame) >= self.detectionInterval:
            self.getNewTracks()
    def drawTrack(self, t, perturb=20):
        """Draw a track as a line leading up to a point."""
        if len(t.points) >= self.minFeatureTime and t.lastVel is not None and t.lastVel.norm2() > 1:
            # TODO move most of this to another method
            r = int(round(t.lastPos.y))
            c = int(round(t.lastPos.x))
            cl = max(0,c-perturb)
            cr = min(self.fgmask.shape[1]-1,c+perturb)
            # mask columns below the feature at left/center/right x offsets
            dl = self.fgmask[r:,cl]
            dm = self.fgmask[r:,c]
            dr = self.fgmask[r:,cr]
            bg = 0
            msz = 5
            if bg in dl and bg in dr:
                # project down from the feature point, and perturb left and right
                il = getFirstRunOfSize(dl==bg, minSize=msz)
                im = getFirstRunOfSize(dm==bg, minSize=msz)
                ir = getFirstRunOfSize(dr==bg, minSize=msz)
                if all([il,im,ir]):
                    # check angle of the resulting line WRT the road transverse line to group
                    # the features as stable (front/back of vehicles, closer to the ground),
                    # and unstable (sides of vehicles)
                    ix = cr - cl
                    iy = ir - il
                    rho, phi = cvgeom.cart2pol(ix, iy)
                    angleToRoad = cvgeom.rad2deg(phi-self.roadAngle)
                    if abs(angleToRoad) < 10:
                        # uncomment to draw trajectories as lines
                        #cv2.polylines(self.img, [t.pointArray(dtype=np.int32)], False, t.color, thickness=2)
                        # draw stable features in blue
                        if len(t.points) >= 1:
                            p = t.points[-1]
                            # if drawing from Track object
                            #self.drawPoint(cvgeom.imagepoint(p.x, p.y, index=t.trackId, color=t.color))
                            #cv2.circle(self.img, p.asTuple(), 4, cvgui.getColorCode('blue'), thickness=4)
                            cv2.circle(self.img, tuple(map(int, (c,r))), 4, cvgui.getColorCode('blue'), thickness=4)
                    # draw unstable features in red
                    else:
                        if len(t.points) >= 1:
                            p = t.points[-1]
                            #cv2.circle(self.img, p.asTuple(), 4, cvgui.getColorCode('red'), thickness=4)
                            cv2.circle(self.img, tuple(map(int, (c,r))), 4, cvgui.getColorCode('red'), thickness=4)
                    # TODO group features, etc.
    def makeAvgTime(self, tElapsed):
        """Push tElapsed into a 21-sample rolling window; return the window mean."""
        if len(self.times) > 20:
            self.times.pop(0)
        self.times.append(tElapsed)
        return np.mean(self.times)
    def drawExtra(self):
        """Per-frame hook: track features, update the foreground, draw the tracks."""
        # track features
        self.trackFeatures()
        # get a foreground mask & frame
        self.getForegroundFrame()
        #self.img = self.fgframe.copy()
        # plot all the tracks
        if len(self.tracks) > 0:
            #print(len(self.tracks))
            for t in self.tracks:
                self.drawTrack(t)
        self.lastFrameDrawn = self.posFrames
# Entry point
if __name__ == "__main__":
    # Parse the video file, optional geometry config file and section
    parser = argparse.ArgumentParser(description="Simple test of feature tracking with background extraction. Note that this requires OpenCV 3.")
    parser.add_argument('videoFilename', help="Name of the video file to play.")
    parser.add_argument('-f', dest='configFile', help="Name of the config file containing geometry.")
    parser.add_argument('-s', dest='configSection', help="Section of the config file containing geometry to load.")
    args = parser.parse_args()
    videoFilename = args.videoFilename
    player = featureTrackerPlayer(videoFilename, configFilename=args.configFile, configSection=args.configSection)
    try:
        player.play()
        #player.pause()
        #player.playInThread()
    # once the video is playing, make this session interactive
    except KeyboardInterrupt:
        os.environ['PYTHONINSPECT'] = 'Y' # start interactive/inspect mode (like using the -i option)
        readline.parse_and_bind('tab:complete') # turn on tab-autocomplete
| gwparikh/cvgui | featuretracker.py | Python | mit | 14,601 | [
"Gaussian"
] | 961c6f9a89c1ab2036044a98e0b047c65ec86b36072a04a32d66da257b333ac1 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
def call_galaxy_cli(args):
    """
    Run `ansible-galaxy collection <args>` through GalaxyCLI, isolating the
    global CLI-args singleton so each invocation starts clean.
    """
    saved_instance = co.GlobalCLIArgs._Singleton__instance
    co.GlobalCLIArgs._Singleton__instance = None
    try:
        GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
    finally:
        # Restore the singleton even when the CLI run raises
        co.GlobalCLIArgs._Singleton__instance = saved_instance
def artifact_json(namespace, name, version, dependencies, server):
    """
    Build the JSON body of a Galaxy collection-version detail response.
    """
    filename = '%s-%s-%s.tar.gz' % (namespace, name, version)
    payload = {
        'artifact': {
            'filename': filename,
            'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
            'size': 1234,
        },
        'download_url': '%s/download/%s' % (server, filename),
        'metadata': {
            'namespace': namespace,
            'name': name,
            'dependencies': dependencies,
        },
        'version': version
    }
    return to_text(json.dumps(payload))
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
    """
    Build the JSON body listing a collection's versions, in Galaxy v2 or v3
    response format depending on the advertised API versions.
    """
    available_api_versions = available_api_versions or {}
    api_version = 'v3' if 'v3' in available_api_versions else 'v2'
    results = [
        {
            'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
            'version': version,
        }
        for version in versions
    ]
    if api_version == 'v2':
        json_str = json.dumps({
            'count': len(versions),
            'next': None,
            'previous': None,
            'results': results
        })
    else:
        # v3 wraps the list in meta/data/links envelope
        json_str = json.dumps({
            'meta': {'count': len(versions)},
            'data': results,
            'links': {'first': None,
                      'last': None,
                      'next': None,
                      'previous': None},
        })
    return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
    """
    Build the JSON body of a Galaxy error response: v3 wraps an error list
    in 'errors', v2 carries at most one bare error object.
    """
    errors_to_return = errors_to_return or []
    available_api_versions = available_api_versions or {}
    if 'v3' in available_api_versions:
        response = {'errors': errors_to_return}
    else:
        # v2 responses can only describe a single error
        assert len(errors_to_return) <= 1
        response = errors_to_return[0] if errors_to_return else {}
    return to_text(json.dumps(response))
@pytest.fixture(autouse='function')
def reset_cli_args():
    # Clear the global CLI-args singleton before and after every test so
    # state cannot leak between tests
    co.GlobalCLIArgs._Singleton__instance = None
    yield
    co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
    """Build a throwaway collection (optionally with dependencies via indirect
    parametrization) and return (collection_path, collection_tar) as bytes."""
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    namespace = 'ansible_namespace'
    collection = 'collection'
    skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
    collection_path = os.path.join(test_dir, namespace, collection)
    # Scaffold the collection from the test skeleton
    call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
                     '--collection-skeleton', skeleton_path])
    # Indirect parametrization supplies a dependencies dict via request.param
    dependencies = getattr(request, 'param', None)
    if dependencies:
        galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
        with open(galaxy_yml, 'rb+') as galaxy_obj:
            existing_yaml = yaml.safe_load(galaxy_obj)
            existing_yaml['dependencies'] = dependencies
            # Rewrite the file in place with the injected dependencies
            galaxy_obj.seek(0)
            galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
            galaxy_obj.truncate()
    call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
    collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
    return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
    """Provide a GalaxyAPI client pointed at the public Galaxy server."""
    context.CLIARGS._store = {'ignore_certs': False}
    return api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
def test_build_requirement_from_path(collection_artifact):
    """A bare source tree (no MANIFEST.json) yields a wildcard, skip-install requirement."""
    actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
    assert actual.namespace == u'ansible_namespace'
    assert actual.name == u'collection'
    assert actual.b_path == collection_artifact[0]
    assert actual.api is None
    assert actual.skip is True
    assert actual.versions == set([u'*'])
    assert actual.latest_version == u'*'
    assert actual.dependencies == {}
@pytest.mark.parametrize('version', ['1.1.1', 1.1, 1])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
    """MANIFEST.json metadata (str, float or int version) wins over the directory name."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': version,
            'dependencies': {
                'ansible_namespace.collection': '*'
            }
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))
    actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
    # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.b_path == collection_artifact[0]
    assert actual.api is None
    assert actual.skip is True
    assert actual.versions == set([to_text(version)])
    assert actual.latest_version == to_text(version)
    assert actual.dependencies == {'ansible_namespace.collection': '*'}
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
    """A MANIFEST.json that is not valid JSON raises AnsibleError."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(b"not json")
    expected = "Collection file at '%s' does not contain a valid json string." % to_native(manifest_path)
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_path(collection_artifact[0], True)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
    """An empty version in MANIFEST.json falls back to '*' and emits one warning."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': '',
            'dependencies': {}
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
    # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.b_path == collection_artifact[0]
    assert actual.api is None
    assert actual.skip is True
    assert actual.versions == set(['*'])
    assert actual.latest_version == u'*'
    assert actual.dependencies == {}
    assert mock_display.call_count == 1
    # The warning is reflowed across lines; join it back before matching.
    actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
    expected_warn = "Collection at '%s' does not have a valid version set, falling back to '*'. Found version: ''" \
                    % to_text(collection_artifact[0])
    assert expected_warn in actual_warn
def test_build_requirement_from_tar(collection_artifact):
    """A built tarball produces a requirement pinned to its manifest version."""
    actual = collection.CollectionRequirement.from_tar(collection_artifact[1], True, True)
    assert actual.namespace == u'ansible_namespace'
    assert actual.name == u'collection'
    assert actual.b_path == collection_artifact[1]
    assert actual.api is None
    assert actual.skip is False
    assert actual.versions == set([u'0.1.0'])
    assert actual.latest_version == u'0.1.0'
    assert actual.dependencies == {}
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
    """A file that is not a tar archive raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    test_file = os.path.join(test_dir, b'fake.tar.gz')
    with open(test_file, 'wb') as test_obj:
        test_obj.write(b"\x00\x01\x02\x03")
    expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_tar(test_file, True, True)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
    """A tar containing only FILES.json (no MANIFEST.json) raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = to_bytes(json.dumps(
        {
            'files': [],
            'format': 1,
        }
    ))
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('FILES.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
    """A tar containing only MANIFEST.json (no FILES.json) raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = to_bytes(json.dumps(
        {
            'collection_info': {},
        }
    ))
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    expected = "Collection at '%s' does not contain the required file FILES.json." % to_native(tar_path)
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
    """A tar whose MANIFEST.json is not valid JSON raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    json_data = b"not a json"
    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)
    expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_name(galaxy_server, monkeypatch):
    """Resolving '*' from a server collects all versions and picks the newest."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.1.9', '2.1.10']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'2.1.9', u'2.1.10'])
    assert actual.latest_version == u'2.1.10'
    assert actual.dependencies is None
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch):
    """Pre-release versions are excluded when resolving the '*' requirement."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'1.0.1', u'2.0.1'])
    assert actual.latest_version == u'2.0.1'
    assert actual.dependencies is None
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirment_from_name_with_prerelease_explicit(galaxy_server, monkeypatch):
    # NOTE: 'requirment' typo is kept; renaming would change the test id.
    """An explicitly requested pre-release version is resolved directly by metadata."""
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.1-beta.1', True,
                                                        True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'2.0.1-beta.1'])
    assert actual.latest_version == u'2.0.1-beta.1'
    assert actual.dependencies == {}
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch):
    """A 404 from the first server falls through to the next server in the list."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    broken_server = copy.copy(galaxy_server)
    broken_server.api_server = 'https://broken.com/'
    mock_404 = MagicMock()
    mock_404.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
                                                                  StringIO()), "custom msg")
    monkeypatch.setattr(broken_server, 'get_collection_versions', mock_404)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [broken_server, galaxy_server],
                                                        '>1.0.1', False, True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    # assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'1.0.2', u'1.0.3'])
    assert actual.latest_version == u'1.0.3'
    assert actual.dependencies is None
    assert mock_404.call_count == 1
    assert mock_404.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch):
    """404s from every server raise a 'failed to find' AnsibleError."""
    mock_open = MagicMock()
    mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
                                                                   StringIO()), "")
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
    expected = "Failed to find collection namespace.collection:*"
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False,
                                                   True)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch):
    """A 401 is not swallowed like a 404; the GalaxyError propagates."""
    mock_open = MagicMock()
    mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
                                                                   StringIO()), "error")
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
    expected = "error (HTTP Code: 401, Message: msg)"
    with pytest.raises(api.GalaxyError, match=re.escape(expected)):
        collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch):
    """An exact version requirement is resolved via version metadata only."""
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.0', True,
                                                        True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'2.0.0'])
    assert actual.latest_version == u'2.0.0'
    assert actual.dependencies == {}
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch):
    """A range that narrows to one version fetches that version's metadata."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '>=2.0.1,<2.0.2',
                                                        True, True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'2.0.1'])
    assert actual.latest_version == u'2.0.1'
    assert actual.dependencies == {}
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch):
    """A '!=' requirement keeps every version except the excluded one."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '!=2.0.2',
                                                        True, True)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.b_path is None
    assert actual.api == galaxy_server
    assert actual.skip is False
    assert actual.versions == set([u'2.0.0', u'2.0.1', u'2.0.3', u'2.0.4', u'2.0.5'])
    assert actual.latest_version == u'2.0.5'
    assert actual.dependencies is None
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
@pytest.mark.parametrize('versions, requirement, expected_filter, expected_latest', [
    [['1.0.0', '1.0.1'], '*', ['1.0.0', '1.0.1'], '1.0.1'],
    [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<1.1.0', ['1.0.5'], '1.0.5'],
    [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<=1.0.5', ['1.0.5'], '1.0.5'],
    [['1.0.0', '1.0.5', '1.1.0'], '>=1.1.0', ['1.1.0'], '1.1.0'],
    [['1.0.0', '1.0.5', '1.1.0'], '!=1.1.0', ['1.0.0', '1.0.5'], '1.0.5'],
    [['1.0.0', '1.0.5', '1.1.0'], '==1.0.5', ['1.0.5'], '1.0.5'],
    [['1.0.0', '1.0.5', '1.1.0'], '1.0.5', ['1.0.5'], '1.0.5'],
    [['1.0.0', '2.0.0', '3.0.0'], '>=2', ['2.0.0', '3.0.0'], '3.0.0'],
])
def test_add_collection_requirements(versions, requirement, expected_filter, expected_latest):
    """Version specifiers filter the candidate set and determine latest_version."""
    req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', versions, requirement,
                                           False)
    assert req.versions == set(expected_filter)
    assert req.latest_version == expected_latest
def test_add_collection_requirement_to_unknown_installed_version(monkeypatch):
    """Adding a concrete requirement to a '*'-versioned install only warns."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
                                           skip=True)
    req.add_requirement('parent.collection', '1.0.0')
    assert req.latest_version == '*'
    assert mock_display.call_count == 1
    actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
    assert "Failed to validate the collection requirement 'namespace.name:1.0.0' for parent.collection" in actual_warn
def test_add_collection_wildcard_requirement_to_unknown_installed_version():
    """A wildcard requirement against a '*'-versioned install is accepted silently."""
    req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
                                           skip=True)
    req.add_requirement(str(req), '*')
    assert req.versions == set('*')
    assert req.latest_version == '*'
def test_add_collection_requirement_with_conflict(galaxy_server):
    """An unsatisfiable base requirement raises at construction time."""
    expected = "Cannot meet requirement ==1.0.2 for dependency namespace.name from source '%s'. Available versions " \
               "before last requirement added: 1.0.0, 1.0.1\n" \
               "Requirements from:\n" \
               "\tbase - 'namespace.name:==1.0.2'" % galaxy_server.api_server
    with pytest.raises(AnsibleError, match=expected):
        collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '==1.0.2',
                                         False)
def test_add_requirement_to_existing_collection_with_conflict(galaxy_server):
    """A later dependency that conflicts with existing requirements raises."""
    req = collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '*', False)
    expected = "Cannot meet dependency requirement 'namespace.name:1.0.2' for collection namespace.collection2 from " \
               "source '%s'. Available versions before last requirement added: 1.0.0, 1.0.1\n" \
               "Requirements from:\n" \
               "\tbase - 'namespace.name:*'\n" \
               "\tnamespace.collection2 - 'namespace.name:1.0.2'" % galaxy_server.api_server
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        req.add_requirement('namespace.collection2', '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict():
    """Conflicts with an installed collection suggest --force."""
    source = 'https://galaxy.ansible.com'
    req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
                                           skip=True)
    expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
               "Use --force to overwrite"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        req.add_requirement(None, '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict_as_dep():
    """Dependency-driven conflicts with an installed collection suggest --force-with-deps."""
    source = 'https://galaxy.ansible.com'
    req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
                                           skip=True)
    expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
               "Use --force-with-deps to overwrite"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        req.add_requirement('namespace.collection2', '1.0.2')
def test_install_skipped_collection(monkeypatch):
    """Installing a skip-marked requirement only prints the skip message."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    req = collection.CollectionRequirement('namespace', 'name', None, 'source', ['1.0.0'], '*', False, skip=True)
    req.install(None, None)
    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == "Skipping 'namespace.name' as it is already installed"
def test_install_collection(collection_artifact, monkeypatch):
    """Installing from a local tar extracts the files and cleans the temp dir."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    collection_tar = collection_artifact[1]
    output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
    collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
    os.makedirs(os.path.join(collection_path, b'delete_me'))  # Create a folder to verify the install cleans out the dir
    temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
    os.makedirs(temp_path)
    req = collection.CollectionRequirement.from_tar(collection_tar, True, True)
    req.install(to_text(output_path), temp_path)
    # Ensure the temp directory is empty, nothing is left behind
    assert os.listdir(temp_path) == []
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles']
    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
        % to_text(collection_path)
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
    """Installing from server metadata downloads the tar (hash-verified) then extracts it."""
    collection_tar = collection_artifact[1]
    output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
    collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    mock_download = MagicMock()
    mock_download.return_value = collection_tar
    monkeypatch.setattr(collection, '_download_file', mock_download)
    monkeypatch.setattr(galaxy_server, '_available_api_versions', {'v2': 'v2/'})
    temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
    os.makedirs(temp_path)
    meta = api.CollectionVersionMetadata('ansible_namespace', 'collection', '0.1.0', 'https://downloadme.com',
                                         'myhash', {})
    req = collection.CollectionRequirement('ansible_namespace', 'collection', None, galaxy_server,
                                           ['0.1.0'], '*', False, metadata=meta)
    req.install(to_text(output_path), temp_path)
    # Ensure the temp directory is empty, nothing is left behind
    assert os.listdir(temp_path) == []
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles']
    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
        % to_text(collection_path)
    # _download_file must be called with (url, temp dir, expected hash, validate_certs).
    assert mock_download.call_count == 1
    assert mock_download.mock_calls[0][1][0] == 'https://downloadme.com'
    assert mock_download.mock_calls[0][1][1] == temp_path
    assert mock_download.mock_calls[0][1][2] == 'myhash'
    assert mock_download.mock_calls[0][1][3] is True
def test_install_collections_from_tar(collection_artifact, monkeypatch):
    """install_collections() extracts a local tar and reports progress messages."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    shutil.rmtree(collection_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
                                   [u'https://galaxy.ansible.com'], True, False, False, False, False)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles']
    with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
        actual_manifest = json.loads(to_text(manifest_obj.read()))
    assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
    assert actual_manifest['collection_info']['name'] == 'collection'
    assert actual_manifest['collection_info']['version'] == '0.1.0'
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 3
    assert display_msgs[0] == "Process install dependency map"
    assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
    """An already-present collection is skipped when --force is not given."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    # If we don't delete collection_path it will think the original build skeleton is installed so we expect a skip
    collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
                                   [u'https://galaxy.ansible.com'], True, False, False, False, False)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles']
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 4
    # Msg1 is the warning about not MANIFEST.json, cannot really check message as it has line breaks which varies based
    # on the path size
    assert display_msgs[1] == "Process install dependency map"
    assert display_msgs[2] == "Starting collection install process"
    assert display_msgs[3] == "Skipping 'ansible_namespace.collection' as it is already installed"
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
    {'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
    """A collection that depends on itself still installs exactly once."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    shutil.rmtree(collection_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    collection.install_collections([(to_text(collection_tar), '*', None,)], to_text(temp_path),
                                   [u'https://galaxy.ansible.com'], True, False, False, False, False)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles']
    with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
        actual_manifest = json.loads(to_text(manifest_obj.read()))
    assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
    assert actual_manifest['collection_info']['name'] == 'collection'
    assert actual_manifest['collection_info']['version'] == '0.1.0'
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 3
    assert display_msgs[0] == "Process install dependency map"
    assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
| simonwydooghe/ansible | test/units/galaxy/test_collection_install.py | Python | gpl-3.0 | 33,210 | [
"Galaxy"
] | 0b609391b2cd8379b4cd28b27fde0cfbfa785fb60883d01b7606b3c9093aab14 |
# Regression test wiring for the Biot multi-physics example: point the generic
# input-file test machinery at the example problem and its expected output.
input_name = '../examples/multi_physics/biot.py'
output_name = 'test_biot.vtk'
from tests_basic import TestInput
# All test behaviour is inherited from TestInput; only the module-level
# input_name/output_name above configure it.
class Test( TestInput ):
    pass
| RexFuzzle/sfepy | tests/test_input_biot.py | Python | bsd-3-clause | 149 | [
"VTK"
] | 15b60da70ce14b22f6154061f498967ffb0fe46539ed7af56fae6ca9917114fd |
# License: BSD 3 clause
import numpy as np
from scipy.linalg.special_matrices import toeplitz
def features_normal_cov_uniform(n_samples: int = 200, n_features: int = 30,
                                dtype="float64"):
    """Normal features generator with uniform covariance

    An example of features obtained as samples of a centered Gaussian
    vector with a specific covariance matrix given by 0.5 * (U + U.T),
    where U is uniform on [0, 1] and diagonal filled by ones.

    Parameters
    ----------
    n_samples : `int`, default=200
        Number of samples

    n_features : `int`, default=30
        Number of features

    dtype : `{'float64', 'float32'}`, default='float64'
        Type of the arrays used.

    Returns
    -------
    output : `numpy.ndarray`, shape=(n_samples, n_features)
        n_samples realization of a Gaussian vector with the described
        covariance
    """
    # Bug fix: np.random.uniform() takes no `dtype` keyword, so passing it
    # through raised TypeError unconditionally.  Sample in float64 and cast
    # the final result below, mirroring features_normal_cov_toeplitz.
    C = np.random.uniform(size=(n_features, n_features))
    np.fill_diagonal(C, 1.0)
    cov = 0.5 * (C + C.T)
    features = np.random.multivariate_normal(
        np.zeros(n_features), cov, size=n_samples)
    if dtype != "float64":
        return features.astype(dtype)
    return features
def features_normal_cov_toeplitz(n_samples: int = 200, n_features: int = 30,
                                 cov_corr: float = 0.5, dtype="float64"):
    """Normal features generator with toeplitz covariance

    An example of features obtained as samples of a centered Gaussian
    vector with a toeplitz covariance matrix

    Parameters
    ----------
    n_samples : `int`, default=200
        Number of samples

    n_features : `int`, default=30
        Number of features

    cov_corr : `float`, default=0.5
        correlation coefficient of the Toeplitz correlation matrix

    dtype : `{'float64', 'float32'}`, default='float64'
        Type of the arrays used.

    Returns
    -------
    output : `numpy.ndarray`, shape=(n_samples, n_features)
        n_samples realization of a Gaussian vector with the described
        covariance
    """
    # Covariance entries decay geometrically with the lag between features.
    correlations = cov_corr ** np.arange(0, n_features)
    cov = toeplitz(correlations)
    mean = np.zeros(n_features)
    features = np.random.multivariate_normal(mean, cov, size=n_samples)
    if dtype == "float64":
        return features
    return features.astype(dtype)
| Dekken/tick | tick/simulation/features.py | Python | bsd-3-clause | 2,316 | [
"Gaussian"
] | d30c7b09baa16460aab7e51bc3ac645ec4eb1bf6bf90e533e5ddccce555c32a1 |
'''
*** SHED SKIN Python-to-C++ Compiler ***
Copyright 2005-2013 Mark Dufour; License GNU GPL version 3 (See LICENSE)
graph.py: build constraint graph used in dataflow analysis
constraint graph: graph along which possible types 'flow' during an 'abstract execution' of a program (a dataflow analysis). consider the assignment statement 'a = b'. it follows that the set of possible types of b is smaller than or equal to that of a (a constraint). we can determine possible types of a, by 'flowing' the types from b to a, in other words, along the constraint.
constraint graph nodes are stored in gx.cnode, and the set of types for each node in gx.types. nodes are identified by an AST Node, and two integers. the integers are used to duplicate parts of the constraint graph along two dimensions. in the initial constraint graph, these integers are always 0.
class ModuleVisitor: inherits visitor pattern from compiler.visitor.ASTVisitor, to recursively generate constraints for each syntactical Python construct. for example, the visitFor method is called in case of a for-loop. temporary variables are introduced in many places, to enable translation to a lower-level language.
parse_module(): locate module by name (e.g. 'os.path'), and use ModuleVisitor if not cached
'''
import copy
import os
import re
import sys
from compiler.ast import Const, AssTuple, AssList, From, Add, ListCompFor, \
UnaryAdd, Import, Bitand, Stmt, Assign, FloorDiv, Not, Mod, AssAttr, \
Keyword, GenExpr, LeftShift, AssName, Div, Or, Lambda, And, CallFunc, \
Global, Slice, RightShift, Sub, Getattr, Dict, Ellipsis, Mul, \
Subscript, Function as FunctionNode, Return, Power, Bitxor, Class as ClassNode, Name, List, \
Discard, Sliceobj, Tuple, Pass, UnarySub, Bitor, ListComp, TryExcept, With
from compiler.visitor import ASTVisitor
from error import error
from infer import inode, in_out, CNode, default_var, register_temp_var
from python import StaticClass, lookup_func, Function, is_zip2, \
lookup_class, is_method, is_literal, is_enum, lookup_var, assign_rec, \
Class, is_property_setter, is_fastfor, aug_msg, is_isinstance, \
Module, def_class, parse_file, find_module
# --- global variable mv
# The currently active ModuleVisitor.  It is swapped via setmv() as analysis
# recurses into imported modules, and read back through getmv().
_mv = None


def setmv(mv):
    """Install *mv* as the active module visitor and return it."""
    global _mv
    _mv = mv
    return _mv


def getmv():
    """Return the currently active module visitor (set by setmv)."""
    return _mv
# Marker subclasses of Getattr: synthesized attribute accesses are tagged with
# one of these so later visitor passes can tell them apart from user-written
# Getattr nodes while reusing all of Getattr's behaviour.
class FakeGetattr3(Getattr):
    pass


class FakeGetattr2(Getattr):
    pass


class FakeGetattr(Getattr):
    pass  # XXX ugly
def check_redef(gx, node, s=None, onlybuiltins=False):  # XXX to modvisitor, rewrite
    """Abort compilation if *node* would redefine an existing function/class.

    *s*, when given, overrides the name to check (otherwise ``node.name``).
    *onlybuiltins* restricts the check to names from builtin modules.
    Redefinitions inside a builtin module itself are not checked.
    """
    if not getmv().module.builtin:
        existing = [getmv().ext_classes, getmv().ext_funcs]
        if not onlybuiltins:
            existing += [getmv().classes, getmv().funcs]
        for whatsit in existing:
            if s is not None:
                name = s
            else:
                name = node.name
            if name in whatsit:
                error("function/class redefinition is not supported", gx, node, mv=getmv())
# --- maintain inheritance relations between copied AST nodes
def inherit_rec(gx, original, copy, mv):
gx.inheritance_relations.setdefault(original, []).append(copy)
gx.inherited.add(copy)
gx.parent_nodes[copy] = original
for (a, b) in zip(original.getChildNodes(), copy.getChildNodes()):
inherit_rec(gx, a, b, mv)
def register_node(node, func):
    """Remember *node* on its owning function's registered list, if any."""
    if not func:
        return
    func.registered.append(node)
def slice_nums(nodes):
    """Encode a slice's components for code generation.

    Returns [Const(mask)] + components, where absent components (None, or a
    Const None node) are replaced by Const(0) and the leading bitmask records
    which positions were actually present.
    """
    mask = 0
    encoded = []
    for position, component in enumerate(nodes):
        if component and not (isinstance(component, Const) and component.value is None):
            mask |= 1 << position
            encoded.append(component)
        else:
            encoded.append(Const(0))
    return [Const(mask)] + encoded
# --- module visitor; analyze program, build constraint graph
class ModuleVisitor(ASTVisitor):
    def __init__(self, module, gx):
        """Set up per-module state for constraint-graph construction.

        module -- the Module being analyzed; gx -- the global analysis state.
        """
        ASTVisitor.__init__(self)
        self.module = module
        self.gx = gx
        self.classes = {}            # name -> Class defined in this module
        self.funcs = {}              # name -> Function defined at module level
        self.globals = {}            # module-level variables
        self.exc_names = {}          # names bound by 'except .. as name'
        self.current_with_vars = []  # variables bound by enclosing 'with' blocks
        self.lambdas = {}            # generated name -> Function for lambdas
        self.imports = {}            # name -> imported Module
        self.fake_imports = {}       # modules pulled in for 'from x import ..' resolution only
        self.ext_classes = {}        # classes visible via 'from x import *' / builtins
        self.ext_funcs = {}          # functions visible via 'from x import *' / builtins
        self.lambdaname = {}         # Lambda AST node -> generated name
        self.lwrapper = {}
        self.tempcount = self.gx.tempcount  # shared temp-variable name table
        self.callfuncs = []          # all CallFunc nodes seen
        self.for_in_iters = []       # iterables of 'for' loops
        self.listcomps = []          # list comprehensions seen
        self.defaults = {}           # non-literal default expr -> (index, func, argpos)
        self.importnodes = []        # Import/From nodes allowed at file top
def dispatch(self, node, *args):
if (node, 0, 0) not in self.gx.cnode:
ASTVisitor.dispatch(self, node, *args)
    def fake_func(self, node, objexpr, attrname, args, func):
        """Model an operation as a synthetic method call objexpr.attrname(args).

        Creates (or reuses) the constraint-graph node for *node*, visits the
        fabricated CallFunc, and flows the call's result into *node*.
        Returns the fabricated CallFunc node.
        """
        if (node, 0, 0) in self.gx.cnode:  # XXX
            newnode = self.gx.cnode[node, 0, 0]
        else:
            newnode = CNode(self.gx, node, parent=func, mv=getmv())
            self.gx.types[newnode] = set()
        fakefunc = CallFunc(Getattr(objexpr, attrname), args)
        fakefunc.lineno = objexpr.lineno  # keep error messages pointing at the real code
        self.visit(fakefunc, func)
        self.add_constraint((inode(self.gx, fakefunc), newnode), func)
        inode(self.gx, objexpr).fakefunc = fakefunc
        return fakefunc
# simple heuristic for initial list split: count nesting depth, first constant child type
def list_type(self, node):
count = 0
child = node
while isinstance(child, (List, ListComp)):
if not child.getChildNodes():
return None
child = child.getChildNodes()[0]
count += 1
if isinstance(child, (UnarySub, UnaryAdd)):
child = child.expr
if isinstance(child, CallFunc) and isinstance(child.node, Name):
map = {'int': int, 'str': str, 'float': float}
if child.node.name in ('range'): # ,'xrange'):
count, child = count + 1, int
elif child.node.name in map:
child = map[child.node.name]
elif child.node.name in (cl.ident for cl in self.gx.allclasses) or child.node.name in getmv().classes: # XXX getmv().classes
child = child.node.name
else:
if count == 1:
return None
child = None
elif isinstance(child, Const):
child = type(child.value)
elif isinstance(child, Name) and child.name in ('True', 'False'):
child = bool
elif isinstance(child, Tuple):
child = tuple
elif isinstance(child, Dict):
child = dict
else:
if count == 1:
return None
child = None
self.gx.list_types.setdefault((count, child), len(self.gx.list_types) + 2)
# print 'listtype', node, self.gx.list_types[count, child]
return self.gx.list_types[count, child]
    def instance(self, node, cl, func=None):
        """Type the constraint node for *node* as a fresh instance of class *cl*.

        Primitive classes get their fixed dcpa-1 contour; lists additionally
        use the list_type() heuristic to pick a split contour.
        """
        if (node, 0, 0) in self.gx.cnode:  # XXX to create_node() func
            newnode = self.gx.cnode[node, 0, 0]
        else:
            newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.constructor = True
        if cl.ident in ['int_', 'float_', 'str_', 'none', 'class_', 'bool_']:
            self.gx.types[newnode] = set([(cl, cl.dcpa - 1)])
        else:
            if cl.ident == 'list' and self.list_type(node):
                self.gx.types[newnode] = set([(cl, self.list_type(node))])
            else:
                self.gx.types[newnode] = set([(cl, cl.dcpa)])
    def constructor(self, node, classname, func):
        """Model a container literal as a constructor call of *classname*.

        Types *node* as an instance and flows each element into the
        container's content variables ('unit'; 'value' for dicts; 'first'/
        'second' additionally for two-element tuples).
        """
        cl = def_class(self.gx, classname)
        self.instance(node, cl, func)
        default_var(self.gx, 'unit', cl)
        if classname in ['list', 'tuple'] and not node.nodes:
            self.gx.empty_constructors.add(node)  # ifa disables those that flow to instance variable assignments
        # --- internally flow binary tuples
        if cl.ident == 'tuple2':
            default_var(self.gx, 'first', cl)
            default_var(self.gx, 'second', cl)
            elem0, elem1 = node.nodes
            self.visit(elem0, func)
            self.visit(elem1, func)
            self.add_dynamic_constraint(node, elem0, 'unit', func)
            self.add_dynamic_constraint(node, elem1, 'unit', func)
            self.add_dynamic_constraint(node, elem0, 'first', func)
            self.add_dynamic_constraint(node, elem1, 'second', func)
            return
        # --- add dynamic children constraints for other types
        if classname == 'dict':  # XXX filter children
            default_var(self.gx, 'unit', cl)
            default_var(self.gx, 'value', cl)
            for child in node.getChildNodes():
                self.visit(child, func)
            for (key, value) in node.items:  # XXX filter
                self.add_dynamic_constraint(node, key, 'unit', func)
                self.add_dynamic_constraint(node, value, 'value', func)
        else:
            for child in node.nodes:
                self.visit(child, func)
            # only one representative child per distinct subtype is needed
            for child in self.filter_redundant_children(node):
                self.add_dynamic_constraint(node, child, 'unit', func)
# --- for compound list/tuple/dict constructors, we only consider a single child node for each subtype
def filter_redundant_children(self, node):
done = set()
nonred = []
for child in node.nodes:
type = self.child_type_rec(child)
if not type or not type in done:
done.add(type)
nonred.append(child)
return nonred
    # --- determine single constructor child node type, used by the above
    def child_type_rec(self, node):
        """Recursively derive a hashable subtype key for a constructor child.

        Returns (container-class, inner-key) for homogeneous list/tuple
        literals, (constant-class,) for constants, and None (implicitly) when
        the children are heterogeneous or the node form is unrecognized.
        """
        if isinstance(node, (UnarySub, UnaryAdd)):
            node = node.expr
        if isinstance(node, (List, Tuple)):
            if isinstance(node, List):
                cl = def_class(self.gx, 'list')
            elif len(node.nodes) == 2:
                cl = def_class(self.gx, 'tuple2')
            else:
                cl = def_class(self.gx, 'tuple')
            merged = set()
            for child in node.nodes:
                merged.add(self.child_type_rec(child))
            if len(merged) == 1:  # homogeneous only
                return (cl, merged.pop())
        elif isinstance(node, Const):
            return (list(inode(self.gx, node).types())[0][0],)
    # --- add dynamic constraint for constructor argument, e.g. '[expr]' becomes [].__setattr__('unit', expr)
    def add_dynamic_constraint(self, parent, child, varname, func):
        """Flow *child* into content variable *varname* of container *parent*.

        Modeled as a fake parent.__setattr__(varname, child) call, with a
        per-child fake CNode so each child gets its own triggerable callfunc.
        """
        # print 'dynamic constr', child, parent
        self.gx.assign_target[child] = parent
        cu = Const(varname)
        self.visit(cu, func)
        fakefunc = CallFunc(FakeGetattr2(parent, '__setattr__'), [cu, child])
        self.visit(fakefunc, func)
        fakechildnode = CNode(self.gx, (child, varname), parent=func, mv=getmv())  # create separate 'fake' CNode per child, so we can have multiple 'callfuncs'
        self.gx.types[fakechildnode] = set()
        self.add_constraint((inode(self.gx, parent), fakechildnode), func)  # add constraint from parent to fake child node. if parent changes, all fake child nodes change, and the callfunc for each child node is triggered
        fakechildnode.callfuncs.append(fakefunc)
# --- add regular constraint to function
def add_constraint(self, constraint, func):
in_out(constraint[0], constraint[1])
self.gx.constraints.add(constraint)
while isinstance(func, Function) and func.listcomp:
func = func.parent # XXX
if isinstance(func, Function):
func.constraints.add(constraint)
def struct_unpack(self, rvalue, func):
if isinstance(rvalue, CallFunc):
if isinstance(rvalue.node, Getattr) and isinstance(rvalue.node.expr, Name) and rvalue.node.expr.name == 'struct' and rvalue.node.attrname == 'unpack' and lookup_var('struct', func, mv=self).imported: # XXX imported from where?
return True
elif isinstance(rvalue.node, Name) and rvalue.node.name == 'unpack' and 'unpack' in self.ext_funcs and not lookup_var('unpack', func, mv=self): # XXX imported from where?
return True
    def struct_info(self, node, func):
        """Parse a struct format string into (ordering, char, kind, count) tuples.

        *node* must be a string constant or a name with exactly one constant
        assignment; otherwise a 'non-constant format string' error is raised.
        kind is one of 'int', 'str', 'bool', 'float', 'pad'; count is the
        repeat count (strings keep their full width, others are expanded).
        """
        if isinstance(node, Name):
            var = lookup_var(node.name, func, mv=self)  # XXX fwd ref?
            if not var or len(var.const_assign) != 1:
                error('non-constant format string', self.gx, node, mv=self)
            error('assuming constant format string', self.gx, node, mv=self, warning=True)
            fmt = var.const_assign[0].value
        elif isinstance(node, Const):
            fmt = node.value
        else:
            error('non-constant format string', self.gx, node, mv=self)
        # two-char strings unpack to {format char: kind char}, e.g. 'bi' -> {'b': 'i'}
        char_type = dict(['xx', 'cs', 'bi', 'Bi', '?b', 'hi', 'Hi', 'ii', 'Ii', 'li', 'Li', 'qi', 'Qi', 'ff', 'df', 'ss', 'ps'])
        ordering = '@'
        if fmt and fmt[0] in '@<>!=':  # optional byte-order prefix
            ordering, fmt = fmt[0], fmt[1:]
        result = []
        digits = ''
        for i, c in enumerate(fmt):
            if c.isdigit():
                digits += c  # accumulate repeat count
            elif c in char_type:
                rtype = {'i': 'int', 's': 'str', 'b': 'bool', 'f': 'float', 'x': 'pad'}[char_type[c]]
                if rtype == 'str' and c != 'c':
                    # 's'/'p': the count is the string width, not a repeat
                    result.append((ordering, c, 'str', int(digits or '1')))
                elif digits == '0':
                    result.append((ordering, c, rtype, 0))
                else:
                    result.extend(int(digits or '1') * [(ordering, c, rtype, 1)])
                digits = ''
            else:
                error('bad or unsupported char in struct format: ' + repr(c), self.gx, node, mv=self)
                digits = ''
        return result
def struct_faketuple(self, info):
result = []
for o, c, t, d in info:
if d != 0 or c == 's':
if t == 'int':
result.append(Const(1))
elif t == 'str':
result.append(Const(''))
elif t == 'float':
result.append(Const(1.0))
elif t == 'bool':
result.append(Name('True'))
return Tuple(result)
    def visitExec(self, node, func=None):
        """'exec' statements are rejected outright."""
        error("'exec' is not supported", self.gx, node, mv=getmv())
    def visitGenExpr(self, node, func=None):
        """Model a generator expression as an equivalent list comprehension.

        A ListComp clone is built from the genexp's qualifiers, remembered in
        gx.genexp_to_lc, and its result flows into the genexp's node.
        """
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        lc = ListComp(node.code.expr, [ListCompFor(qual.assign, qual.iter, qual.ifs, qual.lineno) for qual in node.code.quals], lineno=node.lineno)
        register_node(lc, func)
        self.gx.genexp_to_lc[node] = lc
        self.visit(lc, func)
        self.add_constraint((inode(self.gx, lc), newnode), func)
    def visitStmt(self, node, func=None):
        """Visit a statement list, attaching preceding string literals as comments.

        Bare string-constant expression statements are collected and attached
        (in gx.comments) to the next non-comment statement.
        """
        comments = []
        for b in node.nodes:
            if isinstance(b, Discard):
                self.bool_test_add(b.expr)
            if isinstance(b, Discard) and isinstance(b.expr, Const) and type(b.expr.value) == str:
                comments.append(b.expr.value)
            elif comments:
                self.gx.comments[b] = comments
                comments = []
            self.visit(b, func)
    def visitModule(self, node):
        """Analyze a whole module: bootstrap builtins, visit children, expand inheritance."""
        # --- bootstrap built-in classes
        if self.module.ident == 'builtin':
            for dummy in self.gx.builtins:
                self.visit(ClassNode(dummy, [], None, Pass()))
        if self.module.ident != 'builtin':
            # implicit 'from builtin import *'
            n = From('builtin', [('*', None)], None)  # Python2.5+
            getmv().importnodes.append(n)
            self.visit(n)
        # --- __name__
        if self.module.ident != 'builtin':
            namevar = default_var(self.gx, '__name__', None, mv=getmv())
            self.gx.types[inode(self.gx, namevar)] = set([(def_class(self.gx, 'str_'), 0)])
        self.forward_references(node)
        # --- visit children
        for child in node.getChildNodes():
            if isinstance(child, Stmt):
                getmv().importnodes.extend(n for n in child.nodes if isinstance(n, (Import, From)))
            self.visit(child, None)
        # --- register classes
        for cl in getmv().classes.values():
            self.gx.allclasses.add(cl)
        # --- inheritance expansion
        # determine base classes
        for cl in self.classes.values():
            for base in cl.node.bases:
                if not (isinstance(base, Name) and base.name == 'object'):
                    ancestor = lookup_class(base, getmv())
                    cl.bases.append(ancestor)
                    ancestor.children.append(cl)
        # for each base class, duplicate methods
        for cl in self.classes.values():
            for ancestor in cl.ancestors_upto(None)[1:]:
                cl.staticmethods.extend(ancestor.staticmethods)
                cl.properties.update(ancestor.properties)
                for func in ancestor.funcs.values():
                    if not func.node or func.inherited:
                        continue
                    ident = func.ident
                    if ident in cl.funcs:  # name clash: mangle with ancestor name
                        ident += ancestor.ident + '__'
                    # deep-copy AST function nodes
                    func_copy = copy.deepcopy(func.node)
                    inherit_rec(self.gx, func.node, func_copy, func.mv)
                    # temporarily switch to the ancestor's module visitor while
                    # analyzing the duplicated method, then restore our own
                    tempmv, mv = getmv(), func.mv
                    setmv(mv)
                    self.visitFunction(func_copy, cl, inherited_from=ancestor)
                    mv = tempmv
                    setmv(mv)
                    # maintain relation with original
                    self.gx.inheritance_relations.setdefault(func, []).append(cl.funcs[ident])
                    cl.funcs[ident].inherited = func.node
                    cl.funcs[ident].inherited_from = func
                    func_copy.name = ident
                    if ident == func.ident:
                        # also expose the mangled alias
                        cl.funcs[ident + ancestor.ident + '__'] = cl.funcs[ident]
def stmt_nodes(self, node, cl):
result = []
for child in node.getChildNodes():
if isinstance(child, Stmt):
for n in child.nodes:
if isinstance(n, cl):
result.append(n)
return result
    def forward_references(self, node):
        """Pre-declare classes, methods, functions and globals before visiting bodies.

        This makes forward references resolvable during the main traversal.
        """
        getmv().classnodes = []
        # classes
        for n in self.stmt_nodes(node, ClassNode):
            check_redef(self.gx, n)
            getmv().classnodes.append(n)
            newclass = Class(self.gx, n, getmv())
            self.classes[n.name] = newclass
            getmv().classes[n.name] = newclass
            newclass.module = self.module
            newclass.parent = StaticClass(newclass, getmv())
            # methods
            for m in self.stmt_nodes(n, FunctionNode):
                # property setters get a mangled name so getter/setter can coexist
                if hasattr(m, 'decorators') and m.decorators and [dec for dec in m.decorators if is_property_setter(dec)]:
                    m.name = m.name + '__setter__'
                if m.name in newclass.funcs:  # and func.ident not in ['__getattr__', '__setattr__']: # XXX
                    error("function/class redefinition is not allowed", self.gx, m, mv=getmv())
                func = Function(self.gx, m, newclass, mv=getmv())
                newclass.funcs[func.ident] = func
                self.set_default_vars(m, func)
        # functions
        getmv().funcnodes = []
        for n in self.stmt_nodes(node, FunctionNode):
            check_redef(self.gx, n)
            getmv().funcnodes.append(n)
            func = getmv().funcs[n.name] = Function(self.gx, n, mv=getmv())
            self.set_default_vars(n, func)
        # global variables XXX visitGlobal
        for assname in self.local_assignments(node, global_=True):
            default_var(self.gx, assname.name, None, mv=getmv())
def set_default_vars(self, node, func):
globals = set(self.get_globals(node))
for assname in self.local_assignments(node):
if assname.name not in globals:
default_var(self.gx, assname.name, func)
def get_globals(self, node):
if isinstance(node, Global):
result = node.names
else:
result = []
for child in node.getChildNodes():
result.extend(self.get_globals(child))
return result
    def local_assignments(self, node, global_=False):
        """Collect AssName targets assigned in this scope.

        With global_=True, nested class/function bodies are not descended into
        (module-level collection). List comprehensions and genexps are always
        skipped -- their targets belong to their own scope.
        """
        if global_ and isinstance(node, (ClassNode, FunctionNode)):
            return []
        elif isinstance(node, (ListComp, GenExpr)):
            return []
        elif isinstance(node, AssName):
            result = [node]
        else:
            # Try-Excepts introduce a new small scope with the exception name,
            # so we skip it here.
            if isinstance(node, TryExcept):
                children = list(node.body.getChildNodes())
                for handler in node.handlers:
                    children.extend(handler[2].getChildNodes())  # handler body only, not the 'as name'
                if node.else_:
                    children.extend(node.else_.getChildNodes())
            elif isinstance(node, With):
                children = node.body.getChildNodes()
            else:
                children = node.getChildNodes()
            result = []
            for child in children:
                result.extend(self.local_assignments(child, global_))
        return result
    def visitImport(self, node, func=None):
        """Handle 'import a[.b] [as c]'; imports must appear at file top."""
        if not node in getmv().importnodes:
            error("please place all imports (no 'try:' etc) at the top of the file", self.gx, node, mv=getmv())
        for (name, pseudonym) in node.names:
            if pseudonym:
                # --- import a.b as c: don't import a
                self.import_module(name, pseudonym, node, False)
            else:
                self.import_modules(name, node, False)
    def import_modules(self, name, node, fake):
        """Import a dotted module path piece by piece, returning the last module."""
        # --- import a.b.c: import a, then a.b, then a.b.c
        split = name.split('.')
        module = getmv().module
        for i in range(len(split)):
            subname = '.'.join(split[:i + 1])
            parent = module
            module = self.import_module(subname, subname, node, fake)
            if module.ident not in parent.mv.imports:  # XXX
                if not fake:
                    parent.mv.imports[module.ident] = module
        return module
    def import_module(self, name, pseudonym, node, fake):
        """Analyze module *name*; unless fake, bind it to variable *pseudonym*."""
        module = self.analyze_module(name, pseudonym, node, fake)
        if not fake:
            var = default_var(self.gx, pseudonym or name, None, mv=getmv())
            var.imported = True
            self.gx.types[inode(self.gx, var)] = set([(module, 0)])
        return module
    def visitFrom(self, node, parent=None):
        """Handle 'from x import ..' (including '*'); rejects relative imports.

        Imported names are registered as external funcs/classes, as imported
        variables, or trigger a submodule import, depending on what *name*
        resolves to in the source module.
        """
        if not node in getmv().importnodes:  # XXX use (func, node) as parent..
            error("please place all imports (no 'try:' etc) at the top of the file", self.gx, node, mv=getmv())
        if hasattr(node, 'level') and node.level:
            error("relative imports are not supported", self.gx, node, mv=getmv())
        if node.modname == '__future__':
            for name, _ in node.names:
                if name not in ['with_statement', 'print_function']:
                    error("future '%s' is not yet supported" % name, self.gx, node, mv=getmv())
            return
        module = self.import_modules(node.modname, node, True)
        self.gx.from_module[node] = module
        for name, pseudonym in node.names:
            if name == '*':
                # pull in everything public from the module
                self.ext_funcs.update(module.mv.funcs)
                self.ext_classes.update(module.mv.classes)
                for import_name, import_module in module.mv.imports.items():
                    var = default_var(self.gx, import_name, None, mv=getmv())  # XXX merge
                    var.imported = True
                    self.gx.types[inode(self.gx, var)] = set([(import_module, 0)])
                    self.imports[import_name] = import_module
                for name, extvar in module.mv.globals.items():
                    if not extvar.imported and not name in ['__name__']:
                        var = default_var(self.gx, name, None, mv=getmv())  # XXX merge
                        var.imported = True
                        self.add_constraint((inode(self.gx, extvar), inode(self.gx, var)), None)
                continue
            path = module.path
            pseudonym = pseudonym or name
            if name in module.mv.funcs:
                self.ext_funcs[pseudonym] = module.mv.funcs[name]
            elif name in module.mv.classes:
                self.ext_classes[pseudonym] = module.mv.classes[name]
            elif name in module.mv.globals and not module.mv.globals[name].imported:  # XXX
                extvar = module.mv.globals[name]
                var = default_var(self.gx, pseudonym, None, mv=getmv())
                var.imported = True
                self.add_constraint((inode(self.gx, extvar), inode(self.gx, var)), None)
            elif os.path.isfile(os.path.join(path, name + '.py')) or \
                    os.path.isfile(os.path.join(path, name, '__init__.py')):
                # 'from package import submodule'
                modname = '.'.join(module.name_list + [name])
                self.import_module(modname, name, node, False)
            else:
                error("no identifier '%s' in module '%s'" % (name, node.modname), self.gx, node, mv=getmv())
def analyze_module(self, name, pseud, node, fake):
module = parse_module(name, self.gx, getmv().module, node)
if not fake:
self.imports[pseud] = module
else:
self.fake_imports[pseud] = module
return module
    def visitFunction(self, node, parent=None, is_lambda=False, inherited_from=None):
        """Analyze a function/method definition and build its constraint nodes.

        parent is the enclosing Class for methods; is_lambda marks synthetic
        lambda wrappers; inherited_from is set for duplicated inherited methods.
        """
        if not getmv().module.builtin and (node.varargs or node.kwargs):
            error('argument (un)packing is not supported', self.gx, node, mv=getmv())
        # reuse the Function pre-declared by forward_references when possible
        if not parent and not is_lambda and node.name in getmv().funcs:
            func = getmv().funcs[node.name]
        elif isinstance(parent, Class) and not inherited_from and node.name in parent.funcs:
            func = parent.funcs[node.name]
        else:
            func = Function(self.gx, node, parent, inherited_from, mv=getmv())
            if inherited_from:
                self.set_default_vars(node, func)
        if not is_method(func):
            if not getmv().module.builtin and not node in getmv().funcnodes and not is_lambda:
                error("non-global function '%s'" % node.name, self.gx, node, mv=getmv())
        # only staticmethod/property/property-setter decorators are supported
        if hasattr(node, 'decorators') and node.decorators:
            for dec in node.decorators.nodes:
                if isinstance(dec, Name) and dec.name == 'staticmethod':
                    parent.staticmethods.append(node.name)
                elif isinstance(dec, Name) and dec.name == 'property':
                    parent.properties[node.name] = [node.name, None]
                elif is_property_setter(dec):
                    parent.properties[dec.expr.name][1] = node.name
                else:
                    error("unsupported type of decorator", self.gx, dec, mv=getmv())
        if parent:
            if not inherited_from and not func.ident in parent.staticmethods and (not func.formals or func.formals[0] != 'self'):
                error("formal arguments of method must start with 'self'", self.gx, node, mv=getmv())
            if not func.mv.module.builtin and func.ident in ['__new__', '__getattr__', '__setattr__', '__radd__', '__rsub__', '__rmul__', '__rdiv__', '__rtruediv__', '__rfloordiv__', '__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__', '__rand__', '__rxor__', '__ror__', '__iter__', '__call__', '__enter__', '__exit__', '__del__', '__copy__', '__deepcopy__']:
                error("'%s' is not supported" % func.ident, self.gx, node, warning=True, mv=getmv())
        if is_lambda:
            self.lambdas[node.name] = func
        # --- add unpacking statement for tuple formals
        func.expand_args = {}
        for i, formal in enumerate(func.formals):
            if isinstance(formal, tuple):
                # replace tuple formal with a temp, unpacked by a fake Assign
                tmp = self.temp_var((node, i), func)
                func.formals[i] = tmp.name
                fake_unpack = Assign([self.unpack_rec(formal)], Name(tmp.name))
                func.expand_args[tmp.name] = fake_unpack
                self.visit(fake_unpack, func)
        func.defaults = node.defaults
        for formal in func.formals:
            var = default_var(self.gx, formal, func)
            var.formal_arg = True
        # --- flow return expressions together into single node
        func.retnode = retnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[retnode] = set()
        func.yieldnode = yieldnode = CNode(self.gx, (node, 'yield'), parent=func, mv=getmv())
        self.gx.types[yieldnode] = set()
        self.visit(node.code, func)
        for i, default in enumerate(func.defaults):
            if not is_literal(default):
                self.defaults[default] = (len(self.defaults), func, i)
            self.visit(default, None)  # defaults are global
        # --- add implicit 'return None' if no return expressions
        if not func.returnexpr:
            func.fakeret = Return(Name('None'))
            self.visit(func.fakeret, func)
        # --- register function
        if isinstance(parent, Class):
            if func.ident not in parent.staticmethods:  # XXX use flag
                default_var(self.gx, 'self', func)
            if func.ident == '__init__' and '__del__' in parent.funcs:  # XXX what if no __init__
                self.visit(CallFunc(Getattr(Name('self'), '__del__'), []), func)
                self.gx.gc_cleanup = True
            parent.funcs[func.ident] = func
def unpack_rec(self, formal):
if isinstance(formal, str):
return AssName(formal, 'OP_ASSIGN')
else:
return AssTuple([self.unpack_rec(elem) for elem in formal])
    def visitLambda(self, node, func=None):
        """Wrap a lambda in a synthetic named function '__lambdaN__' and type it."""
        lambdanr = len(self.lambdas)
        name = '__lambda%d__' % lambdanr
        fakenode = FunctionNode(None, name, node.argnames, node.defaults, node.flags, None, Return(node.code))
        self.visit(fakenode, None, True)
        f = self.lambdas[name]
        f.lambdanr = lambdanr
        self.lambdaname[node] = name
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set([(f, 0)])
        newnode.copymetoo = True
    def visitAnd(self, node, func=None):
        # 'and'/'or' are handled identically; result flows from every operand
        self.visit_and_or(node, func)

    def visitOr(self, node, func=None):
        self.visit_and_or(node, func)
    def visit_and_or(self, node, func):
        """Shared handler for and/or: each operand's type flows into the result."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        for child in node.getChildNodes():
            if node in self.gx.bool_test_only:
                self.bool_test_add(child)
            self.visit(child, func)
            self.add_constraint((inode(self.gx, child), newnode), func)
            self.temp_var2(child, newnode, func)  # temp to preserve evaluation result
    def visitIf(self, node, func=None):
        """Visit if/elif tests and bodies; isinstance() tests push a type filter."""
        for test, code in node.tests:
            if is_isinstance(test):
                self.gx.filterstack.append(test.args)
            self.bool_test_add(test)
            # model the condition as bool(test)
            faker = CallFunc(Name('bool'), [test])
            self.visit(faker, func)
            self.visit(code, func)
            if is_isinstance(test):
                self.gx.filterstack.pop()
        if node.else_:
            self.visit(node.else_, func)
    def visitIfExp(self, node, func=None):
        """Conditional expression: both branches flow into the result node."""
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        for child in node.getChildNodes():
            self.visit(child, func)
        self.add_constraint((inode(self.gx, node.then), newnode), func)
        self.add_constraint((inode(self.gx, node.else_), newnode), func)
    def visitGlobal(self, node, func=None):
        # record declared-global names on the enclosing function
        # NOTE(review): a module-level 'global' (func=None) would raise
        # AttributeError here -- confirm that callers never pass func=None
        func.globals += node.names
    def visitList(self, node, func=None):
        """List literal: modeled as a list constructor."""
        self.constructor(node, 'list', func)

    def visitDict(self, node, func=None):
        """Dict literal: modeled as a dict constructor."""
        self.constructor(node, 'dict', func)
        if node.items:  # XXX library bug: Dict nodes lack a lineno; borrow the first key's
            node.lineno = node.items[0][0].lineno
    def visitNot(self, node, func=None):
        """'not x': result is always bool; operand is marked as a truth test."""
        self.bool_test_add(node.expr)
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.copymetoo = True
        self.gx.types[newnode] = set([(def_class(self.gx, 'bool_'), 0)])  # XXX new type?
        self.visit(node.expr, func)
    def visitBackquote(self, node, func=None):
        """`x` (repr): modeled as x.__repr__()."""
        self.fake_func(node, node.expr, '__repr__', [], func)

    def visitTuple(self, node, func=None):
        """Tuple literal: two-element tuples get the dedicated 'tuple2' class."""
        if len(node.nodes) == 2:
            self.constructor(node, 'tuple2', func)
        else:
            self.constructor(node, 'tuple', func)
def visitSubscript(self, node, func=None): # XXX merge __setitem__, __getitem__
if len(node.subs) > 1:
subscript = Tuple(node.subs)
else:
subscript = node.subs[0]
if isinstance(subscript, Ellipsis): # XXX also check at setitem
error('ellipsis is not supported', self.gx, node, mv=getmv())
if isinstance(subscript, Sliceobj):
self.slice(node, node.expr, subscript.nodes, func)
else:
if node.flags == 'OP_DELETE':
self.fake_func(node, node.expr, '__delitem__', [subscript], func)
elif len(node.subs) > 1:
self.fake_func(node, node.expr, '__getitem__', [subscript], func)
else:
ident = '__getitem__'
self.fake_func(node, node.expr, ident, [subscript], func)
    def visitSlice(self, node, func=None):
        """x[a:b] (old-style Slice node): delegate to the shared slice handler."""
        self.slice(node, node.expr, [node.lower, node.upper, None], func)

    def slice(self, node, expr, nodes, func, replace=None):
        """Model slicing as fake __slice__/__setslice__/__delete__ calls.

        *nodes* are the (possibly None) slice components, encoded via
        slice_nums; *replace* is the assigned value for slice assignment.
        """
        nodes2 = slice_nums(nodes)
        if replace:
            self.fake_func(node, expr, '__setslice__', nodes2 + [replace], func)
        elif node.flags == 'OP_DELETE':
            self.fake_func(node, expr, '__delete__', nodes2, func)
        else:
            self.fake_func(node, expr, '__slice__', nodes2, func)
    def visitUnarySub(self, node, func=None):
        """-x: modeled as x.__neg__()."""
        self.fake_func(node, node.expr, '__neg__', [], func)

    def visitUnaryAdd(self, node, func=None):
        """+x: modeled as x.__pos__()."""
        self.fake_func(node, node.expr, '__pos__', [], func)
    def visitCompare(self, node, func=None):
        """Comparison chain: result is bool; each pair becomes a fake call.

        'in'/'not in' become right.__contains__(left); ordering comparisons
        use the global __lt/__gt/__le/__ge helpers; the rest use dunder calls
        on the left operand.
        """
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        newnode.copymetoo = True
        self.gx.types[newnode] = set([(def_class(self.gx, 'bool_'), 0)])  # XXX new type?
        self.visit(node.expr, func)
        msgs = {'<': 'lt', '>': 'gt', 'in': 'contains', 'not in': 'contains', '!=': 'ne', '==': 'eq', '<=': 'le', '>=': 'ge'}
        left = node.expr
        for op, right in node.ops:
            self.visit(right, func)
            msg = msgs.get(op)
            if msg == 'contains':
                self.fake_func(node, right, '__' + msg + '__', [left], func)
            elif msg in ('lt', 'gt', 'le', 'ge'):
                fakefunc = CallFunc(Name('__%s' % msg), [left, right])
                fakefunc.lineno = left.lineno
                self.visit(fakefunc, func)
            elif msg:
                self.fake_func(node, left, '__' + msg + '__', [right], func)
            left = right
        # tempvars, e.g. (t1=fun()) -- middle terms are evaluated once
        for term in node.ops[:-1]:
            if not isinstance(term[1], (Name, Const)):
                self.temp_var2(term[1], inode(self.gx, term[1]), func)
    # bitwise operators share the n-ary Bitpair handling below
    def visitBitand(self, node, func=None):
        self.visitBitpair(node, aug_msg(node, 'and'), func)

    def visitBitor(self, node, func=None):
        self.visitBitpair(node, aug_msg(node, 'or'), func)

    def visitBitxor(self, node, func=None):
        self.visitBitpair(node, aug_msg(node, 'xor'), func)
    def visitBitpair(self, node, msg, func=None):
        """Fold an n-ary bitwise expression into a chain of binary fake calls."""
        CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[inode(self.gx, node)] = set()
        left = node.nodes[0]
        for i, right in enumerate(node.nodes[1:]):
            # (left op right) becomes the new left for the next pair
            faker = self.fake_func((left, i), left, msg, [right], func)
            left = faker
        self.add_constraint((inode(self.gx, faker), inode(self.gx, node)), func)
    def visitAdd(self, node, func=None):
        """x + y: modeled as x.__add__/__iadd__(y) (aug_msg picks the variant)."""
        self.fake_func(node, node.left, aug_msg(node, 'add'), [node.right], func)

    def visitInvert(self, node, func=None):
        """~x: modeled as x.__invert__()."""
        self.fake_func(node, node.expr, '__invert__', [], func)

    def visitRightShift(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'rshift'), [node.right], func)

    def visitLeftShift(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'lshift'), [node.right], func)
    def visitAugAssign(self, node, func=None):  # a[b] += c -> a[b] = a[b]+c, using tempvars to handle sidefx
        """Rewrite augmented assignment as a plain assignment of a binary op.

        For subscript targets, the container and the index are first captured
        in temp variables so their side effects happen only once.
        """
        newnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[newnode] = set()
        clone = copy.deepcopy(node)
        lnode = node.node
        if isinstance(node.node, Name):
            blah = AssName(clone.node.name, 'OP_ASSIGN')
        elif isinstance(node.node, Getattr):
            blah = AssAttr(clone.node.expr, clone.node.attrname, 'OP_ASSIGN')
        elif isinstance(node.node, Subscript):
            # t1 = container
            t1 = self.temp_var(node.node.expr, func)
            a1 = Assign([AssName(t1.name, 'OP_ASSIGN')], node.node.expr)
            self.visit(a1, func)
            self.add_constraint((inode(self.gx, node.node.expr), inode(self.gx, t1)), func)
            if len(node.node.subs) > 1:
                subs = Tuple(node.node.subs)
            else:
                subs = node.node.subs[0]
            # t2 = index
            t2 = self.temp_var(subs, func)
            a2 = Assign([AssName(t2.name, 'OP_ASSIGN')], subs)
            # NOTE(review): a1 is visited a second time here -- a no-op thanks
            # to the dispatch() cnode guard; probably meant to be only a2
            self.visit(a1, func)
            self.visit(a2, func)
            self.add_constraint((inode(self.gx, subs), inode(self.gx, t2)), func)
            inode(self.gx, node).temp1 = t1.name
            inode(self.gx, node).temp2 = t2.name
            inode(self.gx, node).subs = subs
            blah = Subscript(Name(t1.name), 'OP_APPLY', [Name(t2.name)])
            lnode = Subscript(Name(t1.name), 'OP_APPLY', [Name(t2.name)])
        else:
            error('unsupported type of assignment', self.gx, node, mv=getmv())
        # map the augmented operator to its binary AST node
        if node.op == '-=':
            blah2 = Sub((lnode, node.expr))
        if node.op == '+=':
            blah2 = Add((lnode, node.expr))
        if node.op == '|=':
            blah2 = Bitor((lnode, node.expr))
        if node.op == '&=':
            blah2 = Bitand((lnode, node.expr))
        if node.op == '^=':
            blah2 = Bitxor((lnode, node.expr))
        if node.op == '**=':
            blah2 = Power((lnode, node.expr))
        if node.op == '<<=':
            blah2 = LeftShift((lnode, node.expr))
        if node.op == '>>=':
            blah2 = RightShift((lnode, node.expr))
        if node.op == '%=':
            blah2 = Mod((lnode, node.expr))
        if node.op == '*=':
            blah2 = Mul((lnode, node.expr))
        if node.op == '/=':
            blah2 = Div((lnode, node.expr))
        if node.op == '//=':
            blah2 = FloorDiv((lnode, node.expr))
        blah2.augment = True  # mark so codegen emits the in-place form
        assign = Assign([blah], blah2)
        register_node(assign, func)
        inode(self.gx, node).assignhop = assign
        self.visit(assign, func)
    # remaining binary arithmetic: each is a fake dunder call on the left operand
    def visitSub(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'sub'), [node.right], func)

    def visitMul(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'mul'), [node.right], func)

    def visitDiv(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'div'), [node.right], func)

    def visitFloorDiv(self, node, func=None):
        self.fake_func(node, node.left, aug_msg(node, 'floordiv'), [node.right], func)

    def visitPower(self, node, func=None):
        self.fake_func(node, node.left, '__pow__', [node.right], func)
    def visitMod(self, node, func=None):
        """'%' operator / string formatting.

        For tuple/dict right-hand sides, the format call takes no argument and
        each tuple element additionally gets a fake __str__ call.
        """
        if isinstance(node.right, (Tuple, Dict)):
            self.fake_func(node, node.left, '__mod__', [], func)
            for child in node.right.getChildNodes():
                self.visit(child, func)
                if isinstance(node.right, Tuple):
                    self.fake_func(inode(self.gx, child), child, '__str__', [], func)
        else:
            self.fake_func(node, node.left, '__mod__', [node.right], func)
    def visitPrintnl(self, node, func=None):
        # 'print x' (with newline) is modeled the same as 'print x,'
        self.visitPrint(node, func)

    def visitPrint(self, node, func=None):
        """print: each argument gets a fake __str__ call."""
        pnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[pnode] = set()
        for child in node.getChildNodes():
            self.visit(child, func)
            self.fake_func(inode(self.gx, child), child, '__str__', [], func)
    def temp_var(self, node, func=None, looper=None, wopper=None, exc_name=False):
        """Allocate (or reuse) a temp variable for *node* and return its Variable.

        Names are '__N' with N the running temp count; the mapping is cached in
        self.tempcount, keyed by the node (or its inheritance parent so that
        duplicated ASTs share names).
        """
        if node in self.gx.parent_nodes:
            varname = self.tempcount[self.gx.parent_nodes[node]]
        elif node in self.tempcount:  # XXX investigate why this happens
            varname = self.tempcount[node]
        else:
            varname = '__' + str(len(self.tempcount))
        var = default_var(self.gx, varname, func, mv=getmv(), exc_name=exc_name)
        var.looper = looper
        var.wopper = wopper
        self.tempcount[node] = varname
        register_temp_var(var, func)
        return var
    def temp_var2(self, node, source, func):
        """Temp variable for *node* with *source* flowing into it."""
        tvar = self.temp_var(node, func)
        self.add_constraint((source, inode(self.gx, tvar)), func)
        return tvar

    def temp_var_int(self, node, func):
        """Temp variable for *node* hard-typed as int (e.g. loop bookkeeping)."""
        var = self.temp_var(node, func)
        self.gx.types[inode(self.gx, var)] = set([(def_class(self.gx, 'int_'), 0)])
        inode(self.gx, var).copymetoo = True
        return var
    def visitRaise(self, node, func=None):
        """Only the simple 'raise expr' form is supported."""
        if node.expr1 is None or node.expr2 is not None or node.expr3 is not None:
            error('unsupported raise syntax', self.gx, node, mv=getmv())
        for child in node.getChildNodes():
            self.visit(child, func)
    def visitTryExcept(self, node, func=None):
        """try/except: type each handler's exception variable, visit all bodies.

        Each 'except Cls [as name]' binds (Cls, 1) to the name (or a hidden
        temp); tuple clauses are expanded into one pair per class.
        """
        self.visit(node.body, func)
        for handler in node.handlers:
            if not handler[0]:
                continue  # bare 'except:'
            if isinstance(handler[0], Tuple):
                pairs = [(n, handler[1]) for n in handler[0].nodes]
            else:
                pairs = [(handler[0], handler[1])]
            for (h0, h1) in pairs:
                if isinstance(h0, Name) and h0.name in ['int', 'float', 'str', 'class']:
                    continue  # handle in lookup_class
                cl = lookup_class(h0, getmv())
                if not cl:
                    error("unknown or unsupported exception type", self.gx, h0, mv=getmv())
                if isinstance(h1, AssName):
                    var = self.default_var(h1.name, func, exc_name=True)
                else:
                    var = self.temp_var(h0, func, exc_name=True)
                var.invisible = True
                inode(self.gx, var).copymetoo = True
                self.gx.types[inode(self.gx, var)] = set([(cl, 1)])
        for handler in node.handlers:
            self.visit(handler[2], func)
        # else
        if node.else_:
            self.visit(node.else_, func)
            self.temp_var_int(node.else_, func)  # flag var: did we reach else?
    def visitTryFinally(self, node, func=None):
        """try/finally is rejected outright."""
        error("'try..finally' is not supported", self.gx, node, mv=getmv())
    def visitYield(self, node, func):
        """yield expr: mark generator, model as 'return __iter(expr)'."""
        func.isGenerator = True
        func.yieldNodes.append(node)
        self.visit(Return(CallFunc(Name('__iter'), [node.value])), func)
        self.add_constraint((inode(self.gx, node.value), func.yieldnode), func)
def visitFor(self, node, func=None):
    """Visit a for loop.

    Models iteration as ``node.list.__iter__().next()`` and flows the
    result into the loop target (name, attribute, or tuple unpacking).
    """
    # --- iterable contents -> assign node
    assnode = CNode(self.gx, node.assign, parent=func, mv=getmv())
    self.gx.types[assnode] = set()
    get_iter = CallFunc(Getattr(node.list, '__iter__'), [])
    fakefunc = CallFunc(Getattr(get_iter, 'next'), [])
    self.visit(fakefunc, func)
    self.add_constraint((inode(self.gx, fakefunc), assnode), func)
    # --- assign node -> variables XXX merge into assign_pair
    if isinstance(node.assign, AssName):
        # for x in..
        lvar = self.default_var(node.assign.name, func)
        self.add_constraint((assnode, inode(self.gx, lvar)), func)
    elif isinstance(node.assign, AssAttr):  # XXX experimental :)
        # for expr.x in..  -> modeled as expr.__setattr__('x', <item>)
        CNode(self.gx, node.assign, parent=func, mv=getmv())
        self.gx.assign_target[node.assign.expr] = node.assign.expr  # XXX multiple targets possible please
        fakefunc2 = CallFunc(Getattr(node.assign.expr, '__setattr__'), [Const(node.assign.attrname), fakefunc])
        self.visit(fakefunc2, func)
    elif isinstance(node.assign, (AssTuple, AssList)):
        # for (a,b, ..) in..
        self.tuple_flow(node.assign, node.assign, func)
    else:
        error('unsupported type of assignment', self.gx, node, mv=getmv())
    self.do_for(node, assnode, get_iter, func)
    # --- for-else
    if node.else_:
        self.temp_var_int(node.else_, func)
        self.visit(node.else_, func)
    # --- loop body
    self.gx.loopstack.append(node)
    self.visit(node.body, func)
    self.gx.loopstack.pop()
    self.for_in_iters.append(node.list)
def do_for(self, node, assnode, get_iter, func):
    """Allocate the temporary variables the code generator needs for a loop.

    Fast ``for i in range(..)`` loops get temps for the bounds; general
    loops get temps for the iterable, the iterator, and (for
    enumerate/zip) the extra sequences.  The integer keys (node, 1..7)
    distinguish the different temps for the same loop node.
    """
    # --- for i in range(..) XXX i should not be modified.. use tempcounter; two bounds
    if is_fastfor(node):
        self.temp_var2(node.assign, assnode, func)
        self.temp_var2(node.list, inode(self.gx, node.list.args[0]), func)
        if len(node.list.args) == 3 and not isinstance(node.list.args[2], Name) and not is_literal(node.list.args[2]):  # XXX merge with ListComp
            for arg in node.list.args:
                if not isinstance(arg, Name) and not is_literal(arg):  # XXX create func for better check
                    self.temp_var2(arg, inode(self.gx, arg), func)
    # --- temp vars for list, iter etc.
    else:
        self.temp_var2(node, inode(self.gx, node.list), func)
        self.temp_var2((node, 1), inode(self.gx, get_iter), func)
        self.temp_var_int(node.list, func)
        if is_enum(node) or is_zip2(node):
            self.temp_var2((node, 2), inode(self.gx, node.list.args[0]), func)
            if is_zip2(node):
                self.temp_var2((node, 3), inode(self.gx, node.list.args[1]), func)
                self.temp_var_int((node, 4), func)
                self.temp_var((node, 5), func, looper=node.list)
        if isinstance(node.list, CallFunc) and isinstance(node.list.node, Getattr):
            self.temp_var((node, 6), func, wopper=node.list.node.expr)
            self.temp_var2((node, 7), inode(self.gx, node.list.node.expr), func)
def bool_test_add(self, node):
    """Record boolean-operator nodes that are only ever used as truth tests."""
    if isinstance(node, (Or, And, Not)):
        self.gx.bool_test_only.add(node)
def visitWhile(self, node, func=None):
    """Visit a while loop, tracking it on the loop stack for break/continue."""
    # the test expression is only used for its truth value
    self.bool_test_add(node.test)
    self.gx.loopstack.append(node)
    for sub in node.getChildNodes():
        self.visit(sub, func)
    self.gx.loopstack.pop()
    else_clause = node.else_
    if else_clause:
        self.temp_var_int(else_clause, func)
        self.visit(else_clause, func)
def visitWith(self, node, func=None):
    """Visit a with-statement, linking the context expression to its 'as' target."""
    target = node.vars
    if target:
        ctx_node = CNode(self.gx, target, parent=func, mv=getmv())
        self.gx.types[ctx_node] = set()
        self.visit(node.expr, func)
        self.add_constraint((inode(self.gx, node.expr), ctx_node), func)
        target_var = self.default_var(target.name, func)
        self.add_constraint((ctx_node, inode(self.gx, target_var)), func)
    else:
        self.visit(node.expr, func)
    for sub in node.getChildNodes():
        self.visit(sub, func)
def visitListCompIf(self, node, func=None):
    """Visit an 'if' filter inside a list comprehension."""
    # the filter expression is only used for its truth value
    self.bool_test_add(node.test)
    for sub in node.getChildNodes():
        self.visit(sub, func)
def visitListComp(self, node, func=None):
    """Visit a list comprehension.

    The comprehension becomes an anonymous Function (``lcfunc``) whose
    body models the iteration; the result node is typed as a list, or
    as '__iter' when this comprehension was converted from a generator
    expression.
    """
    # --- [expr for iter in list for .. if cond ..]
    lcfunc = Function(self.gx, mv=getmv())
    lcfunc.listcomp = True
    lcfunc.ident = 'l.c.'  # XXX
    lcfunc.parent = func
    for qual in node.quals:
        # iter
        assnode = CNode(self.gx, qual.assign, parent=func, mv=getmv())
        self.gx.types[assnode] = set()
        # list.unit->iter: model as qual.list.__iter__().next()
        get_iter = CallFunc(Getattr(qual.list, '__iter__'), [])
        fakefunc = CallFunc(Getattr(get_iter, 'next'), [])
        self.visit(fakefunc, lcfunc)
        self.add_constraint((inode(self.gx, fakefunc), inode(self.gx, qual.assign)), lcfunc)
        if isinstance(qual.assign, AssName):  # XXX merge with visitFor
            lvar = default_var(self.gx, qual.assign.name, lcfunc)  # XXX str or Name?
            self.add_constraint((inode(self.gx, qual.assign), inode(self.gx, lvar)), lcfunc)
        else:  # AssTuple, AssList
            self.tuple_flow(qual.assign, qual.assign, lcfunc)
        self.do_for(qual, assnode, get_iter, lcfunc)
        # cond
        for child in qual.ifs:
            self.visit(child, lcfunc)
        self.for_in_iters.append(qual.list)
    # node type
    if node in self.gx.genexp_to_lc.values():  # converted generator expression
        self.instance(node, def_class(self.gx, '__iter'), func)
    else:
        self.instance(node, def_class(self.gx, 'list'), func)
    # expr->instance.unit
    self.visit(node.expr, lcfunc)
    self.add_dynamic_constraint(node, node.expr, 'unit', lcfunc)
    # give the synthetic function a unique, stable name
    lcfunc.ident = 'list_comp_' + str(len(self.listcomps))
    self.listcomps.append((node, lcfunc, func))
def visitReturn(self, node, func):
    """Visit a return statement and flow its value into the function's return node."""
    self.visit(node.value, func)
    func.returnexpr.append(node.value)
    returns_none = isinstance(node.value, Const) and node.value.value is None
    if not returns_none:
        ret_cnode = CNode(self.gx, node, parent=func, mv=getmv())
        self.gx.types[ret_cnode] = set()
        if isinstance(node.value, Name):
            func.retvars.append(node.value.name)
    if func.retnode:
        self.add_constraint((inode(self.gx, node.value), func.retnode), func)
def visitAssign(self, node, func=None):
    """Visit an assignment statement.

    Handles the struct.unpack rewrite, then dispatches each
    (lvalue, rvalue) pair produced by assign_rec to the appropriate
    modeling: subscript/attribute stores, plain names, tuple unpacking
    and slice assignment.  Finally allocates temp vars needed for
    multi-target / tuple-rhs assignments.
    """
    # --- rewrite for struct.unpack XXX rewrite callfunc as tuple
    if len(node.nodes) == 1:
        lvalue, rvalue = node.nodes[0], node.expr
        # only flat tuple/list targets qualify for the struct.unpack rewrite
        if self.struct_unpack(rvalue, func) and isinstance(lvalue, (AssList, AssTuple)) and not [n for n in lvalue.nodes if isinstance(n, (AssList, AssTuple))]:
            self.visit(node.expr, func)
            sinfo = self.struct_info(rvalue.args[0], func)
            faketuple = self.struct_faketuple(sinfo)
            self.visit(Assign(node.nodes, faketuple), func)
            tvar = self.temp_var2(rvalue.args[1], inode(self.gx, rvalue.args[1]), func)
            tvar_pos = self.temp_var_int(rvalue.args[0], func)
            self.gx.struct_unpack[node] = (sinfo, tvar.name, tvar_pos.name)
            return
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    # --- a,b,.. = c,(d,e),.. = .. = expr
    for target_expr in node.nodes:
        pairs = assign_rec(target_expr, node.expr)
        for (lvalue, rvalue) in pairs:
            # expr[expr] = expr
            if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
                self.assign_pair(lvalue, rvalue, func)  # XXX use here generally, and in tuple_flow
            # expr.attr = expr
            elif isinstance(lvalue, AssAttr):
                self.assign_pair(lvalue, rvalue, func)
            # name = expr
            elif isinstance(lvalue, AssName):
                if (rvalue, 0, 0) not in self.gx.cnode:  # XXX generalize
                    self.visit(rvalue, func)
                self.visit(lvalue, func)
                lvar = self.default_var(lvalue.name, func)
                if isinstance(rvalue, Const):
                    lvar.const_assign.append(rvalue)
                self.add_constraint((inode(self.gx, rvalue), inode(self.gx, lvar)), func)
            # (a,(b,c), ..) = expr
            elif isinstance(lvalue, (AssTuple, AssList)):
                self.visit(rvalue, func)
                self.tuple_flow(lvalue, rvalue, func)
            # expr[a:b] = expr # XXX bla()[1:3] = [1]
            elif isinstance(lvalue, Slice):
                self.slice(lvalue, lvalue.expr, [lvalue.lower, lvalue.upper, None], func, rvalue)
            # expr[a:b:c] = expr
            elif isinstance(lvalue, Subscript) and isinstance(lvalue.subs[0], Sliceobj):
                self.slice(lvalue, lvalue.expr, lvalue.subs[0].nodes, func, rvalue)
    # temp vars
    if len(node.nodes) > 1 or isinstance(node.expr, Tuple):
        if isinstance(node.expr, Tuple):
            if [n for n in node.nodes if isinstance(n, AssTuple)]:
                for child in node.expr.nodes:
                    if (child, 0, 0) not in self.gx.cnode:  # (a,b) = (1,2): (1,2) never visited
                        continue
                    if not isinstance(child, Const) and not (isinstance(child, Name) and child.name == 'None'):
                        self.temp_var2(child, inode(self.gx, child), func)
        elif not isinstance(node.expr, Const) and not (isinstance(node.expr, Name) and node.expr.name == 'None'):
            self.temp_var2(node.expr, inode(self.gx, node.expr), func)
def assign_pair(self, lvalue, rvalue, func):
    """Model a single store: subscript stores become __setitem__ calls,
    attribute stores become __setattr__ calls."""
    # expr[expr] = expr
    if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
        # multiple subscripts collapse into a tuple key
        if len(lvalue.subs) > 1:
            subscript = Tuple(lvalue.subs)
        else:
            subscript = lvalue.subs[0]
        fakefunc = CallFunc(Getattr(lvalue.expr, '__setitem__'), [subscript, rvalue])
        self.visit(fakefunc, func)
        inode(self.gx, lvalue.expr).fakefunc = fakefunc
        if len(lvalue.subs) > 1:
            inode(self.gx, lvalue.expr).faketuple = subscript
        if not isinstance(lvalue.expr, Name):
            self.temp_var2(lvalue.expr, inode(self.gx, lvalue.expr), func)
    # expr.attr = expr
    elif isinstance(lvalue, AssAttr):
        CNode(self.gx, lvalue, parent=func, mv=getmv())
        self.gx.assign_target[rvalue] = lvalue.expr
        fakefunc = CallFunc(Getattr(lvalue.expr, '__setattr__'), [Const(lvalue.attrname), rvalue])
        self.visit(fakefunc, func)
def default_var(self, name, func, exc_name=False):
    """Look up or create *name*; names declared global in *func* live at module scope."""
    # a 'global' declaration means the variable belongs to the module, not func
    owner = func
    if isinstance(func, Function) and name in func.globals:
        owner = None
    return default_var(self.gx, name, owner, mv=getmv(), exc_name=exc_name)
def tuple_flow(self, lvalue, rvalue, func=None):
    """Flow *rvalue* into a tuple/list unpacking target.

    Each item gets a fake ``rvalue.__getitem__(i)`` call; items that
    are themselves tuples/lists are handled recursively.
    """
    self.temp_var2(lvalue, inode(self.gx, rvalue), func)
    if isinstance(lvalue, (AssTuple, AssList)):
        lvalue = lvalue.nodes
    for (i, item) in enumerate(lvalue):
        fakenode = CNode(self.gx, (item,), parent=func, mv=getmv())  # fake node per item, for multiple callfunc triggers
        self.gx.types[fakenode] = set()
        self.add_constraint((inode(self.gx, rvalue), fakenode), func)
        fakefunc = CallFunc(FakeGetattr3(rvalue, '__getitem__'), [Const(i)])
        fakenode.callfuncs.append(fakefunc)
        self.visit(fakefunc, func)
        self.gx.item_rvalue[item] = rvalue
        if isinstance(item, AssName):
            lvar = self.default_var(item.name, func)
            self.add_constraint((inode(self.gx, fakefunc), inode(self.gx, lvar)), func)
        elif isinstance(item, (Subscript, AssAttr)):
            self.assign_pair(item, fakefunc, func)
        elif isinstance(item, (AssTuple, AssList)):  # recursion
            self.tuple_flow(item, fakefunc, func)
        else:
            error('unsupported type of assignment', self.gx, item, mv=getmv())
def super_call(self, orig, parent):
    """If *orig* is a ``super(Class, self).method(..)`` call, return the
    base-class expression it resolves to, else None.

    Only the two-argument form with a literal 'self' second argument
    and a single base class is supported.
    """
    node = orig.node
    # walk out of nested functions to the enclosing scope
    while isinstance(parent, Function):
        parent = parent.parent
    if (isinstance(node.expr, CallFunc) and
            node.attrname not in ('__getattr__', '__setattr__') and
            isinstance(node.expr.node, Name) and
            node.expr.node.name == 'super'):
        if (len(node.expr.args) >= 2 and
                isinstance(node.expr.args[1], Name) and node.expr.args[1].name == 'self'):
            cl = lookup_class(node.expr.args[0], getmv())
            if cl.node.bases:
                return cl.node.bases[0]
        error("unsupported usage of 'super'", self.gx, orig, mv=getmv())
def visitCallFunc(self, node, func=None):  # XXX clean up!!
    """Visit a call expression.

    Distinguishes method calls (Getattr callee, with a super() rewrite
    and the three Fake* getattr variants), direct calls by name
    (rejecting unsupported builtins), and arbitrary callee expressions;
    then visits the arguments and decides between class instantiation
    and a plain call.
    """
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    if isinstance(node.node, Getattr):  # XXX import math; math.e
        # rewrite super(..) call into an explicit base-class method call
        base = self.super_call(node, func)
        if base:
            node.node = Getattr(copy.deepcopy(base), node.node.attrname)
            node.args = [Name('self')] + node.args
        # method call
        if isinstance(node.node, FakeGetattr):  # XXX butt ugly
            self.visit(node.node, func)
        elif isinstance(node.node, FakeGetattr2):
            self.gx.types[newnode] = set()  # XXX move above
            self.callfuncs.append((node, func))
            for arg in node.args:
                inode(self.gx, arg).callfuncs.append(node)  # this one too
            return
        elif isinstance(node.node, FakeGetattr3):
            pass
        else:
            self.visitGetattr(node.node, func, callfunc=True)
            inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there?
            inode(self.gx, node.node).fakert = True
        ident = node.node.attrname
        inode(self.gx, node.node.expr).callfuncs.append(node)  # XXX iterative dataflow analysis: move there?
        # module.__getattr__('name') on an imported module: link to that module's global
        if isinstance(node.node.expr, Name) and node.node.expr.name in getmv().imports and node.node.attrname == '__getattr__':  # XXX analyze_callfunc
            if node.args[0].value in getmv().imports[node.node.expr.name].mv.globals:  # XXX bleh
                self.add_constraint((inode(self.gx, getmv().imports[node.node.expr.name].mv.globals[node.args[0].value]), newnode), func)
    elif isinstance(node.node, Name):
        # direct call
        ident = node.node.name
        if ident == 'print':
            ident = node.node.name = '__print'  # XXX
        if ident in ['hasattr', 'getattr', 'setattr', 'slice', 'type', 'Ellipsis']:
            error("'%s' function is not supported" % ident, self.gx, node.node, mv=getmv())
        if ident == 'dict' and [x for x in node.args if isinstance(x, Keyword)]:
            error('unsupported method of initializing dictionaries', self.gx, node, mv=getmv())
        if ident == 'isinstance':
            error("'isinstance' is not supported; always returns True", self.gx, node, mv=getmv(), warning=True)
        if lookup_var(ident, func, mv=getmv()):
            self.visit(node.node, func)
            inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there
    else:
        self.visit(node.node, func)
        inode(self.gx, node.node).callfuncs.append(node)  # XXX iterative dataflow analysis: move there
    # --- arguments
    if not getmv().module.builtin and (node.star_args or node.dstar_args):
        error('argument (un)packing is not supported', self.gx, node, mv=getmv())
    args = node.args[:]
    if node.star_args:
        args.append(node.star_args)  # partially allowed in builtins
    if node.dstar_args:
        args.append(node.dstar_args)
    for arg in args:
        if isinstance(arg, Keyword):
            arg = arg.expr
        self.visit(arg, func)
        inode(self.gx, arg).callfuncs.append(node)  # this one too
    # --- handle instantiation or call
    constructor = lookup_class(node.node, getmv())
    if constructor and (not isinstance(node.node, Name) or not lookup_var(node.node.name, func, mv=getmv())):
        self.instance(node, constructor, func)
        inode(self.gx, node).callfuncs.append(node)  # XXX see above, investigate
    else:
        self.gx.types[newnode] = set()
        self.callfuncs.append((node, func))
def visitClass(self, node, parent=None):
    """Visit a class definition.

    Validates the base classes, creates (or reuses) the Class object,
    synthesizes builtin __setattr__/__getattr__ stubs, records
    staticmethod/property declarations, visits the class body, and
    finally generates default __iadd__-style, __str__ and __hash__
    methods where missing.
    """
    if not getmv().module.builtin and not node in getmv().classnodes:
        error("non-global class '%s'" % node.name, self.gx, node, mv=getmv())
    if len(node.bases) > 1:
        error('multiple inheritance is not supported', self.gx, node, mv=getmv())
    # --- validate base classes (user modules only)
    if not getmv().module.builtin:
        for base in node.bases:
            if not isinstance(base, (Name, Getattr)):
                error("invalid expression for base class", self.gx, node, mv=getmv())
            if isinstance(base, Name):
                name = base.name
            else:
                name = base.attrname
            cl = lookup_class(base, getmv())
            if not cl:
                error("no such class: '%s'" % name, self.gx, node, mv=getmv())
            elif cl.mv.module.builtin and name not in ['object', 'Exception', 'tzinfo']:
                if def_class(self.gx, 'Exception') not in cl.ancestors():
                    error("inheritance from builtin class '%s' is not supported" % name, self.gx, node, mv=getmv())
    if node.name in getmv().classes:
        newclass = getmv().classes[node.name]  # set in visitModule, for forward references
    else:
        check_redef(self.gx, node)  # XXX merge with visitModule
        newclass = Class(self.gx, node, getmv())
        self.classes[node.name] = newclass
        getmv().classes[node.name] = newclass
        newclass.module = self.module
        newclass.parent = StaticClass(newclass, getmv())
    # --- built-in functions
    for cl in [newclass, newclass.parent]:
        for ident in ['__setattr__', '__getattr__']:
            func = Function(self.gx, mv=getmv())
            func.ident = ident
            func.parent = cl
            if ident == '__setattr__':
                func.formals = ['name', 'whatsit']
                retexpr = Return(Name('None'))
                self.visit(retexpr, func)
            elif ident == '__getattr__':
                func.formals = ['name']
            cl.funcs[ident] = func
    # --- built-in attributes
    if 'class_' in getmv().classes or 'class_' in getmv().ext_classes:
        var = default_var(self.gx, '__class__', newclass)
        var.invisible = True
        self.gx.types[inode(self.gx, var)] = set([(def_class(self.gx, 'class_'), def_class(self.gx, 'class_').dcpa)])
        def_class(self.gx, 'class_').dcpa += 1
    # --- staticmethod, property
    skip = []
    for child in node.code.getChildNodes():
        if isinstance(child, Assign) and len(child.nodes) == 1:
            lvalue, rvalue = child.nodes[0], child.expr
            if isinstance(lvalue, AssName) and isinstance(rvalue, CallFunc) and isinstance(rvalue.node, Name) and rvalue.node.name in ['staticmethod', 'property']:
                if rvalue.node.name == 'property':
                    # property(getter) or property(getter, setter), names only
                    if len(rvalue.args) == 1 and isinstance(rvalue.args[0], Name):
                        newclass.properties[lvalue.name] = rvalue.args[0].name, None
                    elif len(rvalue.args) == 2 and isinstance(rvalue.args[0], Name) and isinstance(rvalue.args[1], Name):
                        newclass.properties[lvalue.name] = rvalue.args[0].name, rvalue.args[1].name
                    else:
                        error("complex properties are not supported", self.gx, rvalue, mv=getmv())
                else:
                    newclass.staticmethods.append(lvalue.name)
                skip.append(child)
    # --- children
    for child in node.code.getChildNodes():
        if child not in skip:
            cl = self.classes[node.name]
            if isinstance(child, FunctionNode):
                self.visit(child, cl)
            else:
                cl.parent.static_nodes.append(child)
                self.visit(child, cl.parent)
    # --- __iadd__ etc.
    if not newclass.mv.module.builtin or newclass.ident in ['int_', 'float_', 'str_', 'tuple', 'complex']:
        msgs = ['add', 'mul']  # XXX mod, pow
        if newclass.ident in ['int_', 'float_']:
            msgs += ['sub', 'div', 'floordiv']
        if newclass.ident in ['int_']:
            msgs += ['lshift', 'rshift', 'and', 'xor', 'or']
        for msg in msgs:
            if not '__i' + msg + '__' in newclass.funcs:
                # synthesize: def __imsg__(self, other): return self.__msg__(other)
                self.visit(FunctionNode(None, '__i' + msg + '__', ['self', 'other'], [], 0, None, Stmt([Return(CallFunc(Getattr(Name('self'), '__' + msg + '__'), [Name('other')], None, None))])), newclass)
    # --- __str__, __hash__ # XXX model in lib/builtin.py, other defaults?
    if not newclass.mv.module.builtin and not '__str__' in newclass.funcs:
        self.visit(FunctionNode(None, '__str__', ['self'], [], 0, None, Return(CallFunc(Getattr(Name('self'), '__repr__'), []))), newclass)
        newclass.funcs['__str__'].invisible = True
    if not newclass.mv.module.builtin and not '__hash__' in newclass.funcs:
        self.visit(FunctionNode(None, '__hash__', ['self'], [], 0, None, Return(Const(0)), []), newclass)
        newclass.funcs['__hash__'].invisible = True
def visitGetattr(self, node, func=None, callfunc=False):
    """Visit an attribute access, modeled as ``expr.__getattr__('attr')``.

    When not part of a call (callfunc=False) the attribute may be a
    function or class being passed around; fncl_passing handles that.
    """
    if node.attrname in ['__doc__']:
        error('%s attribute is not supported' % node.attrname, self.gx, node, mv=getmv())
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    fakefunc = CallFunc(FakeGetattr(node.expr, '__getattr__'), [Const(node.attrname)])
    self.visit(fakefunc, func)
    self.add_constraint((self.gx.cnode[fakefunc, 0, 0], newnode), func)
    self.callfuncs.append((fakefunc, func))
    if not callfunc:
        self.fncl_passing(node, newnode, func)
def visitConst(self, node, func=None):
    """Type a literal constant; unicode literals are rejected."""
    value = node.value
    if type(value) == unicode:
        error('unicode is not supported', self.gx, node, mv=getmv())
    # map the literal's Python type onto the corresponding builtin class
    class_names = {int: 'int_', str: 'str_', float: 'float_',
                   type(None): 'none', long: 'int_', complex: 'complex'}  # XXX 'return' -> Return(Const(None))?
    self.instance(node, def_class(self.gx, class_names[type(value)]), func)
def fncl_passing(self, node, newnode, func):
    """Handle a function or class being passed around as a value.

    Returns True when *node* names a function or class (seeding
    *newnode* with it, wrapping builtins in a lambda wrapper),
    False otherwise.
    """
    lfunc, lclass = lookup_func(node, getmv()), lookup_class(node, getmv())
    if lfunc:
        if lfunc.mv.module.builtin:
            lfunc = self.builtin_wrapper(node, func)
        elif lfunc.ident not in lfunc.mv.lambdas:
            # register the function as a lambda so it can be passed by value
            lfunc.lambdanr = len(lfunc.mv.lambdas)
            lfunc.mv.lambdas[lfunc.ident] = lfunc
        self.gx.types[newnode] = set([(lfunc, 0)])
    elif lclass:
        if lclass.mv.module.builtin:
            lclass = self.builtin_wrapper(node, func)
        else:
            lclass = lclass.parent
        self.gx.types[newnode] = set([(lclass, 0)])
    else:
        return False
    newnode.copymetoo = True  # XXX merge into some kind of 'seeding' function
    return True
def visitName(self, node, func=None):
    """Visit a name reference.

    None/True/False get seeded directly; otherwise the variable is
    looked up (respecting 'global' declarations), falling back to
    function/class passing or the int/float/str pseudo-classes.
    """
    newnode = CNode(self.gx, node, parent=func, mv=getmv())
    self.gx.types[newnode] = set()
    if node.name == '__doc__':
        error("'%s' attribute is not supported" % node.name, self.gx, node, mv=getmv())
    if node.name in ['None', 'True', 'False']:
        if node.name == 'None':  # XXX also bools, remove def seed_nodes()
            self.instance(node, def_class(self.gx, 'none'), func)
        else:
            self.instance(node, def_class(self.gx, 'bool_'), func)
        return
    if isinstance(func, Function) and node.name in func.globals:
        # declared global in this function: resolve at module scope
        var = default_var(self.gx, node.name, None, mv=getmv())
    else:
        var = lookup_var(node.name, func, mv=getmv())
        if not var:
            if self.fncl_passing(node, newnode, func):
                pass
            elif node.name in ['int', 'float', 'str']:  # XXX
                cl = self.ext_classes[node.name + '_']
                self.gx.types[newnode] = set([(cl.parent, 0)])
                newnode.copymetoo = True
            else:
                var = default_var(self.gx, node.name, None, mv=getmv())
    if var:
        self.add_constraint((inode(self.gx, var), newnode), func)
        # apply isinstance-style filters active at this point
        for a, b in self.gx.filterstack:
            if var.name == a.name:
                self.gx.filters[node] = lookup_class(b, getmv())
def builtin_wrapper(self, node, func):
    """Wrap a builtin function/class reference in a 5-argument lambda so
    it can be passed around by value; returns the wrapper Function."""
    # node2 models: lambda a, b, c, d, e: node(a, b, c, d, e)
    node2 = CallFunc(copy.deepcopy(node), [Name(x) for x in 'abcde'])
    l = Lambda(list('abcde'), [], 0, node2)
    self.visit(l, func)
    self.lwrapper[node] = self.lambdaname[l]
    self.gx.lambdawrapper[node2] = self.lambdaname[l]
    f = self.lambdas[self.lambdaname[l]]
    f.lambdawrapper = True
    inode(self.gx, node2).lambdawrapper = f
    return f
def parse_module(name, gx, parent=None, node=None):
    """Locate, parse and visit module *name*; returns the (cached) Module.

    The importing module's directory and the current directory are
    searched before gx.libdirs.  The active ModuleVisitor is saved and
    restored around the recursive visit.
    """
    # --- valid name?
    if not re.match("^[a-zA-Z0-9_.]+$", name):
        print ("*ERROR*:%s.py: module names should consist of letters, digits and underscores" % name)
        sys.exit(1)
    # --- create module
    try:
        if parent and parent.path != os.getcwd():
            basepaths = [parent.path, os.getcwd()]
        else:
            basepaths = [os.getcwd()]
        module_paths = basepaths + gx.libdirs
        absolute_name, filename, relative_filename, builtin = find_module(gx, name, module_paths)
        module = Module(absolute_name, filename, relative_filename, builtin, node)
    except ImportError:
        error('cannot locate module: ' + name, gx, node, mv=getmv())
    # --- check cache
    if module.name in gx.modules:  # cached?
        return gx.modules[module.name]
    gx.modules[module.name] = module
    # --- not cached, so parse
    module.ast = parse_file(module.filename)
    old_mv = getmv()
    module.mv = mv = ModuleVisitor(module, gx)
    setmv(mv)
    mv.visit = mv.dispatch
    mv.visitor = mv
    mv.dispatch(module.ast)
    module.import_order = gx.import_order
    gx.import_order += 1
    # restore the previous visitor before returning
    mv = old_mv
    setmv(mv)
    return module
| shedskin/shedskin | shedskin/graph.py | Python | gpl-3.0 | 71,909 | [
"VisIt"
] | 7793836f8eba820f622a2613e9349a198e679517469e1023d9acb77996c1d44b |
import numpy as np, pandas as pd
import nose.tools as nt
import numpy.testing.decorators as dec
from itertools import product
from ...tests.flags import SMALL_SAMPLES
from ...tests.instance import (gaussian_instance as instance,
logistic_instance)
from ...tests.decorators import (set_sampling_params_iftrue,
wait_for_return_value,
set_seed_iftrue)
from ..lasso import (lasso,
ROSI,
data_carving,
data_splitting,
split_model,
standard_lasso,
nominal_intervals,
glm_sandwich_estimator,
glm_parametric_estimator)
from ..sqrt_lasso import (solve_sqrt_lasso, choose_lambda)
import regreg.api as rr
try:
import statsmodels.api
statsmodels_available = True
except ImportError:
statsmodels_available = False
def test_gaussian(n=100, p=20):
    """Smoke test for the Gaussian lasso over several quadratic/weight combos.

    Yields an (assert_array_less, lhs, rhs) triple per combination so the
    test runner checks the active constraints hold at the one-step estimator.
    """
    y = np.random.standard_normal(n)
    X = np.random.standard_normal((n,p))
    lam_theor = np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 1000)))).max(0))
    Q = rr.identity_quadratic(0.01, 0, np.ones(p), 0)
    weights_with_zeros = 0.5*lam_theor * np.ones(p)
    weights_with_zeros[:3] = 0.
    huge_weights = weights_with_zeros * 10000
    for q, fw in product([Q, None],
                         [0.5*lam_theor, weights_with_zeros, huge_weights]):
        # BUGFIX: pass the loop's quadratic `q` (was hard-coded to Q,
        # which made the product over [Q, None] a no-op)
        L = lasso.gaussian(X, y, fw, 1., quadratic=q)
        L.fit()
        C = L.constraints
        sandwich = glm_sandwich_estimator(L.loglike, B=5000)
        L = lasso.gaussian(X, y, fw, 1., quadratic=q, covariance_estimator=sandwich)
        L.fit()
        C = L.constraints
        S = L.summary('onesided', compute_intervals=True)
        S = L.summary('twosided')
        # invalid alternative must be rejected
        nt.assert_raises(ValueError, L.summary, 'none')
        print(L.active)
        yield (np.testing.assert_array_less,
               np.dot(L.constraints.linear_part, L.onestep_estimator),
               L.constraints.offset)
def test_sqrt_lasso(n=100, p=20):
    """Smoke test for the square-root lasso over quadratic/weight combos.

    Yields an (assert_array_less, lhs, rhs) triple per combination.
    NOTE: statement order fixes the RNG stream — do not reorder draws.
    """
    y = np.random.standard_normal(n)
    X = np.random.standard_normal((n,p))
    # sqrt-lasso theoretical lambda is scaled by 1/sqrt(n)
    lam_theor = np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 1000)))).max(0)) / np.sqrt(n)
    Q = rr.identity_quadratic(0.01, 0, np.random.standard_normal(p) / 5., 0)
    weights_with_zeros = 0.5*lam_theor * np.ones(p)
    weights_with_zeros[:3] = 0.
    huge_weights = weights_with_zeros * 10000
    for q, fw in product([None, Q],
                         [0.5*lam_theor, weights_with_zeros, huge_weights]):
        L = lasso.sqrt_lasso(X, y, fw, quadratic=q, solve_args={'min_its':300, 'tol':1.e-12})
        L.fit(solve_args={'min_its':300, 'tol':1.e-12})
        C = L.constraints
        S = L.summary('onesided', compute_intervals=True)
        S = L.summary('twosided')
        yield (np.testing.assert_array_less,
               np.dot(L.constraints.linear_part, L.onestep_estimator),
               L.constraints.offset)
def test_logistic():
    """Smoke test for the logistic lasso with and without a trials vector."""
    # (response, trials) pairs: Bernoulli with explicit trials, Bernoulli
    # with trials=None, and binomial(3) with 3 trials per observation
    for Y, T in [(np.random.binomial(1,0.5,size=(10,)),
                  np.ones(10)),
                 (np.random.binomial(1,0.5,size=(10,)),
                  None),
                 (np.random.binomial(3,0.5,size=(10,)),
                  3*np.ones(10))]:
        X = np.random.standard_normal((10,5))
        # fit twice as a refit smoke test
        L = lasso.logistic(X, Y, 0.1, trials=T)
        L.fit()
        L = lasso.logistic(X, Y, 0.1, trials=T)
        L.fit()
        C = L.constraints
        # active constraints must hold at the one-step estimator
        np.testing.assert_array_less( \
            np.dot(L.constraints.linear_part, L.onestep_estimator),
            L.constraints.offset)
        P = L.summary()['pval']
    return L, C, P
def test_poisson():
    """Smoke test for the Poisson lasso."""
    X = np.random.standard_normal((10,5))
    Y = np.random.poisson(10, size=(10,))
    # fit twice as a refit smoke test
    L = lasso.poisson(X, Y, 0.1)
    L.fit()
    L = lasso.poisson(X, Y, 0.1)
    L.fit()
    C = L.constraints
    # active constraints must hold at the one-step estimator
    np.testing.assert_array_less( \
        np.dot(L.constraints.linear_part, L.onestep_estimator),
        L.constraints.offset)
    P = L.summary()['pval']
    return L, C, P
@dec.skipif(not statsmodels_available, "needs statsmodels")
def test_coxph():
    """Smoke test for the Cox proportional-hazards lasso."""
    Q = rr.identity_quadratic(0.01, 0, np.ones(5), 0)
    X = np.random.standard_normal((100,5))
    T = np.random.standard_exponential(100)   # event times
    S = np.random.binomial(1, 0.5, size=(100,))  # censoring indicator
    # fit twice as a refit smoke test
    L = lasso.coxph(X, T, S, 0.1, quadratic=Q)
    L.fit()
    L = lasso.coxph(X, T, S, 0.1, quadratic=Q)
    L.fit()
    C = L.constraints
    # active constraints must hold at the one-step estimator
    np.testing.assert_array_less( \
        np.dot(L.constraints.linear_part, L.onestep_estimator),
        L.constraints.offset)
    P = L.summary()['pval']
    return L, C, P
@wait_for_return_value(max_tries=100)
@set_seed_iftrue(True)
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
def test_data_carving_gaussian(n=200,
                               p=200,
                               s=7,
                               sigma=5,
                               rho=0.3,
                               signal=7.,
                               split_frac=0.8,
                               lam_frac=2.,
                               ndraw=8000,
                               burnin=2000,
                               df=np.inf,
                               compute_intervals=True,
                               use_full_cov=True,
                               return_only_screening=True):
    """Compare data carving vs data splitting p-values on a Gaussian instance.

    Returns (carve_pvals, split_pvals, active_mask) only when all truly
    active variables are screened (otherwise None; the decorator retries).
    NOTE: statement order fixes the RNG stream — do not reorder draws.
    """
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal,
                                                 df=df)
    mu = np.dot(X, beta)
    # random first-stage subsample of size split_frac * n
    idx = np.arange(n)
    np.random.shuffle(idx)
    stage_one = idx[:int(n*split_frac)]
    lam_theor = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 5000)))).max(0)) * sigma
    DC = data_carving.gaussian(X, y, feature_weights=lam_theor,
                               sigma=sigma,
                               stage_one=stage_one)
    DC.fit()
    # splitting needs more second-stage observations than selected variables
    if len(DC.active) < n - int(n*split_frac):
        DS = data_splitting.gaussian(X, y, feature_weights=lam_theor,
                                     sigma=sigma,
                                     stage_one=stage_one)
        # exercise both covariance modes, then the requested one
        DS.fit(use_full_cov=True)
        DS.fit(use_full_cov=False)
        DS.fit(use_full_cov=use_full_cov)
        data_split = True
    else:
        print('not enough data for second stage data splitting')
        print(DC.active)
        data_split = False
    if set(true_active).issubset(DC.active):
        carve = []
        split = []
        for var in DC.active:
            carve.append(DC.hypothesis_test(var, burnin=burnin, ndraw=ndraw))
            if data_split:
                split.append(DS.hypothesis_test(var))
            else:
                split.append(np.random.sample()) # appropriate p-value if data splitting can't estimate 2nd stage
        Xa = X[:,DC.active]
        truth = np.dot(np.linalg.pinv(Xa), mu)
        active = np.zeros(p, np.bool)
        active[true_active] = 1
        v = (carve, split, active)
        return v
@wait_for_return_value()
@set_seed_iftrue(True)
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
def test_data_carving_sqrt_lasso(n=200,
                                 p=200,
                                 s=7,
                                 sigma=5,
                                 rho=0.3,
                                 signal=7.,
                                 split_frac=0.9,
                                 lam_frac=1.2,
                                 ndraw=8000,
                                 burnin=2000,
                                 df=np.inf,
                                 compute_intervals=True,
                                 return_only_screening=True):
    """Data carving vs splitting p-values for the square-root lasso.

    Returns (carve_pvals, split_pvals, active_mask) only when all truly
    active variables are screened (otherwise None; the decorator retries).
    NOTE: statement order fixes the RNG stream — do not reorder draws.
    """
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal,
                                                 df=df)
    mu = np.dot(X, beta)
    # random first-stage subsample of size split_frac * n
    idx = np.arange(n)
    np.random.shuffle(idx)
    stage_one = idx[:int(n*split_frac)]
    n1 = len(stage_one)
    # theoretical lambda computed on the first-stage rows, sqrt-lasso scaling
    lam_theor = lam_frac * np.mean(np.fabs(np.dot(X[stage_one].T, np.random.standard_normal((n1, 5000)))).max(0)) / np.sqrt(n1)
    DC = data_carving.sqrt_lasso(X, y, feature_weights=lam_theor,
                                 stage_one=stage_one)
    DC.fit()
    # splitting needs more second-stage observations than selected variables
    if len(DC.active) < n - int(n*split_frac):
        DS = data_splitting.sqrt_lasso(X, y, feature_weights=lam_theor,
                                       stage_one=stage_one)
        DS.fit(use_full_cov=True)
        data_split = True
    else:
        print('not enough data for second stage data splitting')
        print(DC.active)
        data_split = False
    if set(true_active).issubset(DC.active):
        carve = []
        split = []
        for var in DC.active:
            carve.append(DC.hypothesis_test(var, burnin=burnin, ndraw=ndraw))
            if data_split:
                split.append(DS.hypothesis_test(var))
            else:
                # stand-in uniform p-value when splitting is unavailable
                split.append(np.random.sample())
        Xa = X[:,DC.active]
        truth = np.dot(np.linalg.pinv(Xa), mu)
        active = np.zeros(p, np.bool)
        active[true_active] = 1
        v = (carve, split, active)
        return v
@wait_for_return_value()
@set_seed_iftrue(True)
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
def test_data_carving_logistic(n=700,
                               p=300,
                               s=5,
                               rho=0.05,
                               signal=12.,
                               split_frac=0.8,
                               ndraw=8000,
                               burnin=2000,
                               df=np.inf,
                               compute_intervals=True,
                               use_full_cov=False,
                               return_only_screening=True):
    """Data carving vs splitting p-values for the logistic lasso.

    An unpenalized intercept column is prepended, so all variable
    indices (including true_active) are shifted by one.
    NOTE: statement order fixes the RNG stream — do not reorder draws.
    """
    X, y, beta, true_active, _ = logistic_instance(n=n,
                                                   p=p,
                                                   s=s,
                                                   rho=rho,
                                                   signal=signal,
                                                   equicorrelated=False)
    mu = X.dot(beta)
    prob = np.exp(mu) / (1 + np.exp(mu))
    # prepend intercept column; shift active indices accordingly
    X = np.hstack([np.ones((n,1)), X])
    active = np.array(true_active)
    active += 1
    s += 1
    active = [0] + list(active)
    true_active = active
    # random first-stage subsample of size split_frac * n
    idx = np.arange(n)
    np.random.shuffle(idx)
    stage_one = idx[:int(n*split_frac)]
    n1 = len(stage_one)
    # unit weights, intercept unpenalized
    lam_theor = 1.0 * np.ones(p+1)
    lam_theor[0] = 0.
    DC = data_carving.logistic(X, y,
                               feature_weights=lam_theor,
                               stage_one=stage_one)
    DC.fit()
    # splitting needs more second-stage observations than selected variables
    if len(DC.active) < n - int(n*split_frac):
        DS = data_splitting.logistic(X, y, feature_weights=lam_theor,
                                     stage_one=stage_one)
        DS.fit(use_full_cov=True)
        data_split = True
    else:
        print('not enough data for data splitting second stage')
        print(DC.active)
        data_split = False
    print(true_active, DC.active)
    if set(true_active).issubset(DC.active):
        carve = []
        split = []
        for var in DC.active:
            carve.append(DC.hypothesis_test(var, burnin=burnin, ndraw=ndraw))
            if data_split:
                split.append(DS.hypothesis_test(var))
            else:
                # stand-in uniform p-value when splitting is unavailable
                split.append(np.random.sample())
        Xa = X[:,DC.active]
        active = np.zeros(p+1, np.bool)
        active[true_active] = 1
        v = (carve, split, active)
        return v
@wait_for_return_value()
@set_seed_iftrue(True)
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
def test_data_carving_poisson(n=500,
                              p=300,
                              s=5,
                              sigma=5,
                              rho=0.3,
                              signal=12.,
                              split_frac=0.8,
                              lam_frac=1.2,
                              ndraw=8000,
                              burnin=2000,
                              df=np.inf,
                              compute_intervals=True,
                              use_full_cov=True,
                              return_only_screening=True):
    """Data carving vs splitting p-values for the Poisson lasso.

    The design comes from a Gaussian instance but the response is
    replaced by Poisson(10) noise, so only the (unpenalized) intercept
    is truly active.  Returns (carve_pvals, split_pvals, active_mask)
    only when the intercept is screened; otherwise None and the
    decorator retries.
    """
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal,
                                                 df=df)
    # prepend intercept; overwrite response with pure Poisson noise
    X = np.hstack([np.ones((n,1)), X])
    y = np.random.poisson(10, size=y.shape)
    s = 1
    true_active = [0]
    # random first-stage subsample of size split_frac * n
    idx = np.arange(n)
    np.random.shuffle(idx)
    stage_one = idx[:int(n*split_frac)]
    n1 = len(stage_one)
    # constant weights, intercept unpenalized
    lam_theor = 3. * np.ones(p+1)
    lam_theor[0] = 0.
    DC = data_carving.poisson(X, y, feature_weights=lam_theor,
                              stage_one=stage_one)
    DC.fit()
    # splitting needs more second-stage observations than selected variables
    if len(DC.active) < n - int(n*split_frac):
        DS = data_splitting.poisson(X, y, feature_weights=lam_theor,
                                    stage_one=stage_one)
        DS.fit(use_full_cov=True)
        data_split = True
    else:
        print('not enough data for data splitting second stage')
        print(DC.active)
        data_split = False
    # BUGFIX: only compare active sets when DS actually exists —
    # previously this line raised NameError whenever data_split was False
    if data_split and set(DS.active) != set(DC.active):
        raise ValueError('different active sets for carving and splitting')
    print(DC.active)
    if set(true_active).issubset(DC.active):
        carve = []
        split = []
        for var in DC.active:
            carve.append(DC.hypothesis_test(var, burnin=burnin, ndraw=ndraw))
            if data_split and var in DS.active:
                split.append(DS.hypothesis_test(var))
            else:
                # stand-in uniform p-value when splitting is unavailable
                split.append(np.random.sample())
        Xa = X[:,DC.active]
        active = np.zeros(p+1, np.bool)
        active[true_active] = 1
        v = (carve, split, active)
        return v
@wait_for_return_value()
@set_seed_iftrue(True)
@dec.skipif(not statsmodels_available, "needs statsmodels")
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
def test_data_carving_coxph(n=400,
                            p=20,
                            split_frac=0.8,
                            lam_frac=1.2,
                            ndraw=8000,
                            burnin=2000,
                            df=np.inf,
                            compute_intervals=True,
                            return_only_screening=True):
    """Data carving vs splitting p-values for the Cox model.

    The design carries no true signal (true_active is empty), so every
    reported p-value is a null p-value.
    NOTE: statement order fixes the RNG stream — do not reorder draws.
    """
    X = np.random.standard_normal((n,p))
    T = np.random.standard_exponential(n)        # event times
    S = np.random.binomial(1, 0.5, size=(n,))    # censoring indicator
    true_active = []
    s = 0
    active = np.array(true_active)
    # random first-stage subsample of size split_frac * n
    idx = np.arange(n)
    np.random.shuffle(idx)
    stage_one = idx[:int(n*split_frac)]
    n1 = len(stage_one)
    # constant weights, first coefficient unpenalized
    lam_theor = 10. * np.ones(p)
    lam_theor[0] = 0.
    DC = data_carving.coxph(X, T, S, feature_weights=lam_theor,
                            stage_one=stage_one)
    DC.fit()
    # splitting needs more second-stage observations than selected variables
    if len(DC.active) < n - int(n*split_frac):
        DS = data_splitting.coxph(X, T, S, feature_weights=lam_theor,
                                  stage_one=stage_one)
        DS.fit(use_full_cov=True)
        data_split = True
    else:
        print('not enough data for data splitting second stage')
        print(DC.active)
        data_split = False
    if set(true_active).issubset(DC.active):
        carve = []
        split = []
        for var in DC.active:
            carve.append(DC.hypothesis_test(var, burnin=burnin, ndraw=ndraw))
            if data_split:
                split.append(DS.hypothesis_test(var))
            else:
                # stand-in uniform p-value when splitting is unavailable
                split.append(np.random.sample())
        Xa = X[:,DC.active]
        active = np.zeros(p, np.bool)
        active[true_active] = 1
        v = (carve, split, active)
        return v
@set_seed_iftrue(True)
def test_intervals(n=100, p=20, s=5):
    """Smoke test: fit a Gaussian LASSO and request selective intervals."""
    X, y, beta, true_active, sigma, _ = instance(n=n, p=p, s=s)
    model = lasso.gaussian(X, y, 4., sigma=sigma)
    model.fit()
    # touch lazily-computed attributes to make sure they are populated
    model.soln
    model.constraints
    model.summary(compute_intervals=True)
    nominal_intervals(model)
@wait_for_return_value()
def test_gaussian_pvals(n=100,
                        p=500,
                        s=7,
                        sigma=5,
                        rho=0.3,
                        signal=8.):
    """Fit a Gaussian LASSO and report selective pvalues after screening."""
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal)
    model = lasso.gaussian(X, y, 20., sigma=sigma)
    model.fit()
    # refit starting from the previous solution (warm-start smoke test)
    model.fit(model.lasso_solution)
    if not set(true_active).issubset(model.active):
        return
    model.summary('onesided')        # exercise the one-sided path as well
    S = model.summary('twosided')
    return S['pval'], [var in true_active for var in S['variable']]
@wait_for_return_value()
def test_sqrt_lasso_pvals(n=100,
                          p=200,
                          s=7,
                          sigma=5,
                          rho=0.3,
                          signal=7.):
    """Fit a sqrt-LASSO with some unpenalized features and report pvalues."""
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal)
    # empirical approximation of the theoretical sqrt-LASSO tuning parameter
    lam_theor = np.fabs(X.T.dot(np.random.standard_normal((n, 1000)))).max(0).mean() / np.sqrt(n)
    Q = rr.identity_quadratic(0.01, 0, np.ones(p), 0)
    weights_with_zeros = 0.7 * lam_theor * np.ones(p)
    weights_with_zeros[:3] = 0.      # leave the first three unpenalized
    # smoke test of the parametric-covariance constructor
    lasso.sqrt_lasso(X, y, weights_with_zeros, covariance='parametric')
    model = lasso.sqrt_lasso(X, y, weights_with_zeros)
    model.fit()
    if not set(true_active).issubset(model.active):
        return
    model.summary('onesided')        # exercise the one-sided path as well
    S = model.summary('twosided')
    return S['pval'], [var in true_active for var in S['variable']]
@wait_for_return_value()
def test_sqrt_lasso_sandwich_pvals(n=200,
                                   p=50,
                                   s=10,
                                   sigma=10,
                                   rho=0.3,
                                   signal=6.,
                                   use_lasso_sd=False):
    """Sandwich-covariance sqrt-LASSO pvalues under heteroscedastic noise."""
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal)
    # heteroscedastic noise whose scale is tied to the last three columns
    extra_noise = np.zeros(n)
    for col, shift in [(-1, 0.5), (-2, 0.2), (-3, 0.5)]:
        extra_noise += sigma * np.random.standard_normal(n) * (np.fabs(X[:, col]) + shift)**2
    y += extra_noise
    feature_weights = np.ones(p) * choose_lambda(X)
    feature_weights[10:12] = 0       # keep a couple of unpenalized columns
    L_SQ = lasso.sqrt_lasso(X, y, feature_weights, covariance='sandwich')
    L_SQ.fit()
    if not set(true_active).issubset(L_SQ.active):
        return
    S = L_SQ.summary('twosided')
    return S['pval'], [var in true_active for var in S['variable']]
@wait_for_return_value()
def test_gaussian_sandwich_pvals(n=200,
                                 p=50,
                                 s=10,
                                 sigma=10,
                                 rho=0.3,
                                 signal=6.,
                                 use_lasso_sd=False):
    """
    Compare parametric and sandwich covariance estimators on the same
    heteroscedastic Gaussian instance; return inactive pvalues for both.
    """
    X, y, beta, true_active, sigma, _ = instance(n=n,
                                                 p=p,
                                                 s=s,
                                                 sigma=sigma,
                                                 rho=rho,
                                                 signal=signal)
    # heteroscedastic noise whose scale is tied to the last three columns
    extra_noise = np.zeros(n)
    for col, shift in [(-1, 0.5), (-2, 0.2), (-3, 0.5)]:
        extra_noise += sigma * np.random.standard_normal(n) * (np.fabs(X[:, col]) + shift)**2
    y += extra_noise
    # two different estimators of variance
    loss = rr.glm.gaussian(X, y)
    sandwich = glm_sandwich_estimator(loss, B=5000)
    # make sure things work with some unpenalized columns
    feature_weights = np.ones(p) * 3 * sigma
    feature_weights[10:12] = 0
    if use_lasso_sd:
        # estimate dispersion from the residual sum of squares of a
        # preliminary LASSO fit
        prelim = lasso.gaussian(X, y, feature_weights)
        prelim.fit()
        resid = y - X.dot(prelim.lasso_solution)
        sigma_hat = np.linalg.norm(resid)**2 / (n - len(prelim.active))
        parametric = glm_parametric_estimator(loss, dispersion=sigma_hat**2)
    else:
        parametric = glm_parametric_estimator(loss, dispersion=None)
    L_P = lasso.gaussian(X, y, feature_weights, covariance_estimator=parametric)
    L_P.fit()
    if not set(true_active).issubset(L_P.active):
        return
    S = L_P.summary('twosided')
    P_P = [pv for pv, var in zip(S['pval'], S['variable'])
           if var not in true_active]
    L_S = lasso.gaussian(X, y, feature_weights, covariance_estimator=sandwich)
    L_S.fit()
    S = L_S.summary('twosided')
    P_S = [pv for pv, var in zip(S['pval'], S['variable'])
           if var not in true_active]
    return P_P, P_S, [var in true_active for var in S['variable']]
@wait_for_return_value()
def test_logistic_pvals(n=500,
                        p=200,
                        s=3,
                        rho=0.3,
                        signal=15.):
    """Logistic LASSO pvalues with an unpenalized intercept column."""
    X, y, beta, true_active, _ = logistic_instance(n=n,
                                                   p=p,
                                                   s=s,
                                                   rho=rho,
                                                   signal=signal,
                                                   equicorrelated=False)
    # prepend an intercept column
    X = np.hstack([np.ones((n, 1)), X])
    print(true_active, 'true')
    # shift the active indices by one to account for the intercept (index 0)
    true_active = [0] + [j + 1 for j in true_active]
    model = lasso.logistic(X, y, [0] + [1.2] * p)
    model.fit()
    S = model.summary('onesided')
    print(true_active, model.active)
    if set(true_active).issubset(model.active):
        return S['pval'], [var in true_active for var in S['variable']]
@set_seed_iftrue(True)
def test_adding_quadratic_lasso():
    """
    Check KKT conditions of the Gaussian LASSO when a quadratic term
    (with and without a ridge component) is added to the objective.
    """
    X, y, beta, true_active, sigma, _ = instance(n=300, p=200)
    # first pass: ridge term 0.01; second pass: a purely linear term
    for ridge_term in (0.01, 0.0):
        quad = rr.identity_quadratic(ridge_term, 0,
                                     np.random.standard_normal(X.shape[1]), 0)
        model = lasso.gaussian(X, y, 20, quadratic=quad)
        soln = model.fit(solve_args={'min_its': 500, 'tol': 1.e-12})
        grad = (X[:, model.active].T.dot(X.dot(soln) - y) +
                quad.objective(soln, 'grad')[model.active])
        # active-set gradient must sit on the penalty boundary
        np.testing.assert_allclose(grad * np.sign(soln[model.active]), -20)
@set_seed_iftrue(True)
def test_equivalence_sqrtlasso(n=200, p=400, s=10, sigma=3.):
    """
    Check equivalent LASSO and sqrtLASSO solutions.
    """
    # Simulate a sparse Gaussian model with column-normalized design.
    Y = np.random.standard_normal(n) * sigma
    beta = np.zeros(p)
    beta[:s] = 8 * (2 * np.random.binomial(1, 0.5, size=(s,)) - 1)
    X = np.random.standard_normal((n,p)) + 0.3 * np.random.standard_normal(n)[:,None]
    X /= (X.std(0)[None,:] * np.sqrt(n))
    Y += np.dot(X, beta) * sigma
    lam_theor = choose_lambda(X, quantile=0.9)
    weights = lam_theor*np.ones(p)
    weights[:3] = 0.  # first three features unpenalized
    # Solve the sqrt-LASSO directly.
    soln1, loss1 = solve_sqrt_lasso(X, Y, weights=weights, quadratic=None, solve_args={'min_its':500, 'tol':1.e-10})
    G1 = loss1.smooth_objective(soln1, 'grad')
    # find active set, and estimate of sigma
    active = (soln1 != 0)
    nactive = active.sum()
    subgrad = np.sign(soln1[active]) * weights[active]
    X_E = X[:,active]
    X_Ei = np.linalg.pinv(X_E)
    sigma_E= np.linalg.norm(Y - X_E.dot(X_Ei.dot(Y))) / np.sqrt(n - nactive)
    # The LASSO with this rescaled penalty should match the sqrt-LASSO fit.
    multiplier = sigma_E * np.sqrt((n - nactive) / (1 - np.linalg.norm(X_Ei.T.dot(subgrad))**2))
    # XXX how should quadratic be changed?
    # multiply everything by sigma_E?
    loss2 = rr.glm.gaussian(X, Y)
    penalty = rr.weighted_l1norm(weights, lagrange=multiplier)
    problem = rr.simple_problem(loss2, penalty)
    soln2 = problem.solve(tol=1.e-12, min_its=200)
    G2 = loss2.smooth_objective(soln2, 'grad') / multiplier
    # Gradients (on penalized coordinates) and solutions must agree.
    np.testing.assert_allclose(G1[3:], G2[3:])
    np.testing.assert_allclose(soln1, soln2)
@set_seed_iftrue(True)
def test_gaussian_full(n=100, p=20):
    """Smoke test: ROSI Gaussian fit with unpenalized features and a quadratic."""
    y = np.random.standard_normal(n)
    X = np.random.standard_normal((n, p))
    # empirical approximation of the theoretical tuning parameter
    lam_theor = np.fabs(X.T.dot(np.random.standard_normal((n, 1000)))).max(0).mean()
    Q = rr.identity_quadratic(0.01, 0, np.ones(p), 0)
    weights_with_zeros = 0.5 * lam_theor * np.ones(p)
    weights_with_zeros[:3] = 0.      # leave the first three unpenalized
    model = ROSI.gaussian(X, y, weights_with_zeros, 1., quadratic=Q)
    model.fit()
    print(model.summary(compute_intervals=True))
@set_seed_iftrue(True)
def test_logistic_full():
    """Exercise ROSI logistic fits for binary and binomial responses."""
    cases = [
        (np.random.binomial(1, 0.5, size=(10,)), np.ones(10)),      # explicit trials
        (np.random.binomial(1, 0.5, size=(10,)), None),             # default trials
        (np.random.binomial(3, 0.5, size=(10,)), 3 * np.ones(10)),  # binomial counts
    ]
    for response, trials in cases:
        X = np.random.standard_normal((10, 5))
        model = ROSI.logistic(X, response, 0.1, trials=trials)
        model.fit()
        model.summary(compute_intervals=True)
@set_seed_iftrue(True)
def test_poisson_full():
    """Smoke test for the ROSI Poisson LASSO."""
    design = np.random.standard_normal((10, 5))
    counts = np.random.poisson(10, size=(10,))
    model = ROSI.poisson(design, counts, 0.1)
    model.fit()
    model.summary(compute_intervals=True)
| selective-inference/selective-inference | selectinf/algorithms/tests/test_lasso.py | Python | bsd-3-clause | 27,613 | [
"Gaussian"
] | 18d7bb1903593fe9231f9b77c7c67e02f34df3c5cb02e27ce51d6ea07f86db84 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAG commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlag
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage VLAG resources and attributes on devices running
Lenovo CNOS
description:
- This module allows you to work with virtual Link Aggregation Groups
(vLAG) related configurations. The operators used are overloaded to ensure
control over switch vLAG configurations. Apart from the regular device
connection related attributes, there are four vLAG arguments which are
overloaded variables that will perform further configurations. They are
vlagArg1, vlagArg2, vlagArg3, and vlagArg4. For more details on how to use
these arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the
playbook is run. For more information about this module from Lenovo and
customizing it usage for your use cases, please visit
U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlag.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlagArg1:
description:
- This is an overloaded vlag first argument. Usage of this argument can
        be found in the User Guide referenced above.
required: Yes
default: Null
choices: [enable, auto-recovery,config-consistency,isl,mac-address-table,
peer-gateway,priority,startup-delay,tier-id,vrrp,instance,hlthchk]
vlagArg2:
description:
- This is an overloaded vlag second argument. Usage of this argument can
        be found in the User Guide referenced above.
required: No
default: Null
choices: [Interval in seconds,disable or strict,Port Aggregation Number,
VLAG priority,Delay time in seconds,VLAG tier-id value,
VLAG instance number,keepalive-attempts,keepalive-interval,
retry-interval,peer-ip]
vlagArg3:
description:
- This is an overloaded vlag third argument. Usage of this argument can
        be found in the User Guide referenced above.
required: No
default: Null
choices: [enable or port-aggregation,Number of keepalive attempts,
Interval in seconds,Interval in seconds,
VLAG health check peer IP4 address]
vlagArg4:
description:
- This is an overloaded vlag fourth argument. Usage of this argument can
        be found in the User Guide referenced above.
required: No
default: Null
choices: [Port Aggregation Number,default or management]
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_vlag. These are
written in the main.yml file of the tasks directory.
---
- name: Test Vlag - enable
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "enable"
- name: Test Vlag - autorecovery
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "auto-recovery"
vlagArg2: 266
- name: Test Vlag - config-consistency
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "config-consistency"
vlagArg2: "strict"
- name: Test Vlag - isl
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "isl"
vlagArg2: 23
- name: Test Vlag - mac-address-table
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "mac-address-table"
- name: Test Vlag - peer-gateway
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "peer-gateway"
- name: Test Vlag - priority
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "priority"
vlagArg2: 1313
- name: Test Vlag - startup-delay
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "startup-delay"
vlagArg2: 323
- name: Test Vlag - tier-id
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "tier-id"
vlagArg2: 313
- name: Test Vlag - vrrp
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "vrrp"
- name: Test Vlag - instance
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: 33
vlagArg3: 333
- name: Test Vlag - instance2
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: "33"
- name: Test Vlag - keepalive-attempts
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-attempts"
vlagArg3: 13
- name: Test Vlag - keepalive-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-interval"
vlagArg3: 131
- name: Test Vlag - retry-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "retry-interval"
vlagArg3: 133
- name: Test Vlag - peer ip
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user']}}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "peer-ip"
vlagArg3: "1.2.3.4"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "vLAG configurations accomplished"
'''
import sys
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def vlagConfig(module, prompt, answer):
    # Build and execute a single "vlag ..." CLI command from the overloaded
    # module parameters vlagArg1..vlagArg4.  Each branch validates its
    # arguments through cnos.checkSanityofVariable and returns an
    # "Error-NNN" code string on the first validation failure; on success
    # the device output of the command is returned.
    # NOTE(review): `prompt` and `answer` are accepted but unused here --
    # the command is sent with prompt=None/answer=None below.
    retVal = ''
    # vlag config command happens here.
    command = 'vlag '
    vlagArg1 = module.params['vlagArg1']
    vlagArg2 = module.params['vlagArg2']
    vlagArg3 = module.params['vlagArg3']
    vlagArg4 = module.params['vlagArg4']
    deviceType = module.params['deviceType']
    if(vlagArg1 == "enable"):
        # debugOutput("enable")
        command = command + vlagArg1 + " "
    elif(vlagArg1 == "auto-recovery"):
        # debugOutput("auto-recovery")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(
            deviceType, "vlag_auto_recovery", vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-160"
            return retVal
    elif(vlagArg1 == "config-consistency"):
        # debugOutput("config-consistency")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(
            deviceType, "vlag_config_consistency", vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-161"
            return retVal
    elif(vlagArg1 == "isl"):
        # debugOutput("isl")
        command = command + vlagArg1 + " port-channel "
        value = cnos.checkSanityofVariable(
            deviceType, "vlag_port_aggregation", vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-162"
            return retVal
    elif(vlagArg1 == "mac-address-table"):
        # debugOutput("mac-address-table")
        command = command + vlagArg1 + " refresh"
    elif(vlagArg1 == "peer-gateway"):
        # debugOutput("peer-gateway")
        command = command + vlagArg1 + " "
    elif(vlagArg1 == "priority"):
        # debugOutput("priority")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(deviceType, "vlag_priority",
                                           vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-163"
            return retVal
    elif(vlagArg1 == "startup-delay"):
        # debugOutput("startup-delay")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(
            deviceType, "vlag_startup_delay", vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-164"
            return retVal
    elif(vlagArg1 == "tier-id"):
        # debugOutput("tier-id")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(deviceType, "vlag_tier_id", vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
        else:
            retVal = "Error-165"
            return retVal
    elif(vlagArg1 == "vrrp"):
        # debugOutput("vrrp")
        command = command + vlagArg1 + " active"
    elif(vlagArg1 == "instance"):
        # debugOutput("instance")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(deviceType, "vlag_instance",
                                           vlagArg2)
        if(value == "ok"):
            command = command + vlagArg2
            if(vlagArg3 is not None):
                # optional port-channel binding for the instance
                command = command + " port-channel "
                value = cnos.checkSanityofVariable(
                    deviceType, "vlag_port_aggregation", vlagArg3)
                if(value == "ok"):
                    command = command + vlagArg3
                else:
                    retVal = "Error-162"
                    return retVal
            else:
                # no port-channel given: just enable the instance
                command = command + " enable "
        else:
            retVal = "Error-166"
            return retVal
    elif(vlagArg1 == "hlthchk"):
        # debugOutput("hlthchk")
        command = command + vlagArg1 + " "
        value = cnos.checkSanityofVariable(
            deviceType, "vlag_hlthchk_options", vlagArg2)
        if(value == "ok"):
            if(vlagArg2 == "keepalive-attempts"):
                value = cnos.checkSanityofVariable(
                    deviceType, "vlag_keepalive_attempts", vlagArg3)
                if(value == "ok"):
                    command = command + vlagArg2 + " " + vlagArg3
                else:
                    retVal = "Error-167"
                    return retVal
            elif(vlagArg2 == "keepalive-interval"):
                value = cnos.checkSanityofVariable(
                    deviceType, "vlag_keepalive_interval", vlagArg3)
                if(value == "ok"):
                    command = command + vlagArg2 + " " + vlagArg3
                else:
                    retVal = "Error-168"
                    return retVal
            elif(vlagArg2 == "retry-interval"):
                value = cnos.checkSanityofVariable(
                    deviceType, "vlag_retry_interval", vlagArg3)
                if(value == "ok"):
                    command = command + vlagArg2 + " " + vlagArg3
                else:
                    retVal = "Error-169"
                    return retVal
            elif(vlagArg2 == "peer-ip"):
                # Here I am not taking care of IPV6 option.
                value = cnos.checkSanityofVariable(
                    deviceType, "vlag_peerip", vlagArg3)
                if(value == "ok"):
                    command = command + vlagArg2 + " " + vlagArg3
                    if(vlagArg4 is not None):
                        # optional VRF qualifier for the peer address
                        value = cnos.checkSanityofVariable(
                            deviceType, "vlag_peerip_vrf", vlagArg4)
                        if(value == "ok"):
                            command = command + " vrf " + vlagArg4
                        else:
                            retVal = "Error-170"
                            return retVal
                else:
                    retVal = "Error-171"
                    return retVal
    else:
        # unrecognized vlagArg1
        retVal = "Error-172"
        return retVal
    # debugOutput(command)
    cmd = [{'command': command, 'prompt': None, 'answer': None}]
    retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
    return retVal
# EOM
def main():
    """
    Ansible entry point: validate parameters, run the vlag configuration
    command, append the device output to `outputfile`, and report
    success/failure back to Ansible.
    """
    #
    # Define parameters for vlag creation entry
    #
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            vlagArg1=dict(required=True),
            vlagArg2=dict(required=False),
            vlagArg3=dict(required=False),
            vlagArg4=dict(required=False),),
        supports_check_mode=False)
    outputfile = module.params['outputfile']
    # Send the CLI command
    output = str(vlagConfig(module, '(config)#', None))
    # BUG FIX: use a context manager so the file handle is closed even if
    # the write raises (and avoid shadowing the `file` builtin).
    with open(outputfile, "a") as log_file:
        log_file.write(output)
    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="VLAG configurations accomplished")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
| caphrim007/ansible | lib/ansible/modules/network/cnos/cnos_vlag.py | Python | gpl-3.0 | 18,499 | [
"VisIt"
] | 4e3cb3b9212d8e20a812c6ae7af4f1fc97422763fad712a945c1b33120b57f60 |
"""
This module contains all modifiers of OVITO.
The abstract base class of all modifier types is the :py:class:`Modifier` class.
Typically you create a modifier instance, set its parameters, and finally insert it into the
modification pipeline of an :py:class:`~ovito.ObjectNode`, e.g.::
from ovito.modifiers import *
m = AssignColorModifier()
m.color = (0.2, 1.0, 0.9)
node.modifiers.append(m)
The following modifier types are available:
============================================== =========================================
Python class name User interface name
============================================== =========================================
:py:class:`AffineTransformationModifier` :guilabel:`Affine transformation`
:py:class:`AmbientOcclusionModifier` :guilabel:`Ambient occlusion`
:py:class:`AssignColorModifier` :guilabel:`Assign color`
:py:class:`AtomicStrainModifier` :guilabel:`Atomic strain`
:py:class:`BinAndReduceModifier` :guilabel:`Bin and reduce`
:py:class:`BondAngleAnalysisModifier` :guilabel:`Bond-angle analysis`
:py:class:`CalculateDisplacementsModifier` :guilabel:`Displacement vectors`
:py:class:`CentroSymmetryModifier` :guilabel:`Centrosymmetry parameter`
:py:class:`ClearSelectionModifier` :guilabel:`Clear selection`
:py:class:`ClusterAnalysisModifier` :guilabel:`Cluster analysis`
:py:class:`ColorCodingModifier` :guilabel:`Color coding`
:py:class:`CombineParticleSetsModifier` :guilabel:`Combine particle sets`
:py:class:`CommonNeighborAnalysisModifier` :guilabel:`Common neighbor analysis`
:py:class:`ComputeBondLengthsModifier` :guilabel:`Compute bond lengths`
:py:class:`ComputePropertyModifier` :guilabel:`Compute property`
:py:class:`ConstructSurfaceModifier` :guilabel:`Construct surface mesh`
:py:class:`CoordinationNumberModifier` :guilabel:`Coordination analysis`
:py:class:`CreateBondsModifier` :guilabel:`Create bonds`
:py:class:`DeleteSelectedParticlesModifier` :guilabel:`Delete selected particles`
:py:class:`DislocationAnalysisModifier` :guilabel:`Dislocation analysis (DXA)`
:py:class:`ElasticStrainModifier` :guilabel:`Elastic strain calculation`
:py:class:`ExpandSelectionModifier` :guilabel:`Expand selection`
:py:class:`FreezePropertyModifier` :guilabel:`Freeze property`
:py:class:`HistogramModifier` :guilabel:`Histogram`
:py:class:`IdentifyDiamondModifier` :guilabel:`Identify diamond structure`
:py:class:`InvertSelectionModifier` :guilabel:`Invert selection`
:py:class:`LoadTrajectoryModifier` :guilabel:`Load trajectory`
:py:class:`ManualSelectionModifier` :guilabel:`Manual selection`
:py:class:`PolyhedralTemplateMatchingModifier` :guilabel:`Polyhedral template matching`
:py:class:`PythonScriptModifier` :guilabel:`Python script`
:py:class:`ScatterPlotModifier` :guilabel:`Scatter plot`
:py:class:`SelectExpressionModifier` :guilabel:`Expression select`
:py:class:`SelectParticleTypeModifier` :guilabel:`Select particle type`
:py:class:`ShowPeriodicImagesModifier` :guilabel:`Show periodic images`
:py:class:`SliceModifier` :guilabel:`Slice`
:py:class:`VoronoiAnalysisModifier` :guilabel:`Voronoi analysis`
:py:class:`WignerSeitzAnalysisModifier` :guilabel:`Wigner-Seitz defect analysis`
:py:class:`WrapPeriodicImagesModifier` :guilabel:`Wrap at periodic boundaries`
============================================== =========================================
*Note that some analysis modifiers are not accessible from Python. That is because they do things that can be achieved equally well using the Numpy python module.*
"""
# Load the native module.
from PyScriptScene import Modifier
from PyScriptScene import PythonScriptModifier
| srinath-chakravarthy/ovito | src/plugins/pyscript/python/ovito/modifiers/__init__.py | Python | gpl-3.0 | 4,052 | [
"OVITO"
] | 8a0a1005f56af9c31e2ad298e82cd21b64738d22c98ccc9973587df245f96d52 |
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    """
    shrinkage = "empirical" if shrinkage is None else shrinkage
    if isinstance(shrinkage, string_types):
        if shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            s = ledoit_wolf(X)[0]
            # rescale back to the scale of the original (unstandardized) data
            s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
        elif shrinkage == 'empirical':
            s = empirical_covariance(X)
        else:
            raise ValueError('unknown shrinkage parameter')
    elif isinstance(shrinkage, (int, float)):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        # BUG FIX: the message previously said "string or int" although a
        # float in [0, 1] is the documented (and accepted) numeric type.
        raise TypeError('shrinkage must be a string or a number '
                        'between 0 and 1')
    return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute the (weighted) within-class covariance matrix.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.

    priors : array-like, shape (n_classes,)
        Class priors used as averaging weights (uniform if None).

    shrinkage : string or float, optional
        Shrinkage parameter, forwarded to :func:`_cov`:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Prior-weighted average of the per-class covariance matrices.
    """
    per_class = [np.atleast_2d(_cov(X[y == label], shrinkage))
                 for label in np.unique(y)]
    return np.average(per_class, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
                                 TransformerMixin):
    """Linear Discriminant Analysis

    A classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.

    The model fits a Gaussian density to each class, assuming that all classes
    share the same covariance matrix.

    The fitted model can also be used to reduce the dimensionality of the input
    by projecting it to the most discriminative directions.

    .. versionadded:: 0.17
       *LinearDiscriminantAnalysis*.

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    solver : string, optional
        Solver to use, possible values:
          - 'svd': Singular value decomposition (default).
            Does not compute the covariance matrix, therefore this solver is
            recommended for data with a large number of features.
          - 'lsqr': Least squares solution, can be combined with shrinkage.
          - 'eigen': Eigenvalue decomposition, can be combined with shrinkage.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

        Note that shrinkage works only with 'lsqr' and 'eigen' solvers.

    priors : array, optional, shape (n_classes,)
        Class priors.

    n_components : int, optional
        Number of components (< n_classes - 1) for dimensionality reduction.

    store_covariance : bool, optional
        Additionally compute class covariance matrix (default False).

        .. versionadded:: 0.17

    tol : float, optional, (default 1.0e-4)
        Threshold used for rank estimation in SVD solver.

        .. versionadded:: 0.17

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).

    intercept_ : array, shape (n_features,)
        Intercept term.

    covariance_ : array-like, shape (n_features, n_features)
        Covariance matrix (shared by all classes).

    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0. Only available when eigen
        or svd solver is used.

    means_ : array-like, shape (n_classes, n_features)
        Class means.

    priors_ : array-like, shape (n_classes,)
        Class priors (sum to 1).

    scalings_ : array-like, shape (rank, n_classes - 1)
        Scaling of the features in the space spanned by the class centroids.

    xbar_ : array-like, shape (n_features,)
        Overall mean.

    classes_ : array-like, shape (n_classes,)
        Unique class labels.

    See also
    --------
    sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
        Discriminant Analysis

    Notes
    -----
    The default solver is 'svd'. It can perform both classification and
    transform, and it does not rely on the calculation of the covariance
    matrix. This can be an advantage in situations where the number of features
    is large. However, the 'svd' solver cannot be used with shrinkage.

    The 'lsqr' solver is an efficient algorithm that only works for
    classification. It supports shrinkage.

    The 'eigen' solver is based on the optimization of the between class
    scatter to within class scatter ratio. It can be used for both
    classification and transform, and it supports shrinkage. However, the
    'eigen' solver needs to compute the covariance matrix, so it might not be
    suitable for situations with a high number of features.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LinearDiscriminantAnalysis()
    >>> clf.fit(X, y)
    LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
                  solver='svd', store_covariance=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """

    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver

    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.

        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.

        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.

        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.

        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        # Solve covariance_ * coef_.T = means_.T in the least-squares sense;
        # this yields the linear discriminant weights without eigendecomposition.
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
                           np.log(self.priors_))

    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.

        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.

        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.

        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)

        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter

        # Generalized eigenproblem Sb v = lambda Sw v maximizes the Rayleigh
        # quotient (between-class over within-class scatter).
        evals, evecs = linalg.eigh(Sb, Sw)
        self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
                                                 )[::-1][:self._max_components]
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        evecs /= np.linalg.norm(evecs, axis=0)

        self.scalings_ = evecs
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
                           np.log(self.priors_))

    def _solve_svd(self, X, y):
        """SVD solver.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)

        self.means_ = _class_means(X, y)
        if self.store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)

        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])

        self.xbar_ = np.dot(self.priors_, self.means_)

        Xc = np.concatenate(Xc, axis=0)

        # 1) within (univariate) scaling by within-class std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)

        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)

        rank = np.sum(S > self.tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]

        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)

        self.explained_variance_ratio_ = (S**2 / np.sum(
            S**2))[:self._max_components]
        # Rank cut relative to largest singular value for the between-class SVD.
        rank = np.sum(S > self.tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
                           np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)

    def fit(self, X, y):
        """Fit LinearDiscriminantAnalysis model according to the given
           training data and parameters.

           .. versionchanged:: 0.19
               *store_covariance* has been moved to main constructor.

           .. versionchanged:: 0.19
               *tol* has been moved to main constructor.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array, shape (n_samples,)
            Target values.
        """
        X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
        self.classes_ = unique_labels(y)

        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = np.bincount(y_t) / float(len(y))
        else:
            self.priors_ = np.asarray(self.priors)

        if (self.priors_ < 0).any():
            raise ValueError("priors must be non-negative")
        if self.priors_.sum() != 1:
            warnings.warn("The priors do not sum to 1. Renormalizing",
                          UserWarning)
            self.priors_ = self.priors_ / self.priors_.sum()

        # Get the maximum number of components
        if self.n_components is None:
            self._max_components = len(self.classes_) - 1
        else:
            self._max_components = min(len(self.classes_) - 1,
                                       self.n_components)

        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self

    def transform(self, X):
        """Project data to maximize class separation.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)

        X = check_array(X)
        if self.solver == 'svd':
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)

        return X_new[:, :self._max_components]

    def predict_proba(self, X):
        """Estimate probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        # In-place logistic transform: prob = 1 / (1 + exp(-decision)).
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob

    def predict_log_proba(self, X):
        """Estimate log probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
    """Quadratic Discriminant Analysis

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    .. versionadded:: 0.17
       *QuadraticDiscriminantAnalysis*

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    store_covariances : boolean
        If True the covariance matrices are computed and stored in the
        `self.covariances_` attribute.

        .. versionadded:: 0.17

    tol : float, optional, default 1.0e-4
        Threshold used for rank estimation.

        .. versionadded:: 0.17

    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.

    means_ : array-like, shape = [n_classes, n_features]
        Class means.

    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).

    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.

    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.

    Examples
    --------
    >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QuadraticDiscriminantAnalysis()
    >>> clf.fit(X, y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
                                  store_covariances=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
        Discriminant Analysis
    """

    def __init__(self, priors=None, reg_param=0., store_covariances=False,
                 tol=1.0e-4):
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
        self.store_covariances = store_covariances
        self.tol = tol

    def fit(self, X, y):
        """Fit the model according to the given training data and parameters.

            .. versionchanged:: 0.19
               *store_covariance* has been moved to main constructor.

            .. versionchanged:: 0.19
               *tol* has been moved to main constructor.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)
        """
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        cov = None
        if self.store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > self.tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            S2 = (S ** 2) / (len(Xg) - 1)
            # Shrink the spectrum towards the identity by reg_param.
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if self.store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if self.store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self

    def _decision_function(self, X):
        # Per-class log-posterior (up to a constant): Mahalanobis distance in
        # each class's rotated/scaled frame plus log|Sigma_k| and log prior.
        check_is_fitted(self, 'classes_')

        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| wazeerzulfikar/scikit-learn | sklearn/discriminant_analysis.py | Python | bsd-3-clause | 26,698 | [
"Gaussian"
] | dcdda44270af5fe3a96ce11df11eb0ab73ffd77bde7aadc52dae66d126766302 |
"""
.. module:: pysm
:platform: Unix
:synopsis: module containing primary use classes Sky and Instrument.
.. moduleauthor: Ben Thorne <ben.thorne@physics.ox.ac.uk>
"""
from __future__ import absolute_import, print_function
from scipy import interpolate, integrate
import numpy as np
import healpy as hp
import scipy.constants as constants
import os, sys
from .components import Dust, Synchrotron, Freefree, AME, CMB
from .common import read_key, convert_units, bandpass_convert_units, check_lengths, write_map, build_full_map
class Sky(object):
    """Model sky signal of Galactic foregrounds.

    This class combines the contributions to the Galactic microwave
    foreground from thermal dust, synchrotron, AME, free-free, and CMB
    emissions.

    It is initialised using a dictionary. The keys must be 'cmb',
    'dust', 'synchrotron', 'freefree', 'ame', and the values must be
    dictionaries with the configuration of the named component, e.g.:

    cmb_config = {
        'model' : 'taylens',
        'cmb_specs' : np.loadtxt('pysm/template/camb_lenspotentialCls.dat', unpack = True),
        'delens' : False,
        'delensing_ells' : np.loadtxt('pysm/template/delens_ells.txt'),
        'nside' : nside,
        'cmb_seed' : 1111
    }

    dust_config = {
        'model' : 'modified_black_body',
        'nu_0_I' : 545.,
        'nu_0_P' : 353.,
        'A_I' : pysm.read_map('pysm/template/dust_t_new.fits', nside, field = 0),
        'A_Q' : pysm.read_map('pysm/template/dust_q_new.fits', nside, field = 0),
        'A_U' : pysm.read_map('pysm/template/dust_u_new.fits', nside, field = 0),
        'spectral_index' : 1.5,
        'temp' : pysm.read_map('pysm/template/dust_temp.fits', nside, field = 0)
    }

    sky_config = {
        'cmb' : cmb_config,
        'dust': dust_config,
    }
    """

    def __init__(self, config, mpi_comm=None):
        """Read the configuration dict for Sky

        Implement the configuration file for the Sky instance. Then
        define the getattributes corresponding to the requested
        components.
        """
        self.__config = config
        self.__components = list(config.keys())
        if 'cmb' in self.Components:
            self.cmb = component_adder(CMB, self.Config['cmb'])
        self.Uses_HD17 = False
        if 'dust' in self.Components:
            self.dust = component_adder(Dust, self.Config['dust'], mpi_comm=mpi_comm)
            # Here we add an exception for the HD_17 model. This model requires that for bandpass
            # integration the model be initialized knowing the bandpass specification, rather than
            # just individual frequencies. Therefore we need to be able to call the model directly
            # during the bandpass evaluation.
            if self.Config['dust'][0]['model'] == 'hensley_draine_2017':
                self.Uses_HD17 = True
                self.HD_17_bpass = initialise_hd_dust_model_bandpass(self.dust, mpi_comm=mpi_comm, **self.Config['dust'][0])
        if 'synchrotron' in self.Components:
            self.synchrotron = component_adder(Synchrotron, self.Config['synchrotron'])
        if 'freefree' in self.Components:
            self.freefree = component_adder(Freefree, self.Config['freefree'])
        if 'ame' in self.Components:
            self.ame = component_adder(AME, self.Config['ame'])
        return

    @property
    def Uses_HD17(self):
        # Flag marking whether the dust component uses the Hensley-Draine 2017
        # model, which needs special handling during bandpass integration.
        return self.__uses_hd17

    @Uses_HD17.setter
    def Uses_HD17(self, value):
        self.__uses_hd17 = value

    @property
    def Config(self):
        # Configuration dict passed at construction time.
        try:
            return self.__config
        except AttributeError:
            print("Sky attribute 'Config' not set.")
            sys.exit(1)

    @property
    def Components(self):
        # List of requested component names (keys of the config dict).
        try:
            return self.__components
        except AttributeError:
            print("Sky attribute 'Components' not set.")
            sys.exit(1)

    def signal(self, **kwargs):
        """Returns the sky as a function of frequency.

        This returns a function which is the sum of all the requested
        sky components at the given frequency: (T, Q, U)(nu)."""
        def signal(nu):
            # Sum every configured component's emission at frequency nu.
            sig = 0.
            for component in self.Components:
                sig += getattr(self, component)(nu, **kwargs)
            return sig
        return signal

    def add_component(self, name, component):
        """Add a already initialized component object to the sky

        Parameters
        ==========
        name : str
            name of the new component, it cannot include spaces or commas
        component : object
            object that provides a signal(nu, **kwargs) function that returns the emission in uK_RJ
        """
        self.__components.append(name)
        setattr(self, name, component.signal)
class Instrument(object):
"""This class contains the attributes and methods required to model
the instrument observing Sky.
Instrument contains methods used to perform bandpass integration over an arbitrary bandpass, smooth with a Gaussian beam, and a white Gaussian noise component.
Instrument is initialised with dictionary, the possible keys are:
- `frequencies` : frequencies at which to evaluate the Sky model -- numpy.ndarray.
- `use_smoothing` : whether or not to use smoothing -- bool.
- `beams` : Gaussian beam FWHMs in arcmin. Only used if use_smoothing is True. Must be the same length as frequencies.
- `add_noise` : whether or not to add noise -- bool
- `sens_I` : sensitivity of intensity in uK_RJamin. Only used if add_noise is True. Must be same length as frequencies -- numpy.ndarray
- `sens_P` : sensitivity of polarisation in uK_RJamin. Only used if add_noise is True. Must be same length as frequencies -- numpy.ndarray
- `nside` : nside at which to evaluate maps -- int.
- `noise_seed` : noise seed -- int.
- `use_bandpass` : whether or not to use bandpass. If this is True `frequencies` is not required -- bool
- `channels` : frequencies and weights of channels to be calculated as a list of tuples [(frequencies_1, weights_1), (frequencies_2, weights_2) ...] -- list of tuples
- `channel_names` : list of names used to label the files to which channel maps are written -- string.
- `output_directory` : directory to which the files will be written -- str.
- `output_prefix` : prefix for all output files -- str.
- `output_units` : output units -- str
The use of Instrument is with the :class:`pysm.pysm.Sky` class. Given an instance of Sky we can use the :meth:`pysm.pysm.Instrument.obseve` to apply instrumental effects:
>>> sky = pysm.Sky(sky_config)
>>> instrument = pysm.Instrument(instrument_config)
>>> instrument.observe(sky)
"""
def __init__(self, config):
"""Specifies the attributes of the Instrument class."""
for k in config.keys():
read_key(self, k, config)
#Get the number of channels of observations.
if self.Use_Bandpass:
N_channels = len(self.Channels)
#Whilst we are here let's normalise the bandpasses.
self.normalise_bandpass()
if not self.Use_Bandpass:
N_channels = len(self.Frequencies)
#If they are not specified, set the sensitivities and beams
#to zero, corresponding to noiseless and perfect-resolution
#observations.
if not self.Use_Smoothing:
self.Beams = np.zeros(N_channels)
if not self.Add_Noise:
self.Sens_I = np.zeros(N_channels)
self.Sens_P = np.zeros(N_channels)
return
@property
def Frequencies(self):
try:
return self.__frequencies
except AttributeError:
print("Instrument attribute 'Frequencies' not set.")
sys.exit(1)
@property
def Channels(self):
try:
return self.__channels
except AttributeError:
print("Instrument attribute 'Channels' not set.")
sys.exit(1)
@Channels.setter
def Channels(self, value):
self.__channels = value
@property
def Beams(self):
try:
return self.__beams
except AttributeError:
print("Instrument attribute 'Beams' not set.")
sys.exit(1)
@Beams.setter
def Beams(self, value):
self.__beams = value
@property
def Sens_I(self):
try:
return self.__sens_I
except AttributeError:
print("Instrument attribute 'Sens_I' not set.")
sys.exit(1)
@Sens_I.setter
def Sens_I(self, value):
self.__sens_I = value
@property
def Sens_P(self):
try:
return self.__sens_P
except AttributeError:
print("Instrument attribute 'Sens_P' not set.")
sys.exit(1)
@Sens_P.setter
def Sens_P(self, value):
self.__sens_P = value
@property
def Nside(self):
try:
return self.__nside
except AttributeError:
print("Instrument attribute 'Nside' not set.")
sys.exit(1)
@property
def Noise_Seed(self):
try:
return self.__noise_seed
except AttributeError:
print("Instrument attribute 'Noise_Seed' not set.")
sys.exit(1)
@property
def Use_Bandpass(self):
try:
return self.__use_bandpass
except AttributeError:
print("Instrument attribute 'Use_Bandpass' not set.")
sys.exit(1)
@property
def Output_Prefix(self):
try:
return self.__output_prefix
except AttributeError:
print("Instrument attribute 'Output_Prefix' not set.")
sys.exit(1)
@property
def Output_Directory(self):
try:
return self.__output_directory
except AttributeError:
print("Instrument attribute 'Output_Directory' not set.")
sys.exit(1)
@property
def Channel_Names(self):
try:
return self.__channel_names
except AttributeError:
print("Instrument attribute 'Channel_Names' not set.")
sys.exit(1)
@property
def Write_Components(self):
try:
return self.__write_components
except AttributeError:
print("Instrument attribute 'Write_Components' not set.")
sys.exit(1)
@property
def Add_Noise(self):
try:
return self.__add_noise
except AttributeError:
print("Instrument attribute 'Add_Noise' not set.")
@property
def Use_Smoothing(self):
try:
return self.__use_smoothing
except AttributeError:
print("Instrument attribute 'Use_Smoothing' not set.")
@property
def Output_Units(self):
try:
return self.__output_units
except AttributeError:
print("Instrument attribute 'Output_Units not set.'")
@property
def pixel_indices(self):
try:
return self.__pixel_indices
except AttributeError:
print("Instrument attribute 'pixel_indices' not set.")
def observe(self, Sky, write_outputs=True):
"""Evaluate and add instrument effects to Sky's signal function.
This method evaluates the Sky class's signal method at the
requested frequencies, or over the requested bandpass. Then
smooths with a Gaussian beam, if requested. Then adds Gaussian
white noise, if requested. Finally writes the maps to file.
:param Sky: instance of the :class:`pysm.pysm.Sky` class.
:type Sky: class
:return: no return, writes to file.
"""
self.print_info()
signal = Sky.signal()
output = self.apply_bandpass(signal, Sky)
output = self.smoother(output)
noise = self.noiser()
output, noise = self.unit_converter(output, noise)
if write_outputs:
self.writer(output, noise)
else:
return output, noise
return
def apply_bandpass(self, signal, Sky):
"""Function to integrate signal over a bandpass. Frequencies must be
evenly spaced, if they are not the function will object. Weights
must be normalisable.
:param signal: signal function to be integrated of bandpass
:type param: function
:return: maps after bandpass integration shape either (N_freqs, 3, Npix) or (N_channels, 3, Npix) -- numpy.ndarray
"""
if not self.Use_Bandpass:
return signal(self.Frequencies)
elif self.Use_Bandpass:
#First need to tell the Sky class that we are using bandpass and if we are using the HD17 model.
bpass_signal = Sky.signal(use_bandpass = Sky.Uses_HD17)
# convert to Jysr in order to integrate over bandpass
signal_Jysr = lambda nu: bpass_signal(nu) * convert_units("uK_RJ", "Jysr", nu)
bpass_integrated = np.array([bandpass(f, w, signal_Jysr) for (f, w) in self.Channels])
# We now add an exception in for the case of the HD_17 model. This requires that the model be initialised
# with the bandpass information in order for the model to be computaitonally efficient. Therefore this is
# evaluated differently from other models. The function HD_17_bandpass() accepts a tuple (freqs, weights)
# and returns the integrated signal in units of Jysr. Note that it was initialised when the Instrument
# class was first instantiated, if use_bandpass = True. Note that the dust signal will still contribute
# to the bpass_integrated sum in the evaluation above, but will be zero.
if Sky.Uses_HD17:
bpass_integrated += np.array(list(map(Sky.HD_17_bpass, self.Channels)))
return bpass_integrated
else:
print("Please set 'Use_Bandpass' for Instrument object.")
sys.exit(1)
def normalise_bandpass(self):
"""Function to normalise input bandpasses such that they integrate to one
over the stated frequency range.
"""
self.Channels = [(freqs, weights / np.trapz(weights, freqs * 1.e9)) for (freqs, weights) in self.Channels]
return
def smoother(self, map_array):
"""Function to smooth an array of N (T, Q, U) maps with N beams in
units of arcmin.
:param map_array:
:type map_array:
"""
if not self.Use_Smoothing:
return map_array
elif self.Use_Smoothing:
if self.pixel_indices is None:
full_map = map_array
else:
full_map = build_full_map(self.pixel_indices, map_array, self.Nside)
smoothed_map_array = np.array([hp.smoothing(m, fwhm = np.pi / 180. * b / 60., verbose = False) for (m, b) in zip(full_map, self.Beams)])
if self.pixel_indices is None:
return smoothed_map_array
else:
assert smoothed_map_array.ndim == 3, \
"Assuming map array is 3 dimensional (n_freqs x n_maps x n_pixels)"
return smoothed_map_array[..., self.pixel_indices]
else:
print("Please set 'Use_Smoothing' in Instrument object.")
sys.exit(1)
def noiser(self):
"""Calculate white noise maps for given sensitivities. Returns signal
+ noise, and noise maps at the given nside in (T, Q, U). Input
sensitivities are expected to be in uK_CMB amin for the rest of
PySM.
:param map_array: array of maps to which we add noise.
:type map_array: numpy.ndarray.
:return: map plus noise, and noise -- numpy.ndarray
"""
try:
npix = len(self.pixel_indices)
except TypeError:
npix = hp.nside2npix(self.Nside)
if not self.Add_Noise:
return np.zeros((len(self.Sens_I), 3, npix))
elif self.Add_Noise:
# solid angle per pixel in amin2
pix_amin2 = 4. * np.pi / float(hp.nside2npix(self.Nside)) * (180. * 60. / np.pi) ** 2
"""sigma_pix_I/P is std of noise per pixel. It is an array of length
equal to the number of input maps."""
sigma_pix_I = np.sqrt(self.Sens_I ** 2 / pix_amin2)
sigma_pix_P = np.sqrt(self.Sens_P ** 2 / pix_amin2)
np.random.seed(seed = self.Noise_Seed)
noise = np.random.randn(len(self.Sens_I), 3, npix)
noise[:, 0, :] *= sigma_pix_I[:, None]
noise[:, 1, :] *= sigma_pix_P[:, None]
noise[:, 2, :] *= sigma_pix_P[:, None]
return noise
else:
print("Please set 'Add_Noise' in Instrument object.")
sys.exit(1)
def unit_converter(self, map_array, noise):
    """Function to handle the conversion of units.

    If using delta bandpasses just evaluate the unit conversion
    factor normally. If using a bandpass we calculate the
    conversion factor following the Planck HFI definitions.

    :param map_array: signal + noise map to convert units of.
    :type map_array: numpy.ndarray
    :param noise: noise map to convert units of.
    :type noise: numpy.ndarray
    :return: signal + noise map converted to output units, noise map converted to output units -- numpy.ndarray
    """
    if not self.Use_Bandpass:
        # If using a delta bandpass just evaluate the standard unit conversion at
        # the frequencies of interest. All the scaling is done in uK_RJ.
        Uc_signal = np.array(convert_units("uK_RJ", self.Output_Units, self.Frequencies))
    elif self.Use_Bandpass:
        # In the case of a given bandpass we calculate the unit conversion as explained in the documentation
        # of bandpass_convert_units.
        Uc_signal = np.array([bandpass_convert_units(self.Output_Units, channel) for channel in self.Channels])
    if self.Add_Noise:
        # If noise requested also multiply the calculated noise.
        # Noise is generated in uK_CMB (not uK_RJ like the signal).
        if not self.Use_Bandpass:
            Uc_noise = np.array(convert_units("uK_CMB", self.Output_Units, self.Frequencies))
        elif self.Use_Bandpass:
            # first convert noise to Jysr then apply the same unit conversion as used for the signal.
            Uc_noise = Uc_signal * np.array([1. / bandpass_convert_units("uK_CMB", channel) for channel in self.Channels])
    elif not self.Add_Noise:
        # No noise requested: zero factors keep the (zero) noise maps zero.
        Uc_noise = np.zeros_like(Uc_signal)
    # Broadcast the per-channel factors over the (3, n_pixels) map axes.
    return Uc_signal[:, None, None] * map_array, Uc_noise[:, None, None] * noise
def file_path(self, channel_name = None, f = None, extra_info = ""):
    """Build the output FITS file path for a PySM map product.

    :param channel_name: channel label, used when ``Use_Bandpass`` is set.
    :param f: frequency in GHz, used for delta-bandpass outputs.
    :param extra_info: tag inserted into the name (e.g. "total", "noise").
    :return: full path under ``Output_Directory`` -- str
    """
    if self.Use_Bandpass:
        fname = '%s_bandpass_%s_%s_nside%04d.fits' % (
            self.Output_Prefix, channel_name, extra_info, self.Nside)
    elif not self.Use_Bandpass:
        # Encode the frequency with a 'p' decimal marker, e.g. 0030p00.
        freq_tag = ("%07.2f" % f).replace(".", "p")
        fname = '%s_nu%sGHz_%s_nside%04d.fits' % (
            self.Output_Prefix, freq_tag, extra_info, self.Nside)
    else:
        print("Bandpass set incorrectly.")
        sys.exit(1)
    return os.path.join(self.Output_Directory, fname)
def writer(self, output, noise):
    """Write the total (signal [+ noise]) and noise maps to FITS files.

    :param output: per-channel signal maps to write.
    :param noise: per-channel noise maps; added to the total and also
        written separately when ``Add_Noise`` is set.
    """
    if not self.Use_Bandpass:
        if self.Add_Noise:
            for f, o, n in zip(self.Frequencies, output, noise):
                # NOTE(review): debug prints of per-map standard deviations;
                # presumably left over from testing -- consider removing.
                print(np.std(n, axis = 1))# * np.sqrt(4. * np.pi / float(hp.nside2npix(128)) * (180. * 60. / np.pi) ** 2)
                print(np.std(o, axis = 1))
                write_map(self.file_path(f = f, extra_info = "noise"), n, nside=self.Nside, pixel_indices=self.pixel_indices)
                write_map(self.file_path(f = f, extra_info = "total"), o + n, nside=self.Nside, pixel_indices=self.pixel_indices)
        elif not self.Add_Noise:
            for f, o in zip(self.Frequencies, output):
                write_map(self.file_path(f = f, extra_info = "total"), o, nside=self.Nside, pixel_indices=self.pixel_indices)
    elif self.Use_Bandpass:
        if self.Add_Noise:
            for c, o, n in zip(self.Channel_Names, output, noise):
                write_map(self.file_path(channel_name = c, extra_info = "total"), o + n, nside=self.Nside, pixel_indices=self.pixel_indices)
                write_map(self.file_path(channel_name = c, extra_info = "noise"), n, nside=self.Nside, pixel_indices=self.pixel_indices)
        elif not self.Add_Noise:
            for c, o in zip(self.Channel_Names, output):
                write_map(self.file_path(channel_name = c, extra_info = "total"), o, nside=self.Nside, pixel_indices=self.pixel_indices)
    return
def print_info(self):
    """Print a table of the current Instrument specifications
    (frequency or channel name, sensitivities, beam FWHM) to screen.
    """
    if not self.Use_Bandpass:
        # Sanity check: one beam and one pair of sensitivities per frequency.
        if not check_lengths(self.Frequencies, self.Beams, self.Sens_I, self.Sens_P):
            print("Check lengths of frequencies, beams, and sensitivities are equal.")
            sys.exit(1)
        print("nu (GHz) | sigma_I (uK_CMB amin) | sigma_P (uK_CMB amin) | FWHM (arcmin) \n")
        for f, s_I, s_P, b in zip(self.Frequencies, self.Sens_I, self.Sens_P, self.Beams):
            print("%07.2f | %05.2f | %05.2f | %05.2f "%(f, s_I, s_P, b))
    elif self.Use_Bandpass:
        print("Channel name | sigma_I (uK_CMB amin) | sigma_P (uK_CMB amin) | FWHM (arcmin) |")
        for cn, s_I, s_P, b in zip(self.Channel_Names, self.Sens_I, self.Sens_P, self.Beams):
            print("%s | %05.2f | %05.2f | %05.2f "%(cn, s_I, s_P, b))
    return
def bandpass(frequencies, weights, signal):
    """Integrate ``signal`` over a bandpass.

    Frequencies must be evenly spaced; if they are not the function
    will object (and exit).  Weights must be able to be normalised to
    integrate to 1 over the band.

    :param frequencies: band frequencies in GHz, evenly spaced.
    :param weights: bandpass transmission weights (any normalisation).
    :param signal: callable evaluating the sky signal at a frequency in GHz.
    :return: bandpass-integrated signal.
    """
    # check that the frequencies are evenly spaced.
    check_bpass_frequencies(frequencies)
    # Frequency step in Hz (inputs are GHz).
    frequency_separation = (frequencies[1] - frequencies[0]) * 1.e9
    # Normalise the weights so they integrate to 1 over the band.
    # Out-of-place division so the caller's weights array is NOT mutated
    # (the original in-place `/=` silently modified the input argument).
    weights = weights / np.sum(weights * frequency_separation)
    check_bpass_weights_normalisation(weights, frequency_separation)
    # define the integration: integrand = signal(nu) * w(nu) * d(nu)
    # signal is already in MJysr.
    return sum(signal(nu) * w * frequency_separation for (nu, w) in zip(frequencies, weights))
def check_bpass_weights_normalisation(weights, spacing):
    """Check that bandpass weights were normalised properly.

    The weights, integrated with the given frequency ``spacing``, must
    sum to 1 (to 3 decimal places); otherwise print an error and exit.
    """
    band_integral = np.sum(weights * spacing)
    try:
        np.testing.assert_almost_equal(band_integral, 1, decimal = 3)
    except AssertionError:
        print("Bandpass weights can not be normalised.")
        sys.exit(1)
    return
def check_bpass_frequencies(frequencies):
    """Check that the bandpass frequencies are evenly spaced.

    Prints an error and exits if the spacing deviates from uniform
    (tested to 3 decimals, relative to the total band width).
    """
    step = frequencies[1] - frequencies[0]
    nfreqs = frequencies.size
    span = frequencies[-1] - frequencies[0]
    # Global check: first step times (n-1) must reproduce the band width.
    try:
        np.testing.assert_almost_equal(step * (nfreqs - 1) / span, 1., decimal = 3)
    except AssertionError:
        print("Bandpass frequencies not evenly spaced.")
        sys.exit(1)
    # Pairwise check: every consecutive gap must match the first step.
    for lo, hi in zip(frequencies[:-1], frequencies[1:]):
        try:
            np.testing.assert_almost_equal((hi - lo) / span, step / span, decimal = 3)
        except AssertionError:
            print("Bandpass frequencies not evenly spaced.")
            sys.exit(1)
    return
def component_adder(component_class, dictionary_list, **kwargs):
    """Sum the emission of several populations of one sky component.

    Each dictionary in ``dictionary_list`` configures one population of
    ``component_class``; the returned function evaluates the summed
    emission of all populations at a frequency ``nu``.  This lets a Sky
    attribute (e.g. Sky.Dust) represent multiple populations at once.
    """
    # Instantiate each population up front so its setup cost is paid
    # once, not every time the total signal is evaluated.
    population_signals = [component_class(config).signal(**kwargs) for config in dictionary_list]

    def total_signal(nu, **kwargs):
        # Sum the contribution of every population at frequency nu
        # (sum() starts from 0, matching an explicit accumulator loop).
        return sum(emit(nu, **kwargs) for emit in population_signals)

    # The total contribution from all populations as a function of nu.
    return total_signal
def initialise_hd_dust_model_bandpass(hd_unint_signal, mpi_comm, **kwargs):
    """Function to initialise the bandpass-integrated
    version of the Hensley-Draine 2017 model.

    The keyword arguments are expected to be the initialisation
    dictionary for the HD dust component.

    :param hd_unint_signal: signal of the un-integrated HD17 model.
    :type hd_unint_signal: function
    :return: function mapping a (frequencies, weights) channel to the
        bandpass-integrated (I, Q, U) dust scaling maps.
    """
    #Draw map of uval using Commander dust data.
    uval = Dust.draw_uval(kwargs['draw_uval_seed'], kwargs['nside'], mpi_comm)
    if "pixel_indices" in kwargs and kwargs["pixel_indices"] is not None:
        # Partial sky: keep only the observed pixels.
        uval = uval[kwargs["pixel_indices"]]
    #Read in the precomputed dust emission spectra as a function of lambda and U.
    data_sil, data_silfe, data_car, wav, uvec = Dust.read_hd_data()
    # Speed of light in CGS (cm/s), used for the wavelength<->frequency factors.
    c = 2.99792458e10
    fcar = kwargs['fcar']
    f_fe = kwargs['f_fe']
    #Interpolate the dust emission properties in uval and frequency, this is necessary to compute the factor to
    #rescale the dust emission templates to the new model.
    sil_i = interpolate.RectBivariateSpline(uvec,wav,(data_sil[:,3:84]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    car_i = interpolate.RectBivariateSpline(uvec,wav,(data_car[:,3:84]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    silfe_i = interpolate.RectBivariateSpline(uvec,wav,(data_silfe[:,3:84]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    sil_p = interpolate.RectBivariateSpline(uvec,wav,(data_sil[:,84:165]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    car_p = interpolate.RectBivariateSpline(uvec,wav,(data_car[:,84:165]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    silfe_p = interpolate.RectBivariateSpline(uvec,wav,(data_silfe[:,84:165]*(wav[:,np.newaxis]*1.e-4/c)*1.e23).T) # to Jy/sr/H
    nu_to_lambda = lambda x: 1.e-3 * constants.c / x #Note this is in SI units.
    # Un-integrated model evaluated at a single frequency: weighted sum of the
    # silicate, carbonaceous, and Fe-bearing silicate contributions.
    non_int_model_i = lambda nu: (1. - f_fe) * sil_i.ev(uval, nu_to_lambda(nu)) + fcar * car_i.ev(uval, nu_to_lambda(nu)) + f_fe * silfe_i.ev(uval, nu_to_lambda(nu))
    non_int_model_p = lambda nu: (1. - f_fe) * sil_p.ev(uval, nu_to_lambda(nu)) + fcar * car_p.ev(uval, nu_to_lambda(nu)) + f_fe * silfe_p.ev(uval, nu_to_lambda(nu))
    # Template amplitudes rescaled so the model matches the input templates at
    # the reference frequencies nu_0_I / nu_0_P.
    A_I = kwargs['A_I'] * convert_units("uK_RJ", "Jysr", kwargs['nu_0_I']) / non_int_model_i(kwargs['nu_0_I'])
    A_Q = kwargs['A_Q'] * convert_units("uK_RJ", "Jysr", kwargs['nu_0_P']) / non_int_model_p(kwargs['nu_0_P'])
    A_U = kwargs['A_U'] * convert_units("uK_RJ", "Jysr", kwargs['nu_0_P']) / non_int_model_p(kwargs['nu_0_P'])
    def bpass_model(channel):
        """Note that nu is in GHz, and so we have to multiply by 1.e9 in the following functions.
        """
        (nu, t_nu) = channel
        # Integrate table over bandpass.
        sil_i_vec = np.zeros(len(uvec))
        car_i_vec = np.zeros(len(uvec))
        silfe_i_vec = np.zeros(len(uvec))
        sil_p_vec = np.zeros(len(uvec))
        car_p_vec = np.zeros(len(uvec))
        silfe_p_vec = np.zeros(len(uvec))
        for i in range(len(uvec)):
            # Note: Table in terms of wavelength in um, increasing
            # and lambda*I_lambda. Thus we reverse the order
            # to nu increasing before interpolating to the
            # bandpass frequencies, then divide by nu to get
            # I_nu.
            sil_i_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_sil[::-1,3+i]*1.e23)/nu*1.e-9, nu*1.e9)
            car_i_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_car[::-1,3+i]*1.e23)/nu*1.e-9, nu*1.e9)
            silfe_i_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_silfe[::-1,3+i]*1.e23)/nu*1.e-9, nu*1.e9)
            sil_p_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_sil[::-1,84+i]*1.e23)/nu*1.e-9, nu*1.e9)
            car_p_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_car[::-1,84+i]*1.e23)/nu*1.e-9, nu*1.e9)
            silfe_p_vec[i] = np.trapz(t_nu*np.interp(nu*1.e9,c/(wav[::-1]*1.e-4),data_silfe[::-1,84+i]*1.e23)/nu*1.e-9, nu*1.e9)
        # Step 2: Interpolate over U values
        sil_i = interpolate.interp1d(uvec, sil_i_vec)
        car_i = interpolate.interp1d(uvec, car_i_vec)
        silfe_i = interpolate.interp1d(uvec, silfe_i_vec)
        sil_p = interpolate.interp1d(uvec, sil_p_vec)
        car_p = interpolate.interp1d(uvec, car_p_vec)
        silfe_p = interpolate.interp1d(uvec, silfe_p_vec)
        #We now compute the final scaling. The integrated quantities sil_i,
        #car_i silfe_i etc.. are in Jy/sr. Therefore we want to convert the
        #templates from uK_RJ to Jy/sr.
        scaling_I = ((1. - f_fe) * sil_i(uval) + fcar * car_i(uval) + f_fe * silfe_i(uval))
        scaling_P = ((1. - f_fe) * sil_p(uval) + fcar * car_p(uval) + f_fe * silfe_p(uval))
        return np.array([scaling_I * A_I, scaling_P * A_Q, scaling_P * A_U])
    return bpass_model
| bthorne93/PySM_public | pysm/pysm.py | Python | mit | 30,303 | [
"Gaussian"
] | a5f505ec8fc5bcec5a91ce04a7982abd8b166f4c0cedf97ac0ed32ad50024ea1 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import sys
from .exceptions import *
from .libmintsgshell import *
class Gaussian94BasisSetParser(object):
    """Parser for basis sets stored as text in Gaussian 94 format.

    Translated directly from the Psi4 libmints class written by
    Justin M. Turney and Andrew C. Simmonett.
    """

    def __init__(self, forced_puream=None):
        """Initialise the parser.

        :param forced_puream: when not None, force parsed shells to be
            pure (truthy) or Cartesian (falsy) instead of honouring the
            Cartesian/Spherical directives found in the file.
        """
        # Whether the caller overrides the spherical/Cartesian choice
        # (e.g., when loading an old guess).
        self.force_puream_or_cartesian = forced_puream is not None
        # The forced choice itself: puream if truthy, Cartesian otherwise.
        self.forced_is_puream = forced_puream if forced_puream is not None else False
        # Name of the file most recently handled by load_file().
        self.filename = None
def load_file(self, filename, basisname=None):
    """Load a basis-set file and return its contents as a list of
    stripped lines.

    When *basisname* is given (multi-basis-set files), only the lines
    between the ``[basisname]`` header and the next ``[...]`` header are
    returned (the headers themselves are excluded); otherwise the whole
    file is returned.
    """
    # Remember which file we are working on.
    self.filename = filename
    section_header = re.compile(r'^\s*\[\s*(.*?)\s*\]\s*$')
    want_section = basisname is not None
    in_section = False
    try:
        infile = open(filename, 'r')
    except IOError:
        raise BasisSetFileNotFound("""BasisSetParser::parse: Unable to open basis set file: %s""" % (filename))
    if os.stat(filename).st_size == 0:
        raise ValidationError("""BasisSetParser::parse: given filename '%s' is blank.""" % (filename))
    contents = infile.readlines()
    infile.close()
    lines = []
    for raw in contents:
        text = raw.strip()
        if not want_section:
            # No basisname requested: keep every line.
            lines.append(text)
            continue
        header = section_header.match(text)
        if in_section:
            if header:
                # Reached the next [...] header: requested section is done.
                break
            lines.append(text)
        elif header and basisname == header.group(1):
            # Found the requested section; collect from the next line on
            # (the header itself is not kept).
            in_section = True
    return lines
def parse(self, symbol, dataset):
    """Given a string, parse for the basis set needed for atom.

    * @param symbol atom symbol to look for in dataset
    * @param dataset data set to look through

    dataset can be list of lines or a single string which will be converted
    to list of lines.

    Returns the tuple (shell_list, msg, ecp_shell_list, ecp_msg, ncore);
    returns five Nones if *symbol* is not found in *dataset*.
    """
    if isinstance(dataset, str):
        lines = dataset.split('\n')
    else:
        lines = dataset

    # Regular expressions that we'll be checking for.
    cartesian = re.compile(r'^\s*cartesian\s*', re.IGNORECASE)
    spherical = re.compile(r'^\s*spherical\s*', re.IGNORECASE)
    comment = re.compile(r'^\s*\!.*')  # line starts with !
    separator = re.compile(r'^\s*\*\*\*\*')  # line starts with ****
    ATOM = r'(([A-Z]{1,3}\d*)|([A-Z]{1,3}_\w+))'  # match 'C 0', 'Al c 0', 'P p88 p_pass 0' not 'Ofail 0', 'h99_text 0'
    atom_array = re.compile(r'^\s*((' + ATOM + r'\s+)+)0\s*$', re.IGNORECASE)  # array of atomic symbols terminated by 0
    atom_ecp = re.compile(r'^\s*((' + ATOM + r'-ECP\s+)+)(\d+)\s+(\d+)\s*$', re.IGNORECASE)  # atom_ECP number number
    shell = re.compile(r'^\s*(\w+|L=\d+)\s*(\d+)\s*(-?\d+\.\d+)')  # Match beginning of contraction
    blank = re.compile(r'^\s*$')
    NUMBER = r'((?:[-+]?\d*\.\d+(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+\.\d*(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+))'
    primitives1 = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + '.*')  # Match s, p, d, f, g, ... functions
    primitives2 = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + '.*')  # match sp functions
    ecpinfo = re.compile(r'^\s*(\d)\s+' + NUMBER + r'\s+' + NUMBER + '.*')  # Match rpower, exponent, coefficient

    # s, p and s, p, d can be grouped together in Pople-style basis sets
    sp = 'SP'
    spd = 'SPD'

    #               a  b  c  d  e  f  g  h  i  j  k  l  m  n  o  p  q  r  s  t  u  v  w  x  y  z
    #shell_to_am = [-1,-1,-1, 2,-1, 3, 4, 5, 6,-1, 7, 8, 9,10,11, 1,12,13, 0,14,15,16,17,18,19,20]
    # Three letter->angular-momentum tables: the base table covers the
    # unambiguous letters only; HIK/HIJ resolve the two conventions for
    # angular momentum beyond I (see am_convention below).
    alpha = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
             'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    angmo = [-1, -1, -1, 2, -1, 3, 4, 5, 6, -1,-1,-1,
             -1, -1, -1, 1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1]
    angmo_HIK = [-1, -1, -1, 2, -1, 3, 4, 5, 6, -1, 7, 8,
                 9, 10, 11, 1, 12, 13, 0, 14, 15, 16, 17, 18, 19, 20]
    angmo_HIJ = [-1, -1, -1, 2, -1, 3, 4, 5, 6, 7, 8, 9,
                 10,11,12, 1,13,14, 0,15,16,17,18,19,20,21]
    shell_to_am = dict(zip(alpha, angmo))
    shell_to_am_HIK = dict(zip(alpha, angmo_HIK))
    shell_to_am_HIJ = dict(zip(alpha, angmo_HIJ))
    am_convention = None  # None is undetermined; "HIK" is the natural Psi4 HIK convention; "HIJ" is the GBS HIJ convention
    max_am_to_date = 0

    # Basis type.
    gaussian_type = 'Pure'
    if self.force_puream_or_cartesian:
        if self.forced_is_puream == False:
            gaussian_type = 'Cartesian'

    # Need a dummy center for the shell.
    center = [0.0, 0.0, 0.0]
    shell_list = []
    ecp_shell_list = []
    lineno = 0
    ncore = 0
    ecp_msg = None
    basis_found = False
    while lineno < len(lines):
        line = lines[lineno]
        lineno += 1
        # Ignore blank lines
        if blank.match(line):
            continue
        # Look for Cartesian or Spherical
        if not self.force_puream_or_cartesian:
            if cartesian.match(line):
                gaussian_type = 'Cartesian'
                #TODO if psi4.get_global_option('PUREAM').has_changed():
                #TODO     gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
                continue
            elif spherical.match(line):
                gaussian_type = 'Pure'
                #TODO if psi4.get_global_option('PUREAM').has_changed():
                #TODO     gaussian_type = 'Pure' if int(psi4.get_global('PUREAM')) else 'Cartesian'
                continue
        #end case where puream setting wasn't forced by caller
        # Do some matches
        if comment.match(line):
            continue
        if separator.match(line):
            continue
        # Match: H    0
        # or:   H    O...     0
        if atom_array.match(line):
            what = atom_array.match(line).group(1).split()
            if symbol in [x.upper() for x in what]:
                # Read in the next line
                line = lines[lineno]
                lineno += 1
                # Match: H_ECP    0
                # or:   H_ECP    O_ECP ...     0
                if atom_ecp.match(line):
                    ecp_msg = """line %5d""" % (lineno)
                    symbol_to_am = {0: 0, 's': 0, 'S': 0,
                                    1: 1, 'p': 1, 'P': 1,
                                    2: 2, 'd': 2, 'D': 2,
                                    3: 3, 'f': 3, 'F': 3,
                                    4: 4, 'g': 4, 'G': 4,
                                    5: 5, 'h': 5, 'H': 5,
                                    6: 6, 'i': 6, 'I': 6}
                    # This is an ECP spec like "KR-ECP 3 28"
                    matchobj = atom_ecp.match(line)
                    sl = line.split()
                    maxam = int(sl[-2])
                    ncore = int(sl[-1])
                    # This parser is not tolerant of comments or blank lines. Perhaps the best strategy is to
                    # remove all comments/blank lines first before getting in here. This'll do for now.
                    for am in range(maxam+1):
                        # First line of each block names the potential, e.g. "f-ul potential"
                        line = lines[lineno]
                        lineno += 1
                        angmom = symbol_to_am[line.lstrip()[0]]
                        if am == 0: angmom = -angmom # Flag this as a Type1 shell, by setting negative AM. This'll be handled in the BasisSet builder.
                        line = lines[lineno]
                        lineno += 1
                        nprimitives = int(line)
                        rpowers = [0 for i in range(nprimitives)]
                        exponents = [0.0 for i in range(nprimitives)]
                        contractions = [0.0 for i in range(nprimitives)]
                        for term in range(nprimitives):
                            line = lines[lineno]
                            lineno += 1
                            # Fortran-style exponent markers -> Python.
                            line = line.replace('D', 'e', 2)
                            line = line.replace('d', 'e', 2)
                            what = ecpinfo.match(line)
                            if not what:
                                raise ValidationError("""Gaussian94BasisSetParser::parse: Bad ECP specification : line %d: %s""" % (lineno, line))
                            rpowers[term] = int(what.group(1))
                            exponents[term] = float(what.group(2))
                            contractions[term] = float(what.group(3))
                        # We have a full shell, push it to the basis set
                        ecp_shell_list.append(ShellInfo(angmom, contractions, exponents,
                                                        gaussian_type, 0, center, 0, 'Normalized', rpowers))
                else:
                    # This is a basis set spec
                    basis_found = True
                    msg = """line %5d""" % (lineno)
                    # Need to do the following until we match a "****" which is the end of the basis set
                    while not separator.match(line):
                        # Match shell information
                        if shell.match(line):
                            what = shell.match(line)
                            shell_type = str(what.group(1)).upper()
                            nprimitive = int(what.group(2))
                            scale = float(what.group(3))
                            if len(shell_type) == 1 or (len(shell_type) >= 3 and shell_type[:2] == 'L='):
                                if len(shell_type) == 1:
                                    am = shell_to_am[shell_type[0]]
                                    # Disambiguate high angular momentum letters: the first K (HIK)
                                    # or J (HIJ) seen after am 6 locks in the convention.
                                    if am_convention == "HIK" or (am == -1 and shell_type[0].upper() == "K" and max_am_to_date == 6):
                                        am_convention = "HIK"
                                        am = shell_to_am_HIK[shell_type[0]]
                                    if am_convention == "HIJ" or (am == -1 and shell_type[0].upper() == "J" and max_am_to_date == 6):
                                        am_convention = "HIJ"
                                        am = shell_to_am_HIJ[shell_type[0]]
                                    if am_convention is None and am == -1:
                                        raise ValidationError(f"""Gaussian94BasisSetParser::parse: angular momentum type {shell_type[0]} too high and HIJ/HIK convention uncertain. Use, for example, `L=7` notation. line {lineno}: {line}""")
                                else:
                                    # Explicit "L=<am>" notation.
                                    am = int(shell_type[2:])
                                max_am_to_date = max(max_am_to_date, am)
                                if am < 0:
                                    raise ValidationError("""Gaussian94BasisSetParser::parse: angular momentum type %s not recognized: line %d: %s""" % (shell_type[0], lineno, line))
                                exponents = [0.0] * nprimitive
                                contractions = [0.0] * nprimitive
                                for p in range(nprimitive):
                                    line = lines[lineno]
                                    lineno += 1
                                    # Fortran-style exponent markers -> Python.
                                    line = line.replace('D', 'e', 2)
                                    line = line.replace('d', 'e', 2)
                                    what = primitives1.match(line)
                                    # Must match primitives1; will work on the others later
                                    if not what:
                                        raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to match an exponent with one contraction: line %d: %s""" % (lineno, line))
                                    exponent = float(what.group(1))
                                    contraction = float(what.group(2))
                                    # Scale the contraction and save the information
                                    contraction *= scale
                                    exponents[p] = exponent
                                    contractions[p] = contraction
                                # We have a full shell, push it to the basis set
                                shell_list.append(ShellInfo(am, contractions, exponents,
                                                            gaussian_type, 0, center, 0, 'Unnormalized'))
                            elif len(shell_type) == 2:
                                # This is to handle instances of SP, PD, DF, FG, ...
                                am1 = shell_to_am[shell_type[0]]
                                am2 = shell_to_am[shell_type[1]]
                                if am1 < 0 or am2 < 0:
                                    raise ValidationError("""Gaussian94BasisSetParser::parse: angular momentum type %s not recognized: line %d: %s""" % (shell_type[0], lineno, line))
                                exponents = [0.0] * nprimitive
                                contractions1 = [0.0] * nprimitive
                                contractions2 = [0.0] * nprimitive
                                for p in range(nprimitive):
                                    line = lines[lineno]
                                    lineno += 1
                                    # Fortran-style exponent markers -> Python.
                                    line = line.replace('D', 'e', 3)
                                    line = line.replace('d', 'e', 3)
                                    what = primitives2.match(line)
                                    # Must match primitives2
                                    if not what:
                                        raise ValidationError("Gaussian94BasisSetParser::parse: Unable to match an exponent with two contractions: line %d: %s" % (lineno, line))
                                    exponent = float(what.group(1))
                                    contraction = float(what.group(2))
                                    # Scale the contraction and save the information
                                    contraction *= scale
                                    exponents[p] = exponent
                                    contractions1[p] = contraction
                                    # Do the other contraction
                                    contraction = float(what.group(3))
                                    # Scale the contraction and save the information
                                    contraction *= scale
                                    contractions2[p] = contraction
                                # One grouped line yields two shells (e.g. S and P).
                                shell_list.append(ShellInfo(am1, contractions1, exponents,
                                                            gaussian_type, 0, center, 0, 'Unnormalized'))
                                shell_list.append(ShellInfo(am2, contractions2, exponents,
                                                            gaussian_type, 0, center, 0, 'Unnormalized'))
                            else:
                                raise ValidationError("""Gaussian94BasisSetParser::parse: Unable to parse basis sets with spd, or higher grouping""")
                        else:
                            raise ValidationError("""Gaussian94BasisSetParser::parse: Expected shell information, but got: line %d: %s""" % (lineno, line))
                        line = lines[lineno]
                        lineno += 1
    if not basis_found:
        #raise BasisSetNotFound("Gaussian94BasisSetParser::parser: Unable to find the basis set for %s in %s" % \
        #    (symbol, self.filename), silent=True)
        return None, None, None, None, None
    return shell_list, msg, ecp_shell_list, ecp_msg, ncore
| lothian/psi4 | psi4/driver/qcdb/libmintsbasissetparser.py | Python | lgpl-3.0 | 18,326 | [
"Gaussian",
"Psi4"
] | 0c083e5f329b6dbe83521e5d324021cb9cebe316b79ed0d363995538a2f13f99 |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import tempfile
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
    """Action plugin for the ``assemble`` module: concatenates an ordered
    directory of fragment files into one file and copies it to the target.
    """

    TRANSFERS_FILES = True

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
        ''' assemble a file from a directory of fragments '''
        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd,'w')
        delimit_me = False
        add_newline = False

        # Fragments are concatenated in sorted (lexicographic) order.
        for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = u"%s/%s" % (src_path, f)
            if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
                continue
            fragment_content = open(self._loader.get_real_file(fragment)).read()

            # always put a newline between fragments if the previous fragment didn't end with a newline.
            if add_newline:
                tmp.write('\n')

            # delimiters should only appear between fragments
            if delimit_me:
                if delimiter:
                    # un-escape anything like newlines
                    # NOTE(review): str.decode does not exist on Python 3 text
                    # strings -- presumably this path runs under Python 2 only;
                    # verify before porting.
                    delimiter = delimiter.decode('unicode-escape')
                    tmp.write(delimiter)
                    # always make sure there's a newline after the
                    # delimiter, so lines don't run together
                    if delimiter[-1] != '\n':
                        tmp.write('\n')

            tmp.write(fragment_content)
            delimit_me = True
            if fragment_content.endswith('\n'):
                add_newline = False
            else:
                add_newline = True

        tmp.close()
        return temp_path

    def run(self, tmp=None, task_vars=None):
        """Assemble fragments on the controller (unless remote_src) and copy
        the result to the target host, or dispatch to the remote module.
        """
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = "skipped, this module does not support check_mode."
            return result

        # Task options.
        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        delimiter = self._task.args.get('delimiter', None)
        remote_src = self._task.args.get('remote_src', 'yes')
        regexp = self._task.args.get('regexp', None)
        follow = self._task.args.get('follow', False)
        ignore_hidden = self._task.args.get('ignore_hidden', False)

        if src is None or dest is None:
            result['failed'] = True
            result['msg'] = "src and dest are required"
            return result

        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path(remote_user)
            self._cleanup_remote_tmp = True

        if boolean(remote_src):
            # Fragments already live on the target: run the assemble module there.
            result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False))
            self._remove_tmp_path(tmp)
            return result
        else:
            # Resolve the fragment directory on the controller.
            try:
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_native(e)
                return result

        if not os.path.isdir(src):
            result['failed'] = True
            result['msg'] = u"Source (%s) is not a directory" % src
            return result

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)

        path_checksum = checksum_s(path)
        dest = self._remote_expand_user(dest)
        dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)

        diff = {}

        # setup args for running modules
        new_module_args = self._task.args.copy()

        # clean assemble specific options
        for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']:
            if opt in new_module_args:
                del new_module_args[opt]

        new_module_args.update(
            dict(
                dest=dest,
                original_basename=os.path.basename(src),
            )
        )

        if path_checksum != dest_stat['checksum']:
            # Content changed: transfer the assembled file and run 'copy'.
            if self._play_context.diff:
                diff = self._get_diff_data(dest, path, task_vars)

            remote_path = self._connection._shell.join_path(tmp, 'src')
            xfered = self._transfer_file(path, remote_path)

            # fix file permissions when the copy is done as a different user
            self._fixup_perms2((tmp, remote_path), remote_user)

            new_module_args.update( dict( src=xfered,))

            res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
            if diff:
                res['diff'] = diff
            result.update(res)
        else:
            # Content unchanged: run 'file' to enforce attributes only.
            result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))

        self._remove_tmp_path(tmp)
        return result
| wkeeling/ansible | lib/ansible/plugins/action/assemble.py | Python | gpl-3.0 | 6,412 | [
"Brian"
] | 42b69654c31e5fa513b6e379cbb6f8a28db6f90cda937a635cdc3ec4b7e50353 |
import unittest
import os
from netCDF4 import Dataset
from cis.data_io.gridded_data import make_from_cube, GriddedDataList
from cis.test.util.mock import make_dummy_2d_ungridded_data, make_mock_cube
from cis.test.integration_test_data import valid_cis_col_file, valid_cis_col_variable
from cis.data_io.write_netcdf import write
# Scratch NetCDF output file shared by every test; removed in tearDown().
tmp_file = "tmp_file.nc"
class TestWriteNetcdf(unittest.TestCase):
def tearDown(self):
if hasattr(self, 'd') and self.d.isopen():
self.d.close()
os.remove(tmp_file)
def test_write_netcdf(self):
data_object = make_dummy_2d_ungridded_data()
write(data_object, tmp_file)
def test_write_col_and_reload_1(self):
# Copy a collocated file and try to reload it. This exposes a bug where
# var.shape is set in the NetCDF metadata
from cis.data_io.products import Aerosol_CCI_L2
prod = Aerosol_CCI_L2()
data_object = prod.create_data_object([valid_cis_col_file], valid_cis_col_variable)
write(data_object, tmp_file)
self.d = Dataset(tmp_file)
v = self.d.variables['AOT_440']
# This will fail because var.shape is in the file
print(v[:2])
def test_write_col_and_reload_2(self):
# Copy a collocated file and try to reload it. This exposes a bug where
# latitude and longitude aren't recognised on reload
from cis.data_io.products import cis
prod = cis()
data_object = prod.create_data_object([valid_cis_col_file], valid_cis_col_variable)
write(data_object, tmp_file)
data_object2 = prod.create_data_object([tmp_file], valid_cis_col_variable)
def test_ungridded_write_attributes(self):
data = make_dummy_2d_ungridded_data()
attrs = {'attr_name': 'attr_val',
'standard_name': 'std_val',
'long_name': 'lg_val',
'units': 'units'}
data.add_attributes(attrs)
write(data, tmp_file)
self.d = Dataset(tmp_file)
for key, val in attrs.items():
assert getattr(self.d.variables['rainfall_flux'], key) == val
def test_gridded_write_attributes(self):
data = make_from_cube(make_mock_cube())
data.var_name = 'rain'
attrs = {'attr_name': 'attr_val',
'standard_name': 'convective_rainfall_amount',
'long_name': 'lg_val',
'units': 'units'}
data.add_attributes(attrs)
data.save_data(tmp_file)
self.d = Dataset(tmp_file)
for key, val in attrs.items():
assert getattr(self.d.variables['rain'], key) == val
def test_ungridded_write_units(self):
data = make_dummy_2d_ungridded_data()
data.units = 'kg'
write(data, tmp_file)
self.d = Dataset(tmp_file)
assert self.d.variables['rainfall_flux'].units == 'kg'
def test_gridded_write_units(self):
data = make_from_cube(make_mock_cube())
data.var_name = 'rain'
data.units = 'ppm'
data.save_data(tmp_file)
self.d = Dataset(tmp_file)
assert self.d.variables['rain'].units == 'ppm'
def test_gridded_write_time_as_unlimited_dimension(self):
data = make_from_cube(make_mock_cube(time_dim_length=7))
data.var_name = 'rain'
data.save_data(tmp_file)
self.d = Dataset(tmp_file)
assert self.d.dimensions['time'].isunlimited()
def test_gridded_write_no_time_has_no_unlimited_dimension(self):
    """Without a time dimension, no dimension should be written as unlimited."""
    data = make_from_cube(make_mock_cube())
    data.var_name = 'rain'
    data.save_data(tmp_file)
    self.d = Dataset(tmp_file)
    for d in self.d.dimensions.values():
        assert not d.isunlimited()
def test_gridded_list_write_time_as_unlimited_dimension(self):
    """A GriddedDataList with a time dimension also writes time as unlimited."""
    data = GriddedDataList([make_from_cube(make_mock_cube(time_dim_length=7))])
    data[0].var_name = 'rain'
    data.save_data(tmp_file)
    self.d = Dataset(tmp_file)
    assert self.d.dimensions['time'].isunlimited()
def test_gridded_list_write_no_time_has_no_unlimited_dimension(self):
    """A GriddedDataList without time writes no unlimited dimensions at all."""
    data = GriddedDataList([make_from_cube(make_mock_cube())])
    data[0].var_name = 'rain'
    data.save_data(tmp_file)
    self.d = Dataset(tmp_file)
    for d in self.d.dimensions.values():
        assert not d.isunlimited()
# def test_can_write_hierarchical_group_variables(self):
# from cis.test.integration_test_data import valid_nested_groups_file
# from cis import read_data
# from hamcrest import assert_that, is_
# var_name = 'group1/group2/var4'
# data = read_data(valid_nested_groups_file, var_name, product='cis')
# assert_that(data.data, is_([12321]))
# data.save_data(tmp_file)
# self.d = Dataset(tmp_file)
# assert_that(self.d.variables[var_name][:], is_([12321]))
| cedadev/jasmin_cis | cis/test/integration/test_io/test_write_netcdf.py | Python | gpl-3.0 | 4,915 | [
"NetCDF"
] | 6935fefc38f03c2d98a75a28997b444c5c148e18ba43ed0f6f17dca2cdcc432f |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.geometry import compute_distances, compute_angles
from mdtraj.geometry import _geometry
__all__ = ['wernet_nilsson', 'baker_hubbard', 'kabsch_sander']
##############################################################################
# Functions
##############################################################################
def wernet_nilsson(traj, exclude_water=True, periodic=True, sidechain_only=False):
    """Identify hydrogen bonds based on cutoffs for the Donor-H...Acceptor
    distance and angle according to the criterion outlined in [1].
    As opposed to Baker-Hubbard, this is a "cone" criterion where the
    distance cutoff depends on the angle.

    The criterion employed is :math:`r_\\text{DA} < 3.3 A - 0.00044*\\delta_{HDA}*\\delta_{HDA}`,
    where :math:`r_\\text{DA}` is the distance between donor and acceptor heavy atoms,
    and :math:`\\delta_{HDA}` is the angle made by the hydrogen atom, donor, and acceptor atoms,
    measured in degrees (zero in the case of a perfectly straight bond: D-H ... A).

    When the donor is 'O' and the acceptor is 'O', this corresponds to
    the definition established in [1]_. The donors considered by this method
    are NH and OH, and the acceptors considered are O and N. In the paper the only
    donor considered is OH.

    Parameters
    ----------
    traj : md.Trajectory
        An mdtraj trajectory. It must contain topology information.
    exclude_water : bool, default=True
        Exclude solvent molecules from consideration.
    periodic : bool, default=True
        Set to True to calculate displacements and angles across periodic box boundaries.
    sidechain_only : bool, default=False
        Set to True to only consider sidechain-sidechain interactions.

    Returns
    -------
    hbonds : list, len=n_frames
        A list containing the atom indices involved in each of the identified
        hydrogen bonds at each frame. Each element in the list is an array
        where each row contains three integer indices, `(d_i, h_i, a_i)`,
        such that `d_i` is the index of the donor atom, `h_i` the index
        of the hydrogen atom, and `a_i` the index of the acceptor atom involved
        in a hydrogen bond which occurs in that frame.

    Notes
    -----
    Each hydrogen bond is distinguished for the purpose of this function by the
    indices of the donor, hydrogen, and acceptor atoms. This means that, for
    example, when an ARG sidechain makes a hydrogen bond with its NH2 group,
    you might see what appear like double counting of the h-bonds, since the
    hydrogen bond formed via the H_1 and H_2 are counted separately, despite
    their "chemical indistinguishability"

    Examples
    --------
    >>> md.wernet_nilsson(t)
    array([[  0,  10,   8],
           [  0,  11,   7],
           [ 69,  73,  54],
           [ 76,  82,  65],
           [119, 131,  89],
           [140, 148, 265],
           [166, 177, 122],
           [181, 188, 231]])
    >>> label = lambda hbond : '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
    >>> for hbond in hbonds:
    >>>     print(label(hbond))
    GLU1-N -- GLU1-OE2
    GLU1-N -- GLU1-OE1
    GLY6-N -- SER4-O
    CYS7-N -- GLY5-O
    TYR11-N -- VAL8-O
    MET12-N -- LYS20-O

    See Also
    --------
    baker_hubbard, kabsch_sander

    References
    ----------
    .. [1] Wernet, Ph., L.G.M. Pettersson, and A. Nilsson, et al.
       "The Structure of the First Coordination Shell in Liquid Water." (2004)
       Science 304, 995-999.
    """
    # Criterion constants: cutoff at angle zero (nm), quadratic falloff per
    # degree^2, and a hard cap on the H-D-A angle (see NOTE below).
    distance_cutoff = 0.33
    angle_const = 0.000044
    angle_cutoff = 45

    if traj.topology is None:
        raise ValueError('wernet_nilsson requires that traj contain topology '
                         'information')

    # Get the possible donor-hydrogen...acceptor triplets
    bond_triplets = _get_bond_triplets(traj.topology,
                                       exclude_water=exclude_water, sidechain_only=sidechain_only)

    # Compute geometry: distance indices [0, 2] select donor...acceptor;
    # angle indices [2, 0, 1] give the angle at the donor (the vertex shared
    # by the first two sides in the law-of-cosines computation).
    mask, distances, angles = _compute_bounded_geometry(traj, bond_triplets,
                                                        distance_cutoff, [0, 2], [2, 0, 1], periodic=periodic)

    # Update triplets under consideration
    bond_triplets = bond_triplets.compress(mask, axis=0)

    # Calculate the true cutoffs for distances: the cone criterion tightens
    # the distance cutoff as the angle (converted to degrees here) grows.
    cutoffs = distance_cutoff - angle_const * (angles * 180.0 / np.pi) ** 2

    # Find triplets that meet the criteria.
    # NOTE(review): `angles` is in radians (from np.arccos, so <= pi) while
    # `angle_cutoff` is 45, so `angles < angle_cutoff` is always True and the
    # angle cap never filters anything; presumably it was intended in degrees
    # (or np.radians(45)). In practice the cone distance cutoff already goes
    # negative near ~86 degrees, so the impact is minimal -- confirm upstream.
    presence = np.logical_and(distances < cutoffs, angles < angle_cutoff)

    return [bond_triplets.compress(present, axis=0) for present in presence]
def baker_hubbard(traj, freq=0.1, exclude_water=True, periodic=True, sidechain_only=False,
                  distance_cutoff=0.25, angle_cutoff=120):
    """Identify hydrogen bonds based on cutoffs for the Donor-H...Acceptor
    distance and angle.

    The criterion employed is :math:`\\theta > 120` and
    :math:`r_\\text{H...Acceptor} < 2.5 A`.

    When the donor is 'N' and the acceptor is 'O', this corresponds to
    the definition established in [1]_. The donors considered by this method
    are NH and OH, and the acceptors considered are O and N.

    Parameters
    ----------
    traj : md.Trajectory
        An mdtraj trajectory. It must contain topology information.
    freq : float, default=0.1
        Return only hydrogen bonds that occur in greater this fraction of the
        frames in the trajectory.
    exclude_water : bool, default=True
        Exclude solvent molecules from consideration
    periodic : bool, default=True
        Set to True to calculate displacements and angles across periodic box boundaries.
    sidechain_only : bool, default=False
        Set to True to only consider sidechain-sidechain interactions.
    distance_cutoff : float, default=0.25
        Distance cutoff of Donor-H...Acceptor contact in nanometers.
        The criterion employed is any contact that is shorter than the distance cutoff.
        with an distance_cutoff is accepted.
    angle_cutoff : float, default=120
        Angle cutoff of the angle theta in degrees.
        The criterion employed is any contact with an angle theta greater than the
        angle_cutoff is accepted.

    Returns
    -------
    hbonds : np.array, shape=[n_hbonds, 3], dtype=int
        An array containing the indices atoms involved in each of the identified
        hydrogen bonds. Each row contains three integer indices, `(d_i, h_i,
        a_i)`, such that `d_i` is the index of the donor atom, `h_i` the index
        of the hydrogen atom, and `a_i` the index of the acceptor atom involved
        in a hydrogen bond which occurs (according to the definition above) in
        proportion greater than `freq` of the trajectory.

    Notes
    -----
    Each hydrogen bond is distinguished for the purpose of this function by the
    indices of the donor, hydrogen, and acceptor atoms. This means that, for
    example, when an ARG sidechain makes a hydrogen bond with its NH2 group,
    you might see what appear like double counting of the h-bonds, since the
    hydrogen bond formed via the H_1 and H_2 are counted separately, despite
    their "chemical indistinguishability"

    Examples
    --------
    >>> md.baker_hubbard(t)
    array([[  0,  10,   8],
           [  0,  11,   7],
           [ 69,  73,  54],
           [ 76,  82,  65],
           [119, 131,  89],
           [140, 148, 265],
           [166, 177, 122],
           [181, 188, 231]])
    >>> label = lambda hbond : '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
    >>> for hbond in hbonds:
    >>>     print(label(hbond))
    GLU1-N -- GLU1-OE2
    GLU1-N -- GLU1-OE1
    GLY6-N -- SER4-O
    CYS7-N -- GLY5-O
    TYR11-N -- VAL8-O
    MET12-N -- LYS20-O

    See Also
    --------
    kabsch_sander

    References
    ----------
    .. [1] Baker, E. N., and R. E. Hubbard. "Hydrogen bonding in globular
        proteins." Progress in Biophysics and Molecular Biology
        44.2 (1984): 97-179.
    """
    # Convert the user-facing degrees cutoff to radians to match np.arccos output.
    angle_cutoff = np.radians(angle_cutoff)

    if traj.topology is None:
        raise ValueError('baker_hubbard requires that traj contain topology '
                         'information')

    # Get the possible donor-hydrogen...acceptor triplets
    bond_triplets = _get_bond_triplets(traj.topology,
                                       exclude_water=exclude_water, sidechain_only=sidechain_only)

    # Distance indices [1, 2] select hydrogen...acceptor; angle indices
    # [0, 1, 2] give the D-H-A angle (vertex at the hydrogen). `freq` is
    # passed through so the pre-filter already drops rare contacts.
    mask, distances, angles = _compute_bounded_geometry(traj, bond_triplets,
                                                        distance_cutoff, [1, 2], [0, 1, 2], freq=freq, periodic=periodic)

    # Find triplets that meet the criteria in each frame...
    presence = np.logical_and(distances < distance_cutoff, angles > angle_cutoff)
    # ...then refine `mask` in place: of the triplets that survived the
    # distance pre-filter (mask==True), keep only those whose full criterion
    # holds in more than `freq` of the frames.
    mask[mask] = np.mean(presence, axis=0) > freq
    return bond_triplets.compress(mask, axis=0)
def kabsch_sander(traj):
    """Compute the Kabsch-Sander hydrogen bond energy between each pair
    of residues in every frame.

    Hydrogen bonds are defined using an electrostatic definition, assuming
    partial charges of -0.42 e and +0.20 e to the carbonyl oxygen and amide
    hydrogen respectively, their opposites assigned to the carbonyl carbon
    and amide nitrogen. A hydrogen bond is identified if E in the following
    equation is less than -0.5 kcal/mol:

    .. math::

        E = 0.42 \cdot 0.2 \cdot 33.2 kcal/(mol \cdot nm) * \\
            (1/r_{ON} + 1/r_{CH} - 1/r_{OH} - 1/r_{CN})

    Parameters
    ----------
    traj : md.Trajectory
        An mdtraj trajectory. It must contain topology information.

    Returns
    -------
    matrices : list of scipy.sparse.csr_matrix
        The return value is a list of length equal to the number of frames
        in the trajectory. Each element is an n_residues x n_residues sparse
        matrix, where the existence of an entry at row `i`, column `j` with value
        `x` means that there exists a hydrogen bond between a backbone CO
        group at residue `i` with a backbone NH group at residue `j` whose
        Kabsch-Sander energy is less than -0.5 kcal/mol (the threshold for
        existence of the "bond"). The exact value of the energy is given by the
        value `x`.

    See Also
    --------
    wernet_nilsson, baker_hubbard

    References
    ----------
    .. [1] Kabsch W, Sander C (1983). "Dictionary of protein secondary structure: pattern recognition of hydrogen-bonded and geometrical features". Biopolymers 22 (12): 2577-637. doi:10.1002/bip.360221211
    """
    if traj.topology is None:
        raise ValueError('kabsch_sander requires topology')
    import scipy.sparse

    xyz, nco_indices, ca_indices, proline_indices, _ = _prep_kabsch_sander_arrays(traj)
    n_residues = len(ca_indices)

    # The C kernel fills, per frame and residue, up to two bonded partner
    # residues and their energies; -1 / NaN are the "no bond" sentinels.
    hbonds = np.empty((xyz.shape[0], n_residues, 2), np.int32)
    henergies = np.empty((xyz.shape[0], n_residues, 2), np.float32)
    hbonds.fill(-1)
    henergies.fill(np.nan)
    _geometry._kabsch_sander(xyz, nco_indices, ca_indices, proline_indices,
                             hbonds, henergies)

    # The C code returns its info in a pretty inconvenient format.
    # Let's change it to a list of scipy CSR matrices.
    matrices = []
    hbonds_mask = (hbonds != -1)
    for i in range(xyz.shape[0]):
        # apologies for this cryptic code -- we need to deal with the low
        # level aspects of the csr matrix format.
        hbonds_frame = hbonds[i]
        mask = hbonds_mask[i]
        henergies_frame = henergies[i]

        # Build CSR arrays directly: each residue contributes the (up to two)
        # valid bonds in its row; indptr is the running count of entries.
        indptr = np.zeros(n_residues + 1, np.int32)
        indptr[1:] = np.cumsum(mask.sum(axis=1))
        indices = hbonds_frame[mask].flatten()
        data = henergies_frame[mask].flatten()

        # Transposed so the (row=CO residue, col=NH residue) orientation
        # matches the contract described in the docstring above.
        matrices.append(scipy.sparse.csr_matrix(
            (data, indices, indptr), shape=(n_residues, n_residues)).T)
    return matrices
def _get_bond_triplets(topology, exclude_water=True, sidechain_only=False):
    """Enumerate candidate (donor, hydrogen, acceptor) atom-index triplets.

    Donors are bonded N-H and O-H pairs; acceptors are all O and N atoms.
    Water atoms and, optionally, non-sidechain atoms are excluded. Returns an
    (n, 3) array with self-pairings (donor atom == acceptor atom) removed.
    """

    def _eligible(atom):
        # Waters are skipped unless explicitly requested.
        if exclude_water and atom.residue.is_water:
            return False
        # Optionally restrict to sidechain atoms only.
        if sidechain_only and not atom.is_sidechain:
            return False
        return True

    def _donor_pairs(heavy, hydrogen):
        # All bonded heavy-hydrogen index pairs, ordered (heavy, hydrogen).
        wanted = set((heavy, hydrogen))
        pairs = []
        for first, second in topology.bonds:
            if set((first.element.symbol, second.element.symbol)) != wanted:
                continue
            if not (_eligible(first) and _eligible(second)):
                continue
            # Normalize ordering so the heavy atom comes first.
            if first.element.symbol == hydrogen:
                first, second = second, first
            pairs.append((first.index, second.index))
        return pairs

    # The topology must carry bond information at all for this to work.
    # Finding a single bond is sufficient proof.
    if not any(True for _ in topology.bonds):
        raise ValueError('No bonds found in topology. Try using '
                         'traj._topology.create_standard_bonds() to create bonds '
                         'using our PDB standard bond definitions.')

    xh_donors = np.array(_donor_pairs('N', 'H') + _donor_pairs('O', 'H'))
    if len(xh_donors) == 0:
        # No hydrogens (or no protein) means no possible hydrogen bonds.
        return np.zeros((0, 3), dtype=int)

    acceptor_elements = frozenset(('O', 'N'))
    acceptors = np.array([atom.index for atom in topology.atoms
                          if atom.element.symbol in acceptor_elements and _eligible(atom)])
    # Make acceptors a 2-D column so it can be tiled against the donor pairs.
    acceptors = acceptors[:, np.newaxis]

    # Cartesian product of donor pairs and acceptors -> (n_donors * n_acceptors, 3).
    donor_block = np.repeat(xh_donors, acceptors.shape[0], axis=0)
    acceptor_block = np.tile(acceptors, (xh_donors.shape[0], 1))
    triplets = np.hstack((donor_block, acceptor_block))

    # Drop triplets whose donor heavy atom and acceptor are the same atom.
    return triplets[triplets[:, 0] != triplets[:, 2], :]
def _compute_bounded_geometry(traj, triplets, distance_cutoff, distance_indices,
                              angle_indices, freq=0.0, periodic=True):
    """
    Returns a tuple include (1) the mask for triplets that fulfill the distance
    criteria frequently enough, (2) the actual distances calculated, and (3) the
    angles between the triplets specified by angle_indices.
    """
    # Distances for the pair of interest, for every frame and triplet.
    distances = compute_distances(traj, triplets[:, distance_indices], periodic=periodic)

    # Keep only triplets that are within the cutoff in more than `freq` of frames.
    mask = np.mean(distances < distance_cutoff, axis=0) > freq
    triplets = triplets.compress(mask, axis=0)
    distances = distances.compress(mask, axis=1)

    # Gather the three triangle sides by pairing each angle index with its
    # cyclic successor; the side already computed above is reused as-is.
    sides = []
    for position, first in enumerate(angle_indices):
        second = angle_indices[(position + 1) % len(angle_indices)]
        if set((first, second)) == set(distance_indices):
            sides.append(distances)
        else:
            sides.append(compute_distances(traj, triplets[:, [first, second]],
                                           periodic=periodic))

    # Law of cosines: the angle at the vertex shared by sides a and b.
    a, b, c = sides
    cosines = (a ** 2 + b ** 2 - c ** 2) / (2 * a * b)
    np.clip(cosines, -1, 1, out=cosines)  # guard arccos against rounding
    angles = np.arccos(cosines)

    return mask, distances, angles
def _get_or_minus1(f):
    """Call *f* and return its result, mapping an IndexError to -1."""
    try:
        result = f()
    except IndexError:
        return -1
    return result
def _prep_kabsch_sander_arrays(traj):
    """Extract the per-residue index arrays the Kabsch-Sander C kernel needs.

    Returns (xyz, nco_indices, ca_indices, proline_indices, is_protein), where
    missing backbone atoms are marked with index -1.
    """
    xyz = ensure_type(traj.xyz, dtype=np.float32, ndim=3, name='traj.xyz',
                      shape=(None, None, 3), warn_on_cast=False)

    ca_indices = []
    nco_indices = []
    is_proline = []
    is_protein = []
    for residue in traj.topology.residues:

        def _first_index(atom_name):
            # Index of the first atom in this residue with the given name,
            # or -1 when the residue has no such atom.
            matches = [a.index for a in residue.atoms if a.name == atom_name]
            return matches[0] if matches else -1

        ca = _first_index('CA')
        n = _first_index('N')
        c = _first_index('C')
        o = _first_index('O')

        ca_indices.append(ca)
        is_proline.append(residue.name == 'PRO')
        nco_indices.append([n, c, o])
        # A residue counts as protein only if all four backbone atoms exist.
        is_protein.append(-1 not in (ca, n, c, o))

    nco_indices = np.array(nco_indices, np.int32)
    ca_indices = np.array(ca_indices, np.int32)
    proline_indices = np.array(is_proline, np.int32)
    is_protein = np.array(is_protein, np.int32)
    return xyz, nco_indices, ca_indices, proline_indices, is_protein
| rmcgibbo/mdtraj | mdtraj/geometry/hbond.py | Python | lgpl-2.1 | 18,311 | [
"MDTraj"
] | c7c237ff5bc9e82e69a2ae610a179ef2510d738bc77d810eb24613a8d795a26f |
import numpy as np
from ase.lattice import bulk
from gpaw import GPAW, PW, FermiDirac, MethfesselPaxton
# Starting guess for the fcc aluminium lattice constant (Angstrom).
a0 = 4.04
al = bulk('Al', 'fcc', a=a0)
cell0 = al.cell

# Convergence scan 1: plane-wave cutoff (presumably eV, the GPAW convention)
# at a fixed 8x8x8 k-point mesh. For each cutoff, compute energies at five
# cell scalings (+/-2% strain) so an equation of state can be fitted later
# from the 'Al-<ecut>.txt' logs.
for ecut in range(200, 501, 50):
    al.calc = GPAW(mode=PW(ecut),
                   xc='PBE',
                   kpts=(8, 8, 8),
                   parallel={'band': 1},
                   basis='dzp',
                   txt='Al-%d.txt' % ecut)
    for eps in np.linspace(-0.02, 0.02, 5):
        al.cell = (1 + eps) * cell0
        al.get_potential_energy()

# Convergence scan 2: k-point mesh density at a fixed 400 cutoff, reusing
# (and mutating) the last calculator from the loop above.
al.calc.set(mode=PW(400))
for k in range(4, 17):
    al.calc.set(kpts=(k, k, k),
                txt='Al-%02d.txt' % k)
    for eps in np.linspace(-0.02, 0.02, 5):
        al.cell = (1 + eps) * cell0
        al.get_potential_energy()
| robwarm/gpaw-symm | doc/tutorials/lattice_constants/al.py | Python | gpl-3.0 | 757 | [
"ASE",
"GPAW"
] | c5550b4e76d2642d741db00f873ce660f21ba9d18aa3c10da73e2af4658e079f |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A MCTS actor."""
from typing import Optional, Tuple
import acme
from acme import adders
from acme import specs
from acme.agents.tf.mcts import models
from acme.agents.tf.mcts import search
from acme.agents.tf.mcts import types
from acme.tf import variable_utils as tf2_variable_utils
import dm_env
import numpy as np
from scipy import special
import sonnet as snt
import tensorflow as tf
class MCTSActor(acme.Actor):
    """Executes a policy- and value-network guided MCTS search."""

    # Timestep seen on the previous step; consumed by `observe()` to update
    # the model and write transitions to the adder.
    _prev_timestep: dm_env.TimeStep

    def __init__(
        self,
        environment_spec: specs.EnvironmentSpec,
        model: models.Model,
        network: snt.Module,
        discount: float,
        num_simulations: int,
        adder: Optional[adders.Adder] = None,
        variable_client: Optional[tf2_variable_utils.VariableClient] = None,
    ):
        """Initializes the actor.

        Args:
          environment_spec: spec of the environment; only the action count is used.
          model: environment model used to simulate rollouts during search.
          network: policy-value network mapping observations to (logits, value).
          discount: discount applied to simulated returns during search.
          num_simulations: number of MCTS simulations per action selection.
          adder: optional sink that transitions (and search policies) are written to.
          variable_client: optional source for fetching updated network variables.
        """
        # Internalize components: model, network, data sink and variable source.
        self._model = model
        # Wrap in tf.function so repeated forward passes run as a compiled graph.
        self._network = tf.function(network)
        self._variable_client = variable_client
        self._adder = adder

        # Internalize hyperparameters.
        self._num_actions = environment_spec.actions.num_values
        self._num_simulations = num_simulations
        self._actions = list(range(self._num_actions))
        self._discount = discount

        # We need to save the policy so as to add it to replay on the next step.
        # Initialized to a uniform distribution over actions.
        self._probs = np.ones(
            shape=(self._num_actions,), dtype=np.float32) / self._num_actions

    def _forward(
        self, observation: types.Observation) -> Tuple[types.Probs, types.Value]:
        """Performs a forward pass of the policy-value network."""
        # Add a singleton batch dimension for the network, then strip it again.
        logits, value = self._network(tf.expand_dims(observation, axis=0))

        # Convert to numpy & take softmax.
        logits = logits.numpy().squeeze(axis=0)
        value = value.numpy().item()
        probs = special.softmax(logits)

        return probs, value

    def select_action(self, observation: types.Observation) -> types.Action:
        """Computes the agent's policy via MCTS."""
        if self._model.needs_reset:
            self._model.reset(observation)

        # Compute a fresh MCTS plan.
        root = search.mcts(
            observation,
            model=self._model,
            search_policy=search.puct,
            evaluation=self._forward,
            num_simulations=self._num_simulations,
            num_actions=self._num_actions,
            discount=self._discount,
        )

        # The agent's policy is softmax w.r.t. the *visit counts* as in AlphaZero.
        probs = search.visit_count_policy(root)
        action = np.int32(np.random.choice(self._actions, p=probs))

        # Save the policy probs so that we can add them to replay in `observe()`.
        self._probs = probs.astype(np.float32)

        return action

    def update(self, wait: bool = False):
        """Fetches the latest variables from the variable source, if needed."""
        if self._variable_client:
            self._variable_client.update(wait)

    def observe_first(self, timestep: dm_env.TimeStep):
        """Records the first timestep of an episode."""
        self._prev_timestep = timestep
        if self._adder:
            self._adder.add_first(timestep)

    def observe(self, action: types.Action, next_timestep: dm_env.TimeStep):
        """Updates the agent's internal model and adds the transition to replay."""
        self._model.update(self._prev_timestep, action, next_timestep)

        self._prev_timestep = next_timestep

        if self._adder:
            # The search policy from `select_action` rides along as an extra.
            self._adder.add(action, next_timestep, extras={'pi': self._probs})
| deepmind/acme | acme/agents/tf/mcts/acting.py | Python | apache-2.0 | 3,992 | [
"VisIt"
] | a57e58a1e922cddd2950276b40f5bf251843b0d3ebfb53a123eb8a63cc73015d |
#!/usr/bin/env python3
import argparse
import common
import functools
import multiprocessing
import os
import os.path
import pathlib
import re
import subprocess
import stat
import sys
import traceback
import shutil
import paths
# Paths skipped entirely by the format checker.
EXCLUDED_PREFIXES = (
    "./generated/",
    "./thirdparty/",
    "./build",
    "./.git/",
    "./bazel-",
    "./.cache",
    "./source/extensions/extensions_build_config.bzl",
    "./bazel/toolchains/configs/",
    "./tools/testdata/check_format/",
    "./tools/pyformat/",
    "./third_party/",
    "./test/extensions/filters/http/wasm/test_data",
    "./test/extensions/filters/network/wasm/test_data",
    "./test/extensions/stats_sinks/wasm/test_data",
    "./test/extensions/bootstrap/wasm/test_data",
    "./test/extensions/common/wasm/test_data",
    "./test/extensions/access_loggers/wasm/test_data",
    "./source/extensions/common/wasm/ext",
    "./examples/wasm",
)

# File suffixes that the checker operates on.
SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto",
            ".rst")
DOCS_SUFFIX = (".md", ".rst")
# NOTE(review): a single-element parenthesized value without a trailing comma
# is a plain string, not a tuple. Left as-is since str.endswith/startswith
# accept either form, but confirm the intended usage.
PROTO_SUFFIX = (".proto")

# Files in these paths can make reference to protobuf stuff directly
GOOGLE_PROTOBUF_ALLOWLIST = ("ci/prebuilt", "source/common/protobuf", "api/test",
                             "test/extensions/bootstrap/wasm/test_data")
REPOSITORIES_BZL = "bazel/repositories.bzl"

# Files matching these exact names can reference real-world time. These include the class
# definitions for real-world time, the construction of them in main(), and perf annotation.
# For now it includes the validation server but that really should be injected too.
REAL_TIME_ALLOWLIST = (
    "./source/common/common/utility.h", "./source/extensions/common/aws/utility.cc",
    "./source/common/event/real_time_system.cc", "./source/common/event/real_time_system.h",
    "./source/exe/main_common.cc", "./source/exe/main_common.h",
    "./source/server/config_validation/server.cc", "./source/common/common/perf_annotation.h",
    "./test/common/common/log_macros_test.cc", "./test/common/protobuf/utility_test.cc",
    "./test/test_common/simulated_time_system.cc", "./test/test_common/simulated_time_system.h",
    "./test/test_common/test_time.cc", "./test/test_common/test_time.h",
    "./test/test_common/utility.cc", "./test/test_common/utility.h",
    "./test/integration/integration.h")

# Tests in these paths may make use of the Registry::RegisterFactory constructor or the
# REGISTER_FACTORY macro. Other locations should use the InjectFactory helper class to
# perform temporary registrations.
REGISTER_FACTORY_TEST_ALLOWLIST = ("./test/common/config/registry_test.cc",
                                   "./test/integration/clusters/", "./test/integration/filters/")

# Files in these paths can use MessageLite::SerializeAsString
SERIALIZE_AS_STRING_ALLOWLIST = (
    "./source/common/config/version_converter.cc",
    "./source/common/protobuf/utility.cc",
    "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc",
    "./test/common/protobuf/utility_test.cc",
    "./test/common/config/version_converter_test.cc",
    "./test/common/grpc/codec_test.cc",
    "./test/common/grpc/codec_fuzz_test.cc",
    "./test/extensions/filters/http/common/fuzz/uber_filter.h",
    "./test/extensions/bootstrap/wasm/test_data/speed_cpp.cc",
)

# Files in these paths can use Protobuf::util::JsonStringToMessage
JSON_STRING_TO_MESSAGE_ALLOWLIST = ("./source/common/protobuf/utility.cc",
                                    "./test/extensions/bootstrap/wasm/test_data/speed_cpp.cc")

# Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing
# ones were grandfathered as part of PR #8484 for backwards compatibility.
# (A duplicate "downstream_cx_length_ms" entry was removed; membership tests
# are unaffected.)
HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST = ("downstream_cx_length_ms", "initialization_time_ms",
                                      "loop_duration_us", "poll_delay_us", "request_time_ms",
                                      "upstream_cx_connect_ms", "upstream_cx_length_ms")

# Files in these paths can use std::regex
STD_REGEX_ALLOWLIST = (
    "./source/common/common/utility.cc", "./source/common/common/regex.h",
    "./source/common/common/regex.cc", "./source/common/stats/tag_extractor_impl.h",
    "./source/common/stats/tag_extractor_impl.cc",
    "./source/common/formatter/substitution_formatter.cc",
    "./source/extensions/filters/http/squash/squash_filter.h",
    "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/admin/utils.h",
    "./source/server/admin/utils.cc", "./source/server/admin/stats_handler.h",
    "./source/server/admin/stats_handler.cc", "./source/server/admin/prometheus_stats.h",
    "./source/server/admin/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc",
    "./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/version/version.cc")

# Only one C++ file should instantiate grpc_init
# NOTE(review): plain string (missing trailing comma for a tuple); works with
# startswith/exact comparison for the single entry — confirm intended usage.
GRPC_INIT_ALLOWLIST = ("./source/common/grpc/google_grpc_context.cc")

# These files should not throw exceptions. Add HTTP/1 when exceptions removed.
EXCEPTION_DENYLIST = ("./source/common/http/http2/codec_impl.h",
                      "./source/common/http/http2/codec_impl.cc")

# External tool locations (overridable via environment / helper lookups).
CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-10")
BUILDIFIER_PATH = paths.getBuildifier()
BUILDOZER_PATH = paths.getBuildozer()
ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                                      "envoy_build_fixer.py")
HEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py")
SUBDIR_SET = set(common.includeDirOrder())
INCLUDE_ANGLE = "#include <"
INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE)

# Precompiled patterns used by the individual checks. Non-raw string literals
# containing regex escapes were converted to raw strings (identical patterns,
# but no invalid-escape DeprecationWarnings).
PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE)
X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*')
DESIGNATED_INITIALIZER_REGEX = re.compile(r"\{\s*\.\w+\s*\=")
MANGLED_PROTOBUF_NAME_REGEX = re.compile(r"envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}")
HISTOGRAM_SI_SUFFIX_REGEX = re.compile(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)")
TEST_NAME_STARTING_LOWER_CASE_REGEX = re.compile(r"TEST(_.\(.*,\s|\()[a-z].*\)\s\{")
EXTENSIONS_CODEOWNERS_REGEX = re.compile(r'.*(extensions[^@]*\s+)(@.*)')
COMMENT_REGEX = re.compile(r"//|\*")
DURATION_VALUE_REGEX = re.compile(r'\b[Dd]uration\(([0-9.]+)')
PROTO_VALIDATION_STRING = re.compile(r'\bmin_bytes\b')
VERSION_HISTORY_NEW_LINE_REGEX = re.compile(r"\* ([a-z \-_]+): ([a-z:`]+)")
VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$")
RELOADABLE_FLAG_REGEX = re.compile(r".*(.)(envoy.reloadable_features.[^ ]*)\s.*")
# Check for punctuation in a terminal ref clause, e.g.
# :ref:`panic mode. <arch_overview_load_balancing_panic_threshold>`
REF_WITH_PUNCTUATION_REGEX = re.compile(r".*\. <[^<]*>`\s*")
DOT_MULTI_SPACE_REGEX = re.compile(r"\. +")

# yapf: disable
PROTOBUF_TYPE_ERRORS = {
    # Well-known types should be referenced from the ProtobufWkt namespace.
    "Protobuf::Any":                    "ProtobufWkt::Any",
    "Protobuf::Empty":                  "ProtobufWkt::Empty",
    "Protobuf::ListValue":              "ProtobufWkt::ListValue",
    "Protobuf::NULL_VALUE":             "ProtobufWkt::NULL_VALUE",
    "Protobuf::StringValue":            "ProtobufWkt::StringValue",
    "Protobuf::Struct":                 "ProtobufWkt::Struct",
    "Protobuf::Value":                  "ProtobufWkt::Value",

    # Other common mis-namespacing of protobuf types.
    "ProtobufWkt::Map":                 "Protobuf::Map",
    "ProtobufWkt::MapPair":             "Protobuf::MapPair",
    "ProtobufUtil::MessageDifferencer": "Protobuf::util::MessageDifferencer"
}

LIBCXX_REPLACEMENTS = {
    "absl::make_unique<": "std::make_unique<",
}

UNOWNED_EXTENSIONS = {
  "extensions/filters/http/ratelimit",
  "extensions/filters/http/buffer",
  "extensions/filters/http/rbac",
  "extensions/filters/http/ip_tagging",
  "extensions/filters/http/tap",
  "extensions/filters/http/health_check",
  "extensions/filters/http/cors",
  "extensions/filters/http/ext_authz",
  "extensions/filters/http/dynamo",
  "extensions/filters/http/lua",
  "extensions/filters/http/common",
  "extensions/filters/common",
  "extensions/filters/common/ratelimit",
  "extensions/filters/common/rbac",
  "extensions/filters/common/lua",
  "extensions/filters/listener/original_dst",
  "extensions/filters/listener/proxy_protocol",
  "extensions/stat_sinks/statsd",
  "extensions/stat_sinks/common",
  "extensions/stat_sinks/common/statsd",
  "extensions/health_checkers/redis",
  "extensions/access_loggers/grpc",
  "extensions/access_loggers/file",
  "extensions/common/tap",
  "extensions/transport_sockets/raw_buffer",
  "extensions/transport_sockets/tap",
  "extensions/tracers/zipkin",
  "extensions/tracers/dynamic_ot",
  "extensions/tracers/opencensus",
  "extensions/tracers/lightstep",
  "extensions/tracers/common",
  "extensions/tracers/common/ot",
  "extensions/retry/host/previous_hosts",
  "extensions/filters/network/ratelimit",
  "extensions/filters/network/client_ssl_auth",
  "extensions/filters/network/rbac",
  "extensions/filters/network/tcp_proxy",
  "extensions/filters/network/echo",
  "extensions/filters/network/ext_authz",
  "extensions/filters/network/redis_proxy",
  "extensions/filters/network/kafka",
  "extensions/filters/network/kafka/broker",
  "extensions/filters/network/kafka/protocol",
  "extensions/filters/network/kafka/serialization",
  "extensions/filters/network/mongo_proxy",
  "extensions/filters/network/common",
  "extensions/filters/network/common/redis",
}
# yapf: enable
class FormatChecker:
def __init__(self, args):
    """Capture the parsed command-line arguments as checker configuration."""
    self.operation_type = args.operation_type
    self.target_path = args.target_path
    self.api_prefix = args.api_prefix
    self.api_shadow_root = args.api_shadow_prefix
    self.envoy_build_rule_check = not args.skip_envoy_build_rule_check
    self.namespace_check = args.namespace_check
    # Extend the user-supplied exclusions with paths that are always exempt
    # from the namespace check.
    self.namespace_check_excluded_paths = args.namespace_check_excluded_paths + [
        "./tools/api_boost/testdata/",
        "./tools/clang_tools/",
    ]
    # Likewise for the envoy_build_fixer check.
    self.build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [
        "./bazel/external/",
        "./bazel/toolchains/",
        "./bazel/BUILD",
        "./tools/clang_tools",
    ]
    self.include_dir_order = args.include_dir_order
# Map a line transformation function across each line of a file,
# writing the result lines as requested.
# If there is a clang format nesting or mismatch error, return the first occurrence
def evaluateLines(self, path, line_xform, write=True):
    """Apply `line_xform(line, line_number)` to each line of the file at `path`
    that is not inside a `// clang-format off` region.

    If `write` is True the transformed content is written back in place.
    Returns the first clang-format marker nesting/mismatch error message
    found ("nested off", "nested on", or "remains off"), or None.
    """
    error_message = None
    # True while clang-format is "on" (i.e. the transform should be applied).
    format_flag = True
    output_lines = []
    for line_number, line in enumerate(self.readLines(path)):
        if line.find("// clang-format off") != -1:
            # Turning formatting off while it is already off is a nesting error.
            if not format_flag and error_message is None:
                error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested off")
            format_flag = False
        if line.find("// clang-format on") != -1:
            # Turning formatting on while it is already on is a nesting error.
            if format_flag and error_message is None:
                error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format nested on")
            format_flag = True
        if format_flag:
            output_lines.append(line_xform(line, line_number))
        else:
            # Lines inside an "off" region pass through untouched.
            output_lines.append(line)
    # We used to use fileinput in the older Python 2.7 script, but this doesn't do
    # inplace mode and UTF-8 in Python 3, so doing it the manual way.
    if write:
        pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8')
    # A file that ends with formatting still disabled is also an error.
    if not format_flag and error_message is None:
        error_message = "%s:%d: %s" % (path, line_number + 1, "clang-format remains off")
    return error_message
# Obtain all the lines in a given file.
def readLines(self, path):
return self.readFile(path).split('\n')
# Read a UTF-8 encoded file as a str.
def readFile(self, path):
return pathlib.Path(path).read_text(encoding='utf-8')
# lookPath searches for the given executable in all directories in PATH
# environment variable. If it cannot be found, empty string is returned.
def lookPath(self, executable):
return shutil.which(executable) or ''
# pathExists checks whether the given path exists. This function assumes that
# the path is absolute and evaluates environment variables.
def pathExists(self, executable):
return os.path.exists(os.path.expandvars(executable))
# executableByOthers checks whether the given path has execute permission for
# others.
def executableByOthers(self, executable):
st = os.stat(os.path.expandvars(executable))
return bool(st.st_mode & stat.S_IXOTH)
  # Check whether all needed external tools (clang-format, buildifier, buildozer) are
  # available.
  def checkTools(self):
    """Verify the external tool binaries exist and are executable by others.

    Returns a list of human-readable error messages; empty when all tools are
    usable.
    """
    error_messages = []

    clang_format_abs_path = self.lookPath(CLANG_FORMAT_PATH)
    if clang_format_abs_path:
      # The binary must be executable by all users, e.g. when checks run as a
      # different user inside a container.
      if not self.executableByOthers(clang_format_abs_path):
        error_messages.append("command {} exists, but cannot be executed by other "
                              "users".format(CLANG_FORMAT_PATH))
    else:
      error_messages.append(
          "Command {} not found. If you have clang-format in version 10.x.x "
          "installed, but the binary name is different or it's not available in "
          "PATH, please use CLANG_FORMAT environment variable to specify the path. "
          "Examples:\n"
          "    export CLANG_FORMAT=clang-format-10.0.0\n"
          "    export CLANG_FORMAT=/opt/bin/clang-format-10\n"
          "    export CLANG_FORMAT=/usr/local/opt/llvm@10/bin/clang-format".format(
              CLANG_FORMAT_PATH))

    def checkBazelTool(name, path, var):
      # Prefer a $PATH lookup; fall back to treating `path` as an absolute,
      # env-var-expanded location before declaring the tool missing.
      bazel_tool_abs_path = self.lookPath(path)
      if bazel_tool_abs_path:
        if not self.executableByOthers(bazel_tool_abs_path):
          error_messages.append("command {} exists, but cannot be executed by other "
                                "users".format(path))
      elif self.pathExists(path):
        if not self.executableByOthers(path):
          error_messages.append("command {} exists, but cannot be executed by other "
                                "users".format(path))
      else:
        error_messages.append("Command {} not found. If you have {} installed, but the binary "
                              "name is different or it's not available in $GOPATH/bin, please use "
                              "{} environment variable to specify the path. Example:\n"
                              "    export {}=`which {}`\n"
                              "If you don't have {} installed, you can install it by:\n"
                              "    go get -u github.com/bazelbuild/buildtools/{}".format(
                                  path, name, var, var, name, name, name))

    checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN')
    checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN')

    return error_messages
def checkNamespace(self, file_path):
for excluded_path in self.namespace_check_excluded_paths:
if file_path.startswith(excluded_path):
return []
nolint = "NOLINT(namespace-%s)" % self.namespace_check.lower()
text = self.readFile(file_path)
if not re.search("^\s*namespace\s+%s\s*{" % self.namespace_check, text, re.MULTILINE) and \
not nolint in text:
return [
"Unable to find %s namespace or %s for file: %s" %
(self.namespace_check, nolint, file_path)
]
return []
def packageNameForProto(self, file_path):
package_name = None
error_message = []
result = PROTO_PACKAGE_REGEX.search(self.readFile(file_path))
if result is not None and len(result.groups()) == 1:
package_name = result.group(1)
if package_name is None:
error_message = ["Unable to find package name for proto file: %s" % file_path]
return [package_name, error_message]
# To avoid breaking the Lyft import, we just check for path inclusion here.
def allowlistedForProtobufDeps(self, file_path):
return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \
any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_ALLOWLIST))
# Real-world time sources should not be instantiated in the source, except for a few
# specific cases. They should be passed down from where they are instantied to where
# they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager.
def allowlistedForRealTime(self, file_path):
if file_path.endswith(".md"):
return True
return file_path in REAL_TIME_ALLOWLIST
def allowlistedForRegisterFactory(self, file_path):
if not file_path.startswith("./test/"):
return True
return any(file_path.startswith(prefix) for prefix in REGISTER_FACTORY_TEST_ALLOWLIST)
def allowlistedForSerializeAsString(self, file_path):
return file_path in SERIALIZE_AS_STRING_ALLOWLIST or file_path.endswith(DOCS_SUFFIX)
def allowlistedForJsonStringToMessage(self, file_path):
return file_path in JSON_STRING_TO_MESSAGE_ALLOWLIST
def allowlistedForHistogramSiSuffix(self, name):
return name in HISTOGRAM_WITH_SI_SUFFIX_ALLOWLIST
def allowlistedForStdRegex(self, file_path):
return file_path.startswith("./test") or file_path in STD_REGEX_ALLOWLIST or file_path.endswith(
DOCS_SUFFIX)
def allowlistedForGrpcInit(self, file_path):
return file_path in GRPC_INIT_ALLOWLIST
def allowlistedForUnpackTo(self, file_path):
return file_path.startswith("./test") or file_path in [
"./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h"
]
def denylistedForExceptions(self, file_path):
# Returns true when it is a non test header file or the file_path is in DENYLIST or
# it is under toos/testdata subdirectory.
if file_path.endswith(DOCS_SUFFIX):
return False
return (file_path.endswith('.h') and not file_path.startswith("./test/")) or file_path in EXCEPTION_DENYLIST \
or self.isInSubdir(file_path, 'tools/testdata')
def isApiFile(self, file_path):
return file_path.startswith(self.api_prefix) or file_path.startswith(self.api_shadow_root)
def isBuildFile(self, file_path):
basename = os.path.basename(file_path)
if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"):
return True
return False
def isExternalBuildFile(self, file_path):
return self.isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or
file_path.startswith("./tools/clang_tools"))
def isStarlarkFile(self, file_path):
return file_path.endswith(".bzl")
def isWorkspaceFile(self, file_path):
return os.path.basename(file_path) == "WORKSPACE"
def isBuildFixerExcludedFile(self, file_path):
for excluded_path in self.build_fixer_check_excluded_paths:
if file_path.startswith(excluded_path):
return True
return False
def hasInvalidAngleBracketDirectory(self, line):
if not line.startswith(INCLUDE_ANGLE):
return False
path = line[INCLUDE_ANGLE_LEN:]
slash = path.find("/")
if slash == -1:
return False
subdir = path[0:slash]
return subdir in SUBDIR_SET
  def checkCurrentReleaseNotes(self, file_path, error_messages):
    """Validate the formatting of the current-release version history RST file.

    Enforces, per section: alphabetical ordering of entries (by first word and
    by the first word after the colon), backtick-quoting of runtime flags, and
    that each release note ends with a period. Errors are appended to
    `error_messages` as "path:line: message" strings.
    """
    first_word_of_prior_line = ''
    next_word_to_check = ''  # first word after :
    prior_line = ''

    def endsWithPeriod(prior_line):
      if not prior_line:
        return True  # Don't punctuation-check empty lines.
      if prior_line.endswith('.'):
        return True  # Actually ends with .
      if prior_line.endswith('`') and REF_WITH_PUNCTUATION_REGEX.match(prior_line):
        return True  # The text in the :ref ends with a .
      return False

    for line_number, line in enumerate(self.readLines(file_path)):

      def reportError(message):
        error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message))

      if VERSION_HISTORY_SECTION_NAME.match(line):
        if line == "Deprecated":
          # The deprecations section is last, and does not have enforced formatting.
          break

        # Reset all parsing at the start of a section.
        first_word_of_prior_line = ''
        next_word_to_check = ''  # first word after :
        prior_line = ''

      # make sure flags are surrounded by ``s
      flag_match = RELOADABLE_FLAG_REGEX.match(line)
      if flag_match:
        if not flag_match.groups()[0].startswith('`'):
          reportError("Flag `%s` should be enclosed in back ticks" % flag_match.groups()[1])

      if line.startswith("* "):
        if not endsWithPeriod(prior_line):
          reportError("The following release note does not end with a '.'\n %s" % prior_line)

        match = VERSION_HISTORY_NEW_LINE_REGEX.match(line)
        if not match:
          reportError("Version history line malformed. "
                      "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" %
                      line)
        else:
          first_word = match.groups()[0]
          next_word = match.groups()[1]
          # Do basic alphabetization checks of the first word on the line and the
          # first word after the :
          if first_word_of_prior_line and first_word_of_prior_line > first_word:
            reportError(
                "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. "
                % (first_word_of_prior_line, first_word, line))
          if first_word_of_prior_line == first_word and next_word_to_check and next_word_to_check > next_word:
            reportError(
                "Version history not in alphabetical order (%s vs %s): please check placement of line\n %s. "
                % (next_word_to_check, next_word, line))
          first_word_of_prior_line = first_word
          next_word_to_check = next_word

        prior_line = line
      elif not line:
        # If we hit the end of this release note block, check the prior line.
        if not endsWithPeriod(prior_line):
          reportError("The following release note does not end with a '.'\n %s" % prior_line)
      elif prior_line:
        # Continuation line: accumulate so the period check sees the full note.
        prior_line += line
  def checkFileContents(self, file_path, checker):
    """Run the line-level `checker(line, file_path, reportError)` over a file.

    Returns the list of collected "path:line: message" errors; also applies the
    special current-release-notes validation for version_history/current.rst.
    """
    error_messages = []
    if file_path.endswith("version_history/current.rst"):
      # Version file checking has enough special cased logic to merit its own checks.
      # This only validates entries for the current release as very old release
      # notes have a different format.
      self.checkCurrentReleaseNotes(file_path, error_messages)

    def checkFormatErrors(line, line_number):

      def reportError(message):
        error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message))

      checker(line, file_path, reportError)

    # write=False: this is a pure check pass, never rewrite the file here.
    evaluate_failure = self.evaluateLines(file_path, checkFormatErrors, False)
    if evaluate_failure is not None:
      error_messages.append(evaluate_failure)

    return error_messages
  def fixSourceLine(self, line, line_number):
    """Return `line` with the automatically-fixable style errors corrected."""
    # Strip double space after '.' This may prove overenthusiastic and need to
    # be restricted to comments and metadata files but works for now.
    line = re.sub(DOT_MULTI_SPACE_REGEX, ". ", line)

    if self.hasInvalidAngleBracketDirectory(line):
      # Envoy-internal headers must be included with quotes, not angle brackets.
      line = line.replace("<", '"').replace(">", '"')

    # Fix incorrect protobuf namespace references.
    for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
      line = line.replace(invalid_construct, valid_construct)

    # Use recommended cpp stdlib
    for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
      line = line.replace(invalid_construct, valid_construct)

    return line
# We want to look for a call to condvar.waitFor, but there's no strong pattern
# to the variable name of the condvar. If we just look for ".waitFor" we'll also
# pick up time_system_.waitFor(...), and we don't want to return true for that
# pattern. But in that case there is a strong pattern of using time_system in
# various spellings as the variable name.
def hasCondVarWaitFor(self, line):
wait_for = line.find(".waitFor(")
if wait_for == -1:
return False
preceding = line[0:wait_for]
if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \
preceding.endswith("time_system_"):
return False
return True
# Determines whether the filename is either in the specified subdirectory, or
# at the top level. We consider files in the top level for the benefit of
# the check_format testcases in tools/testdata/check_format.
def isInSubdir(self, filename, *subdirs):
# Skip this check for check_format's unit-tests.
if filename.count("/") <= 1:
return True
for subdir in subdirs:
if filename.startswith('./' + subdir + '/'):
return True
return False
# Determines if given token exists in line without leading or trailing token characters
# e.g. will return True for a line containing foo() but not foo_bar() or baz_foo
def tokenInLine(self, token, line):
index = 0
while True:
index = line.find(token, index)
# the following check has been changed from index < 1 to index < 0 because
# this function incorrectly returns false when the token in question is the
# first one in a line. The following line returns false when the token is present:
# (no leading whitespace) violating_symbol foo;
if index < 0:
break
if index == 0 or not (line[index - 1].isalnum() or line[index - 1] == '_'):
if index + len(token) >= len(line) or not (line[index + len(token)].isalnum() or
line[index + len(token)] == '_'):
return True
index = index + 1
return False
  def checkSourceLine(self, line, file_path, reportError):
    """Check one line of a C++/proto source file for Envoy style violations.

    `reportError(message)` is invoked once per violation found. The first
    group of checks mirrors fixSourceLine (they may already be auto-fixed in
    "fix" mode); the rest are report-only.
    """
    # Check fixable errors. These may have been fixed already.
    if line.find(".  ") != -1:
      reportError("over-enthusiastic spaces")
    if self.isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line):
      reportError(
          "Please do not use the raw literal x-envoy in source code.  See Envoy::Http::PrefixValue."
      )
    if self.hasInvalidAngleBracketDirectory(line):
      reportError("envoy includes should not have angle brackets")
    for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
      if invalid_construct in line:
        reportError("incorrect protobuf type reference %s; "
                    "should be %s" % (invalid_construct, valid_construct))
    for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
      if invalid_construct in line:
        reportError("term %s should be replaced with standard library term %s" %
                    (invalid_construct, valid_construct))
    # Do not include the virtual_includes headers.
    if re.search("#include.*/_virtual_includes/", line):
      reportError("Don't include the virtual includes headers.")

    # Some errors cannot be fixed automatically, and actionable, consistent,
    # navigable messages should be emitted to make it easy to find and fix
    # the errors by hand.
    if not self.allowlistedForProtobufDeps(file_path):
      if '"google/protobuf' in line or "google::protobuf" in line:
        reportError("unexpected direct dependency on google.protobuf, use "
                    "the definitions in common/protobuf/protobuf.h instead.")
    if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"):
      # We don't check here for std::mutex because that may legitimately show up in
      # comments, for example this one.
      reportError("Don't use <mutex> or <condition_variable*>, switch to "
                  "Thread::MutexBasicLockable in source/common/common/thread.h")
    if line.startswith("#include <shared_mutex>"):
      # We don't check here for std::shared_timed_mutex because that may
      # legitimately show up in comments, for example this one.
      reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.")
    if not self.allowlistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line:
      if "RealTimeSource" in line or \
        ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \
        "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \
        "std::this_thread::sleep_for" in line or self.hasCondVarWaitFor(line):
        reportError("Don't reference real-world time sources from production code; use injection")
      duration_arg = DURATION_VALUE_REGEX.search(line)
      if duration_arg and duration_arg.group(1) != "0" and duration_arg.group(1) != "0.0":
        # Matching duration(int-const or float-const) other than zero
        reportError(
            "Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)"
        )
    if not self.allowlistedForRegisterFactory(file_path):
      if "Registry::RegisterFactory<" in line or "REGISTER_FACTORY" in line:
        reportError("Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, "
                    "use Registry::InjectFactory instead.")
    if not self.allowlistedForUnpackTo(file_path):
      if "UnpackTo" in line:
        reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
    # Check that we use the absl::Time library
    if self.tokenInLine("std::get_time", line):
      if "test/" in file_path:
        reportError("Don't use std::get_time; use TestUtility::parseTime in tests")
      else:
        reportError("Don't use std::get_time; use the injectable time system")
    if self.tokenInLine("std::put_time", line):
      reportError("Don't use std::put_time; use absl::Time equivalent instead")
    if self.tokenInLine("gmtime", line):
      reportError("Don't use gmtime; use absl::Time equivalent instead")
    if self.tokenInLine("mktime", line):
      reportError("Don't use mktime; use absl::Time equivalent instead")
    if self.tokenInLine("localtime", line):
      reportError("Don't use localtime; use absl::Time equivalent instead")
    if self.tokenInLine("strftime", line):
      reportError("Don't use strftime; use absl::FormatTime instead")
    if self.tokenInLine("strptime", line):
      reportError("Don't use strptime; use absl::FormatTime instead")
    if self.tokenInLine("strerror", line):
      reportError("Don't use strerror; use Envoy::errorDetails instead")
    # Prefer using abseil hash maps/sets over std::unordered_map/set for performance optimizations and
    # non-deterministic iteration order that exposes faulty assertions.
    # See: https://abseil.io/docs/cpp/guides/container#hash-tables
    if "std::unordered_map" in line:
      reportError("Don't use std::unordered_map; use absl::flat_hash_map instead or "
                  "absl::node_hash_map if pointer stability of keys/values is required")
    if "std::unordered_set" in line:
      reportError("Don't use std::unordered_set; use absl::flat_hash_set instead or "
                  "absl::node_hash_set if pointer stability of keys/values is required")
    if "std::atomic_" in line:
      # The std::atomic_* free functions are functionally equivalent to calling
      # operations on std::atomic<T> objects, so prefer to use that instead.
      reportError("Don't use free std::atomic_* functions, use std::atomic<T> members instead.")
    # Block usage of certain std types/functions as iOS 11 and macOS 10.13
    # do not support these at runtime.
    # See: https://github.com/envoyproxy/envoy/issues/12341
    if self.tokenInLine("std::any", line):
      reportError("Don't use std::any; use absl::any instead")
    if self.tokenInLine("std::get_if", line):
      reportError("Don't use std::get_if; use absl::get_if instead")
    if self.tokenInLine("std::holds_alternative", line):
      reportError("Don't use std::holds_alternative; use absl::holds_alternative instead")
    if self.tokenInLine("std::make_optional", line):
      reportError("Don't use std::make_optional; use absl::make_optional instead")
    if self.tokenInLine("std::monostate", line):
      reportError("Don't use std::monostate; use absl::monostate instead")
    if self.tokenInLine("std::optional", line):
      reportError("Don't use std::optional; use absl::optional instead")
    if self.tokenInLine("std::string_view", line):
      reportError("Don't use std::string_view; use absl::string_view instead")
    if self.tokenInLine("std::variant", line):
      reportError("Don't use std::variant; use absl::variant instead")
    if self.tokenInLine("std::visit", line):
      reportError("Don't use std::visit; use absl::visit instead")
    if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h":
      # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that
      # can be used instead
      reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined "
                  "in include/envoy/common/platform.h instead")
    if DESIGNATED_INITIALIZER_REGEX.search(line):
      # Designated initializers are not part of the C++14 standard and are not supported
      # by MSVC
      reportError("Don't use designated initializers in struct initialization, "
                  "they are not part of C++14")
    if " ?: " in line:
      # The ?: operator is non-standard, it is a GCC extension
      reportError("Don't use the '?:' operator, it is a non-standard GCC extension")
    if line.startswith("using testing::Test;"):
      reportError("Don't use 'using testing::Test;, elaborate the type instead")
    if line.startswith("using testing::TestWithParams;"):
      reportError("Don't use 'using testing::Test;, elaborate the type instead")
    if TEST_NAME_STARTING_LOWER_CASE_REGEX.search(line):
      # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins
      # with a lowercase letter.
      reportError("Test names should be CamelCase, starting with a capital letter")
    if not self.allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line:
      # The MessageLite::SerializeAsString doesn't generate deterministic serialization,
      # use MessageUtil::hash instead.
      reportError(
          "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead."
      )
    if not self.allowlistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line:
      # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing
      # behavior.
      reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.")

    if self.isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \
        ('.counterFromString(' in line or '.gaugeFromString(' in line or \
          '.histogramFromString(' in line or '.textReadoutFromString(' in line or \
          '->counterFromString(' in line or '->gaugeFromString(' in line or \
          '->histogramFromString(' in line or '->textReadoutFromString(' in line):
      reportError("Don't lookup stats by name at runtime; use StatName saved during construction")

    if MANGLED_PROTOBUF_NAME_REGEX.search(line):
      reportError("Don't use mangled Protobuf names for enum constants")

    hist_m = HISTOGRAM_SI_SUFFIX_REGEX.search(line)
    if hist_m and not self.allowlistedForHistogramSiSuffix(hist_m.group(0)):
      reportError(
          "Don't suffix histogram names with the unit symbol, "
          "it's already part of the histogram object and unit-supporting sinks can use this information natively, "
          "other sinks can add the suffix automatically on flush should they prefer to do so.")

    if not self.allowlistedForStdRegex(file_path) and "std::regex" in line:
      reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher")

    if not self.allowlistedForGrpcInit(file_path):
      grpc_init_or_shutdown = line.find("grpc_init()")
      grpc_shutdown = line.find("grpc_shutdown()")
      if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and
                                         grpc_shutdown < grpc_init_or_shutdown):
        grpc_init_or_shutdown = grpc_shutdown
      if grpc_init_or_shutdown != -1:
        # Only report when the call is not inside a // comment.
        comment = line.find("// ")
        if comment == -1 or comment > grpc_init_or_shutdown:
          reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " +
                      "Grpc::GoogleGrpcContext. See #8282")

    if self.denylistedForExceptions(file_path):
      # Skipping cases where 'throw' is a substring of a symbol like in "foothrowBar".
      if "throw" in line.split():
        comment_match = COMMENT_REGEX.search(line)
        if comment_match is None or comment_match.start(0) > line.find("throw"):
          reportError("Don't introduce throws into exception-free files, use error " +
                      "statuses instead.")

    if "lua_pushlightuserdata" in line:
      reportError(
          "Don't use lua_pushlightuserdata, since it can cause unprotected error in call to" +
          "Lua API (bad light userdata pointer) on ARM64 architecture. See " +
          "https://github.com/LuaJIT/LuaJIT/issues/450#issuecomment-433659873 for details.")

    if file_path.endswith(PROTO_SUFFIX):
      exclude_path = ['v1', 'v2', 'generated_api_shadow']
      result = PROTO_VALIDATION_STRING.search(line)
      if result is not None:
        if not any(x in file_path for x in exclude_path):
          reportError("min_bytes is DEPRECATED, Use min_len.")
  def checkBuildLine(self, line, file_path, reportError):
    """Check one line of a BUILD/WORKSPACE file; report violations via reportError."""
    if "@bazel_tools" in line and not (self.isStarlarkFile(file_path) or
                                       file_path.startswith("./bazel/") or
                                       "python/runfiles" in line):
      reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel")
    if not self.allowlistedForProtobufDeps(file_path) and '"protobuf"' in line:
      reportError("unexpected direct external dependency on protobuf, use "
                  "//source/common/protobuf instead.")
    # The @envoy// prefix is redundant inside the main Envoy workspace.
    if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and
        not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path) and
        "@envoy//" in line):
      reportError("Superfluous '@envoy//' prefix")
def fixBuildLine(self, file_path, line, line_number):
if (self.envoy_build_rule_check and not self.isStarlarkFile(file_path) and
not self.isWorkspaceFile(file_path) and not self.isExternalBuildFile(file_path)):
line = line.replace("@envoy//", "//")
return line
  def fixBuildPath(self, file_path):
    """Apply automatic fixes to a BUILD-family file; return rewrite errors."""
    self.evaluateLines(file_path, functools.partial(self.fixBuildLine, file_path))

    error_messages = []
    # TODO(htuch): Add API specific BUILD fixer script.
    if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile(
        file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path):
      if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0:
        error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path]

    # buildifier runs on every build file, including those the fixer skipped.
    if os.system("%s -lint=fix -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0:
      error_messages += ["buildifier rewrite failed for file: %s" % file_path]
    return error_messages
  def checkBuildPath(self, file_path):
    """Run all checks for a BUILD-family file; return the list of errors."""
    error_messages = []

    if not self.isBuildFixerExcludedFile(file_path) and not self.isApiFile(
        file_path) and not self.isStarlarkFile(file_path) and not self.isWorkspaceFile(file_path):
      # Diff against what envoy_build_fixer would produce; differences are errors.
      command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)
      error_messages += self.executeCommand(command, "envoy_build_fixer check failed", file_path)

    if self.isBuildFile(file_path) and (file_path.startswith(self.api_prefix + "envoy") or
                                        file_path.startswith(self.api_shadow_root + "envoy")):
      # Every API BUILD file must declare its proto package.
      found = False
      for line in self.readLines(file_path):
        if "api_proto_package(" in line:
          found = True
          break
      if not found:
        error_messages += ["API build file does not provide api_proto_package()"]

    command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path)
    error_messages += self.executeCommand(command, "buildifier check failed", file_path)
    error_messages += self.checkFileContents(file_path, self.checkBuildLine)
    return error_messages
  def fixSourcePath(self, file_path):
    """Apply automatic fixes to a source file; return unfixable errors."""
    self.evaluateLines(file_path, self.fixSourceLine)

    error_messages = []

    if not file_path.endswith(DOCS_SUFFIX):
      if not file_path.endswith(PROTO_SUFFIX):
        error_messages += self.fixHeaderOrder(file_path)
      error_messages += self.clangFormat(file_path)
    if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path):
      # A missing package declaration cannot be auto-fixed; just report it.
      package_name, error_message = self.packageNameForProto(file_path)
      if package_name is None:
        error_messages += error_message
    return error_messages
  def checkSourcePath(self, file_path):
    """Run all checks for a source file; return the list of errors."""
    error_messages = self.checkFileContents(file_path, self.checkSourceLine)

    if not file_path.endswith(DOCS_SUFFIX):
      if not file_path.endswith(PROTO_SUFFIX):
        error_messages += self.checkNamespace(file_path)
        # Diff against header_order.py's preferred ordering.
        command = ("%s --include_dir_order %s --path %s | diff %s -" %
                   (HEADER_ORDER_PATH, self.include_dir_order, file_path, file_path))
        error_messages += self.executeCommand(command, "header_order.py check failed", file_path)
      # Diff against clang-format's output; differences are errors.
      command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path))
      error_messages += self.executeCommand(command, "clang-format check failed", file_path)
    if file_path.endswith(PROTO_SUFFIX) and self.isApiFile(file_path):
      package_name, error_message = self.packageNameForProto(file_path)
      if package_name is None:
        error_messages += error_message
    return error_messages
# Example target outputs are:
# - "26,27c26"
# - "12,13d13"
# - "7a8,9"
def executeCommand(self,
command,
error_message,
file_path,
regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")):
try:
output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip()
if output:
return output.decode('utf-8').split("\n")
return []
except subprocess.CalledProcessError as e:
if (e.returncode != 0 and e.returncode != 1):
return ["ERROR: something went wrong while executing: %s" % e.cmd]
# In case we can't find any line numbers, record an error message first.
error_messages = ["%s for file: %s" % (error_message, file_path)]
for line in e.output.decode('utf-8').splitlines():
for num in regex.findall(line):
error_messages.append(" %s:%s" % (file_path, num))
return error_messages
def fixHeaderOrder(self, file_path):
command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH,
self.include_dir_order, file_path)
if os.system(command) != 0:
return ["header_order.py rewrite error: %s" % (file_path)]
return []
def clangFormat(self, file_path):
command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path)
if os.system(command) != 0:
return ["clang-format rewrite error: %s" % (file_path)]
return []
  def checkFormat(self, file_path):
    """Check (and, in "fix" mode, first repair) a single file.

    Returns a list of error messages prefixed with the file name when any
    errors remain; [] for clean or out-of-scope files.
    """
    if file_path.startswith(EXCLUDED_PREFIXES):
      return []

    if not file_path.endswith(SUFFIXES):
      return []

    error_messages = []
    # Apply fixes first, if asked, and then run checks. If we wind up attempting to fix
    # an issue, but there's still an error, that's a problem.
    try_to_fix = self.operation_type == "fix"
    if self.isBuildFile(file_path) or self.isStarlarkFile(file_path) or self.isWorkspaceFile(
        file_path):
      if try_to_fix:
        error_messages += self.fixBuildPath(file_path)
      error_messages += self.checkBuildPath(file_path)
    else:
      if try_to_fix:
        error_messages += self.fixSourcePath(file_path)
      error_messages += self.checkSourcePath(file_path)

    if error_messages:
      return ["From %s" % file_path] + error_messages
    return error_messages
def checkFormatReturnTraceOnError(self, file_path):
"""Run checkFormat and return the traceback of any exception."""
try:
return self.checkFormat(file_path)
except:
return traceback.format_exc().split("\n")
def checkOwners(self, dir_name, owned_directories, error_messages):
"""Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS
Args:
dir_name: the directory being checked.
owned_directories: directories currently listed in CODEOWNERS.
error_messages: where to put an error message for new unowned directories.
"""
found = False
for owned in owned_directories:
if owned.startswith(dir_name) or dir_name.startswith(owned):
found = True
if not found and dir_name not in UNOWNED_EXTENSIONS:
error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name)
  def checkApiShadowStarlarkFiles(self, file_path, error_messages):
    """Ensure the generated_api_shadow copy of an api/ .bzl file is in sync.

    In "check" mode any diff output is appended to error_messages; in "fix"
    mode an out-of-date shadow file is overwritten with the api/ original.
    Returns error_messages.
    """
    command = "diff -u "
    command += file_path + " "
    # Map ./api/<x> to <api_shadow_root><x>.
    api_shadow_starlark_path = self.api_shadow_root + re.sub(r"\./api/", '', file_path)
    command += api_shadow_starlark_path

    error_message = self.executeCommand(command, "invalid .bzl in generated_api_shadow", file_path)
    if self.operation_type == "check":
      error_messages += error_message
    elif self.operation_type == "fix" and len(error_message) != 0:
      shutil.copy(file_path, api_shadow_starlark_path)

    return error_messages
  def checkFormatVisitor(self, arg, dir_name, names):
    """Run checkFormat in parallel for the given files.
    Args:
      arg: a tuple (pool, result_list, owned_directories, error_messages)
        pool and result_list are for starting tasks asynchronously.
        owned_directories tracks directories listed in the CODEOWNERS file.
        error_messages is a list of string format errors.
      dir_name: the parent directory of the given files.
      names: a list of file names.
    """
    # Unpack the multiprocessing.Pool process pool and list of results. Since
    # python lists are passed as references, this is used to collect the list of
    # async results (futures) from running checkFormat and passing them back to
    # the caller.
    pool, result_list, owned_directories, error_messages = arg
    # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded
    # manner as it is a small and limited list.
    source_prefix = './source/'
    full_prefix = './source/extensions/'
    # Check to see if this directory is a subdir under /source/extensions
    # Also ignore top level directories under /source/extensions since we don't
    # need owners for source/extensions/access_loggers etc, just the subdirectories.
    if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]:
      # checkOwners expects the path relative to ./source/ (e.g.
      # "extensions/filters/http/foo"), hence the prefix strip below.
      self.checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages)
    for file_name in names:
      # Starlark files under ./api must stay in sync with the generated API
      # shadow tree; they get an extra async diff task.
      if dir_name.startswith("./api") and self.isStarlarkFile(file_name):
        result = pool.apply_async(self.checkApiShadowStarlarkFiles,
                                  args=(dir_name + "/" + file_name, error_messages))
        result_list.append(result)
      # Every file (including the Starlark ones above) also gets the general
      # format check.
      result = pool.apply_async(self.checkFormatReturnTraceOnError,
                                args=(dir_name + "/" + file_name,))
      result_list.append(result)
# checkErrorMessages iterates over the list with error messages and prints
# errors and returns a bool based on whether there were any errors.
def checkErrorMessages(self, error_messages):
if error_messages:
for e in error_messages:
print("ERROR: %s" % e)
return True
return False
if __name__ == "__main__":
  # Command-line driver: parse options, sanity-check tooling and CODEOWNERS,
  # then run format checks (optionally fixes) over the target path.
  parser = argparse.ArgumentParser(description="Check or fix file format.")
  parser.add_argument("operation_type",
                      type=str,
                      choices=["check", "fix"],
                      help="specify if the run should 'check' or 'fix' format.")
  parser.add_argument(
      "target_path",
      type=str,
      nargs="?",
      default=".",
      help="specify the root directory for the script to recurse over. Default '.'.")
  parser.add_argument("--add-excluded-prefixes",
                      type=str,
                      nargs="+",
                      help="exclude additional prefixes.")
  parser.add_argument("-j",
                      "--num-workers",
                      type=int,
                      default=multiprocessing.cpu_count(),
                      help="number of worker processes to use; defaults to one per core.")
  parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.")
  parser.add_argument("--api-shadow-prefix",
                      type=str,
                      default="./generated_api_shadow/",
                      help="path of the shadow API tree.")
  parser.add_argument("--skip_envoy_build_rule_check",
                      action="store_true",
                      help="skip checking for '@envoy//' prefix in build rules.")
  parser.add_argument("--namespace_check",
                      type=str,
                      nargs="?",
                      default="Envoy",
                      help="specify namespace check string. Default 'Envoy'.")
  parser.add_argument("--namespace_check_excluded_paths",
                      type=str,
                      nargs="+",
                      default=[],
                      help="exclude paths from the namespace_check.")
  parser.add_argument("--build_fixer_check_excluded_paths",
                      type=str,
                      nargs="+",
                      default=[],
                      help="exclude paths from envoy_build_fixer check.")
  parser.add_argument("--bazel_tools_check_excluded_paths",
                      type=str,
                      nargs="+",
                      default=[],
                      help="exclude paths from bazel_tools check.")
  parser.add_argument("--include_dir_order",
                      type=str,
                      default=",".join(common.includeDirOrder()),
                      help="specify the header block include directory order.")
  args = parser.parse_args()
  # NOTE(review): EXCLUDED_PREFIXES is assumed to be a module-level tuple;
  # `+=` here rebinds the module global (allowed at module scope).
  if args.add_excluded_prefixes:
    EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes)
  format_checker = FormatChecker(args)
  # Check whether all needed external tools are available.
  ct_error_messages = format_checker.checkTools()
  if format_checker.checkErrorMessages(ct_error_messages):
    sys.exit(1)
  # Returns the list of directories with owners listed in CODEOWNERS. May append errors to
  # error_messages.
  def ownedDirectories(error_messages):
    owned = []
    maintainers = [
        '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@asraa',
        '@yavlasov', '@junr03', '@dio', '@jmarantz', '@antoniovicente'
    ]
    try:
      with open('./CODEOWNERS') as f:
        for line in f:
          # If this line is of the form "extensions/... @owner1 @owner2" capture the directory
          # name and store it in the list of directories with documented owners.
          m = EXTENSIONS_CODEOWNERS_REGEX.search(line)
          if m is not None and not line.startswith('#'):
            owned.append(m.group(1).strip())
            # Every extension needs at least two owners, one of whom must be
            # a maintainer from the list above.
            owners = re.findall('@\S+', m.group(2).strip())
            if len(owners) < 2:
              error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n"
                                    " {}".format(line))
            maintainer = len(set(owners).intersection(set(maintainers))) > 0
            if not maintainer:
              error_messages.append("Extensions require at least one maintainer OWNER:\n"
                                    " {}".format(line))
      return owned
    except IOError:
      return []  # for the check format tests.
  # Calculate the list of owned directories once per run.
  error_messages = []
  owned_directories = ownedDirectories(error_messages)
  if os.path.isfile(args.target_path):
    # Single-file mode: check just that file synchronously.
    error_messages += format_checker.checkFormat("./" + args.target_path)
  else:
    results = []
    def PooledCheckFormat(path_predicate):
      # Runs the format visitor over the tree with a fresh worker pool,
      # restricted to files matching path_predicate; futures land in
      # `results` (passed by reference, used as an output).
      pool = multiprocessing.Pool(processes=args.num_workers)
      # For each file in target_path, start a new task in the pool and collect the
      # results (results is passed by reference, and is used as an output).
      for root, _, files in os.walk(args.target_path):
        format_checker.checkFormatVisitor((pool, results, owned_directories, error_messages), root,
                                          [f for f in files if path_predicate(f)])
      # Close the pool to new tasks, wait for all of the running tasks to finish,
      # then collect the error messages.
      pool.close()
      pool.join()
    # We first run formatting on non-BUILD files, since the BUILD file format
    # requires analysis of srcs/hdrs in the BUILD file, and we don't want these
    # to be rewritten by other multiprocessing pooled processes.
    PooledCheckFormat(lambda f: not format_checker.isBuildFile(f))
    PooledCheckFormat(lambda f: format_checker.isBuildFile(f))
    error_messages += sum((r.get() for r in results), [])
  if format_checker.checkErrorMessages(error_messages):
    print("ERROR: check format failed. run 'tools/code_format/check_format.py fix'")
    sys.exit(1)
  if args.operation_type == "check":
    print("PASS")
| envoyproxy/envoy-wasm | tools/code_format/check_format.py | Python | apache-2.0 | 54,221 | [
"VisIt"
] | fc821a163a2f1e2e19cb51d34903652aca0c402c86f30ebce2ae85536b9138a8 |
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
import ConfigParser
from shape_plot import plot_samples
def elem_sympoly(lambdas, k):
    """Table of elementary symmetric polynomials of `lambdas` up to order `k`.
    Returns a (k+1, N+1) array E where
    E[l, n] = sum over all size-l subsets J of the first n lambdas of
    prod_{i in J} lambdas[i], built with the recurrence
    E[l, n] = E[l, n-1] + lambdas[n-1] * E[l-1, n-1].
    """
    count = len(lambdas)
    poly = np.zeros((k + 1, count + 1))
    poly[0, :] = 1.0  # the empty product: E(0, n) == 1 for every n
    for order in range(1, k + 1):
        # Each entry depends on the previous prefix length, so the inner
        # sweep must stay sequential in n.
        for n, lam in enumerate(lambdas, start=1):
            poly[order, n] = poly[order, n - 1] + lam * poly[order - 1, n - 1]
    return poly
def sample_k(lambdas, k):
    ''' Pick k lambdas according to p(S) \propto prod(lambda \in S)
    Returns an integer array of k indices into `lambdas` (descending order).
    Draws from the module-level numpy RNG (np.random), so results depend on
    the global seed.
    '''
    # compute elementary symmetric polynomials
    E = elem_sympoly(lambdas, k)
    # iterate
    i = len(lambdas)-1
    remaining = k
    S = np.zeros(k, dtype=int)
    while remaining > 0:
        # compute marginal of i given that we choose remaining values from 0:i
        if i == remaining-1:
            # Exactly `remaining` candidates are left, so all must be taken.
            marg = 1
        else:
            marg = lambdas[i] * E[remaining-1,i] / E[remaining,i+1]
        # sample marginal
        if np.random.rand(1) < marg:
            S[remaining-1] = i
            remaining -= 1
        i -= 1
    return S
def decompose_kernel(M):
    """Eigendecompose kernel M, discarding imaginary parts.
    Returns (V, D): eigenvectors as columns of V and eigenvalues D, both
    real-valued and ordered by ascending eigenvalue.
    """
    eigvals, eigvecs = np.linalg.eig(M)
    # np.linalg.eig returns complex output in general; the kernel is assumed
    # (as in the original) to be effectively real-symmetric.
    eigvecs = np.real(eigvecs)
    eigvals = np.real(eigvals)
    order = eigvals.argsort()
    return eigvecs[:, order], eigvals[order]
def sample_dpp(M, k=None):
    ''' Sample a set Y from a dpp. M is a kernel, and k is (optionally)
    the size of the set to return.
    Returns a sorted integer array of selected item indices. Uses the
    module-level numpy RNG, so results depend on the global seed.
    '''
    V, D = decompose_kernel(M)
    if k is None:
        # choose eigenvectors randomly
        # Each eigenvector is kept independently with probability D/(1+D),
        # which converts the L-ensemble kernel into marginal probabilities.
        D = D / (1+D)
        v = np.nonzero(np.random.rand(len(D)) <= D)[0]
    else:
        # k-DPP
        v = sample_k(D, k)
        k = len(v)
    V = V[:,v]
    # iterate
    Y = np.zeros(k, dtype=int)
    for i in range(k-1, -1, -1):
        # compute probabilities for each item
        # P[j] is proportional to the squared norm of row j of V.
        P = np.sum(V**2, axis=1)
        P = P / np.sum(P)
        # choose a new item to include
        # Inverse-CDF sampling from the discrete distribution P.
        Y[i] = np.nonzero(np.random.rand(1) <= np.cumsum(P))[0][0]
        # choose a vector to eliminate
        j = np.nonzero(V[Y[i],:])[0][0]
        Vj = V[:, j]
        V = np.delete(V, j, axis=1)
        # update V
        # Project the remaining vectors so the chosen item cannot be
        # selected again.
        V = V - Vj[:, None] * V[Y[i],:][None, :] / Vj[Y[i]]
        # orthogonalize
        # Modified Gram-Schmidt over the surviving columns.
        for a in range(i):
            for b in range(a):
                V[:,a] = V[:,a] - np.inner(V[:,a], V[:,b]) * V[:,b]
            V[:,a] = V[:,a] / np.linalg.norm(V[:,a])
    Y = np.sort(Y)
    return Y
def test(k, sigma, sampling='grid'):
    """Visual sanity check: compare a DPP sample against uniform sampling.
    Draws k points from a DPP with a Gaussian kernel of bandwidth sigma over
    either a regular grid or Gaussian-distributed points, and plots ground
    set, DPP sample and a uniform random sample side by side.
    """
    import matplotlib.pyplot as plt
    from itertools import product
    if sampling == 'gaussian':
        X = np.random.normal(scale=.1, size=(1000, 2))
    else:
        # Generate a regular grid on [0, 1]^2
        axis_pts = np.arange(0, 1.1, 0.1)
        X = np.array(list(product(axis_pts, axis_pts)))
    M = np.exp(-pairwise_distances(X)**2 / sigma**2)  # gaussian kernel
    sample = sample_dpp(M, k)
    rand = np.random.choice(X.shape[0], k)
    # Common axis limits with a small margin
    lo = np.min(X, axis=0) - 0.1
    hi = np.max(X, axis=0) + 0.1
    panels = [(slice(None), sampling),
              (sample, 'Sample from the DPP'),
              (rand, 'Random sampling')]
    plt.figure()
    for offset, (idx, label) in enumerate(panels):
        plt.subplot(131 + offset)
        plt.plot(X[idx, 0], X[idx, 1], '.')
        plt.title(label)
        plt.xlim(lo[0], hi[0])
        plt.ylim(lo[1], hi[1])
if __name__ == "__main__":
    # Demo: draw a 100-point k-DPP from Gaussian-distributed points with a
    # narrow RBF kernel and compare against uniform random sampling.
    sigma = 0.1
    k = 100
    test(k, sigma, sampling='gaussian')
    # The block below is an alternative driver kept for reference; it samples
    # diverse shapes via the project's synthesis module.
    # from synthesis import synthesize_shape, save_plot
    #
    # a = 0.1
    # A = (1+2*a)*np.random.rand(1000,3)-a # Specify shape attributes here
    # model_name = 'PCA'
    # c = 0
    #
    # X, indices = synthesize_shape(A, c=0, model_name='PCA')
    # A = A[indices] # set of valid attributes
    # X = X[indices] # set of valid shapes
    #
    # M = np.exp(-pairwise_distances(A)**2/sigma**2) # gaussian kernel
    # Y = sample_dpp(M, k)
    #
    # save_plot(A[Y], X[Y], c=c, model_name=model_name)
| IDEALLab/domain_expansion_jmd_2017 | dpp.py | Python | mit | 4,312 | [
"Gaussian"
] | 1533a154444ec331bbbd93a119654b3ebf02ebf7fb93a010b1a4d45ad5151717 |
#!/usr/bin/env python
"""
filt.py
Functions for quickly filtering time-series of data.
Written by Brian Powell on 02/09/16
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import numpy as np
import seapy
import scipy.signal
def average_err(x, window=5):
    """
    Generate a moving (boxcar) average and variance of the time-series, x, using
    the specified window size.
    Parameters
    ----------
    x: ndarray,
        The time-series of data to average.
    window: int, optional
        Width of the boxcar window in samples (default 5). Units are the
        time-step of the signal provided (e.g., if the data are provided
        every hour, then window=12 averages over 12 hours).
    Returns
    -------
    x, variance: ndarray, ndarray
        Returns the moving average of x with the moving variance of the
        average window
    Examples
    --------
    Create data every 30 minutes for 3 days with time in days:
    >>> t = np.linspace(0, 3.0, 2 * 24 * 3.0, endpoint=False)
    >>> x = 0.1 * np.sin(2 * np.pi / .008 * t)
    >>> x += 0.2 * np.cos(2 * np.pi / 0.6 * t + 0.1)
    >>> x += 0.2 * np.cos(2 * np.pi / 1.6 * t + .11)
    >>> x += 1 * np.cos(2 * np.pi / 10 * t + 11)
    Average the data over 6 hour period
    >>> nx, err = average_err(x, window=12)
    >>> plt.plot(t, x, 'k', t, nx, 'r', label=['Raw', 'Average'])
    >>> plt.figure()
    >>> plt.plot(t, err, 'g', label='Variance')
    """
    x = np.atleast_1d(x).flatten()
    nx = np.ma.masked_all(x.shape)
    err = np.ma.masked_all(x.shape)
    # Uniform FIR kernel; filtfilt applies it forward and backward so the
    # result is zero-phase.
    filt = np.ones(window) / window
    padlen = window * 4
    # Go over all contiguous regions
    regions = seapy.contiguous(x)
    for r in regions:
        # Segments shorter than the filter padding cannot be filtered and
        # stay masked.
        if ((r.stop - r.start) >= padlen):
            nx[r] = scipy.signal.filtfilt(
                filt, [1.0], x[r], padlen=padlen, axis=0)
            err[r] = scipy.signal.filtfilt(
                filt, [1.0], (nx[r] - x[r])**2, padlen=padlen, axis=0)
    return nx, err
def bandpass(x, dt, low_cutoff=None, hi_cutoff=None, order=7):
    """
    Butterworth band-pass filter at the given cutoff period(s).
    Parameters
    ----------
    x : ndarray,
        The time-series of data to filter. Masked gaps are honored: each
        contiguous segment is filtered independently.
    dt : float,
        The time-step between the values in x. Units must be consistent
        with the cutoff period(s).
    low_cutoff : float, optional
        Low-pass cutoff period (same units as the time-step). Everything
        with a longer period remains. None disables the low-pass stage.
    hi_cutoff : float, optional
        High-pass cutoff period (same units as the time-step). Everything
        with a shorter period remains. None disables the high-pass stage.
    order : int, optional
        The order of the Butterworth filter to apply (default 7).
    Returns
    -------
    ndarray :
        The filtered time-series; segments too short to filter stay masked.
    Raises
    ------
    AttributeError :
        If neither cutoff is supplied.
    """
    series = np.ma.array(np.atleast_1d(x).flatten(), copy=False)
    filtered = np.ma.masked_all(series.shape)
    # Translate cutoff periods into normalized frequencies (Nyquist == 1).
    if low_cutoff and hi_cutoff:
        btype = 'bandpass'
        freq = 2.0 * dt / np.array([hi_cutoff, low_cutoff])
    elif low_cutoff:
        btype = 'lowpass'
        freq = 2.0 * dt / low_cutoff
    elif hi_cutoff:
        btype = 'highpass'
        freq = 2.0 * dt / hi_cutoff
    else:
        raise AttributeError("You must specify either low or hi cutoff.")
    b, a = scipy.signal.butter(order, freq, btype=btype)
    padlen = max(len(a), len(b))
    # Filter every contiguous (unmasked) run separately; runs shorter than
    # the padding cannot be filtered and remain masked.
    for region in seapy.contiguous(series):
        if (region.stop - region.start) >= padlen:
            filtered[region] = scipy.signal.filtfilt(
                b, a, series[region], padlen=5 * padlen, axis=0)
    return filtered
| ocefpaf/seapy | seapy/filt.py | Python | mit | 5,192 | [
"Brian"
] | c3d9dbc17691f1d6fae0c9cc174d6624269ffe4b5237d4d1ecf74d77e658e50e |
import os
from itertools import groupby, imap
from functools import partial
from operator import attrgetter
import gzip
from jbio.alignment import *
from jbio.io.file import iterator_over_file_from_extension as ioffe
from jbio.io.blast import record_iterator as blast_record_iterator
from jbio.functional import compose
def disabled_test_LIS(config):
    """Build (but do not run) an LIS computation over a BLAST alignment file.
    Returns a zero-argument callable that computes the longest increasing
    subsequence over the test alignments; disabled as an automatic test.
    """
    data_dir = config.get("test_data_path")
    blast_file = os.path.join(
        data_dir, "channel_286_read_45_1406145606_2D.blast6.gz")
    read_alignments = compose(blast_record_iterator, ioffe)
    funcs = alignment_functions(attrgetter("sstart"), attrgetter("send"))
    return lambda: funcs.LIS(
        funcs.score_getter_matching_consensus_estimated,
        read_alignments(blast_file))
| jgurtowski/jbio | jbio/test/alignment.py | Python | gpl-2.0 | 900 | [
"BLAST"
] | 779c113cea2983aa57b5c7f6c0f490c937c33bc5f872bfc99782695a5bbd0b1a |
# Profile the sweep
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
from __future__ import division
from StimControl.LightStim.Core import DefaultScreen
from StimControl.LightStim.LightData import dictattr
from StimControl.LightStim.FrameControl import FrameSweep
from StimControl.LightStim.ManGrating import ManGrating
# Manual Grating experiment parameters, all must be scalars
DefaultScreen(['control','left','right'])
p = dictattr()
# mask, one of: None, 'gaussian', or 'circle'
p.mask = 'circle'
p.maskSizeStepDeg = 0.5
# initial grating phase
p.phase0 = 0
# grating mean luminance (0-1)
p.ml = 0.5
# grating contrast (0-1)
p.contrast = 1
# background brightness (0-1)
p.bgbrightness = 0.5
# antialiase the bar?
p.antialiase = True
# flash the grating?
p.flash = False
# duration of each on period (sec)
p.flashduration = 0.5
# duration of each off period (sec)
p.flashinterval = 0.3
# factor to chage bar width and height by left/right/up/down key
p.sizemultiplier = 1.02
# factor to change temporal freq by on up/down
p.tfreqmultiplier = 1.01
# factor to change spatial freq by on left/right
p.sfreqmultiplier = 1.01
# factor to change contrast by on +/-
p.contrastmultiplier = 1.005
# orientation step size to snap to when scrolling mouse wheel (deg)
p.snapDeg = 12
# One stimulus per viewport; only the control viewport shows the info overlay.
stimulus_control = ManGrating(disp_info=True, params=p, viewport='control')
stimulus_left = ManGrating(disp_info=False, params=p, viewport='left')
stimulus_right = ManGrating(disp_info=False, params=p, viewport='right')
sweep = FrameSweep()
sweep.add_stimulus(stimulus_control)
sweep.add_stimulus(stimulus_left)
sweep.add_stimulus(stimulus_right)
# Profile the sweep run and print cumulative timings.
import cProfile,pstats
cProfile.run('sweep.go()','mangrating_profile')
# NOTE(review): `p` is rebound here from the parameter dictattr to the
# pstats.Stats object, shadowing the parameters above.
p = pstats.Stats('mangrating_profile')
p.sort_stats('cumulative')
p.print_stats()
"Gaussian"
] | c6b36b9c629872baa84b8fa16b2219150df9bb5ec55df308fc01812791af1469 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import tempfile
import numbers
import logging
from distutils.version import LooseVersion
import importlib
import numpy as np
import dill
import scipy
import scipy.odr as odr
from scipy.optimize import (leastsq, least_squares,
minimize, differential_evolution)
from scipy.linalg import svd
from contextlib import contextmanager
from hyperspy.external.progressbar import progressbar
from hyperspy.defaults_parser import preferences
from hyperspy.external.mpfit.mpfit import mpfit
from hyperspy.component import Component
from hyperspy.extensions import ALL_EXTENSIONS
from hyperspy.signal import BaseSignal
from hyperspy.misc.export_dictionary import (export_to_dictionary,
load_from_dictionary,
parse_flag_string,
reconstruct_object)
from hyperspy.misc.utils import (slugify, shorten_name, stash_active_state,
dummy_context_manager)
from hyperspy.misc.slicing import copy_slice_from_whitelist
from hyperspy.events import Events, Event, EventSuppressor
import warnings
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.ui_registry import add_gui_method
from hyperspy.misc.model_tools import current_model_values
from IPython.display import display_pretty, display
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_INT_ARG
_logger = logging.getLogger(__name__)
# Registry of every known component class, keyed by its _id_name. Copy before
# merging so the extension registry itself is not mutated in place.
_COMPONENTS = dict(ALL_EXTENSIONS["components1D"])
# BUG FIX: the original updated with "components1D" twice (a no-op), leaving
# 2D components unregistered and forcing the dill fallback when loading them.
_COMPONENTS.update(ALL_EXTENSIONS["components2D"])
def reconstruct_component(comp_dictionary, **init_args):
    """Recreate a component instance from its serialized dictionary.
    Parameters
    ----------
    comp_dictionary : dict
        Dictionary produced by ``Component.as_dictionary``. Must contain the
        ``_id_name`` key; components not registered through the extension
        mechanism additionally carry a ``_class_dump`` dill payload.
    **init_args
        Keyword arguments forwarded to the component constructor.
    Returns
    -------
    The reconstructed component instance.
    Raises
    ------
    ImportError
        If the component is neither registered through the extension
        mechanism nor serialized with dill.
    """
    _id = comp_dictionary['_id_name']
    if _id in _COMPONENTS:
        # Registered through the extension mechanism: import the providing
        # module lazily and pull the class out of it.
        _class = getattr(
            importlib.import_module(
                _COMPONENTS[_id]["module"]), _COMPONENTS[_id]["class"])
    elif "_class_dump" in comp_dictionary:
        # When a component is not registered using the extension mechanism,
        # it is serialized using dill.
        _class = dill.loads(comp_dictionary['_class_dump'])
    else:
        # BUG FIX: the original concatenated plain (non-f) string literals, so
        # the {...} placeholders were never interpolated into the message.
        raise ImportError(
            "Loading the {} component failed because the component is "
            "provided by the {} Python package, but {} is not "
            "installed.".format(
                comp_dictionary.get("class", _id),
                comp_dictionary.get("package", "unknown"),
                comp_dictionary.get("package", "unknown")))
    return _class(**init_args)
class ModelComponents(object):
    """Container for model components.
    Useful to provide tab completion when running in IPython.
    """

    def __init__(self, model):
        self._model = model

    def __repr__(self):
        """Tabular listing of the components: attribute, name and type."""
        fmt = "%4s | %19s | %19s | %19s"
        rows = [
            fmt % ('#', 'Attribute Name', 'Component Name', 'Component Type'),
            fmt % ('-' * 4, '-' * 19, '-' * 19, '-' * 19),
        ]
        if self._model:
            for idx, comp in enumerate(self._model):
                # Attribute names are slugified so they are valid identifiers;
                # every column is truncated to fit the 19-character layout.
                attr_name = shorten_name(
                    slugify(comp.name, valid_variable_name=True), 19)
                comp_name = shorten_name(comp.name, 19)
                comp_type = shorten_name(comp.__class__.__name__, 19)
                rows.append(fmt % (idx, attr_name, comp_name, comp_type))
        return "\n".join(rows)
@add_gui_method(toolkey="hyperspy.Model")
class BaseModel(list):
"""Model and data fitting tools applicable to signals of both one and two
dimensions.
Models of one-dimensional signals should use the :class:`Model1D` and
models of two-dimensional signals should use the :class:`Model2D`.
A model is constructed as a linear combination of :mod:`components` that
are added to the model using :meth:`append` or :meth:`extend`. There
are many predefined components available in the in the :mod:`components`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:meth:`multifit`. Once the optimizer reaches the convergence criteria or
the maximum number of iterations the new value of the component parameters
are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Attributes
----------
signal : BaseSignal instance
It contains the data to fit.
chisq : A BaseSignal of floats
Chi-squared of the signal (or np.nan if not yet fit)
dof : A BaseSignal of integers
Degrees of freedom of the signal (0 if not yet fit)
red_chisq : BaseSignal instance
Reduced chi-squared.
components : `ModelComponents` instance
The components of the model are attributes of this class. This provides
a convenient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
append
Append one component to the model.
extend
Append multiple components to the model.
remove
Remove component from model.
as_signal
Generate a BaseSignal instance (possible multidimensional)
from the model.
store_current_values
Store the value of the parameters at the current position.
fetch_stored_values
Fetch stored values of the parameters.
update_plot
Force a plot update. (In most cases the plot should update
automatically.)
set_signal_range, remove_signal range, reset_signal_range,
add signal_range.
Customize the signal range to fit.
fit, multifit
Fit the model to the data at the current position or the
full dataset.
save_parameters2file, load_parameters_from_file
Save/load the parameter values to/from a file.
plot
Plot the model and the data.
enable_plot_components, disable_plot_components
Plot each component separately. (Use after `plot`.)
set_current_values_to
Set the current value of all the parameters of the given component as
the value for all the dataset.
export_results
Save the value of the parameters in separate files.
plot_results
Plot the value of all parameters at all positions.
print_current_values
Print the value of the parameters at the current position.
enable_adjust_position, disable_adjust_position
Enable/disable interactive adjustment of the position of the components
that have a well defined position. (Use after `plot`).
fit_component
Fit just the given component in the given signal range, that can be
set interactively.
set_parameters_not_free, set_parameters_free
Fit the `free` status of several components and parameters at once.
set_parameters_value
Set the value of a parameter in components in a model to a specified
value.
as_dictionary
Exports the model to a dictionary that can be saved in a file.
See also
--------
Model1D
Model2D
"""
    def __init__(self):
        # Event aggregator for this model; observers can connect to
        # `events.fitted` to react after a fit step changes parameters.
        self.events = Events()
        self.events.fitted = Event("""
            Event that triggers after fitting changed at least one parameter.
            The event triggers after the fitting step was finished, and only of
            at least one of the parameters changed.
            Arguments
            ---------
            obj : Model
                The Model that the event belongs to
            """, arguments=['obj'])
    def __hash__(self):
        # This is needed to simulate a hashable object so that PySide does not
        # raise an exception when using windows.connect
        # NOTE(review): identity-based hashing means two models never compare
        # hash-equal even if their contents match.
        return id(self)
def store(self, name=None):
"""Stores current model in the original signal
Parameters
----------
name : {None, str}
Stored model name. Auto-generated if left empty
"""
if self.signal is None:
raise ValueError("Cannot store models with no signal")
s = self.signal
s.models.store(self, name)
def save(self, file_name, name=None, **kwargs):
"""Saves signal and its model to a file
Parameters
----------
file_name : str
Name of the file
name : {None, str}
Stored model name. Auto-generated if left empty
**kwargs :
Other keyword arguments are passed onto `BaseSignal.save()`
"""
if self.signal is None:
raise ValueError("Currently cannot save models with no signal")
else:
self.store(name)
self.signal.save(file_name, **kwargs)
    def _load_dictionary(self, dic):
        """Load data from dictionary.
        Parameters
        ----------
        dic : dictionary
            _whitelist : dictionary
                a dictionary with keys used as references of save attributes,
                for more information, see
                :meth:`hyperspy.misc.export_dictionary.load_from_dictionary`
            components : dictionary (optional)
                Dictionary, with information about components of the model (see
                the documentation of component.as_dictionary() method)
            * any field from _whitelist.keys() *
        """
        if 'components' in dic:
            # Start from an empty model; removing through self.remove also
            # unwires parameter twins and plot callbacks.
            while len(self) != 0:
                self.remove(self[0])
            id_dict = {}
            for comp in dic['components']:
                init_args = {}
                for k, flags_str in comp['_whitelist'].items():
                    if not len(flags_str):
                        continue
                    # Only entries flagged with 'init' are constructor args.
                    if 'init' in parse_flag_string(flags_str):
                        init_args[k] = reconstruct_object(flags_str, comp[k])
                self.append(reconstruct_component(comp, **init_args))
                # Map serialized parameter ids to live Parameter objects so
                # that twins can be re-linked once all components exist.
                id_dict.update(self[-1]._load_dictionary(comp))
            # deal with twins:
            for comp in dic['components']:
                for par in comp['parameters']:
                    for tw in par['_twins']:
                        id_dict[tw].twin = id_dict[par['self']]
        if '_whitelist' in dic:
            load_from_dictionary(self, dic)
def __repr__(self):
title = self.signal.metadata.General.title
class_name = str(self.__class__).split("'")[1].split('.')[-1]
if len(title):
return "<%s, title: %s>" % (
class_name, self.signal.metadata.General.title)
else:
return "<%s>" % class_name
    def _get_component(self, thing):
        # Resolve `thing` -- an index, a name, a Component, or an iterable of
        # those -- to component(s) belonging to this model. Raises ValueError
        # for unknown ids or components not in the model.
        # NOTE: the str check must come before the iterable check; strings are
        # iterable, but here they are names to be looked up.
        if isinstance(thing, int) or isinstance(thing, str):
            thing = self[thing]
        elif np.iterable(thing):
            # Resolve each element recursively; membership is validated by
            # the recursive calls, so return the list directly.
            thing = [self._get_component(athing) for athing in thing]
            return thing
        elif not isinstance(thing, Component):
            raise ValueError("Not a component or component id.")
        if thing in self:
            return thing
        else:
            raise ValueError("The component is not in the model.")
    def insert(self, **kwargs):
        # list.insert is deliberately disabled: components must be added via
        # `append` so arrays, names and plot callbacks are set up correctly.
        raise NotImplementedError
    def append(self, thing):
        """Add component to Model.
        Parameters
        ----------
        thing: `Component` instance.
        Raises
        ------
        ValueError
            If `thing` is not a Component or is already in the model.
        """
        if not isinstance(thing, Component):
            raise ValueError(
                "Only `Component` instances can be added to a model")
        # Check if any of the other components in the model has the same name
        if thing in self:
            raise ValueError("Component already in model")
        component_name_list = [component.name for component in self]
        if thing.name:
            name_string = thing.name
        else:
            name_string = thing.__class__.__name__
        # Disambiguate duplicate names by appending "_0", "_1", ...
        if name_string in component_name_list:
            temp_name_string = name_string
            index = 0
            while temp_name_string in component_name_list:
                temp_name_string = name_string + "_" + str(index)
                index += 1
            name_string = temp_name_string
        thing.name = name_string
        thing._axes_manager = self.axes_manager
        # Allocate the component's parameter maps over the navigation axes.
        thing._create_arrays()
        list.append(self, thing)
        thing.model = self
        # Expose the component on `self.components` under a slugified (valid
        # identifier) name for IPython tab completion.
        setattr(self.components, slugify(name_string,
                                         valid_variable_name=True), thing)
        if self._plot_active is True:
            self._connect_parameters2update_plot(components=[thing])
            self.update_plot()
def extend(self, iterable):
for object in iterable:
self.append(object)
def __delitem__(self, thing):
thing = self.__getitem__(thing)
self.remove(thing)
    def remove(self, thing):
        """Remove component from model.
        Examples
        --------
        >>> s = hs.signals.Signal1D(np.empty(1))
        >>> m = s.create_model()
        >>> g = hs.model.components1D.Gaussian()
        >>> m.append(g)
        You could remove `g` like this
        >>> m.remove(g)
        Like this:
        >>> m.remove("Gaussian")
        Or like this:
        >>> m.remove(0)
        """
        # Accepts an index, a name, a component, or an iterable of those.
        thing = self._get_component(thing)
        if not np.iterable(thing):
            thing = [thing, ]
        for athing in thing:
            for parameter in athing.parameters:
                # Remove the parameter from its twin _twins
                parameter.twin = None
                # Iterate over a copy because unlinking mutates _twins.
                for twin in [twin for twin in parameter._twins]:
                    twin.twin = None
            list.remove(self, athing)
            athing.model = None
        if self._plot_active:
            self.update_plot()
    def as_signal(self, component_list=None, out_of_range_to_nan=True,
                  show_progressbar=None, out=None, parallel=None):
        """Returns a recreation of the dataset using the model.
        The spectral range that is not fitted is filled with nans.
        Parameters
        ----------
        component_list : list of HyperSpy components, optional
            If a list of components is given, only the components given in the
            list is used in making the returned spectrum. The components can
            be specified by name, index or themselves.
        out_of_range_to_nan : bool
            If True the spectral range that is not fitted is filled with nans.
            Default True.
        %s
        out : {None, BaseSignal}
            The signal where to put the result into. Convenient for parallel
            processing. If None (default), creates a new one. If passed, it is
            assumed to be of correct shape and dtype and not checked.
        %s
        Returns
        -------
        BaseSignal : An instance of the same class as `BaseSignal`.
        Examples
        --------
        >>> s = hs.signals.Signal1D(np.random.random((10,100)))
        >>> m = s.create_model()
        >>> l1 = hs.model.components1D.Lorentzian()
        >>> l2 = hs.model.components1D.Lorentzian()
        >>> m.append(l1)
        >>> m.append(l2)
        >>> s1 = m.as_signal()
        >>> s2 = m.as_signal(component_list=[l1])
        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        if parallel is None:
            parallel = preferences.General.parallel
        if out is None:
            # Fresh output signal pre-filled with NaNs.
            data = np.empty(self.signal.data.shape, dtype='float')
            data.fill(np.nan)
            signal = self.signal.__class__(
                data,
                axes=self.signal.axes_manager._get_axes_dicts())
            signal.metadata.General.title = (
                self.signal.metadata.General.title + " from fitted model")
            signal.metadata.Signal.binned = self.signal.metadata.Signal.binned
        else:
            signal = out
            data = signal.data
        # Normalize `parallel` to either False or a worker count >= 2.
        if parallel is True:
            from os import cpu_count
            parallel = cpu_count()
        if not isinstance(parallel, int):
            parallel = int(parallel)
        if parallel < 2:
            parallel = False
        if out_of_range_to_nan is True:
            # Temporarily evaluate over the full spectral range; the backup
            # is restored at the end.
            channel_switches_backup = copy.copy(self.channel_switches)
            self.channel_switches[:] = True
        if parallel is False:
            self._as_signal_iter(component_list=component_list,
                                 show_progressbar=show_progressbar, data=data)
        else:
            am = self.axes_manager
            nav_shape = am.navigation_shape
            if len(nav_shape):
                # Split along the largest navigation axis.
                ind = np.argmax(nav_shape)
                size = nav_shape[ind]
            if not len(nav_shape) or size < 4:
                # no or not enough navigation, just run without threads
                return self.as_signal(component_list=component_list,
                                      show_progressbar=show_progressbar,
                                      out=signal, parallel=False)
            # NOTE(review): `size / 2` is a float under true division;
            # ThreadPoolExecutor tolerates it and range() casts below, but an
            # explicit int would be cleaner -- confirm before changing.
            parallel = min(parallel, size / 2)
            splits = [len(sp) for sp in np.array_split(np.arange(size),
                                                       parallel)]
            models = []
            data_slices = []
            slices = [slice(None), ] * len(nav_shape)
            for sp, csm in zip(splits, np.cumsum(splits)):
                # Each worker gets a navigation sub-model plus the matching
                # view into the shared output array.
                slices[ind] = slice(csm - sp, csm)
                models.append(self.inav[tuple(slices)])
                array_slices = self.signal._get_array_slices(tuple(slices),
                                                             True)
                data_slices.append(data[array_slices])
            from concurrent.futures import ThreadPoolExecutor
            with ThreadPoolExecutor(max_workers=parallel) as exe:
                _map = exe.map(
                    lambda thing: thing[0]._as_signal_iter(
                        data=thing[1],
                        component_list=component_list,
                        show_progressbar=thing[2] + 1 if show_progressbar else False),
                    zip(models, data_slices, range(int(parallel))))
            # Consume the lazy map so worker exceptions surface here.
            _ = next(_map)
        if out_of_range_to_nan is True:
            self.channel_switches[:] = channel_switches_backup
        return signal
    as_signal.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_INT_ARG)
    def _as_signal_iter(self, component_list=None, show_progressbar=None,
                        data=None):
        """Evaluate the model at every navigation index and write the result
        into `data` in place.

        Parameters
        ----------
        component_list : list or None
            Components to evaluate; every other component is temporarily
            deactivated. If None/empty, current active states are kept.
        show_progressbar : bool or int
            If an int, it is used as the progressbar `position` so several
            threads can each draw their own bar without clashing.
        data : numpy array
            Pre-allocated output; only the channels where
            `channel_switches` is True are written.
        """
        # Note that show_progressbar can be an int to determine the progressbar
        # position for a thread-friendly bars. Otherwise race conditions are
        # ugly...
        if data is None:
            raise ValueError('No data supplied')
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        # stash_active_state restores the original active/active_map state
        # of the components on exit.
        with stash_active_state(self if component_list else []):
            if component_list:
                component_list = [self._get_component(x)
                                  for x in component_list]
                for component_ in self:
                    active = component_ in component_list
                    if component_.active_is_multidimensional:
                        if active:
                            continue  # Keep active_map
                        component_.active_is_multidimensional = False
                    component_.active = active
            maxval = self.axes_manager.navigation_size
            enabled = show_progressbar and (maxval > 0)
            pbar = progressbar(total=maxval, disable=not enabled,
                               position=show_progressbar, leave=True)
            for index in self.axes_manager:
                self.fetch_stored_values(only_fixed=False)
                # Evaluate at this index and store into the switched-on
                # channels only.
                data[self.axes_manager._getitem_tuple][
                    np.where(self.channel_switches)] = self.__call__(
                    non_convolved=not self.convolved, onlyactive=True).ravel()
                pbar.update(1)
@property
def _plot_active(self):
if self._plot is not None and self._plot.is_active:
return True
else:
return False
def _connect_parameters2update_plot(self, components):
if self._plot_active is False:
return
for i, component in enumerate(components):
component.events.active_changed.connect(
self._model_line._auto_update_line, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(
self._model_line._auto_update_line, [])
def _disconnect_parameters2update_plot(self, components):
if self._model_line is None:
return
for component in components:
component.events.active_changed.disconnect(
self._model_line._auto_update_line)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(
self._model_line._auto_update_line)
def update_plot(self, *args, **kwargs):
"""Update model plot.
The updating can be suspended using `suspend_update`.
See Also
--------
suspend_update
"""
if self._plot_active is True and self._suspend_update is False:
try:
if self._model_line is not None:
self._model_line.update()
for component in [component for component in self if
component.active is True]:
self._update_component_line(component)
except BaseException:
self._disconnect_parameters2update_plot(components=self)
    @contextmanager
    def suspend_update(self, update_on_resume=True):
        """Prevents plot from updating until 'with' clause completes.

        Parameters
        ----------
        update_on_resume : bool
            If True, trigger a single plot update when the 'with' block
            exits.

        See Also
        --------
        update_plot
        """
        es = EventSuppressor()
        # Silence navigation-index changes for the duration of the block.
        es.add(self.axes_manager.events.indices_changed)
        if self._model_line:
            # Silence the model-line auto-update callback for every
            # component/parameter event.
            f = self._model_line._auto_update_line
            for c in self:
                es.add(c.events, f)
                for p in c.parameters:
                    es.add(p.events, f)
        # Also silence the per-component plot lines, where present.
        for c in self:
            if hasattr(c, '_model_plot_line'):
                f = c._model_plot_line._auto_update_line
                es.add(c.events, f)
                for p in c.parameters:
                    es.add(p.events, f)
        # Save/restore the previous flag so nested suspensions behave.
        old = self._suspend_update
        self._suspend_update = True
        with es.suppress():
            yield
        self._suspend_update = old
        if update_on_resume is True:
            self.update_plot()
def _close_plot(self):
if self._plot_components is True:
self.disable_plot_components()
self._disconnect_parameters2update_plot(components=self)
self._model_line = None
@staticmethod
def _connect_component_line(component):
if hasattr(component, "_model_plot_line"):
f = component._model_plot_line._auto_update_line
component.events.active_changed.connect(f, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(f, [])
@staticmethod
def _disconnect_component_line(component):
if hasattr(component, "_model_plot_line"):
f = component._model_plot_line._auto_update_line
component.events.active_changed.disconnect(f)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(f)
def _connect_component_lines(self):
for component in self:
if component.active:
self._connect_component_line(component)
def _disconnect_component_lines(self):
for component in self:
if component.active:
self._disconnect_component_line(component)
@staticmethod
def _update_component_line(component):
if hasattr(component, "_model_plot_line"):
component._model_plot_line.update()
def _disable_plot_component(self, component):
self._disconnect_component_line(component)
if hasattr(component, "_model_plot_line"):
component._model_plot_line.close()
del component._model_plot_line
self._plot_components = False
def enable_plot_components(self):
if self._plot is None or self._plot_components:
return
self._plot_components = True
for component in [component for component in self if
component.active]:
self._plot_component(component)
def disable_plot_components(self):
if self._plot is None:
return
for component in self:
self._disable_plot_component(component)
self._plot_components = False
def _set_p0(self):
self.p0 = ()
for component in self:
if component.active:
for parameter in component.free_parameters:
self.p0 = (self.p0 + (parameter.value,)
if parameter._number_of_elements == 1
else self.p0 + parameter.value)
def set_boundaries(self):
"""Generate the boundary list.
Necessary before fitting with a boundary aware optimizer.
"""
self.free_parameters_boundaries = []
for component in self:
if component.active:
for param in component.free_parameters:
if param._number_of_elements == 1:
self.free_parameters_boundaries.append((
param._bounds))
else:
self.free_parameters_boundaries.extend((
param._bounds))
def set_mpfit_parameters_info(self):
self.mpfit_parinfo = []
for component in self:
if component.active:
for param in component.free_parameters:
limited = [False, False]
limits = [0, 0]
if param.bmin is not None:
limited[0] = True
limits[0] = param.bmin
if param.bmax is not None:
limited[1] = True
limits[1] = param.bmax
if param._number_of_elements == 1:
self.mpfit_parinfo.append(
{'limited': limited,
'limits': limits})
else:
self.mpfit_parinfo.extend((
{'limited': limited,
'limits': limits},) * param._number_of_elements)
def ensure_parameters_in_bounds(self):
"""For all active components, snaps their free parameter values to
be within their boundaries (if bounded). Does not touch the array of
values.
"""
for component in self:
if component.active:
for param in component.free_parameters:
bmin = -np.inf if param.bmin is None else param.bmin
bmax = np.inf if param.bmax is None else param.bmax
if param._number_of_elements == 1:
if not bmin <= param.value <= bmax:
min_d = np.abs(param.value - bmin)
max_d = np.abs(param.value - bmax)
if min_d < max_d:
param.value = bmin
else:
param.value = bmax
else:
values = np.array(param.value)
if param.bmin is not None:
minmask = values < bmin
values[minmask] = bmin
if param.bmax is not None:
maxmask = values > bmax
values[maxmask] = bmax
param.value = tuple(values)
def store_current_values(self):
""" Store the parameters of the current coordinates into the
parameters array.
If the parameters array has not being defined yet it creates it filling
it with the current parameters."""
for component in self:
if component.active:
component.store_current_parameters_in_map()
def fetch_stored_values(self, only_fixed=False):
"""Fetch the value of the parameters that has been previously stored.
Parameters
----------
only_fixed : bool, optional
If True, only the fixed parameters are fetched.
See Also
--------
store_current_values
"""
cm = (self.suspend_update if self._plot_active
else dummy_context_manager)
with cm(update_on_resume=True):
for component in self:
component.fetch_stored_values(only_fixed=only_fixed)
def fetch_values_from_array(self, array, array_std=None):
"""Fetch the parameter values from the given array, optionally also
fetching the standard deviations.
Parameters
----------
array : array
array with the parameter values
array_std : {None, array}
array with the standard deviations of parameters
"""
self.p0 = array
self._fetch_values_from_p0(p_std=array_std)
def _fetch_values_from_p0(self, p_std=None):
"""Fetch the parameter values from the output of the optimizer `self.p0`
Parameters
----------
p_std : array, optional
array containing the corresponding standard deviation.
"""
comp_p_std = None
counter = 0
for component in self: # Cut the parameters list
if component.active is True:
if p_std is not None:
comp_p_std = p_std[
counter: counter +
component._nfree_param]
component.fetch_values_from_array(
self.p0[counter: counter + component._nfree_param],
comp_p_std, onlyfree=True)
counter += component._nfree_param
    def _model2plot(self, axes_manager, out_of_range2nans=True):
        """Return the model evaluated for `axes_manager`, as a 1D array
        suitable for plotting.

        Temporarily switches the model to `axes_manager` (restoring the
        original afterwards). When `out_of_range2nans` is True, the
        switched-off channels are filled with NaNs so the returned array
        spans the full signal axis.
        """
        old_axes_manager = None
        if axes_manager is not self.axes_manager:
            # Swap in the requested axes_manager and re-sync parameters.
            old_axes_manager = self.axes_manager
            self.axes_manager = axes_manager
            self.fetch_stored_values()
        s = self.__call__(non_convolved=False, onlyactive=True)
        if old_axes_manager is not None:
            # Restore the original axes_manager and parameter values.
            self.axes_manager = old_axes_manager
            self.fetch_stored_values()
        if out_of_range2nans is True:
            # Embed the evaluated channels into a full-axis NaN array.
            ns = np.empty(self.axis.axis.shape)
            ns.fill(np.nan)
            ns[np.where(self.channel_switches)] = s
            s = ns
        return s
def _model_function(self, param):
self.p0 = param
self._fetch_values_from_p0()
to_return = self.__call__(non_convolved=False, onlyactive=True)
return to_return
def _errfunc2(self, param, y, weights=None):
if weights is None:
weights = 1.
return ((weights * self._errfunc(param, y)) ** 2).sum()
    def _errfunc4mpfit(self, p, fjac=None, x=None, y=None, weights=None):
        """Residual function with the call signature expected by mpfit.

        Returns ``[status, residuals]`` when mpfit asks for function values
        (``fjac is None``) and ``[status, jacobian]`` when it asks for
        analytical derivatives.
        """
        if fjac is None:
            errfunc = self._model_function(p).ravel() - y
            if weights is not None:
                errfunc *= weights.ravel()
            status = 0
            return [status, errfunc]
        else:
            # mpfit requests the analytical Jacobian (transposed).
            return [0, self._jacobian(p, y).T]
    def _calculate_chisq(self):
        """Compute chi-squared at the current navigation position and store
        it in ``self.chisq``.

        Uses ``metadata.Signal.Noise_properties.variance`` when present
        (scalar or `BaseSignal`); otherwise a variance of 1 is assumed.
        """
        if self.signal.metadata.has_item('Signal.Noise_properties.variance'):
            variance = self.signal.metadata.Signal.Noise_properties.variance
            if isinstance(variance, BaseSignal):
                # Variance at the current index, restricted to the
                # switched-on channels.
                variance = variance.data.__getitem__(
                    self.axes_manager._getitem_tuple)[np.where(
                        self.channel_switches)]
        else:
            variance = 1.0
        d = self(onlyactive=True).ravel() - self.signal()[np.where(
            self.channel_switches)]
        d *= d / (1. * variance)  # d = difference^2 / variance.
        self.chisq.data[self.signal.axes_manager.indices[::-1]] = d.sum()
    def _set_current_degrees_of_freedom(self):
        # Record the number of free parameters (len(p0)) at the current
        # navigation index; consumed by the `red_chisq` property.
        self.dof.data[self.signal.axes_manager.indices[::-1]] = len(self.p0)
    @property
    def red_chisq(self):
        """Reduced chi-squared. Calculated from self.chisq and self.dof
        """
        # Denominator: number of switched-on channels minus the number of
        # free parameters (dof) minus one.
        tmp = self.chisq / (- self.dof + self.channel_switches.sum() - 1)
        tmp.metadata.General.title = self.signal.metadata.General.title + \
            ' reduced chi-squared'
        return tmp
def fit(self, fitter="leastsq", method='ls', grad=False,
bounded=False, ext_bounding=False, update_plot=False,
**kwargs):
"""Fits the model to the experimental data.
The chi-squared, reduced chi-squared and the degrees of freedom are
computed automatically when fitting. They are stored as signals, in the
`chisq`, `red_chisq` and `dof`. Note that unless
``metadata.Signal.Noise_properties.variance`` contains an
accurate estimation of the variance of the data, the chi-squared and
reduced chi-squared cannot be computed correctly. This is also true for
homocedastic noise.
Parameters
----------
fitter : {"leastsq", "mpfit", "odr", "Nelder-Mead",
"Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC",
"Differential Evolution"}
The optimization algorithm used to perform the fitting. Default
is "leastsq".
"leastsq" performs least-squares optimization, and supports
bounds on parameters.
"mpfit" performs least-squares using the Levenberg–Marquardt
algorithm and supports bounds on parameters.
"odr" performs the optimization using the orthogonal distance
regression algorithm. It does not support bounds.
"Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B"
and "TNC" are wrappers for scipy.optimize.minimize(). Only
"L-BFGS-B" and "TNC" support bounds.
"Differential Evolution" is a global optimization method.
"leastsq", "mpfit" and "odr" can estimate the standard deviation of
the estimated value of the parameters if the
"metada.Signal.Noise_properties.variance" attribute is defined.
Note that if it is not defined, the standard deviation is estimated
using a variance of 1. If the noise is heteroscedastic, this can
result in a biased estimation of the parameter values and errors.
If `variance` is a `Signal` instance of the same `navigation_dimension`
as the signal, and `method` is "ls", then weighted least squares
is performed.
method : {'ls', 'ml', 'custom'}
Choose 'ls' (default) for least-squares and 'ml' for Poisson
maximum likelihood estimation. The latter is not available when
'fitter' is "leastsq", "odr" or "mpfit". 'custom' allows passing
your own minimisation function as a kwarg "min_function", with
optional gradient kwarg "min_function_grad". See User Guide for
details.
grad : bool
If True, the analytical gradient is used if defined to
speed up the optimization.
bounded : bool
If True performs bounded optimization if the fitter
supports it.
update_plot : bool
If True, the plot is updated during the optimization
process. It slows down the optimization but it permits
to visualize the optimization progress.
ext_bounding : bool
If True, enforce bounding by keeping the value of the
parameters constant out of the defined bounding area.
**kwargs : key word arguments
Any extra key word argument will be passed to the chosen
fitter. For more information read the docstring of the optimizer
of your choice in `scipy.optimize`.
See Also
--------
multifit
"""
if fitter is None: # None meant "from preferences" before v1.3
fitter = "leastsq"
switch_aap = (update_plot != self._plot_active)
if switch_aap is True and update_plot is False:
cm = self.suspend_update
else:
cm = dummy_context_manager
# Check for deprecated minimizers
optimizer_dict = {"fmin": "Nelder-Mead",
"fmin_cg": "CG",
"fmin_ncg": "Newton-CG",
"fmin_bfgs": "BFGS",
"fmin_l_bfgs_b": "L-BFGS-B",
"fmin_tnc": "TNC",
"fmin_powell": "Powell"}
check_optimizer = optimizer_dict.get(fitter, None)
if check_optimizer:
warnings.warn(
"The method `%s` has been deprecated and will "
"be removed in HyperSpy 2.0. Please use "
"`%s` instead." % (fitter, check_optimizer),
VisibleDeprecationWarning)
fitter = check_optimizer
if bounded is True:
if fitter not in ("leastsq", "mpfit", "TNC",
"L-BFGS-B", "Differential Evolution"):
raise ValueError("Bounded optimization is only "
"supported by 'leastsq', "
"'mpfit', 'TNC', 'L-BFGS-B' or"
"'Differential Evolution'.")
else:
# this has to be done before setting the p0,
# so moved things around
self.ensure_parameters_in_bounds()
min_function = kwargs.pop('min_function', None)
min_function_grad = kwargs.pop('min_function_grad', None)
if method == 'custom':
if not callable(min_function):
raise ValueError('Custom minimization requires "min_function" '
'kwarg with a callable')
if grad is not False:
if min_function_grad is None:
raise ValueError('Custom gradient function should be '
'supplied with "min_function_grad" kwarg')
from functools import partial
min_function = partial(min_function, self)
if callable(min_function_grad):
min_function_grad = partial(min_function_grad, self)
with cm(update_on_resume=True):
self.p_std = None
self._set_p0()
old_p0 = self.p0
if ext_bounding:
self._enable_ext_bounding()
if grad is False:
approx_grad = True
jacobian = None
odr_jacobian = None
grad_ml = None
grad_ls = None
else:
approx_grad = False
jacobian = self._jacobian
odr_jacobian = self._jacobian4odr
grad_ml = self._gradient_ml
grad_ls = self._gradient_ls
if method in ['ml', 'custom']:
weights = None
if fitter in ("leastsq", "odr", "mpfit"):
raise NotImplementedError(
'"leastsq", "mpfit" and "odr" optimizers only support'
'least squares ("ls") method')
elif method == "ls":
metadata = self.signal.metadata
if "Signal.Noise_properties.variance" not in metadata:
variance = 1
else:
variance = metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
if (variance.axes_manager.navigation_shape ==
self.signal.axes_manager.navigation_shape):
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple)[
np.where(self.channel_switches)]
else:
raise AttributeError(
"The `navigation_shape` of the variance "
"signals is not equal to the variance shape "
"of the signal")
elif not isinstance(variance, numbers.Number):
raise AttributeError(
"Variance must be a number or a `Signal` instance "
"but currently it is a %s" % type(variance))
weights = 1. / np.sqrt(variance)
else:
raise ValueError(
'method must be "ls", "ml" or "custom" but %s given' %
method)
args = (self.signal()[np.where(self.channel_switches)],
weights)
# Least squares "dedicated" fitters
if fitter == "leastsq":
if bounded:
# leastsq with bounds requires scipy >= 0.17
if LooseVersion(
scipy.__version__) < LooseVersion("0.17"):
raise ImportError(
"leastsq with bounds requires SciPy >= 0.17")
self.set_boundaries()
ls_b = self.free_parameters_boundaries
ls_b = ([a if a is not None else -np.inf for a, b in ls_b],
[b if b is not None else np.inf for a, b in ls_b])
output = \
least_squares(self._errfunc, self.p0[:],
args=args, bounds=ls_b, **kwargs)
self.p0 = output.x
# Do Moore-Penrose inverse, discarding zero singular values
# to get pcov (as per scipy.optimize.curve_fit())
_, s, VT = svd(output.jac, full_matrices=False)
threshold = np.finfo(float).eps * \
max(output.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
elif bounded is False:
# This replicates the original "leastsq"
# behaviour in earlier versions of HyperSpy
# using the Levenberg-Marquardt algorithm
output = \
leastsq(self._errfunc, self.p0[:], Dfun=jacobian,
col_deriv=1, args=args, full_output=True,
**kwargs)
self.p0, pcov = output[0:2]
signal_len = sum([axis.size
for axis in self.axes_manager.signal_axes])
if (signal_len > len(self.p0)) and pcov is not None:
pcov *= ((self._errfunc(self.p0, *args) ** 2).sum() /
(len(args[0]) - len(self.p0)))
self.p_std = np.sqrt(np.diag(pcov))
self.fit_output = output
elif fitter == "odr":
modelo = odr.Model(fcn=self._function4odr,
fjacb=odr_jacobian)
mydata = odr.RealData(
self.axis.axis[np.where(self.channel_switches)],
self.signal()[np.where(self.channel_switches)],
sx=None,
sy=(1 / weights if weights is not None else None))
myodr = odr.ODR(mydata, modelo, beta0=self.p0[:], **kwargs)
myoutput = myodr.run()
result = myoutput.beta
self.p_std = myoutput.sd_beta
self.p0 = result
self.fit_output = myoutput
elif fitter == "mpfit":
autoderivative = 1
if grad:
autoderivative = 0
if bounded:
self.set_mpfit_parameters_info()
elif bounded is False:
self.mpfit_parinfo = None
m = mpfit(self._errfunc4mpfit, self.p0[:],
parinfo=self.mpfit_parinfo, functkw={
'y': self.signal()[self.channel_switches],
'weights': weights},
autoderivative=autoderivative,
quiet=1, **kwargs)
self.p0 = m.params
if hasattr(self, 'axis') and (self.axis.size > len(self.p0)) \
and m.perror is not None:
self.p_std = m.perror * np.sqrt(
(self._errfunc(self.p0, *args) ** 2).sum() /
(len(args[0]) - len(self.p0)))
self.fit_output = m
else:
# General optimizers
# Least squares or maximum likelihood
if method == "ml":
tominimize = self._poisson_likelihood_function
fprime = grad_ml
elif method == "ls":
tominimize = self._errfunc2
fprime = grad_ls
elif method == 'custom':
tominimize = min_function
fprime = min_function_grad
# OPTIMIZERS
# Derivative-free methods
if fitter in ("Nelder-Mead", "Powell"):
self.p0 = minimize(tominimize, self.p0, args=args,
method=fitter, **kwargs).x
# Methods using the gradient
elif fitter in ("CG", "BFGS", "Newton-CG"):
self.p0 = minimize(tominimize, self.p0, jac=fprime,
args=args, method=fitter, **kwargs).x
# Constrained optimizers using the gradient
elif fitter in ("TNC", "L-BFGS-B"):
if bounded:
self.set_boundaries()
elif bounded is False:
self.free_parameters_boundaries = None
self.p0 = minimize(tominimize, self.p0, jac=fprime,
args=args, method=fitter,
bounds=self.free_parameters_boundaries, **kwargs).x
# Global optimizers
elif fitter == "Differential Evolution":
if bounded:
self.set_boundaries()
else:
raise ValueError(
"Bounds must be specified for "
"'Differential Evolution' optimizer")
de_b = self.free_parameters_boundaries
de_b = tuple(((a if a is not None else -np.inf,
b if b is not None else np.inf) for a, b in de_b))
self.p0 = differential_evolution(tominimize, de_b,
args=args, **kwargs).x
else:
raise ValueError("""
The %s optimizer is not available.
Available optimizers:
Unconstrained:
--------------
Least-squares: leastsq and odr
General: Nelder-Mead, Powell, CG, BFGS, Newton-CG
Constrained:
------------
least_squares, mpfit, TNC and L-BFGS-B
Global:
-------
Differential Evolution
""" % fitter)
if np.iterable(self.p0) == 0:
self.p0 = (self.p0,)
self._fetch_values_from_p0(p_std=self.p_std)
self.store_current_values()
self._calculate_chisq()
self._set_current_degrees_of_freedom()
if ext_bounding is True:
self._disable_ext_bounding()
if np.any(old_p0 != self.p0):
self.events.fitted.trigger(self)
def multifit(self, mask=None, fetch_only_fixed=False,
autosave=False, autosave_every=10, show_progressbar=None,
interactive_plot=False, **kwargs):
"""Fit the data to the model at all the positions of the
navigation dimensions.
Parameters
----------
mask : NumPy array, optional
To mask (do not fit) at certain position pass a numpy.array
of type bool where True indicates that the data will not be
fitted at the given position.
fetch_only_fixed : bool
If True, only the fixed parameters values will be updated
when changing the positon. Default False.
autosave : bool
If True, the result of the fit will be saved automatically
with a frequency defined by autosave_every. Default False.
autosave_every : int
Save the result of fitting every given number of spectra.
Default 10.
%s
interactive_plot : bool
If True, update the plot for every position as they are processed.
Note that this slows down the fitting by a lot, but it allows for
interactive monitoring of the fitting (if in interactive mode).
**kwargs : key word arguments
Any extra key word argument will be passed to
the fit method. See the fit method documentation for
a list of valid arguments.
See Also
--------
fit
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
if autosave is not False:
fd, autosave_fn = tempfile.mkstemp(
prefix='hyperspy_autosave-',
dir='.', suffix='.npz')
os.close(fd)
autosave_fn = autosave_fn[:-4]
_logger.info(
"Autosaving each %s pixels to %s.npz" % (autosave_every,
autosave_fn))
_logger.info(
"When multifit finishes its job the file will be deleted")
if mask is not None and (
mask.shape != tuple(
self.axes_manager._navigation_shape_in_array)):
raise ValueError(
"The mask must be a numpy array of boolean type with "
" shape: %s" +
str(self.axes_manager._navigation_shape_in_array))
masked_elements = 0 if mask is None else mask.sum()
maxval = self.axes_manager.navigation_size - masked_elements
show_progressbar = show_progressbar and (maxval > 0)
i = 0
with self.axes_manager.events.indices_changed.suppress_callback(
self.fetch_stored_values):
if interactive_plot:
outer = dummy_context_manager
inner = self.suspend_update
else:
outer = self.suspend_update
inner = dummy_context_manager
with outer(update_on_resume=True):
with progressbar(total=maxval, disable=not show_progressbar,
leave=True) as pbar:
for index in self.axes_manager:
with inner(update_on_resume=True):
if mask is None or not mask[index[::-1]]:
self.fetch_stored_values(
only_fixed=fetch_only_fixed)
self.fit(**kwargs)
i += 1
pbar.update(1)
if autosave is True and i % autosave_every == 0:
self.save_parameters2file(autosave_fn)
if autosave is True:
_logger.info(
'Deleting the temporary file %s pixels' % (
autosave_fn + 'npz'))
os.remove(autosave_fn + '.npz')
multifit.__doc__ %= (SHOW_PROGRESSBAR_ARG)
def save_parameters2file(self, filename):
"""Save the parameters array in binary format.
The data is saved to a single file in numpy's uncompressed ``.npz``
format.
Parameters
----------
filename : str
See Also
--------
load_parameters_from_file, export_results
Notes
-----
This method can be used to save the current state of the model in a way
that can be loaded back to recreate the it using `load_parameters_from
file`. Actually, as of HyperSpy 0.8 this is the only way to do so.
However, this is known to be brittle. For example see
https://github.com/hyperspy/hyperspy/issues/341.
"""
kwds = {}
i = 0
for component in self:
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
kwds['%s_%s.%s' % (i, cname, pname)] = param.map
i += 1
np.savez(filename, **kwds)
def load_parameters_from_file(self, filename):
"""Loads the parameters array from a binary file written with the
'save_parameters2file' function.
Parameters
---------
filename : str
See Also
--------
save_parameters2file, export_results
Notes
-----
In combination with `save_parameters2file`, this method can be used to
recreate a model stored in a file. Actually, before HyperSpy 0.8 this
is the only way to do so. However, this is known to be brittle. For
example see https://github.com/hyperspy/hyperspy/issues/341.
"""
f = np.load(filename)
i = 0
for component in self: # Cut the parameters list
cname = component.name.lower().replace(' ', '_')
for param in component.parameters:
pname = param.name.lower().replace(' ', '_')
param.map = f['%s_%s.%s' % (i, cname, pname)]
i += 1
self.fetch_stored_values()
def assign_current_values_to_all(self, components_list=None, mask=None):
"""Set parameter values for all positions to the current ones.
Parameters
----------
component_list : list of components, optional
If a list of components is given, the operation will be performed
only in the value of the parameters of the given components.
The components can be specified by name, index or themselves.
mask : boolean numpy array or None, optional
The operation won't be performed where mask is True.
"""
if components_list is None:
components_list = []
for comp in self:
if comp.active:
components_list.append(comp)
else:
components_list = [self._get_component(x) for x in components_list]
for comp in components_list:
for parameter in comp.parameters:
parameter.assign_current_value_to_all(mask=mask)
def _enable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = True
def _disable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = False
def export_results(self, folder=None, format="hspy", save_std=False,
only_free=True, only_active=True):
"""Export the results of the parameters of the model to the desired
folder.
Parameters
----------
folder : str or None
The path to the folder where the file will be saved. If `None` the
current folder is used by default.
format : str
The extension of the file format. It must be one of the
fileformats supported by HyperSpy. The default is "hspy".
save_std : bool
If True, also the standard deviation will be saved.
only_free : bool
If True, only the value of the parameters that are free will be
exported.
only_active : bool
If True, only the value of the active parameters will be exported.
Notes
-----
The name of the files will be determined by each the Component and
each Parameter name attributes. Therefore, it is possible to customise
the file names modify the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.export(folder=folder, format=format,
save_std=save_std, only_free=only_free)
def plot_results(self, only_free=True, only_active=True):
"""Plot the value of the parameters of the model
Parameters
----------
only_free : bool
If True, only the value of the parameters that are free will be
plotted.
only_active : bool
If True, only the value of the active parameters will be plotted.
Notes
-----
The name of the files will be determined by each the Component and
each Parameter name attributes. Therefore, it is possible to customise
the file names modify the name attributes.
"""
for component in self:
if only_active is False or component.active:
component.plot(only_free=only_free)
def print_current_values(self, only_free=False, only_active=False,
component_list=None, fancy=True):
"""Prints the current values of the parameters of all components.
Parameters
----------
only_free : bool
If True, only components with free parameters will be printed. Within these,
only parameters which are free will be printed.
only_active : bool
If True, only values of active components will be printed
component_list : None or list of components.
If None, print all components.
fancy : bool
If True, attempts to print using html rather than text in the notebook.
"""
if fancy:
display(current_model_values(
model=self, only_free=only_free, only_active=only_active,
component_list=component_list))
else:
display_pretty(current_model_values(
model=self, only_free=only_free, only_active=only_active,
component_list=component_list))
def set_parameters_not_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to not free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
If None, will set all the parameters to not free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to not free.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> m.append(v1)
>>> m.set_parameters_not_free()
>>> m.set_parameters_not_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_not_free(parameter_name_list)
def set_parameters_free(self, component_list=None,
parameter_name_list=None):
"""
Sets the parameters in a component in a model to free.
Parameters
----------
component_list : None, or list of hyperspy components, optional
If None, will apply the function to all components in the model.
If list of components, will apply the functions to the components
in the list. The components can be specified by name, index or
themselves.
parameter_name_list : None or list of strings, optional
If None, will set all the parameters to not free.
If list of strings, will set all the parameters with the same name
as the strings in parameter_name_list to not free.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> m.append(v1)
>>> m.set_parameters_free()
>>> m.set_parameters_free(component_list=[v1],
parameter_name_list=['area','centre'])
See also
--------
set_parameters_not_free
hyperspy.component.Component.set_parameters_free
hyperspy.component.Component.set_parameters_not_free
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.set_parameters_free(parameter_name_list)
def set_parameters_value(
self,
parameter_name,
value,
component_list=None,
only_current=False):
"""
Sets the value of a parameter in components in a model to a specified
value
Parameters
----------
parameter_name : string
Name of the parameter whose value will be changed
value : number
The new value of the parameter
component_list : list of hyperspy components, optional
A list of components whose parameters will changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v2 = hs.model.components1D.Voigt()
>>> m.extend([v1,v2])
>>> m.set_parameters_value('area', 5)
>>> m.set_parameters_value('area', 5, component_list=[v1])
>>> m.set_parameters_value('area', 5, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
for _parameter in _component.parameters:
if _parameter.name == parameter_name:
if only_current:
_parameter.value = value
_parameter.store_current_value_in_array()
else:
_parameter.value = value
_parameter.assign_current_value_to_all()
def as_dictionary(self, fullcopy=True):
    """Return a dictionary of the model, including all components,
    degrees of freedom (dof) and chi-squared (chisq) with values.

    Parameters
    ----------
    fullcopy : bool (optional, True)
        Copies of objects are stored, not references. If any found,
        functions will be pickled and signals converted to dictionaries

    Returns
    -------
    dictionary : a complete dictionary of the model, which includes at
    least the following fields:

        components : list
            a list of dictionaries of components, one per
        _whitelist : dictionary
            a dictionary with keys used as references for saved
            attributes, for more information, see
            :meth:`hyperspy.misc.export_dictionary.export_to_dictionary`
        * any field from _whitelist.keys() *

    Examples
    --------
    >>> s = signals.Signal1D(np.random.random((10,100)))
    >>> m = s.create_model()
    >>> l1 = components1d.Lorentzian()
    >>> l2 = components1d.Lorentzian()
    >>> m.append(l1)
    >>> m.append(l2)
    >>> d = m.as_dictionary()
    >>> m2 = s.create_model(dictionary=d)
    """
    dic = {'components': [c.as_dictionary(fullcopy) for c in self]}
    export_to_dictionary(self, self._whitelist, dic, fullcopy)

    def remove_empty_numpy_strings(dic):
        # Recursively replace zero-length numpy byte strings with plain
        # Python '' so that the dictionary serialises cleanly.
        for k, v in dic.items():
            if isinstance(v, dict):
                remove_empty_numpy_strings(v)
            elif isinstance(v, list):
                for i, vv in enumerate(v):
                    if isinstance(vv, dict):
                        remove_empty_numpy_strings(vv)
                    elif isinstance(vv, np.string_) and len(vv) == 0:
                        # Bug fix: assign back into the list.  The old
                        # code rebound the loop variable (``vv = ''``),
                        # which left the empty numpy string in place.
                        v[i] = ''
            elif isinstance(v, np.string_) and len(v) == 0:
                # Overwrite in place.  Deleting the key first while
                # iterating over dic.items() (as the old code did) was
                # unnecessary and risked a RuntimeError from changing
                # the dict size during iteration.
                dic[k] = ''

    remove_empty_numpy_strings(dic)
    return dic
def set_component_active_value(
self, value, component_list=None, only_current=False):
"""
Sets the component 'active' parameter to a specified value
Parameters
----------
value : bool
The new value of the 'active' parameter
component_list : list of hyperspy components, optional
A list of components whose parameters will changed. The components
can be specified by name, index or themselves.
only_current : bool, default False
If True, will only change the parameter value at the current
position in the model.
If False, will change the parameter value for all the positions.
Examples
--------
>>> v1 = hs.model.components1D.Voigt()
>>> v2 = hs.model.components1D.Voigt()
>>> m.extend([v1,v2])
>>> m.set_component_active_value(False)
>>> m.set_component_active_value(True, component_list=[v1])
>>> m.set_component_active_value(False, component_list=[v1],
only_current=True)
"""
if not component_list:
component_list = []
for _component in self:
component_list.append(_component)
else:
component_list = [self._get_component(x) for x in component_list]
for _component in component_list:
_component.active = value
if _component.active_is_multidimensional:
if only_current:
_component._active_array[
self.axes_manager.indices[::-1]] = value
else:
_component._active_array.fill(value)
def __getitem__(self, value):
    """x.__getitem__(y) <==> x[y]

    A component can be looked up either by plain list indexing
    (integer/slice) or by a string that matches its ``name`` attribute
    or, when the component is unnamed, its class name.
    """
    if not isinstance(value, str):
        # Not a string: plain list indexing on the model itself.
        return list.__getitem__(self, value)
    matches = []
    for component in self:
        if component.name:
            if component.name == value:
                matches.append(component)
        elif component.__class__.__name__ == value:
            # Unnamed components fall back to matching the class name.
            matches.append(component)
    if not matches:
        raise ValueError(
            "Component name \"" + str(value) +
            "\" not found in model")
    if len(matches) > 1:
        raise ValueError(
            "There are several components with "
            "the name \"" + str(value) + "\"")
    return matches[0]
def create_samfire(self, workers=None, setup=True, **kwargs):
    """Create and return a SAMFire object bound to this model.

    Parameters
    ----------
    workers : {None, int}
        Number of workers to initialise.  Zero runs all computations
        serially.  None (the default) attempts to use
        (number-of-cores - 1), falling back to one worker on a
        single-core machine.
    setup : bool
        Whether the setup should be run upon initialization.
    **kwargs
        Forwarded to the _setup call and, in turn, to SamfirePool.
    """
    # Imported lazily so that importing the model module does not pull
    # in the samfire machinery.
    from hyperspy.samfire import Samfire
    return Samfire(self, workers=workers, setup=setup, **kwargs)
class ModelSpecialSlicers(object):
    """Slicing helper wrapping a model.

    Indexing an instance slices the wrapped model's signal (over the
    navigation or the signal axes, depending on ``isNavigation``) and
    returns a new model rebuilt on that sliced signal, with components,
    parameters and parameter twins copied over.
    """

    def __init__(self, model, isNavigation):
        # isNavigation selects which set of axes __getitem__ slices.
        self.isNavigation = isNavigation
        self.model = model

    def __getitem__(self, slices):
        # Translate the user-facing slices into raw array slices and
        # produce the correspondingly sliced signal.
        array_slices = self.model.signal._get_array_slices(
            slices,
            self.isNavigation)
        _signal = self.model.signal._slicer(slices, self.isNavigation)
        # TODO: for next major release, change model creation defaults to not
        # automate anything. For now we explicitly look for "auto_" kwargs and
        # disable them:
        import inspect
        pars = inspect.signature(_signal.create_model).parameters
        kwargs = {key: False for key in pars.keys() if key.startswith('auto_')}
        _model = _signal.create_model(**kwargs)
        dims = (self.model.axes_manager.navigation_dimension,
                self.model.axes_manager.signal_dimension)
        # Carry over the channel mask; for a signal-space slice only the
        # corresponding part of the mask is kept.
        if self.isNavigation:
            _model.channel_switches[:] = self.model.channel_switches
        else:
            _model.channel_switches[:] = \
                np.atleast_1d(
                    self.model.channel_switches[
                        tuple(array_slices[-dims[1]:])])
        twin_dict = {}
        # Re-create every component of the original model in the new
        # one, passing the constructor arguments recorded in the
        # component's whitelist under the 'init' flag.
        for comp in self.model:
            init_args = {}
            for k, v in comp._whitelist.items():
                if v is None:
                    continue
                flags_str, value = v
                if 'init' in parse_flag_string(flags_str):
                    init_args[k] = value
            _model.append(comp.__class__(**init_args))
        # Copy sliced attribute values: first for the model itself...
        copy_slice_from_whitelist(self.model,
                                  _model,
                                  dims,
                                  (slices, array_slices),
                                  self.isNavigation,
                                  )
        # ...then per component, and per parameter of each component.
        for co, cn in zip(self.model, _model):
            copy_slice_from_whitelist(co,
                                      cn,
                                      dims,
                                      (slices, array_slices),
                                      self.isNavigation)
            if _model.axes_manager.navigation_size < 2:
                if co.active_is_multidimensional:
                    # Only one navigation position left: collapse the
                    # per-position active map into a scalar flag.
                    cn.active = co._active_array[array_slices[:dims[0]]]
            for po, pn in zip(co.parameters, cn.parameters):
                copy_slice_from_whitelist(po,
                                          pn,
                                          dims,
                                          (slices, array_slices),
                                          self.isNavigation)
                # Record the twin relations of the old parameter (by
                # id()) together with its new counterpart, so twinning
                # can be rebuilt between the new parameters below.
                twin_dict[id(po)] = ([id(i) for i in list(po._twins)], pn)
        # Rebuild parameter twinning in the new model using the id()
        # bookkeeping collected above.
        for k in twin_dict.keys():
            for tw_id in twin_dict[k][0]:
                twin_dict[tw_id][1].twin = twin_dict[k][1]
        # Detach chisq/dof data so the new model does not share arrays
        # with the original.
        _model.chisq.data = _model.chisq.data.copy()
        _model.dof.data = _model.dof.data.copy()
        _model.fetch_stored_values()  # to update and have correct values
        if not self.isNavigation:
            for _ in _model.axes_manager:
                _model._calculate_chisq()
        return _model
# vim: textwidth=80
| MartialD/hyperspy | hyperspy/model.py | Python | gpl-3.0 | 74,937 | [
"Gaussian"
] | 202a3bfe3ed68d0b55bb4740feada5714b369eefd55615d78f7af79cb12b9bf6 |
""" PlottingClient is a client of the Plotting Service
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import tempfile
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Tornado.Client.ClientSelector import TransferClientSelector as TransferClient
from DIRAC.Core.Base.Client import Client
class PlottingClient(object):
    """Client side of the Framework/Plotting service.

    Asks the service to generate a plot from data plus metadata and
    retrieves the resulting image, either as raw bytes in memory or
    saved to a local file.
    """

    def __init__(self, rpcClient=None, transferClient=None):
        # Clients may be injected (e.g. for testing); otherwise fresh
        # ones are created on demand by the private getters below.
        self.serviceName = "Framework/Plotting"
        self.rpcClient = rpcClient
        self.transferClient = transferClient

    def __getRPCClient(self):
        """Return the injected RPC client, or build one for the service."""
        if self.rpcClient:
            return self.rpcClient
        return Client(url=self.serviceName)

    def __getTransferClient(self):
        """Return the injected transfer client, or build one for the service."""
        if self.transferClient:
            return self.transferClient
        return TransferClient(self.serviceName)

    def getPlotToMemory(self, plotName):
        """Get the prefabricated plot from the service and return it as a string"""
        receiver = self.__getTransferClient()
        with tempfile.TemporaryFile() as spool:
            retVal = receiver.receiveFile(spool, plotName)
            if not retVal["OK"]:
                return retVal
            spool.seek(0)
            return S_OK(spool.read())

    def getPlotToFile(self, plotName, fileName):
        """Get the prefabricated plot from the service and store it in a file"""
        receiver = self.__getTransferClient()
        try:
            with open(fileName, "wb") as destFile:
                retVal = receiver.receiveFile(destFile, plotName)
        except Exception as e:
            return S_ERROR("Can't open file %s for writing: %s" % (fileName, str(e)))
        if not retVal["OK"]:
            return retVal
        return S_OK(fileName)

    def graph(self, data, fname=False, *args, **kw):
        """Generic method to obtain graphs from the Plotting service.

        The requested graphs are completely described by their data and
        metadata; metadata may be passed as dict positional arguments
        and/or as keyword arguments.
        """
        client = self.__getRPCClient()
        plotMetadata = {}
        for arg in args:
            if not isinstance(arg, dict):
                return S_ERROR("Non-dictionary non-keyed argument")
            plotMetadata.update(arg)
        plotMetadata.update(kw)
        result = client.generatePlot(data, plotMetadata)
        if not result["OK"]:
            return result
        plotName = result["Value"]
        # fname == "Memory" (or no file name) means: return the bytes.
        if fname and fname != "Memory":
            return self.getPlotToFile(plotName, fname)
        return self.getPlotToMemory(plotName)

    def barGraph(self, data, fileName, *args, **kw):
        return self.graph(data, fileName, plot_type="BarGraph", statistics_line=True, *args, **kw)

    def lineGraph(self, data, fileName, *args, **kw):
        return self.graph(data, fileName, plot_type="LineGraph", statistics_line=True, *args, **kw)

    def curveGraph(self, data, fileName, *args, **kw):
        return self.graph(data, fileName, plot_type="CurveGraph", statistics_line=True, *args, **kw)

    def cumulativeGraph(self, data, fileName, *args, **kw):
        return self.graph(data, fileName, plot_type="LineGraph", cumulate_data=True, *args, **kw)

    def pieGraph(self, data, fileName, *args, **kw):
        prefs = {"xticks": False, "yticks": False, "legend_position": "right"}
        return self.graph(data, fileName, prefs, plot_type="PieGraph", *args, **kw)

    def qualityGraph(self, data, fileName, *args, **kw):
        prefs = {"plot_axis_grid": False}
        return self.graph(data, fileName, prefs, plot_type="QualityMapGraph", *args, **kw)

    def textGraph(self, text, fileName, *args, **kw):
        prefs = {"text_image": text}
        return self.graph({}, fileName, prefs, *args, **kw)

    def histogram(self, data, fileName, bins, *args, **kw):
        # pylab is optional; fail gracefully when matplotlib is absent.
        try:
            from pylab import hist
        except Exception:
            return S_ERROR("No pylab module available")
        counts, edges, _patches = hist(data, bins)
        histo = dict(zip(edges, counts))
        # Bar span slightly below the bin width so bars do not touch.
        span = (max(data) - min(data)) / float(bins) * 0.98
        return self.graph(histo, fileName, plot_type="BarGraph", span=span, statistics_line=True, *args, **kw)
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/Client/PlottingClient.py | Python | gpl-3.0 | 4,325 | [
"DIRAC"
] | db05df8bdb4c038f5cb1dfb5d1d809091f661f51820fa57121290732eff3c8df |
"""
This is used to test the ElasticSearchDB module. It is used to discover all possible changes of Elasticsearch api.
If you modify the test data, you have to update the test cases...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO: move to pytest
import unittest
import sys
import datetime
import time
import os
from DIRAC import gLogger
from DIRAC.Core.Utilities.ElasticSearchDB import ElasticSearchDB
elHost = os.environ.get("NoSQLDB_HOST", "localhost")
elPort = 9200
class ElasticTestCase(unittest.TestCase):
    """Base fixture of the ElasticSearchDB integration tests.

    Provides two sets of ten test documents and a fresh DB connection
    per test, using a local Elasticsearch instance.
    """

    @staticmethod
    def _doc(quantity, product, timestamp):
        # All test documents share the same colour; only quantity,
        # product and timestamp vary.
        return {"Color": "red", "quantity": quantity, "Product": product, "timestamp": timestamp}

    def __init__(self, *args, **kwargs):
        super(ElasticTestCase, self).__init__(*args, **kwargs)
        doc = self._doc
        self.data = [
            doc(1, "a", "2015-02-09 09:00:00.0"),
            doc(1, "b", "2015-02-09 16:15:00.0"),
            doc(1, "b", "2015-02-09 16:30:00.0"),
            doc(1, "a", "2015-02-09 09:00:00.0"),
            doc(1, "a", "2015-02-09 09:15:00.0"),
            doc(2, "b", "2015-02-09 16:15:00.0"),
            doc(1, "a", "2015-02-09 09:15:00.0"),
            doc(2, "b", "2015-02-09 16:15:00.0"),
            doc(1, "a", "2015-02-09 09:15:00.0"),
            doc(2, "b", "2015-02-09 16:15:00.0"),
        ]
        # Ten documents with distinct products, quantity 1 each.
        self.moreData = [
            doc(1, product, timestamp)
            for product, timestamp in [
                ("a", "2015-02-09 09:00:00.0"),
                ("b", "2015-02-09 09:15:00.0"),
                ("c", "2015-02-09 09:30:00.0"),
                ("d", "2015-02-09 10:00:00.0"),
                ("e", "2015-02-09 10:15:00.0"),
                ("f", "2015-02-09 10:30:00.0"),
                ("g", "2015-02-09 10:45:00.0"),
                ("h", "2015-02-09 11:00:00.0"),
                ("i", "2015-02-09 11:15:00.0"),
                ("l", "2015-02-09 11:30:00.0"),
            ]
        ]
        self.index_name = ""
        self.maxDiff = None

    def setUp(self):
        gLogger.setLevel("DEBUG")
        self.elasticSearchDB = ElasticSearchDB(host=elHost, port=elPort, useSSL=False)

    def tearDown(self):
        pass
class ElasticBulkCreateChain(ElasticTestCase):
    """Bulk-indexing tests (default daily and monthly index periods)."""

    def _assertBulkInserted(self, result):
        # A successful bulk_index call reports OK and the number of
        # inserted documents (all ten test documents).
        self.assertTrue(result["OK"])
        self.assertEqual(result["Value"], 10)

    def _dropAllIndexes(self):
        indexes = self.elasticSearchDB.getIndexes()
        self.assertEqual(type(indexes), list)
        for name in indexes:
            self.assertTrue(self.elasticSearchDB.deleteIndex(name)["OK"])

    def test_bulkindex(self):
        """bulk_index test"""
        self._assertBulkInserted(self.elasticSearchDB.bulk_index("integrationtest", self.data))
        time.sleep(5)  # give Elasticsearch time to index
        self._dropAllIndexes()

    def test_bulkindexMonthly(self):
        """bulk_index test (month)"""
        self._assertBulkInserted(
            self.elasticSearchDB.bulk_index(indexPrefix="integrationtestmontly", data=self.data, period="month")
        )
        time.sleep(5)  # give Elasticsearch time to index
        self._dropAllIndexes()
class ElasticCreateChain(ElasticTestCase):
    """Simple tests of index creation, insertion and deletion."""

    def tearDown(self):
        self.elasticSearchDB.deleteIndex(self.index_name)

    def test_index(self):
        """create index test"""
        result = self.elasticSearchDB.createIndex("integrationtest", {})
        self.assertTrue(result["OK"])
        self.index_name = result["Value"]
        for doc in self.data:
            self.assertTrue(self.elasticSearchDB.index(self.index_name, doc)["OK"])

    def test_wrongdataindex(self):
        """create index test (wrong insertion)"""
        result = self.elasticSearchDB.createIndex("dsh63tsdgad", {})
        self.assertTrue(result["OK"])
        index_name = result["Value"]
        # A numeric epoch timestamp is accepted...
        ok = self.elasticSearchDB.index(
            index_name, {"Color": "red", "quantity": 1, "Product": "a", "timestamp": 1458226213}
        )
        self.assertTrue(ok["OK"])
        # ...while a string timestamp is expected to be rejected as
        # incompatible with the already-established field type.
        bad = self.elasticSearchDB.index(
            index_name, {"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09T16:15:00Z"}
        )
        self.assertFalse(bad["OK"])
        self.assertTrue(bad["Message"])
        self.assertTrue(self.elasticSearchDB.deleteIndex(index_name)["OK"])
class ElasticDeleteChain(ElasticTestCase):
    """Deletion tests."""

    def test_deleteNonExistingIndex(self):
        """Deleting a non-existing index must still report success."""
        res = self.elasticSearchDB.deleteIndex("dsdssuu")
        self.assertTrue(res["OK"])
class ElasticTestChain(ElasticTestCase):
    """Assorted ElasticSearchDB API tests sharing one daily index.

    NOTE(review): several aggregation tests (test_query, test_A1,
    test_A2, test_piplineaggregation) and the text-field parts of
    test_getUniqueValue / test_Search were already disabled upstream,
    chiefly because fielddata is off by default on text fields in
    recent Elasticsearch releases; consult the repository history for
    the disabled code.
    """

    def setUp(self):
        self.elasticSearchDB = ElasticSearchDB(host=elHost, port=elPort, useSSL=False)
        full_name = self.elasticSearchDB.generateFullIndexName("integrationtest", "day")
        # The generated name carries a date suffix, so it must be
        # longer than the bare prefix.
        self.assertTrue(len(full_name) > len("integrationtest"))
        self.index_name = full_name
        res = self.elasticSearchDB.index(
            self.index_name, {"Color": "red", "quantity": 1, "Product": "a", "timestamp": 1458226213}
        )
        self.assertTrue(res["OK"])

    def tearDown(self):
        self.elasticSearchDB.deleteIndex(self.index_name)

    def test_getIndexes(self):
        """getIndexes returns a falsy value once every index is gone."""
        self.elasticSearchDB.deleteIndex(self.index_name)
        self.assertFalse(self.elasticSearchDB.getIndexes())  # empty at this point

    def test_getDocTypes(self):
        """The stored mapping contains exactly the indexed fields."""
        res = self.elasticSearchDB.getDocTypes(self.index_name)
        self.assertTrue(res)
        expected = {u"Color", u"timestamp", u"Product", u"quantity"}
        mapping = res["Value"]
        # Newer ES versions drop the "_doc" mapping level; accept both
        # layouts.
        if "_doc" in mapping:
            self.assertEqual(set(mapping["_doc"]["properties"]), expected)
        else:
            self.assertEqual(set(mapping["properties"]), expected)

    def test_existingIndex(self):
        res = self.elasticSearchDB.existingIndex(self.index_name)
        self.assertTrue(res["OK"] and res["Value"])

    def test_generateFullIndexName(self):
        base = "test"
        day = datetime.datetime.today().strftime("%Y-%m-%d")
        self.assertEqual(self.elasticSearchDB.generateFullIndexName(base, "day"), "%s-%s" % (base, day))

    def test_generateFullIndexName2(self):
        base = "test"
        month = datetime.datetime.today().strftime("%Y-%m")
        self.assertEqual(self.elasticSearchDB.generateFullIndexName(base, "month"), "%s-%s" % (base, month))

    def test_getUniqueValue(self):
        res = self.elasticSearchDB.getUniqueValue(self.index_name, "quantity")
        self.assertTrue(res["OK"])
        # Unique-value lookups on the text fields ('Color', 'Product')
        # are not exercised: fielddata is disabled on text fields by
        # default and would have to be enabled explicitly.

    def test_querySimple(self):
        """simple query test"""
        self.elasticSearchDB.deleteIndex(self.index_name)
        # insert the ten distinct-product documents
        for doc in self.moreData:
            self.assertTrue(self.elasticSearchDB.index(self.index_name, doc)["OK"])
        time.sleep(10)  # giving ES some time for indexing

        # match_all: every one of the 10 documents comes back
        res = self.elasticSearchDB.query(self.index_name, {"query": {"match_all": {}}})
        self.assertTrue(res["OK"])
        self.assertTrue(isinstance(res["Value"], dict))
        self.assertEqual(len(res["Value"]["hits"]["hits"]), 10)

        # match_none: nothing comes back
        res = self.elasticSearchDB.query(self.index_name, {"query": {"match_none": {}}})
        self.assertTrue(res["OK"])
        self.assertTrue(isinstance(res["Value"], dict))
        self.assertEqual(res["Value"]["hits"]["hits"], [])

        # a malformed body is reported as an error
        res = self.elasticSearchDB.query(self.index_name, {"pippo": {"bool": {"must": [], "filter": []}}})
        self.assertFalse(res["OK"])

        # an empty bool query also matches everything
        res = self.elasticSearchDB.query(self.index_name, {"query": {"bool": {"must": [], "filter": []}}})
        self.assertTrue(res["OK"])
        self.assertTrue(isinstance(res["Value"], dict))
        self.assertEqual(len(res["Value"]["hits"]["hits"]), 10)

    def test_Search(self):
        """Exercise the _Search/_Q helpers and the generated query DSL."""
        self.elasticSearchDB.deleteIndex(self.index_name)
        for doc in self.moreData:
            self.assertTrue(self.elasticSearchDB.index(self.index_name, doc)["OK"])
        time.sleep(10)  # giving ES some time for indexing

        outcome = self.elasticSearchDB._Search(self.index_name).execute()
        self.assertEqual(len(outcome.hits), 10)
        self.assertEqual(dir(outcome.hits[0]), [u"Color", u"Product", "meta", u"quantity", u"timestamp"])

        for gte, lte in ((1423497057518, 1423501337292), (1423399451544, 1423631917911)):
            window = self.elasticSearchDB._Q("range", timestamp={"lte": lte, "gte": gte})
            search = self.elasticSearchDB._Search(self.index_name)
            search = search.filter("bool", must=window)
            self.assertEqual(
                search.to_dict(),
                {
                    "query": {
                        "bool": {
                            "filter": [
                                {"bool": {"must": [{"range": {"timestamp": {"gte": gte, "lte": lte}}}]}}
                            ]
                        }
                    }
                },
            )
            # Neither time window matches any stored document.
            self.assertEqual(len(search.execute().hits), 0)
if __name__ == "__main__":
    # Run the whole chain in a fixed order: base, create, bulk-create,
    # general tests and finally deletion.
    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(ElasticTestCase)
    for chain in (ElasticCreateChain, ElasticBulkCreateChain, ElasticTestChain, ElasticDeleteChain):
        suite.addTest(loader.loadTestsFromTestCase(chain))
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not outcome.wasSuccessful())
| ic-hep/DIRAC | tests/Integration/Core/Test_ElasticsearchDB.py | Python | gpl-3.0 | 21,844 | [
"DIRAC"
] | b05f698a8f892811e9a73bf370cb7567bad74c795f0482512f50a1ab9bba2b8e |
#!/usr/bin/env python
import pyfits
from LCScommon import *
from pylab import *
import os
import mystuff as my
def loadmastertable(clustername):
infile='/home/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
tb=pyfits.open(infile)
tbdata=tb[1].data
tb.close()
self.agcflag=tbdata.field('AGCflag')
self.HIflag=tbdata.field('HIFLAG')
self.sdssflag=tbdata.field('SDSSflag')
self.sdssphotflag=tbdata.field('SDSSphotflag')
self.mpaflag=tbdata.field('MPAFLAG')
self.apexflag=tbdata.field('APEXFLAG')
self.sexsdssflag=tbdata.field('SEXSDSSflag')
self.sex24flag=tbdata.field('SEX24FLAG')
self.agcvoptflag=tbdata.field('AGCVOPTFLAG')
self.agcnumber=tbdata.field('AGCNUMBER')
self.raagc=tbdata.field('AGCRA')
self.decagc=tbdata.field('AGCDEC')
self.a100=tbdata.field('A100')
self.b100=tbdata.field('B100')
self.mag10=tbdata.field('MAG10')
self.posang=tbdata.field('POSANG')
self.bsteintype=tbdata.field('BSTEINTYPE')
self.vopt=tbdata.field('VOPT')
self.verr=tbdata.field('VERR')
self.vsource=tbdata.field('VSOURCE')
self.flux100=tbdata.field('FLUX100')
self.rms100=tbdata.field('RMS100')
self.v21=tbdata.field('V21')
self.width=tbdata.field('WIDTH')
self.widtherr=tbdata.field('WIDTHERR')
#sdss info
self.sdssra=tbdata.field('SDSSRA')
self.sdssdec=tbdata.field('SDSSDEC')
self.sdssphotra=tbdata.field('SDSSphotRA')
self.sdssphotdec=tbdata.field('SDSSphotDEC')
self.sdssu=tbdata.field('SDSSU')
self.sdssg=tbdata.field('SDSSG')
self.sdssr=tbdata.field('SDSSR')
self.sdssi=tbdata.field('SDSSI')
self.sdssz=tbdata.field('SDSSZ')
self.sdssspecz=tbdata.field('SDSSSPECZ')
self.sdssvopt=tbdata.field('SDSSVOPT')
self.sdsshaew=tbdata.field('SDSSHAEW')
self.sdsshaewerr=tbdata.field('SDSSHAEWERR')
self.sdssplate=tbdata.field('SDSSPLATE')
self.sdssfiberid=tbdata.field('SDSSFIBERID')
self.sdsstile=tbdata.field('SDSSTILE')
self.mpahalpha=tbdata.field('MPAHALPHA')
self.mpahbeta=tbdata.field('MPAHBETA')
self.mpao3=tbdata.field('MPAOIII')
self.mpan2=tbdata.field('MPANII')
#sextractor info
self.numberser=tbdata.field('NUMBERSER')
self.ximageser=tbdata.field('XIMAGESER')
self.yimageser=tbdata.field('YIMAGESER')
self.xminimageser=tbdata.field('XMINIMAGESER')
self.xmaximageser=tbdata.field('XMAXIMAGESER')
self.yminimageser=tbdata.field('YMINIMAGESER')
self.raser=tbdata.field('RASER')
self.decser=tbdata.field('DECSER')
self.fluxisoser=tbdata.field('FLUXISOSER')
self.fluxerrisoser=tbdata.field('FLUXERRISOSER')
self.magisoser=tbdata.field('MAGISOSER')
self.magerrisoser=tbdata.field('MAGERRISOSER')
self.fluxautoser=tbdata.field('FLUXAUTOSER')
self.fluxerrautoser=tbdata.field('FLUXERRAUTOSER')
self.magautoser=tbdata.field('MAGAUTOSER')
self.magerrautoser=tbdata.field('MAGERRAUTOSER')
self.fluxpetroser=tbdata.field('FLUXPETROSER')
self.fluxerrpetroser=tbdata.field('FLUXERRPETROSER')
self.magpetroser=tbdata.field('MAGPETROSER')
self.magerrpetroser=tbdata.field('MAGERRPETROSER')
self.kronradser=tbdata.field('KRONRADSER')#kron radius
self.petroradser=tbdata.field('PETRORADSER')#petrosian radius
self.fluxradser=tbdata.field('FLUXRADSER')#1/2 light radius
self.isoareaser=tbdata.field('ISOAREASER')
self.aworldser=tbdata.field('AWORLDSER')
self.bworldser=tbdata.field('BWORLDSER')
self.thetaser=tbdata.field('THETASER')
self.errthetaser=tbdata.field('ERRTHETASER')
self.thetaj2000ser=tbdata.field('THETAJ2000SER')
self.errthetaj2000ser=tbdata.field('ERRTHETAJ2000SER')
self.elongser=tbdata.field('ELONGATIONSER')
self.elliptser=tbdata.field('ELLIPTICITYSER')
self.fwhmser=tbdata.field('FWHMSER')
self.flagsser=tbdata.field('FLAGSSER')
self.classstarser=tbdata.field('CLASSSTARSER')
#SEXTRACTOR output 24 micron data
self.numberse24=tbdata.field('NUMBERSE24')
self.ximagese24=tbdata.field('XIMAGESE24')
self.yimagese24=tbdata.field('YIMAGESE24')
self.xminimagese24=tbdata.field('XMINIMAGESE24')
self.xmaximagese24=tbdata.field('XMAXIMAGESE24')
self.xminimagese24=tbdata.field('YMINIMAGESE24')
self.rase24=tbdata.field('RASE24')
self.decse24=tbdata.field('DECSE24')
self.fluxisose24=tbdata.field('FLUXISOSE24')
self.fluxerrisose24=tbdata.field('FLUXERRISOSE24')
self.magisose24=tbdata.field('MAGISOSE24')
self.magerrisose24=tbdata.field('MAGERRISOSE24')
self.fluxautose24=tbdata.field('FLUXAUTOSE24')
self.fluxerrautose24=tbdata.field('FLUXERRAUTOSE24')
self.magautose24=tbdata.field('MAGAUTOSE24')
self.magerrautose24=tbdata.field('MAGERRAUTOSE24')
self.fluxpetrose24=tbdata.field('FLUXPETROSE24')
self.fluxerrpetrose24=tbdata.field('FLUXERRPETROSE24')
self.magpetrose24=tbdata.field('MAGPETROSE24')
self.magerrpetrose24=tbdata.field('MAGERRPETROSE24')
self.kronradse24=tbdata.field('KRONRADSE24')
self.petroradse24=tbdata.field('PETRORADSE24')
self.fluxradse24=tbdata.field('FLUXRADSE24')
self.isoarease24=tbdata.field('ISOAREASE24')
self.aworldse24=tbdata.field('AWORLDSE24')
self.bworldse24=tbdata.field('BWORLDSE24')
self.thetase24=tbdata.field('THETASE24')
self.errthetase24=tbdata.field('ERRTHETASE24')
self.thetaj2000se24=tbdata.field('THETAJ2000SE24')
self.errthetaj2000se24=tbdata.field('ERRTHETAJ2000SE24')
self.elongse24=tbdata.field('ELONGATIONSE24')
self.elliptse24=tbdata.field('ELLIPTICITYSE24')
self.fwhmse24=tbdata.field('FWHMSE24')
self.flagsse24=tbdata.field('FLAGSSE24')
self.classstarse24=tbdata.field('CLASSSTARSE24')
self.f24dist=self.fluxautose24[self.sex24flag]
#apex output
self.mipsra=tbdata.field('MIPSRA')
self.mipsdec=tbdata.field('MIPSDEC')
self.mipsflux=tbdata.field('MIPSFLUX')
self.mipsfluxerr=tbdata.field('MIPSFLUXERR')
self.mipssnr=tbdata.field('MIPSSNR')
self.mipsdeblend=tbdata.field('MIPSDEBLEND')
self.mipsfluxap1=tbdata.field('MIPSFLUXAP1')
self.mipsfluxap1err=tbdata.field('MIPSFLUXAP1ERR')
self.mipsfluxap2=tbdata.field('MIPSFLUXAP2')
self.mipsfluxap2err=tbdata.field('MIPSFLUXAP2ERR')
self.mipsfluxap3=tbdata.field('MIPSFLUXAP3')
self.mipsfluxap4err=tbdata.field('MIPSFLUXAP3ERR')
self.On24ImageFlag=tbdata.field('On24ImageFlag')
self.supervopt=tbdata.field('SUPERVOPT')
self.ra=tbdata.field('SUPERRA')
self.dec=tbdata.field('SUPERDEC')
self.stellarmass=tbdata.field('STELLARMASS')
self.sdssMu=tbdata.field('SDSSMU')
self.sdssLu=tbdata.field('SDSSLU')
self.sdssMg=tbdata.field('SDSSMG')
self.sdssLg=tbdata.field('SDSSLG')
self.sdssMr=tbdata.field('SDSSMR')
self.sdssLr=tbdata.field('SDSSLR')
self.sdssMi=tbdata.field('SDSSMI')
self.sdssLi=tbdata.field('SDSSLI')
self.sdssMz=tbdata.field('SDSSMZ')
self.sdssLz=tbdata.field('SDSSLZ')
self.membflag =tbdata.field('MEMBFLAG')
self.morphflag =tbdata.field('MORPHFLAG')
self.morph =tbdata.field('MORPH')
self.disturb =tbdata.field('DISTURB')
self.localdens =tbdata.field('LOCALDENS')
self.agn1 =tbdata.field('AGNKAUFF')
self.agn2 =tbdata.field('AGNKEWLEY')
self.agn3 =tbdata.field('AGNSTASIN')
self.logn2halpha=log10(self.mpan2/self.mpahalpha)
self.logo3hbeta=log10(self.mpao3/self.mpahbeta)
self.ellipseflag24 =tbdata.field('ELLIPSEFLAG24')
self.ellipseflagsdss =tbdata.field('ELLIPSEFLAGSDSS')
self.ellipseflag =tbdata.field('ELLIPSEFLAG')
# galaxy zoo fields
self.galzooflag =tbdata.field('GALZOOFLAG')
self.galzoonvote =tbdata.field('GALZOONVOTE')
self.galzoopel =tbdata.field('GALZOOPEL')
self.galzoopcw =tbdata.field('GALZOOPCW')
self.galzoopacw =tbdata.field('GALZOOPACW')
self.galzoopedge =tbdata.field('GALZOOPEDGE')
self.galzoopdk =tbdata.field('GALZOOPDK')
self.galzoopmg =tbdata.field('GALZOOPMG')
self.galzoopcs =tbdata.field('GALZOOPCS')
self.galzoopeldebiased =tbdata.field('GALZOOPELDEBIASED')
self.galzoopcsdebiased =tbdata.field('GALZOOPCSDEBIASED')
self.galzoospiral =tbdata.field('GALZOOSPIRAL')
self.galzooelliptical =tbdata.field('GALZOOELLIPTICAL')
self.galzoouncertain =tbdata.field('GALZOOUNCERTAIN')
#end of master table!
#self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & (self.galzoopcsdebiased > 0.6)
self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & self.galzoospiral
| rfinn/LCS | paper1code/LCSloadmaster.py | Python | gpl-3.0 | 8,623 | [
"Galaxy"
] | ff9549a97b55bbf1aa80d57e7d9d2f488212fa7629bca21544e170621e545e09 |
from enthought.mayavi.core.registry import registry
from enthought.mayavi.core.pipeline_info import PipelineInfo
from enthought.mayavi.core.metadata import FilterMetadata
from enthought.mayavi.core.metadata import SourceMetadata
# Metadata for the new filters we want to add.
# Each FilterMetadata entry gives Mayavi a unique id, the label shown in the
# Filters menu, and the dotted import path of the factory class to
# instantiate when the user selects the filter.
boundary_marker_editor = FilterMetadata(
    id = "BoundaryMarkerEditor",
    menu_name = "BoundaryMarkerEditor",
    factory = 'mayavi_amcg.filters.boundary_marker_editor.BoundaryMarkerEditor'
)
field_operations = FilterMetadata(
    id = "FieldOperations",
    menu_name = "FieldOperations",
    factory = 'mayavi_amcg.filters.field_operations.FieldOperations'
)
projection_and_depth_stretch = FilterMetadata(
    id = "ProjectionAndDepthStretch",
    menu_name = "ProjectionAndDepthStretch",
    factory = 'mayavi_amcg.filters.projection_and_depth_stretch.ProjectionAndDepthStretch'
)
mesh_diagnostics = FilterMetadata(
    id = "MeshDiagnostics",
    menu_name = "MeshDiagnostics",
    factory = 'mayavi_amcg.filters.mesh_diagnostics.MeshDiagnostics'
)
tensor_eigenvectors_eigenvalues = FilterMetadata(
    id = "TensorEigenvectorsEigenvalues",
    menu_name = "TensorEigenvectorsEigenvalues",
    factory = 'mayavi_amcg.filters.tensor_eigenvectors_eigenvalues.TensorEigenvectorsEigenvalues'
)
# Register the filters with the mayavi registry
registry.filters.append(boundary_marker_editor)
registry.filters.append(field_operations)
registry.filters.append(projection_and_depth_stretch)
registry.filters.append(mesh_diagnostics)
registry.filters.append(tensor_eigenvectors_eigenvalues)
# Metadata for the new source we want to add.
# The wildcard/extensions let Mayavi's file-open dialog recognise Triangle
# mesh files (.face, .edge, .ele); output_info declares that the reader
# produces an unstructured grid.
triangle_reader_info = SourceMetadata(
    id = "TriangleReader",
    class_name = 'mayavi_amcg.triangle_reader.TriangleReader',
    tooltip = "Load Triangle files",
    desc = "Load Triangle files",
    help = "Load Triangle files",
    menu_name = "&Triangle files",
    extensions = ['face','edge','ele'],
    wildcard = 'Triangle files (*.face)|*.face|Triangle files (*.edge)|*.edge|Triangle files (*.ele)|*.ele',
    output_info = PipelineInfo(datasets=['unstructured_grid'],
                               attribute_types=['any'],
                               attributes=['any'])
)
# Register the source with the mayavi registry
registry.sources.append(triangle_reader_info)
if __name__ == '__main__':
    # This module is a Mayavi user-customization file (site_mayavi) and is
    # only meant to be imported by Mayavi at startup, never run directly.
    # NOTE: the print statements below are Python 2 syntax, consistent with
    # the enthought.* imports above.
    import sys
    print "*"*80
    print "ERROR: This script isn't supposed to be executed."
    print __doc__
    print "*"*80
    from enthought.util.home_directory import get_home_directory
    print "Your .mayavi2 directory should be in %s"%get_home_directory()
    print "*"*80
    sys.exit(1)
| rjferrier/fluidity | mayavi/site_mayavi.py | Python | lgpl-2.1 | 2,724 | [
"Mayavi"
] | 97134f6696627cd59af2c8bd8d2d77bfec88e9d0b4bee3f5c19b36699b834da6 |
import pyfftw
import numpy as np
from peak_detection import find_peak
class FFTCorrelator(object):
    """
    An FFT Correlation Class for a PIV Evaluation of two frames

    It uses the `pyfftw <https://hgomersall.github.io/pyFFTW/>`_ library for performant FFT.
    This class is also responsible for calculating the shift after the correlation.
    """

    def __init__(self, window_a_size, window_b_size, scale_fft='default'):
        """
        Initialize fftw objects for FFTs with the pyfftw library

        The necessary functions are loaded and memory allocated.

        :param window_a_size: size of the interrogation window
        :param window_b_size: size of the search window
        :param str scale_fft: if set to 'upscale', the padding will be upscaled
            to the next power of two
        """
        max_fsize = max([window_a_size, window_b_size])
        pad = self._set_padding(max_fsize, scale_fft)
        # Forward real FFT plans for both windows, zero padded to `pad`
        ffta_shape = (window_a_size, window_a_size)
        ffta_memory = pyfftw.empty_aligned(ffta_shape, dtype='float64')
        self._fa_fft = pyfftw.builders.rfft2(ffta_memory, pad)
        fftb_shape = (window_b_size, window_b_size)
        fftb_memory = pyfftw.empty_aligned(fftb_shape, dtype='float64')
        self._fb_fft = pyfftw.builders.rfft2(fftb_memory, pad)
        # Inverse plan for the half-spectrum produced by rfft2
        ifft_shape = (window_b_size, window_b_size//2 + 1)
        ifft_memory = pyfftw.empty_aligned(ifft_shape, dtype='complex128')
        self._ift_fft = pyfftw.builders.irfft2(ifft_memory, pad)

    def _set_padding(self, windows_size, scale_fft):
        """
        Compute the zero-padded FFT output shape.

        :param int windows_size: size of the larger of the two windows
        :param str scale_fft: 'default' pads to twice the window size;
            'upscale' pads to the next power of two above that
        :returns: (pad, pad) tuple of ints
        :raises ValueError: if scale_fft is neither 'default' nor 'upscale'
            (previously this silently fell through and raised
            UnboundLocalError)
        """
        if scale_fft == 'default':
            pad = 2*windows_size
        elif scale_fft == 'upscale':
            # np.ceil returns a float; cast so the FFT planner gets an
            # integer shape
            pad = int(2**np.ceil(np.log2(2*windows_size)))
        else:
            raise ValueError("scale_fft must be 'default' or 'upscale', got %r"
                             % (scale_fft,))
        return (pad, pad)

    def _evaluate_windows(self, window_a, window_b):
        """
        Calculate the FFT of both windows, correlate and transform back.

        In order to decrease the error a mean subtraction is performed.
        To compensate for the indexing during the FFT a FFT shift is performed.

        :param window_a: interrogation window
        :param window_b: search window
        :returns: correlation window
        """
        fft_a = self._fa_fft(window_a - np.mean(window_a))
        fft_b = self._fb_fft(window_b - np.mean(window_b))
        # Correlation theorem: multiply one spectrum by the conjugate of
        # the other
        fft_corr = fft_a*np.conj(fft_b)
        inv_fft = self._ift_fft(fft_corr)
        return np.fft.fftshift(inv_fft)

    def get_displacement(self, window_a, window_b, subpixel_method='gaussian'):
        """
        Compute the displacement out of correlation.

        First the correlation is performed and afterwards the shift is
        calculated with the peak finder (see piv.peak_detection.find_peak),
        using subpixel_method for the subpixel fit.  If a padding was
        needed, it is removed from the calculated displacement.

        :param window_a: interrogation window
        :param window_b: search window
        :param str subpixel_method: method for peak finder
        :returns: shift in x and y direction as tuple
        """
        correlation = self._evaluate_windows(window_a, window_b)
        xi, yi = find_peak(correlation, subpixel_method)
        cx, cy = correlation.shape
        # Offset introduced by the size difference of the two windows
        corr_pad = (window_b.shape[0] - window_a.shape[0])/2.
        return (cx/2. - xi - corr_pad, cy/2. - yi - corr_pad)
| jr7/pypiv | pypiv/piv/fft_correlator.py | Python | bsd-3-clause | 3,433 | [
"Gaussian"
] | 52c51abd19336127bd572a1ea02c5f92bd7c3c1f57685d16188688481cca7350 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to define a MD trajectory.
"""
import itertools
import os
import warnings
from fnmatch import fnmatch
from typing import List, Union, Sequence
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.structure import Structure, Lattice, Element, Specie, DummySpecie, Composition
from pymatgen.io.vasp.outputs import Xdatcar, Vasprun
__author__ = "Eric Sivonxay, Shyam Dwaraknath"
__version__ = "0.0"
__date__ = "Jan 25, 2019"
class Trajectory(MSONable):
    """
    Trajectory object that stores structural information related to a MD simulation.
    Provides basic functions such as slicing trajectory or obtaining displacements.
    """

    def __init__(self, lattice: Union[List, np.ndarray, Lattice],
                 species: List[Union[str, Element, Specie, DummySpecie, Composition]],
                 frac_coords: List[Sequence[Sequence[float]]],
                 time_step: float = 2,
                 site_properties: dict = None,
                 frame_properties: dict = None,
                 constant_lattice: bool = True,
                 coords_are_displacement: bool = False,
                 base_positions: Sequence[Sequence[float]] = None):
        """
        Create a trajectory object

        Args:
            lattice: The lattice as any 2D array. Each row should correspond to a lattice
                vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
                lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
            species: List of species on each site. Can take in flexible input,
                including:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            frac_coords (MxNx3 array): list of fractional coordinates of
                each species
            time_step (int, float): Timestep of simulation in femtoseconds. Defaults to 2fs.
            site_properties (list): Properties associated with the sites as a list of
                dicts of sequences, e.g., [{"magmom":[5,5,5,5]}, {"magmom":[5,5,5,5]}]. The sequences
                have to be the same length as the atomic species and fractional_coords. Number of supplied
                dicts should match number of frames in trajectory
                Defaults to None for no properties.
            frame_properties (dict): Properties of the trajectory such as energy, pressure, etc. each property should
                have a length equal to the trajectory length. eg: {'energy': [#, #, #, #], 'pressure': [0, 0.1, 0 0.02]}
            constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD simulation.
            coords_are_displacement (bool): Whether supplied coordinates are given in displacements (True) or
                positions (False)
            base_positions (Nx3 array): The starting positions of all atoms in trajectory. Used to reconstruct positions
                when converting from displacements to positions. Only needs to be specified if
                coords_are_displacement=True. Defaults to first index of frac_coords if coords_are_displacement=False.
        """
        # To support from_dict and as_dict
        if isinstance(frac_coords, list):
            frac_coords = np.array(frac_coords)
        if isinstance(lattice, Lattice):
            lattice = lattice.matrix
        if isinstance(lattice, list):
            lattice = np.array(lattice)
        self.frac_coords = frac_coords
        if coords_are_displacement:
            if base_positions is None:
                warnings.warn("Without providing an array of starting positions, "
                              "the positions for each time step will not be available")
            self.base_positions = base_positions
        else:
            self.base_positions = frac_coords[0]
        self.coords_are_displacement = coords_are_displacement
        if not constant_lattice and np.shape(lattice) == (3, 3):
            # A single lattice was supplied for a variable-lattice
            # trajectory: replicate it once per frame.
            self.lattice = [lattice for i in range(np.shape(self.frac_coords)[0])]
        else:
            self.lattice = lattice
        self.constant_lattice = constant_lattice
        self.species = species
        self.site_properties = site_properties
        self.frame_properties = frame_properties
        self.time_step = time_step

    def get_structure(self, i):
        """
        Returns structure at specified index

        Args:
            i (int): Index of structure

        Returns:
            (Structure) pymatgen structure object
        """
        return self[i]

    def to_positions(self):
        """
        Converts fractional coordinates of trajectory into positions
        """
        if self.coords_are_displacement:
            cumulative_displacements = np.cumsum(self.frac_coords, axis=0)
            positions = self.base_positions + cumulative_displacements
            self.frac_coords = positions
            self.coords_are_displacement = False

    def to_displacements(self):
        """
        Converts position coordinates of trajectory into displacements between consecutive frames
        """
        if not self.coords_are_displacement:
            displacements = np.subtract(self.frac_coords, np.roll(self.frac_coords, 1, axis=0))
            displacements[0] = np.zeros(np.shape(self.frac_coords[0]))
            # Deal with PBC: wrap each displacement into [-0.5, 0.5)
            displacements = [np.subtract(item, np.round(item)) for item in displacements]
            self.frac_coords = displacements
            self.coords_are_displacement = True

    def extend(self, trajectory):
        """
        Concatenate another trajectory

        Args:
            trajectory (Trajectory): Trajectory to add
        """
        if self.time_step != trajectory.time_step:
            raise ValueError('Trajectory not extended: Time steps of trajectories is incompatible')
        if len(self.species) != len(trajectory.species) and self.species != trajectory.species:
            raise ValueError('Trajectory not extended: species in trajectory do not match')
        # Ensure both trajectories are in positions before combining
        self.to_positions()
        trajectory.to_positions()
        self.site_properties = self._combine_site_props(self.site_properties, trajectory.site_properties,
                                                        np.shape(self.frac_coords)[0],
                                                        np.shape(trajectory.frac_coords)[0])
        self.frame_properties = self._combine_frame_props(self.frame_properties, trajectory.frame_properties,
                                                          np.shape(self.frac_coords)[0],
                                                          np.shape(trajectory.frac_coords)[0])
        self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)
        # NOTE(review): frac_coords has already been concatenated here, so the
        # first length passed below includes both trajectories — kept as-is to
        # preserve existing behavior; verify against upstream pymatgen.
        self.lattice, self.constant_lattice = self._combine_lattice(self.lattice, trajectory.lattice,
                                                                    np.shape(self.frac_coords)[0],
                                                                    np.shape(trajectory.frac_coords)[0])

    def __iter__(self):
        for i in range(np.shape(self.frac_coords)[0]):
            yield self[i]

    def __len__(self):
        return np.shape(self.frac_coords)[0]

    def __getitem__(self, frames):
        """
        Gets a subset of the trajectory if a slice is given, if an int is given, return a structure

        Args:
            frames (int, slice): int or slice of trajectory to return

        Return:
            (Trajectory, Structure) Subset of trajectory
        """
        # If trajectory is in displacement mode, return the displacements at that frame
        if self.coords_are_displacement:
            if isinstance(frames, int):
                if frames >= np.shape(self.frac_coords)[0]:
                    raise ValueError('Selected frame exceeds trajectory length')
                # For integer input, return the displacements at that timestep
                return self.frac_coords[frames]
            if isinstance(frames, slice):
                # For slice input, return a list of the displacements
                start, stop, step = frames.indices(len(self))
                return [self.frac_coords[i] for i in range(start, stop, step)]
            if isinstance(frames, (list, np.ndarray)):
                # For list input, return a list of the displacements
                pruned_frames = [i for i in frames if i < len(self)]  # Get rid of frames that exceed trajectory length
                if len(pruned_frames) < len(frames):
                    warnings.warn('Some or all selected frames exceed trajectory length')
                return [self.frac_coords[i] for i in pruned_frames]
            raise Exception('Given accessor is not of type int, slice, list, or array')
        # If trajectory is in positions mode, return a structure for the given frame or trajectory for the given frames
        if isinstance(frames, int):
            if frames >= np.shape(self.frac_coords)[0]:
                raise ValueError('Selected frame exceeds trajectory length')
            # For integer input, return the structure at that timestep
            lattice = self.lattice if self.constant_lattice else self.lattice[frames]
            # (fixed: this assignment was accidentally duplicated)
            site_properties = self.site_properties[frames] if self.site_properties else None
            return Structure(Lattice(lattice), self.species, self.frac_coords[frames],
                             site_properties=site_properties,
                             to_unit_cell=True)
        if isinstance(frames, slice):
            # For slice input, return a trajectory of the sliced time
            start, stop, step = frames.indices(len(self))
            pruned_frames = range(start, stop, step)
            lattice = self.lattice if self.constant_lattice else [self.lattice[i] for i in pruned_frames]
            frac_coords = [self.frac_coords[i] for i in pruned_frames]
            if self.site_properties is not None:
                site_properties = [self.site_properties[i] for i in pruned_frames]
            else:
                site_properties = None
            if self.frame_properties is not None:
                frame_properties = {}
                for key, item in self.frame_properties.items():
                    frame_properties[key] = [item[i] for i in pruned_frames]
            else:
                frame_properties = None
            return Trajectory(lattice, self.species, frac_coords, time_step=self.time_step,
                              site_properties=site_properties, frame_properties=frame_properties,
                              constant_lattice=self.constant_lattice, coords_are_displacement=False,
                              base_positions=self.base_positions)
        if isinstance(frames, (list, np.ndarray)):
            # For list input, return a trajectory of the specified times
            pruned_frames = [i for i in frames if i < len(self)]  # Get rid of frames that exceed trajectory length
            if len(pruned_frames) < len(frames):
                warnings.warn('Some or all selected frames exceed trajectory length')
            lattice = self.lattice if self.constant_lattice else [self.lattice[i] for i in pruned_frames]
            frac_coords = [self.frac_coords[i] for i in pruned_frames]
            if self.site_properties is not None:
                site_properties = [self.site_properties[i] for i in pruned_frames]
            else:
                site_properties = None
            if self.frame_properties is not None:
                frame_properties = {}
                for key, item in self.frame_properties.items():
                    frame_properties[key] = [item[i] for i in pruned_frames]
            else:
                frame_properties = None
            return Trajectory(lattice, self.species, frac_coords, time_step=self.time_step,
                              site_properties=site_properties, frame_properties=frame_properties,
                              constant_lattice=self.constant_lattice, coords_are_displacement=False,
                              base_positions=self.base_positions)
        raise Exception('Given accessor is not of type int, slice, tuple, list, or array')

    def copy(self):
        """
        :return: Copy of Trajectory.
        """
        return Trajectory(self.lattice, self.species, self.frac_coords, time_step=self.time_step,
                          site_properties=self.site_properties, frame_properties=self.frame_properties,
                          constant_lattice=self.constant_lattice, coords_are_displacement=False,
                          base_positions=self.base_positions)

    @classmethod
    def from_structures(cls, structures, constant_lattice=True, **kwargs):
        """
        Convenience constructor to obtain trajectory from a list of structures.
        Note: Assumes no atoms removed during simulation

        Args:
            structures (list): list of pymatgen Structure objects.
            constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD
                simulation.

        Returns:
            (Trajectory)
        """
        frac_coords = [structure.frac_coords for structure in structures]
        if constant_lattice:
            lattice = structures[0].lattice.matrix
        else:
            lattice = [structure.lattice.matrix for structure in structures]
        # (fixed: removed a dead `site_properties = {}` that was immediately
        # overwritten by the list below)
        site_properties = [structure.site_properties for structure in structures]
        return cls(lattice, structures[0].species, frac_coords, site_properties=site_properties,
                   constant_lattice=constant_lattice, **kwargs)

    @classmethod
    def from_file(cls, filename, constant_lattice=True, **kwargs):
        """
        Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file

        Args:
            filename (str): The filename to read from.
            constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD
                simulation.

        Returns:
            (Trajectory)
        """
        # TODO: Support other filetypes
        fname = os.path.basename(filename)
        if fnmatch(fname, "*XDATCAR*"):
            structures = Xdatcar(filename).structures
        elif fnmatch(fname, "vasprun*.xml*"):
            structures = Vasprun(filename).structures
        else:
            raise ValueError("Unsupported file")
        return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs)

    def as_dict(self):
        """
        :return: MSONAble dict.
        """
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__,
             "species": self.species, "time_step": self.time_step,
             "site_properties": self.site_properties,
             "frame_properties": self.frame_properties,
             "constant_lattice": self.constant_lattice,
             "coords_are_displacement": self.coords_are_displacement,
             "base_positions": self.base_positions}
        # self.lattice is a plain Python list when the lattice varies per
        # frame, so go through np.asarray to get .tolist() in both cases
        # (fixed: previously crashed for non-constant lattices).
        d["lattice"] = np.asarray(self.lattice).tolist()
        d["frac_coords"] = self.frac_coords.tolist()
        return d

    @staticmethod
    def _combine_lattice(attr_1, attr_2, len_1, len_2):
        """
        Helper function to combine the lattices of two trajectories.

        Returns:
            (lattice, constant_lattice) tuple.
        """
        if np.shape(attr_1) == (3, 3) and np.shape(attr_2) == (3, 3):
            # NOTE(review): both constant — the first lattice is kept even if
            # the two differ; preserved from the original implementation.
            attribute = attr_1
            attribute_constant = True
        elif len(np.shape(attr_1)) == 3 and len(np.shape(attr_2)) == 3:
            # Both are per-frame stacks of lattices (fixed: the original
            # compared `np.shape(...) == 3`, a tuple-vs-int comparison that
            # is never true, so this branch was unreachable).
            attribute = np.concatenate((attr_1, attr_2), axis=0)
            attribute_constant = False
        else:
            # Mixed constant/per-frame: expand the constant one per frame.
            attribute = [attr_1.copy()] * len_1 if isinstance(attr_1, list) else attr_1.copy()
            attribute.extend([attr_2.copy()] * len_2 if isinstance(attr_2, list) else attr_2.copy())
            attribute_constant = False
        return attribute, attribute_constant

    @staticmethod
    def _combine_site_props(attr_1, attr_2, len_1, len_2):
        """
        Helper function to combine site properties of 2 trajectories.

        A value of None means "no properties"; a length-1 list means one
        constant property dict for every frame of that trajectory.
        """
        if attr_1 is None and attr_2 is None:
            return None
        if attr_1 is not None and attr_2 is not None \
                and len(attr_1) == 1 and len(attr_2) == 1 and attr_1 == attr_2:
            # Both trajectories share one identical constant set of site
            # properties, so keep the compact single-entry form.
            return attr_1
        # Otherwise expand each side frame-by-frame and concatenate.
        new_site_properties = []
        for attr, length in ((attr_1, len_1), (attr_2, len_2)):
            if attr is None:
                new_site_properties.extend([None] * length)
            elif len(attr) == 1:
                new_site_properties.extend([attr[0]] * length)
            else:
                new_site_properties.extend(attr)
        return new_site_properties

    @staticmethod
    def _combine_frame_props(attr_1, attr_2, len_1, len_2):
        """
        Helper function to combine frame properties such as energy or pressure.
        """
        if attr_1 is None and attr_2 is None:
            return None
        # Treat a missing frame-property dict as empty so a one-sided
        # extension works (fixed: previously crashed with AttributeError
        # when exactly one of the two was None).
        props_1 = attr_1 if attr_1 is not None else {}
        props_2 = attr_2 if attr_2 is not None else {}
        # Union of all keys seen in either trajectory
        all_keys = set(props_1.keys()).union(set(props_2.keys()))
        new_frame_props = {key: [] for key in all_keys}
        for key in all_keys:
            if key in props_1:
                new_frame_props[key].extend(props_1[key])
            else:
                # Key absent in the first trajectory: pad with None per frame
                new_frame_props[key].extend([None for i in range(len_1)])
            if key in props_2:
                new_frame_props[key].extend(props_2[key])
            else:
                # Key absent in the second trajectory: pad with None per frame
                new_frame_props[key].extend([None for i in range(len_2)])
        return new_frame_props

    def write_Xdatcar(self, filename="XDATCAR", system=None, significant_figures=6):
        """
        Writes Xdatcar to a file. The supported kwargs are the same as those for
        the Xdatcar_from_structs.get_string method and are passed through directly.

        Args:
            filename (str): name of file (It's prudent to end the filename with 'XDATCAR',
                as most visualization and analysis software require this for autodetection)
            system (str): Description of system
            significant_figures (int): Significant figures in the output file
        """
        # Ensure trajectory is in position form
        self.to_positions()
        if system is None:
            system = f'{self[0].composition.reduced_formula}'
        lines = []
        format_str = "{{:.{0}f}}".format(significant_figures)
        # (fixed: removed a duplicated `syms = ...` assignment)
        syms = [site.specie.symbol for site in self[0]]
        site_symbols = [a[0] for a in itertools.groupby(syms)]
        natoms = [len(tuple(a[1])) for a in itertools.groupby(syms)]
        for si, frac_coords in enumerate(self.frac_coords):
            # Only print the "system / scale" header once for a constant
            # lattice.  NOTE(review): the lattice vectors, symbols and counts
            # are still emitted for every frame — preserved from the original;
            # verify against the XDATCAR format expected downstream.
            if self.constant_lattice and si == 0:
                lines.extend([system, "1.0"])
            if self.constant_lattice:
                _lattice = self.lattice
            else:
                _lattice = self.lattice[si]
            for latt_vec in _lattice:
                lines.append(f'{" ".join([str(el) for el in latt_vec])}')
            lines.append(" ".join(site_symbols))
            lines.append(" ".join([str(x) for x in natoms]))
            lines.append(f"Direct configuration= {str(si + 1)}")
            for (frac_coord, specie) in zip(frac_coords, self.species):
                coords = frac_coord
                line = f'{" ".join([format_str.format(c) for c in coords])} {specie}'
                lines.append(line)
        xdatcar_string = "\n".join(lines) + "\n"
        with zopen(filename, "wt") as f:
            f.write(xdatcar_string)
| gVallverdu/pymatgen | pymatgen/core/trajectory.py | Python | mit | 22,192 | [
"VASP",
"pymatgen"
] | 8b313e68ebd441719cf88b0528b82426b131d8e08750da922f4fb799e2463290 |
"""
Purpose: To implement some alternative techniques for periodicity quantification
to compare with TDA
"""
import numpy as np
import scipy.io as sio
import sys
from CSMSSMTools import *
from VideoTools import *
import scipy.stats
import scipy.signal
import scipy.ndimage
def getCutlerDavisFrequencyScore(I, doPlot = False):
    """
    Compute the frequency score suggested by Cutler and Davis, with a slight
    modification using Kurtosis instead of mean versus standard deviation
    :param I: An Nxd matrix representing a video with N frames at a resolution of
        d pixels
    :param doPlot: If true, show the SSM and average power spectrum across all columns
    :returns: Peak height of the averaged power spectrum in standard deviations
        above its mean
    """
    N = I.shape[0]
    (D, _) = getSSM(I, N)
    F = np.zeros(N)
    # Design matrix for linear detrending (constant + slope columns)
    A = np.ones((N, 2))
    A[:, 1] = np.arange(N)
    # Accumulate the power spectrum column by column of the SSM
    for i in range(N):
        x = D[:, i]
        # Linearly detrend (rcond=None selects the modern numpy default and
        # silences the FutureWarning; A is well conditioned, so the fit is
        # unchanged)
        mb = np.linalg.lstsq(A, x, rcond=None)[0]
        y = x - A.dot(mb)
        # Apply Hann window to reduce spectral leakage
        y = y*np.hanning(N)
        F += np.abs(np.fft.fft(y))**2
    # Normalize averaged power spectrum; zero out DC and Nyquist-adjacent bins
    F = F/np.sum(F)
    F[0:2] = 0
    F[-1] = 0
    kurt = scipy.stats.kurtosis(F, fisher = False)
    M = np.mean(F)
    S = np.std(F)
    if doPlot:
        plt.subplot(121)
        plt.imshow(D, cmap='afmhot', interpolation = 'none')
        plt.subplot(122)
        # (fixed: removed plt.hold(True), which was deleted in matplotlib 3.0;
        # axes hold by default)
        plt.plot(F)
        plt.plot([0, N], [M, M], 'b')
        plt.plot([0, N], [M+2*S, M+2*S])
        plt.title("Kurtosis = %.3g"%kurt)
    return (np.max(F) - M)/S
def checkLattice(Q, JJ, II, L, d, offset, CSmooth, doPlot = False):
    """
    Score how well a candidate lattice matches the detected correlation peaks
    (lower score is better; np.inf if nothing matches).
    :param Q: Mx2 array of candidate lattice points (row, col), centered at 0
    :param JJ: column coordinates of detected peaks, centered at 0
    :param II: row coordinates of detected peaks, centered at 0
    :param L: half-width of the peak search window
    :param d: lattice spacing (points farther than d/2 from a peak are unmatched)
    :param offset: offset mapping centered coordinates back into CSmooth indices
    :param CSmooth: smoothed, normalized autocorrelation image
    :param doPlot: if True, plot peaks, lattice points and matches
    :returns: (err, matched-lattice ratio, score, Q)
    """
    P = np.zeros((len(JJ), 2))
    P[:, 0] = II
    P[:, 1] = JJ
    # Find closest peak in P to every point in the lattice Q
    CSM = getCSM(Q, P)
    idx = np.argmin(CSM, 1)
    dist = np.min(CSM, 1)
    # Keep only the points that have a normalized correlation value > 0.25
    J = JJ + offset  # Index back into the array C
    I = II + offset
    NPs = np.sum(CSmooth[I, J] > 0.25)  # Number of maxes to try to match
    scores = CSmooth[I[idx], J[idx]]
    idx = idx[scores > 0.25]
    dist = dist[scores > 0.25]
    # Keep only the points that are closer than d/2
    idx = idx[dist < d/2.0]
    dist = dist[dist < d/2.0]
    J = J[idx]
    I = I[idx]
    # Now compute error and ratio of matched points
    err = np.sum(dist)
    r1 = float(len(idx))/Q.shape[0]  # Fraction of lattice points matched
    idx = np.unique(idx)
    r2 = float(len(idx))/NPs  # Fraction of strong peaks matched
    # NOTE(review): NPs can be 0 here, making r2 a divide-by-zero; preserved
    # as-is since the caller only compares scores — confirm against intent.
    denom = r1*r2
    if denom == 0:
        score = np.inf
    else:
        score = (1+err/r1)*((1.0/denom)**3)
    if doPlot:
        # Figure out extent of image so centered coordinates line up
        e = (-offset, -offset+CSmooth.shape[0]-1, -offset + CSmooth.shape[1]-1, -offset)
        plt.imshow(CSmooth, extent=e, cmap='afmhot', interpolation = 'nearest')
        # (fixed: removed plt.hold(True), deleted in matplotlib 3.0)
        # Draw peaks
        plt.scatter(JJ, II, 20, 'r')
        # Draw lattice points
        plt.scatter(Q[:, 1], Q[:, 0], 20, 'b')
        # Draw matched peaks
        plt.scatter(J - offset, I - offset, 30, 'g')
        plt.title("Err = %.3g, Matched: %.3g\nUnmatched:%.3g, score = %.3g"%(err, r1, r2, score))
        plt.xlim([e[0], e[1]])
        plt.ylim([e[2], e[3]])
        plt.xlabel('X Lag')
        plt.ylabel('Y Lag')
    return (err, r1, score, Q)
def checkSquareLattice(JJ, II, L, d, offset, CSmooth, doPlot = False):
    """
    Score a square lattice with spacing d against the detected peaks.
    Builds every grid point (i*d, j*d) within [-L, L] and delegates the
    actual scoring to checkLattice.
    """
    positive = np.arange(0, L+1, d)
    # Mirror about zero without duplicating the origin
    coords = (-positive).tolist()[::-1] + positive.tolist()[1:]
    X, Y = np.meshgrid(coords, coords)
    Q = np.column_stack((Y.ravel(), X.ravel())).astype(np.float64)
    return checkLattice(Q, JJ, II, L, d, offset, CSmooth, doPlot)
def checkDiamondLattice(JJ, II, L, d, offset, CSmooth, doPlot = False):
    """
    Score a diamond (checkerboard) lattice with spacing d against the
    detected peaks.  The lattice is the union of an "even" grid of spacing
    2d through the origin and an "odd" grid offset by d in both directions;
    scoring is delegated to checkLattice.
    """
    even = np.arange(0, L+1, 2*d)
    # Mirror about zero, dropping the duplicated origin
    even_coords = (-even).tolist()[::-1] + even.tolist()[1:]
    odd = np.arange(d, L+1, 2*d)
    odd_coords = (-odd).tolist()[::-1] + odd.tolist()
    XE, YE = np.meshgrid(even_coords, even_coords)
    XO, YO = np.meshgrid(odd_coords, odd_coords)
    Q_even = np.column_stack((YE.ravel(), XE.ravel()))
    Q_odd = np.column_stack((YO.ravel(), XO.ravel()))
    Q = np.concatenate((Q_even, Q_odd), 0).astype(np.float64)
    return checkLattice(Q, JJ, II, L, d, offset, CSmooth, doPlot)
def correlateSquareSameFFT(M1, M2):
    """
    Circularly convolve two NxN matrices after zero-padding each to 2Nx2N,
    using the FFT convolution theorem.
    :param M1: NxN matrix
    :param M2: NxN matrix
    :returns: 2Nx2N magnitude of the inverse transform of the spectral product
    """
    n = M1.shape[0]
    padded1 = np.zeros((2*n, 2*n))
    padded2 = np.zeros((2*n, 2*n))
    padded1[:n, :n] = M1
    padded2[:n, :n] = M2
    # Pointwise product in the frequency domain == circular convolution
    spectrum = np.fft.fft2(padded1) * np.fft.fft2(padded2)
    return np.abs(np.fft.ifft2(spectrum))
#Inspired by
#https://mail.scipy.org/pipermail/scipy-dev/2013-December/019498.html
def normautocorr2d(a):
    """
    Compute a count-normalized 2D autocorrelation of a via FFT, by
    correlating a with its 180-degree rotation and dividing by the
    correlation of a**2 with an all-ones mask.
    """
    rotated = np.flipud(np.fliplr(a))
    corr = correlateSquareSameFFT(a, rotated)
    energy = correlateSquareSameFFT(a**2, np.ones(a.shape))
    # NOTE(review): sqrt(energy**2) is simply abs(energy); kept verbatim to
    # preserve the original normalization exactly.
    return corr/np.sqrt(energy**2)
def getCutlerDavisLatticeScore(I, doPlot = False):
    """
    Compute the Cutler-Davis lattice score: the best match between the
    smoothed, normalized autocorrelation peaks of the SSM and a family of
    square/diamond lattices of varying spacing.
    :param I: An Nxd matrix representing a video with N frames at a resolution
        of d pixels
    :param doPlot: If true, save a plot for each lattice spacing that is tried
    :returns: dict with the best 'score', the SSM 'D', the best lattice 'Q',
        its spacing 'd', the window half-width 'L', the peak 'offset', peak
        coordinates 'JJ'/'II', and the smoothed autocorrelation 'CSmooth'
    """
    N = I.shape[0]
    L = int(N/3)
    (D, _) = getSSM(I, N)
    # Step 1: Do count normalized autocorrelation with FFT zeropadding
    C = normautocorr2d(D)
    # Step 2: Apply Gaussian filter
    [JJ, II] = np.meshgrid(np.arange(-3, 4), np.arange(-3, 4))
    sigma = 1
    G = np.exp(-(II**2 + JJ**2)/(2*sigma**2))
    G = G/np.sum(G)  # Normalize so max autocorrelation is still 1 after smoothing
    CSmooth = scipy.signal.correlate2d(C, G, 'valid')
    # Step 3: Do peak picking on the central (2L+1)x(2L+1) window
    CSmooth = CSmooth[N-L:N+L+1, N-L:N+L+1]
    CSmooth = CSmooth - np.min(CSmooth)
    CSmooth = CSmooth/np.max(CSmooth)
    # (fixed: scipy.ndimage.filters was deprecated and later removed; the
    # function lives directly in scipy.ndimage)
    M = scipy.ndimage.maximum_filter(CSmooth, size=5)
    [JJ, II] = np.meshgrid(np.arange(M.shape[1]), np.arange(M.shape[0]))
    # Account for gaussian filter width after 'valid' convolution
    offset = L - int(np.ceil(G.shape[0]/2.0))
    JJ = JJ[M == CSmooth] - offset
    II = II[M == CSmooth] - offset
    # Step 4: search over lattice types and spacings, keeping the best score
    minscore = np.inf
    minQ = np.array([[]])
    mind = 2
    for d in range(2, L):
        if doPlot:
            plt.clf()
        (err, ratio, score, Q) = checkDiamondLattice(JJ, II, L, d, offset, CSmooth, doPlot)
        if doPlot:
            plt.savefig("DiamondLattice%i.png"%d, bbox_inches='tight')
        if score < minscore:
            minscore = score
            minQ = Q
            mind = d
        if doPlot:
            plt.clf()
        (err, ratio, score, Q) = checkSquareLattice(JJ, II, L, d, offset, CSmooth, doPlot)
        if doPlot:
            plt.savefig("SquareLattice%i.png"%d, bbox_inches='tight')
        if score < minscore:
            minscore = score
            minQ = Q
            mind = d
    return {'score':minscore, 'D':D, 'Q':minQ, 'd':mind, 'L':L, 'offset':offset, 'JJ':JJ, 'II':II, 'CSmooth':CSmooth}
def getD2ChiSquareScore(I, win, dim, derivWin = -1, NBins = 50):
    """
    Score periodicity by comparing the pairwise-distance histogram of the
    sliding window embedding against that of an ideal circle, using the
    chi-squared histogram distance.
    :param I: An Nxd matrix representing a video with N frames
    :param win: sliding window length
    :param dim: sliding window embedding dimension
    :param derivWin: time-derivative window (skipped if <= 0)
    :param NBins: number of histogram bins (fixed: this parameter was
        previously accepted but ignored; both histograms hard-coded 50 bins)
    :returns: dict with chi-squared 'score', histograms 'h'/'hGT', and
        distance matrices 'D'/'DGT'
    """
    print("Doing PCA...")
    X = getPCAVideo(I)
    print("Finished PCA")
    if derivWin > 0:
        [X, validIdx] = getTimeDerivative(X, derivWin)
    Tau = win/float(dim-1)
    N = X.shape[0]
    dT = (N-dim*Tau)/float(N)
    XS = getSlidingWindowVideo(X, dim, Tau, dT)
    # Mean-center sliding window embedding
    XS = XS - np.mean(XS, 0)[None, :]
    D = getCSM(XS, XS)
    D = D/np.max(D)
    N = D.shape[0]
    # Compute target distribution from a densely sampled circle of radius 0.5
    # TODO: Closed form equation for this
    M = N*10
    X = np.zeros((M, 2))
    X[:, 0] = 0.5*np.cos(2*np.pi*np.arange(M)/M)
    X[:, 1] = 0.5*np.sin(2*np.pi*np.arange(M)/M)
    DGT = getCSM(X, X)
    [I, J] = np.meshgrid(np.arange(M), np.arange(M))
    (hGT, edges) = np.histogram(DGT[I > J], bins=NBins)
    hGT = 1.0*hGT/np.sum(hGT)
    # Compute this trajectory's distribution over the same number of bins
    [I, J] = np.meshgrid(np.arange(N), np.arange(N))
    (h, edges) = np.histogram(D[I > J], bins=NBins)
    h = 1.0*h/np.sum(h)
    # Chi-squared distance, guarding against empty bins in both histograms
    num = (h - hGT)**2
    denom = h + hGT
    num[denom <= 0] = 0
    denom[denom <= 0] = 1
    d = np.sum(num / denom)
    return {'score':d, 'h':h, 'hGT':hGT, 'DGT':DGT, 'D':D}
def getDelaunayAreaScore(I, win, dim, derivWin = -1, doPlot = False):
    """
    Score the "roundness" of a sliding window embedding: project it to 2D
    with a diffusion map, then return the radius of the largest empty circle
    (largest Delaunay circumcircle whose center lies inside the convex hull)
    of the projected points.

    :param I: Input video/time series, one observation per row
    :param win: Length (in samples) of the sliding window
    :param dim: Embedding dimension of the sliding window
    :param derivWin: If > 0, width of the time derivative window applied first
    :param doPlot: If True, draw the SSM, diffusion map, and the max circle
    :returns: R, the radius of the largest empty circumcircle found
        (0 if no circumcenter falls inside the convex hull)
    """
    from SpectralMethods import getDiffusionMap
    from scipy.spatial import Delaunay
    from GeometryTools import getMeanShiftKNN
    print("Doing PCA...")
    X = getPCAVideo(I)
    print("Finished PCA")
    if derivWin > 0:
        [X, validIdx] = getTimeDerivative(X, derivWin)
    Tau = win/float(dim-1)
    N = X.shape[0]
    dT = (N-dim*Tau)/float(N)
    XS = getSlidingWindowVideo(X, dim, Tau, dT)
    #Mean-center and normalize sliding window
    XS = XS - np.mean(XS, 0)[None, :]
    XS = XS/np.sqrt(np.sum(XS**2, 1))[:, None]
    D = getCSM(XS, XS)
    tic = time.time()
    Y = getDiffusionMap(D, 0.1)
    # Take two diffusion coordinates (columns -2, -3) and scale so the
    # farthest point has unit norm, then denoise with mean shift.
    X = Y[:, [-2, -3]]
    XMags = np.sqrt(np.sum(X**2, 1))
    X = X/np.max(XMags)
    X = getMeanShiftKNN(X, int(0.1*N))
    tri = Delaunay(X)
    #Compute all triangle circumcenters (Ux, Uy) relative to vertex P0,
    #using the standard closed form from the two edge vectors V1, V2
    P0 = X[tri.simplices[:, 0], :]
    P1 = X[tri.simplices[:, 1], :]
    P2 = X[tri.simplices[:, 2], :]
    V1 = P1 - P0
    V2 = P2 - P0
    Bx = V1[:, 0]
    By = V1[:, 1]
    Cx = V2[:, 0]
    Cy = V2[:, 1]
    Dp = 2*(Bx*Cy - By*Cx)
    Ux = (Cy*(Bx**2+By**2)-By*(Cx**2+Cy**2))/Dp
    Uy = (Bx*(Cx**2+Cy**2)-Cx*(Bx**2+By**2))/Dp
    Rs = np.sqrt(Ux**2 + Uy**2) #Radii of points
    Cs = np.zeros((len(Ux), 2))
    Cs[:, 0] = Ux
    Cs[:, 1] = Uy
    Cs = Cs + P0 #Add back offset
    #Prune down to triangle circumcenters which are inside
    #the convex hull of the points
    idx = np.arange(Cs.shape[0])
    idx = idx[tri.find_simplex(Cs) > -1]
    #Find the maximum radius empty circle inside of the convex hull
    [R, cx, cy] = [0]*3
    if len(idx) > 0:
        idxmax = idx[np.argmax(Rs[idx])]
        cx = Ux[idxmax] + P0[idxmax, 0]
        cy = Uy[idxmax] + P0[idxmax, 1]
        R = Rs[idxmax]
    toc = time.time()
    print("Elapsed Time: %g"%(toc-tic))
    if doPlot:
        plt.subplot(131)
        plt.imshow(D, cmap = 'afmhot')
        plt.title("SSM")
        plt.subplot(132)
        plt.imshow(Y, aspect = 'auto', cmap = 'afmhot', interpolation = 'nearest')
        plt.title("Diffusion Map")
        plt.subplot(133)
        plt.scatter(X[:, 0], X[:, 1])
        #simplices = tri.simplices.copy()
        #plt.triplot(X[:, 0], X[:, 1], simplices)
        #Plot maximum circle
        t = np.linspace(0, 2*np.pi, 100)
        plt.scatter(cx, cy, 20, 'r')
        plt.plot(cx + R*np.cos(t), cy + R*np.sin(t))
        plt.axis('equal')
        plt.title("R = %g"%R)
        plt.xlim([-1, 1])
        plt.ylim([-1, 1])
    return R
if __name__ == '__main__':
    # Demo: score a noisy 2D periodic curve with all three periodicity
    # measures and save diagnostic figures.
    np.random.seed(10)
    plt.figure(figsize=(12, 6))
    N = 20
    NPeriods = 20
    # One period of a warped parameter t in [0, 2*pi).
    t = np.linspace(-1, 1, N+1)[0:N]#**3
    t = 0.5*t/max(np.abs(t)) + 0.5
    t = 2*np.pi*t
    #t = np.linspace(0, 5*2*np.pi, N)
    # Repeat the same closed curve NPeriods times, then add Gaussian noise.
    X = np.zeros((N*NPeriods, 2))
    for i in range(NPeriods):
        X[i*N:(i+1)*N, 0] = np.cos(t) + 2*np.cos(4*t)
        X[i*N:(i+1)*N, 1] = np.sin(t) + 2*np.sin(4*t)
    X = X + 1*np.random.randn(N*NPeriods, 2)
    #X = np.random.randn(N*NPeriods, 2)
    r = getCutlerDavisLatticeScore(X)
    #Plot results
    plt.figure(figsize=(18, 4))
    plt.subplot(131)
    plt.scatter(X[:, 0], X[:, 1], 20)
    plt.title('Time Series')
    plt.subplot(132)
    plt.imshow(r['D'], cmap='afmhot', interpolation = 'nearest')
    plt.title('SSM')
    plt.subplot(133)
    checkLattice(r['Q'], r['JJ'], r['II'], r['L'], r['d'], r['offset'], r['CSmooth'], doPlot = True)
    plt.savefig("Lattice.svg", bbox_inches = 'tight')
    # D2 chi squared score: compare distance histograms.
    r = getD2ChiSquareScore(X, N, N)
    plt.figure(figsize=(10, 6))
    plt.plot(r['hGT'], 'k')
    plt.plot(r['h'], 'b')
    plt.savefig("D2.svg", bbox_inches = 'tight')
    # Delaunay / largest-empty-circle score on the diffusion map embedding.
    plt.figure(figsize=(16, 5))
    getDelaunayAreaScore(X, N, N, doPlot = True)
    plt.savefig("Diffusion.svg", bbox_inches = 'tight')
| ctralie/SlidingWindowVideoTDA | AlternativePeriodicityScoring.py | Python | apache-2.0 | 11,915 | [
"Gaussian"
] | a9b62a85134ecc46e7ec1293331fa6114b8e9f0e78788a4693896c477f3ac3c1 |
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from . import MinCovDet
from ..utils.validation import check_is_fitted, check_array
from ..utils.validation import _deprecate_positional_args
from ..metrics import accuracy_score
from ..base import OutlierMixin
class EllipticEnvelope(OutlierMixin, MinCovDet):
    """An object for detecting outliers in a Gaussian distributed dataset.
    Read more in the :ref:`User Guide <outlier_detection>`.
    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.
    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. If None, the minimum value of support_fraction will
        be used within the algorithm: `[n_sample + n_features + 1] / 2`.
        Range is (0, 1).
    contamination : float, default=0.1
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Range is (0, 0.5).
    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling
        the data. Pass an int for reproducible results across multiple function
        calls. See :term: `Glossary <random_state>`.
    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated robust location.
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated robust covariance matrix.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute the
        robust estimates of location and shape.
    offset_ : float
        Offset used to define the decision function from the raw scores.
        We have the relation: ``decision_function = score_samples - offset_``.
        The offset depends on the contamination parameter and is defined in
        such a way we obtain the expected number of outliers (samples with
        decision function < 0) in training.
        .. versionadded:: 0.20
    raw_location_ : ndarray of shape (n_features,)
        The raw robust estimated location before correction and re-weighting.
    raw_covariance_ : ndarray of shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.
    raw_support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.
    dist_ : ndarray of shape (n_samples,)
        Mahalanobis distances of the training set (on which :meth:`fit` is
        called) observations.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EllipticEnvelope
    >>> true_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
    ...                                                  cov=true_cov,
    ...                                                  size=500)
    >>> cov = EllipticEnvelope(random_state=0).fit(X)
    >>> # predict returns 1 for an inlier and -1 for an outlier
    >>> cov.predict([[0, 0],
    ...              [3, 3]])
    array([ 1, -1])
    >>> cov.covariance_
    array([[0.7411..., 0.2535...],
           [0.2535..., 0.3053...]])
    >>> cov.location_
    array([0.0813... , 0.0427...])
    See Also
    --------
    EmpiricalCovariance, MinCovDet
    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.
    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
       minimum covariance determinant estimator" Technometrics 41(3), 212
       (1999)
    """
    @_deprecate_positional_args
    def __init__(self, *, store_precision=True, assume_centered=False,
                 support_fraction=None, contamination=0.1,
                 random_state=None):
        super().__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state)
        # contamination is only used by fit() to set offset_; the MCD
        # estimation itself does not depend on it.
        self.contamination = contamination
    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            The fitted estimator.
        """
        super().fit(X)
        # Threshold on the negated Mahalanobis distances such that a
        # `contamination` fraction of training samples scores below it.
        self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
        return self
    def decision_function(self, X):
        """Compute the decision function of the given observations.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        decision : ndarray of shape (n_samples,)
            Decision function of the samples.
            It is equal to the shifted Mahalanobis distances.
            The threshold for being an outlier is 0, which ensures a
            compatibility with other outlier detection algorithms.
        """
        check_is_fitted(self)
        negative_mahal_dist = self.score_samples(X)
        return negative_mahal_dist - self.offset_
    def score_samples(self, X):
        """Compute the negative Mahalanobis distances.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        negative_mahal_distances : array-like of shape (n_samples,)
            Opposite of the Mahalanobis distances.
        """
        check_is_fitted(self)
        return -self.mahalanobis(X)
    def predict(self, X):
        """
        Predict the labels (1 inlier, -1 outlier) of X according to the
        fitted model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        X = check_array(X)
        # Default to outlier (-1); samples with non-negative shifted
        # Mahalanobis score are flipped to inlier (+1).
        is_inlier = np.full(X.shape[0], -1, dtype=int)
        values = self.decision_function(X)
        is_inlier[values >= 0] = 1
        return is_inlier
    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.
        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for X.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) w.r.t. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
| xuewei4d/scikit-learn | sklearn/covariance/_elliptic_envelope.py | Python | bsd-3-clause | 8,074 | [
"Gaussian"
] | 45378323f739600c98ac5099fd1af74920779f50ad074f018c018316efe7257d |
#!/usr/bin/env python3
# -*-coding:Utf-8 -*
import pyfits
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
"""Script used to load SMF from Davizon+17 paper with uncertainties and marke
abundance matching with it
"""
redshifts = np.array([0.2, 0.5, 0.8, 1.1, 1.5, 2, 2.5, 3, 3.5, 4.5, 5.5])
numzbin = np.size(redshifts)-1
"""Load the SMF"""
smf = []
for i in range(10):
smf.append(np.loadtxt('../Data/Davidzon/Davidzon+17_SMF_V3.0/mf_mass2b_fl5b_tot_VmaxFit2D'+str(i)+'.dat'))
"""Plot"""
# for i in range(10):
# plt.fill_between(smf[i][:,0], smf[i][:,2], smf[i][:,3], alpha=0.5,
# label=str(redshifts[i])+'<z<'+str(redshifts[i+1]))
# plt.ylim(-6,-2)
# plt.xlim(9,12)
# plt.title('Davidzon+17 Schechter fits')
# plt.ylabel('Log($\phi$) [Log($Mpc^{-3}$)]')
# plt.xlabel('Log($M_{*}$) [Log($M_{\odot}$)]')
# plt.legend(loc=3)
# plt.show()
"""Compute Galaxy Cumulative Density
"""
numpoints = np.size(smf[0][:,0])
Nstar = np.empty([numzbin, numpoints])
Nstarminus = np.empty([numzbin, numpoints])
Nstarplus = np.empty([numzbin, numpoints])
for i in range(numzbin):
for j in range(numpoints):
Nstar[i,j]= np.sum(10**smf[i][j:,1])
Nstarminus[i,j] = np.sum(10**smf[i][j:,2])
Nstarplus[i,j] = np.sum(10**smf[i][j:,3])
"""Plot"""
# plt.figure()
# for i in range(10):
# plt.fill_between(10**smf[i][:,0], Nstarminus[i,:], Nstarplus[i,:], alpha=0.5)
# plt.xscale('log');plt.yscale('log')
# plt.ylim(10**-6, 1)
# plt.xlim(10**8, 10**12)
# plt.ylabel('N(>M)')
# plt.xlabel('Mass')
# plt.show()
"""Load Density of DM halos from Bolshoï simulation"""
Nhalo = np.load('Nhalo.npy')
MvirNhalo = np.load('MvirNhalo.npy')
"""Interpolate
"""
MstarIary = []
MstarIaryPlus = []
MstarIaryMinus = []
Mhalo = []
for i in range(numzbin):
"""do the interpolation for each redshift bin, in order to have the functions
StellarMass(abundane) and HaloMass(abundance)"""
MstarIary.append(interp1d(Nstar[i,:], 10**smf[i][:,0]))
MstarIaryMinus.append(interp1d(Nstarminus[i,:], 10**smf[i][:,0]))
MstarIaryPlus.append(interp1d(Nstarplus[i,:], 10**smf[i][:,0]))
Mhalo.append(interp1d(Nhalo[i][:], MvirNhalo))
"""Compute M*/Mh with uncertainties"""
n_fit=1000
x = np.empty([numzbin, n_fit])
xm =np.empty([numzbin, n_fit])
ym =np.empty([numzbin, n_fit])
yminus =np.empty([numzbin, n_fit])
yplus =np.empty([numzbin, n_fit])
for i in range(numzbin):
print(i)
x[i] = np.geomspace(max(min(Nstar[i, Nstar[i,:]>0]),
Nstarminus[i,-1], Nstarplus[i,-1], Nhalo[i, -1]),
min(Nstar[i, 0], Nstarminus[i,0], Nstarplus[i,0], Nhalo[i, 0]), 1000)
x[i][0] = max(min(Nstar[i, Nstar[i,:]>0]), Nstarminus[i,-1], Nstarplus[i,-1], Nhalo[i, -1])
x[i][-1] = min(Nstar[i, 0], Nstarminus[i,0], Nstarplus[i,0], Nhalo[i, 0])
xm[i] = Mhalo[i](x[i])
ym[i] = MstarIary[i](x[i])/Mhalo[i](x[i])
yminus[i] = MstarIaryMinus[i](x[i])/Mhalo[i](x[i])
yplus[i] = MstarIaryPlus[i](x[i])/Mhalo[i](x[i])
"""Plot"""
for i in range(numzbin-1):
index_min = np.argmin(Nhalo[i]>0)
plt.plot(xm[i][index_min:], ym[i][index_min:], label=str(redshifts[i])+'<z<'+str(redshifts[i+1]))
for i in range(numzbin-1):
plt.fill_between(xm[i], yminus[i], yplus[i], alpha=0.5)
plt.legend()
plt.xlim(2.8*10**9,10**15)
plt.ylabel('$M_{*}/M_{h}$', size=20)
plt.xlabel('$M_{h}$ [$M_{\odot}]$', size=20)
plt.xscale('log');plt.yscale('log')
plt.title('IariDavidzon Mass Function vs Bolshoï simulation')
plt.show()
| Gorbagzog/StageIAP | IaryDavidzonSMF_wrong.py | Python | gpl-3.0 | 3,498 | [
"Galaxy"
] | 1de0601229bbca25e0277554fd557fcfac7907b55fb543808aac6c1403127d89 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vsvip
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VsVip Avi RESTful Object
description:
- This module is used to configure VsVip object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Field introduced in 17.1.1.
east_west_placement:
description:
- Force placement on all service engines in the service engine group (container clouds only).
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
name:
description:
- Name for the vsvip object.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the vsvip object.
- Field introduced in 17.1.1.
vip:
description:
- List of virtual service ips and other shareable entities.
- Field introduced in 17.1.1.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VsVip object
avi_vsvip:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vsvip
"""
RETURN = '''
obj:
description: VsVip (api/vsvip) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: build the vsvip argument spec and dispatch to the Avi API helper."""
    # Module-specific options; the common Avi connection options are merged in below.
    spec = {
        'state': dict(default='present', choices=['absent', 'present']),
        'cloud_ref': dict(type='str',),
        'dns_info': dict(type='list',),
        'east_west_placement': dict(type='bool',),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
        'vip': dict(type='list',),
        'vrf_context_ref': dict(type='str',),
    }
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No fields require special handling, hence the empty set.
    return avi_ansible_api(module, 'vsvip', set())


if __name__ == '__main__':
    main()
| kbrebanov/ansible | lib/ansible/modules/network/avi/avi_vsvip.py | Python | gpl-3.0 | 4,585 | [
"VisIt"
] | 1fd929bbd70506dc352903bd68eaad6a0e66ff60b7a68d948119128af7dcb0a3 |
##############################################################################
# pymbar: A Python Library for MBAR
#
# Copyright 2010-2014 University of Virginia, Memorial Sloan-Kettering Cancer Center
#
# Authors: Michael Shirts, John Chodera
# Contributors: Kyle Beauchamp
#
# pymbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pymbar. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Please reference the following if you use this code in your research:
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states.
J. Chem. Phys. 129:124105, 2008. http://dx.doi.org/10.1063/1.2978177
This module contains implementations of
* EXP - unidirectional estimator for free energy differences based on Zwanzig relation / exponential averaging
"""
#=============================================================================================
# * Fix computeBAR and computeEXP to be BAR() and EXP() to make them easier to find.
# * Make functions that don't need to be exported (like logsum) private by prefixing an underscore.
# * Make asymptotic covariance matrix computation more robust to over/underflow.
# * Double-check correspondence of comments to equation numbers once manuscript has been finalized.
# * Change self.nonzero_N_k_indices to self.states_with_samples
#=============================================================================================
__authors__ = "Michael R. Shirts and John D. Chodera."
__license__ = "LGPL 2.1"
#=============================================================================================
# IMPORTS
#=============================================================================================
import numpy as np
from pymbar.utils import logsumexp
#=============================================================================================
# One-sided exponential averaging (EXP).
#=============================================================================================
def EXP(w_F, compute_uncertainty=True, is_timeseries=False):
    """Estimate free energy difference using one-sided (unidirectional) exponential averaging (EXP).
    Parameters
    ----------
    w_F : np.ndarray, float
        w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector.
    compute_uncertainty : bool, optional, default=True
        if False, will disable computation of the statistical uncertainty (default: True)
    is_timeseries : bool, default=False
        if True, correlation in data is corrected for by estimation of statistical inefficiency (default: False)
        Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples.
    Returns
    -------
    DeltaF : float
        DeltaF is the free energy difference between the two states.
    dDeltaF : float
        dDeltaF is the uncertainty, and is only returned if compute_uncertainty is set to True
    Notes
    -----
    If you are providing correlated timeseries data, be sure to set the 'is_timeseries' flag to True
    Examples
    --------
    Compute the free energy difference given a sample of forward work values.
    >>> from pymbar import testsystems
    >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
    >>> [DeltaF, dDeltaF] = EXP(w_F)
    >>> print('Forward free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
    Forward free energy difference is 1.088 +- 0.076 kT
    >>> [DeltaF, dDeltaF] = EXP(w_R)
    >>> print('Reverse free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
    Reverse free energy difference is -1.073 +- 0.082 kT
    """
    # Number of work measurements.
    T = float(np.size(w_F))
    # Zwanzig relation: DeltaF = -log < exp(-w_F) >, evaluated with a
    # numerically stable log-sum-exp.
    DeltaF = - (logsumexp(- w_F) - np.log(T))
    if compute_uncertainty:
        # Work with x_i = exp(-w_F_i - max_arg) to avoid overflow.
        max_arg = np.max(-w_F)  # maximum argument
        x = np.exp(-w_F - max_arg)
        # Compute E[x] = <x>
        Ex = x.mean()
        # Compute effective number of uncorrelated samples.
        g = 1.0  # statistical inefficiency
        if is_timeseries:
            # Bug fix: 'import timeseries' is a Python 2 implicit relative
            # import that fails under Python 3; import from the package.
            from pymbar import timeseries
            # Estimate statistical inefficiency of x timeseries.
            g = timeseries.statisticalInefficiency(x, x)
        # Estimate standard error of E[x].
        dx = np.std(x) / np.sqrt(T / g)
        # First-order error propagation: dDeltaF = <x>^-1 dx
        dDeltaF = (dx / Ex)
        # Return estimate of free energy difference and uncertainty.
        return (DeltaF, dDeltaF)
    else:
        return DeltaF
#=============================================================================================
# Gaussian approximation to exponential averaging (Gauss).
#=============================================================================================
def EXPGauss(w_F, compute_uncertainty=True, is_timeseries=False):
    """Estimate free energy difference using a Gaussian approximation to one-sided (unidirectional) exponential averaging.
    Parameters
    ----------
    w_F : np.ndarray, float
        w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector.
    compute_uncertainty : bool, optional, default=True
        if False, will disable computation of the statistical uncertainty (default: True)
    is_timeseries : bool, default=False
        if True, correlation in data is corrected for by estimation of statistical inefficiency (default: False)
        Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples.
    Returns
    -------
    DeltaF : float
        DeltaF is the free energy difference between the two states.
    dDeltaF : float
        dDeltaF is the uncertainty, and is only returned if compute_uncertainty is set to True
    Notes
    -----
    If you are providing correlated timeseries data, be sure to set the 'is_timeseries' flag to True
    Examples
    --------
    Compute the free energy difference given a sample of forward work values.
    >>> from pymbar import testsystems
    >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
    >>> [DeltaF, dDeltaF] = EXPGauss(w_F)
    >>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
    Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT
    >>> [DeltaF, dDeltaF] = EXPGauss(w_R)
    >>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (DeltaF, dDeltaF))
    Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT
    """
    # Number of work measurements.
    T = float(np.size(w_F))
    var = np.var(w_F)
    # Gaussian approximation (second-order cumulant expansion):
    # DeltaF = <w> - var(w)/2.
    DeltaF = np.average(w_F) - 0.5 * var
    if compute_uncertainty:
        # Compute effective number of uncorrelated samples.
        g = 1.0  # statistical inefficiency
        T_eff = T
        if is_timeseries:
            # Bug fix: 'import timeseries' is a Python 2 implicit relative
            # import that fails under Python 3; import from the package.
            from pymbar import timeseries
            # Estimate statistical inefficiency of w_F timeseries.
            g = timeseries.statisticalInefficiency(w_F, w_F)
            T_eff = T / g
        # Estimator variance: var/T_eff from the mean term plus
        # 0.5*var^2/(T_eff - 1) from the sample-variance term.
        dx2 = var / T_eff + 0.5 * var * var / (T_eff - 1)
        dDeltaF = np.sqrt(dx2)
        # Return estimate of free energy difference and uncertainty.
        return (DeltaF, dDeltaF)
    else:
        return DeltaF
#=============================================================================================
# For compatibility with 2.0.1-beta
#=============================================================================================
deprecation_warning = """
Warning
-------
This method name is deprecated, and provided for backward-compatibility only.
It may be removed in future versions.
"""
def computeEXP(*args, **kwargs):
return EXP(*args, **kwargs)
computeEXP.__doc__ = EXP.__doc__ + deprecation_warning
def computeEXPGauss(*args, **kwargs):
return EXPGauss(*args, **kwargs)
computeEXPGauss.__doc__ = EXPGauss.__doc__ + deprecation_warning
def _compatibilityDoctests():
"""
Backwards-compatibility doctests.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> [DeltaF, dDeltaF] = computeEXP(w_F)
>>> [DeltaF, dDeltaF] = computeEXPGauss(w_F)
"""
pass
| kyleabeauchamp/pymbar | pymbar/exp.py | Python | lgpl-2.1 | 9,273 | [
"Gaussian"
] | 1462247ff65aeb9fc0723aa7cd5ed89cd9d1db9c8505f903779f3501c43cdd4b |
import logging
from parmed.unit import nanometers, picoseconds
import numpy as np
logger = logging.getLogger('InterMolLog')
class GromacsGroParser(object):
    """GromacsGroParser reads and writes Gromacs .gro files
    A .gro file also contains some topological information, such as elements and
    residue names, but not enough to construct a full Topology object. This
    information is recorded and stored in the object's public fields.
    """
    def __init__(self, gro_file):
        """Load a .gro gro_file.
        The atom positions can be retrieved by calling getPositions().
        Parameters:
        - gro_file (string) the name of the gro_file to read or write
        """
        self.gro_file = gro_file
    def read(self):
        """Parse self.gro_file, populating positions, velocities, atom_names,
        residue_ids, residue_names, and box_vector on this object.
        Positions are in nanometers and velocities in nanometers/picosecond;
        missing velocity columns default to zero."""
        atomname = list()
        resid = list()
        resname = list()
        boxes = list()
        xyzs = list()
        vels = list()
        with open(self.gro_file) as gro:
            # Skip the title line; the second line is the atom count.
            next(gro)
            n_atoms = int(next(gro).strip())
            for _ in range(n_atoms):
                line = next(gro)
                # First 15 columns: three fixed-width 5-char fields
                # (residue number, residue name, atom name).
                (thisresnum, thisresname, thisatomname) = [line[i*5:i*5+5].strip() for i in range(3)]
                resname.append(thisresname)
                resid.append(int(thisresnum))
                atomname.append(thisatomname)
                entries = line[20:].split()
                # If there aren't 6, then fixed column, presumably 8 digit
                if len(entries) not in [3, 6]:
                    data = line[20:]
                    entries = []
                    spacing = 8
                    # Re-split the numeric section into fixed 8-char fields.
                    for j in range(0, len(data), spacing):
                        entry = data[j:j+spacing].strip()
                        if len(entry) > 0:
                            entries.append(entry)
                entries = [float(x) for x in entries]
                xyz = [x * nanometers for x in entries[:3]]
                xyzs.append(xyz)
                if len(entries) == 6:
                    vel = [v * nanometers / picoseconds for v in entries[3:6]]
                else:
                    # No velocity columns present: default to zero velocity.
                    vel = [v * nanometers / picoseconds for v in [0., 0., 0.]]
                vels.append(vel)
            # Final line: box vector (3 diagonal values, optionally 6 more
            # off-diagonal values for triclinic boxes).
            line = next(gro)
            raw_box_vector = line.split()
            v = np.zeros([3, 3], float) * nanometers
            # Diagonals
            for i in range(3):
                v[i, i] = float(raw_box_vector[i]) * nanometers
            if len(raw_box_vector) == 9:
                k = 3
                # Then the off-diagonals
                for i in range(3):
                    for j in range(3):
                        if i != j:
                            v[i, j] = float(raw_box_vector[k]) * nanometers
                            k += 1
                boxes.append(v)
        self.positions = np.array(xyzs)
        self.velocities = np.array(vels)
        self.atom_names = atomname
        self.residue_ids = resid
        self.residue_names = resname
        self.box_vector = boxes[0]
    def write(self, system):
        """Write the system out in a Gromacs 4.6 format
        Args:
            filename (str): the file to write out to
        """
        with open(self.gro_file, 'w') as gro:
            gro.write("{0}\n".format(system.name))
            gro.write("{0}\n".format(system.n_atoms))
            for n, atom in enumerate(system.atoms):
                if atom.name.isdigit():
                    # Kluge for atoms read in from a LAMMPS data file.
                    atom.name = "LMP_{0}".format(atom.name)
                # .gro wraps at 100,000, which is why the field is 5 width.
                gro.write('{0:5d}{1:<5s}{2:5s}{3:5d}'.format(
                    atom.residue_index%100000, atom.residue_name, atom.name, (n + 1)%100000))
                for pos in atom.position:
                    gro.write('{0:17.12f}'.format(pos.value_in_unit(nanometers)))
                if np.any(atom.velocity):
                    for vel in atom.velocity:
                        gro.write('{0:17.12f}'.format(vel.value_in_unit(nanometers / picoseconds)))
                gro.write('\n')
            # Check for rectangular; should be symmetric, so we don't have to
            # check 6 values
            if (system.box_vector[1, 0]._value == 0 and
                    system.box_vector[2, 0]._value == 0 and
                    system.box_vector[2, 1]._value == 0):
                for i in range(3):
                    gro.write('{0:11.7f}'.format(system.box_vector[i, i].value_in_unit(nanometers)))
            else:
                # Triclinic box: diagonals first, then the six off-diagonals.
                for i in range(3):
                    gro.write('{0:11.7f}'.format(system.box_vector[i, i].value_in_unit(nanometers)))
                for i in range(3):
                    for j in range(3):
                        if i != j:
                            gro.write('{0:11.7f}'.format(system.box_vector[i, j].value_in_unit(nanometers)))
            gro.write('\n')
| shirtsgroup/InterMol | intermol/gromacs/grofile_parser.py | Python | mit | 4,955 | [
"Gromacs",
"LAMMPS"
] | a0d7dadf68015cdfedcd87ddf8aa303539fa9bc3b29d702280db5c306f28b392 |
import click
import parsley
from visitor import Visitor
class Heading(object):
    """A section heading: its text plus the character used to underline it."""

    def __init__(self, text, underline):
        # Only the underline character matters, not its length.
        self.text, self.ctype = text, underline[0]
class Entry(object):
    """A single TODO item parsed from one bullet line."""

    def __init__(self, text, ctype):
        self.text, self.ctype = text, ctype

    @property
    def done(self):
        # 'x' bullets mark completed items.
        return self.ctype == 'x'

    @property
    def important(self):
        # '!' bullets mark high-priority items.
        return self.ctype == '!'
class TODOList(list):
    """Marker subclass of list so the printer can recognize a bullet list."""
    pass
class Document(list):
    """Ordered document elements with blank-line placeholders (None) dropped."""

    def __init__(self, elems):
        kept = [elem for elem in elems if elem is not None]
        super(Document, self).__init__(kept)
rules = r"""
nl = '\n'
blank_line = nl -> None
char = ~nl anything
line_char = '_' | '-' | '*' | '~' | '=' | '#' | '+'
underline = <line_char+>:l ?(len(l) == l.count(l[0])) -> l
bullet = ('*' | 'x' | '-' | '!')
entry = bullet:b ' ' <char+>:c nl -> Entry(c, b)
list = entry+:entries -> TODOList(entries)
heading = <char+>:c nl underline:l nl ?(len(c) == len(l)) -> Heading(c, l)
doc = ((blank_line+ -> None) | heading | list)+:elems -> Document(elems)
"""
grammar = parsley.makeGrammar(rules, locals())
class Printer(Visitor):
    """Renders a parsed TODO Document back to ANSI-colored terminal text."""
    def visit_Document(self, node):
        # Concatenate rendered elements and normalize to one trailing newline.
        return ''.join(self.visit(e) for e in node).strip('\n') + '\n'
    def visit_TODOList(self, node):
        # Sort order: open-important first, then open, then done
        # (False sorts before True in the key tuple).
        entries = sorted(node, key=lambda n: (n.done, not n.important))
        return '\n'.join(self.visit(e) for e in entries) + '\n\n'
    def visit_Heading(self, node):
        # Re-draw the underline with the heading's original character.
        buf = node.text + '\n' + node.ctype * len(node.text) + '\n'
        return click.style(buf, bold=True)
    def visit_Entry(self, node):
        # Color coding: done = green, pending = yellow, important = bold red.
        style = {}
        if node.done:
            style['fg'] = 'green'
        else:
            style['fg'] = 'yellow'
        if node.important:
            style['fg'] = 'red'
            style['bold'] = True
        # Check mark (U+2714) for done entries, open circle (U+25CB) otherwise.
        buf = (u'\u25CB' if not node.done else u'\u2714') + ' ' + node.text
        return click.style(buf, **style)
| mbr/git-todo | git_todo/parser.py | Python | mit | 1,948 | [
"VisIt"
] | d5d0caa8c33a5f8cd15aa8f4eef9710d5e76fdea206813bf47b9643a55046239 |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Galaxy"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.particle_systems import *
class L(Layer):
    # Demo layer: spawns a single Galaxy particle system at screen centre.
    def __init__(self):
        super( L, self).__init__()
        # Other particle systems that can be swapped in by hand when
        # eyeballing the renderer:
        # p = Fireworks()
        # p = Explosion()
        # p = Fire()
        # p = Flower()
        # p = Sun()
        # p = Spiral()
        # p = Meteor()
        p = Galaxy()
        # Centre of the default 640x480 window.
        p.position = (320,240)
        self.add( p )
def main():
    # Resizable window so the particle effect can be inspected at any size.
    director.init( resizable=True )
    main_scene = cocos.scene.Scene()
    main_scene.add( L() )
    director.run( main_scene )
if __name__ == '__main__':
    main()
| dangillet/cocos | test/test_particle_galaxy.py | Python | bsd-3-clause | 978 | [
"Galaxy"
] | 62902e1d4d1d01c43cac5e864d4e11d7b2593ab85548a00611a705e6aad7beff |
def agts(queue):
    # Register the H2 relaxation job with the AGTS queue: 8 cores, a 25
    # minute wall clock, producing the EMT and GPAW energy tables.
    outputs = ['H2-emt.csv', 'H2-gpaw.csv']
    queue.add('H2.agts.py',
              walltime=25,
              ncpus=8,
              creates=outputs)
if __name__ == "__main__":
    # Importing the test module runs the H2 optimizer benchmark directly.
    from ase.optimize.test.H2 import *
| qsnake/gpaw | doc/devel/ase_optimize/H2.agts.py | Python | gpl-3.0 | 215 | [
"ASE",
"GPAW"
] | 6536e14abf56e548a84621368c33820fcce208217ad30deefb46ab1f4158dc63 |
# $HeadURL: $
''' DowntimePolicy module
'''
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
__RCSID__ = '$Id: $'
class DowntimePolicy( PolicyBase ):
  '''
  Maps the downtime information produced by the downtime command into a
  resource status ( Active / Degraded / Banned / Error ).
  '''

  @staticmethod
  def _evaluate( commandResult ):
    '''
    Status mapping:
      command failed        -> Error ( command message as reason )
      no downtime announced -> Active
      OUTAGE severity       -> Banned
      WARNING severity      -> Degraded
      anything else         -> Error ( unknown GOCDB value )
    '''

    if not commandResult[ 'OK' ]:
      return S_OK( { 'Status' : 'Error', 'Reason' : commandResult[ 'Message' ] } )

    downtime = commandResult[ 'Value' ]

    if downtime is None:
      return S_OK( { 'Status' : 'Active', 'Reason' : 'No DownTime announced' } )

    severity = downtime[ 'Severity' ]

    if severity == 'OUTAGE':
      status = 'Banned'
    elif severity == 'WARNING':
      status = 'Degraded'
    else:
      reason = 'DT_Policy: GOCDB returned an unknown value for DT: "%s"' % downtime[ 'DowntimeID' ]
      return S_OK( { 'Status' : 'Error', 'Reason' : reason } )

    #result[ 'EndDate' ] = downtime[ 'EndDate' ]
    reason = '%s %s' % ( downtime[ 'DowntimeID' ], downtime[ 'Description' ] )
    return S_OK( { 'Status' : status, 'Reason' : reason } )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | Sbalbp/DIRAC | ResourceStatusSystem/Policy/DowntimePolicy.py | Python | gpl-3.0 | 1,768 | [
"DIRAC"
] | b5eb16dcc2523ca902acfbe3f08545b62a8c866d3033574eb45882b4d1ddc5de |
import os
import struct
from cStringIO import StringIO
import vstruct
import vstruct.defs.pe as vs_pe
import ordlookup
IMAGE_DLLCHARACTERISTICS_RESERVED_1 = 1
IMAGE_DLLCHARACTERISTICS_RESERVED_2 = 2
IMAGE_DLLCHARACTERISTICS_RESERVED_4 = 4
IMAGE_DLLCHARACTERISTICS_RESERVED_8 = 8
IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 # The DLL can be relocated at load time.
IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080 # Code integrity checks are forced. If you set this flag and a section contains only uninitialized data, set the PointerToRawData member of IMAGE_SECTION_HEADER for that section to zero; otherwise, the image will fail to load because the digital signature cannot be verified.
IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 # The image is compatible with data execution prevention (DEP).
IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200 # The image is isolation aware, but should not be isolated.
IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400 # The image does not use structured exception handling (SEH). No handlers can be called in this image.
IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800 # Do not bind the image.
IMAGE_DLLCHARACTERISTICS_RESERVED_1000 = 0x1000 # Reserved
IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000 # A WDM driver.
IMAGE_DLLCHARACTERISTICS_RESERVED_4000 = 0x4000 # Reserved
IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
IMAGE_SUBSYSTEM_UNKNOWN = 0 #Unknown subsystem.
IMAGE_SUBSYSTEM_NATIVE = 1 #No subsystem required (device drivers and native system processes).
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 #Windows graphical user interface (GUI) subsystem.
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 #Windows character-mode user interface (CUI) subsystem.
IMAGE_SUBSYSTEM_OS2_CUI = 5 #OS/2 CUI subsystem.
IMAGE_SUBSYSTEM_POSIX_CUI = 7 #POSIX CUI subsystem.
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9 #Windows CE system.
IMAGE_SUBSYSTEM_EFI_APPLICATION = 10 #Extensible Firmware Interface (EFI) application.
IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11 #EFI driver with boot services.
IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12 #EFI driver with run-time services.
IMAGE_SUBSYSTEM_EFI_ROM = 13 #EFI ROM image.
IMAGE_SUBSYSTEM_XBOX = 14 #Xbox system.
IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 #Boot application.
IMAGE_FILE_MACHINE_I386 = 0x014c
IMAGE_FILE_MACHINE_IA64 = 0x0200
IMAGE_FILE_MACHINE_AMD64 = 0x8664
machine_names = {
IMAGE_FILE_MACHINE_I386: 'i386',
IMAGE_FILE_MACHINE_IA64: 'ia64',
IMAGE_FILE_MACHINE_AMD64: 'amd64',
}
IMAGE_REL_BASED_ABSOLUTE = 0
IMAGE_REL_BASED_HIGH = 1
IMAGE_REL_BASED_LOW = 2
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_HIGHADJ = 4
IMAGE_REL_BASED_MIPS_JMPADDR = 5
IMAGE_REL_BASED_IA64_IMM64 = 9
IMAGE_REL_BASED_DIR64 = 10
IMAGE_DIRECTORY_ENTRY_EXPORT =0 # Export Directory
IMAGE_DIRECTORY_ENTRY_IMPORT =1 # Import Directory
IMAGE_DIRECTORY_ENTRY_RESOURCE =2 # Resource Directory
IMAGE_DIRECTORY_ENTRY_EXCEPTION =3 # Exception Directory
IMAGE_DIRECTORY_ENTRY_SECURITY =4 # Security Directory
IMAGE_DIRECTORY_ENTRY_BASERELOC =5 # Base Relocation Table
IMAGE_DIRECTORY_ENTRY_DEBUG =6 # Debug Directory
IMAGE_DIRECTORY_ENTRY_COPYRIGHT =7 # (X86 usage)
IMAGE_DIRECTORY_ENTRY_ARCHITECTURE =7 # Architecture Specific Data
IMAGE_DIRECTORY_ENTRY_GLOBALPTR =8 # RVA of GP
IMAGE_DIRECTORY_ENTRY_TLS =9 # TLS Directory
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG =10 # Load Configuration Directory
IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT =11 # Bound Import Directory in headers
IMAGE_DIRECTORY_ENTRY_IAT =12 # Import Address Table
IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT =13 # Delay Load Import Descriptors
IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR =14 # COM Runtime descriptor
IMAGE_DEBUG_TYPE_UNKNOWN =0
IMAGE_DEBUG_TYPE_COFF =1
IMAGE_DEBUG_TYPE_CODEVIEW =2
IMAGE_DEBUG_TYPE_FPO =3
IMAGE_DEBUG_TYPE_MISC =4
IMAGE_DEBUG_TYPE_EXCEPTION =5
IMAGE_DEBUG_TYPE_FIXUP =6
IMAGE_DEBUG_TYPE_OMAP_TO_SRC =7
IMAGE_DEBUG_TYPE_OMAP_FROM_SRC =8
IMAGE_DEBUG_TYPE_BORLAND =9
IMAGE_DEBUG_TYPE_RESERVED10 =10
IMAGE_DEBUG_TYPE_CLSID =11
IMAGE_SCN_CNT_CODE = 0x00000020
IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080
IMAGE_SCN_LNK_OTHER = 0x00000100
IMAGE_SCN_LNK_INFO = 0x00000200
IMAGE_SCN_LNK_REMOVE = 0x00000800
IMAGE_SCN_LNK_COMDAT = 0x00001000
IMAGE_SCN_MEM_FARDATA = 0x00008000
IMAGE_SCN_MEM_PURGEABLE = 0x00020000
IMAGE_SCN_MEM_16BIT = 0x00020000
IMAGE_SCN_MEM_LOCKED = 0x00040000
IMAGE_SCN_MEM_PRELOAD = 0x00080000
IMAGE_SCN_ALIGN_1BYTES = 0x00100000
IMAGE_SCN_ALIGN_2BYTES = 0x00200000
IMAGE_SCN_ALIGN_4BYTES = 0x00300000
IMAGE_SCN_ALIGN_8BYTES = 0x00400000
IMAGE_SCN_ALIGN_16BYTES = 0x00500000
IMAGE_SCN_ALIGN_32BYTES = 0x00600000
IMAGE_SCN_ALIGN_64BYTES = 0x00700000
IMAGE_SCN_ALIGN_128BYTES = 0x00800000
IMAGE_SCN_ALIGN_256BYTES = 0x00900000
IMAGE_SCN_ALIGN_512BYTES = 0x00A00000
IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000
IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000
IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000
IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000
IMAGE_SCN_ALIGN_MASK = 0x00F00000
IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000
IMAGE_SCN_MEM_DISCARDABLE = 0x02000000
IMAGE_SCN_MEM_NOT_CACHED = 0x04000000
IMAGE_SCN_MEM_NOT_PAGED = 0x08000000
IMAGE_SCN_MEM_SHARED = 0x10000000
IMAGE_SCN_MEM_EXECUTE = 0x20000000
IMAGE_SCN_MEM_READ = 0x40000000
IMAGE_SCN_MEM_WRITE = 0x80000000
# Flags for the UNWIND_INFO flags field from
# RUNTIME_FUNCTION defs
UNW_FLAG_NHANDLER = 0x0
UNW_FLAG_EHANDLER = 0x1
UNW_FLAG_UHANDLER = 0x2
UNW_FLAG_CHAININFO = 0x4
# Resource Types
RT_CURSOR = 1
RT_BITMAP = 2
RT_ICON = 3
RT_MENU = 4
RT_DIALOG = 5
RT_STRING = 6
RT_FONTDIR = 7
RT_FONT = 8
RT_ACCELERATOR = 9
RT_RCDATA = 10
RT_MESSAGETABLE = 11
RT_GROUP_CURSOR = 12
RT_GROUP_ICON = 14
RT_VERSION = 16
RT_DLGINCLUDE = 17
RT_PLUGPLAY = 19
RT_VXD = 20
RT_ANICURSOR = 21
RT_ANIICON = 22
RT_HTML = 23
RT_MANIFEST = 24
class VS_VERSIONINFO:
'''
A simple (read-only) VS_VERSIONINFO parser
'''
    def __init__(self, bytes):
        # Key -> value pairs parsed from StringFileInfo / VarFileInfo blocks.
        self._version_info = {}
        self._parseBytes(bytes)
def getVersionValue(self, key, default=None):
'''
Retrieve a key from the VS_VERSIONINFO data.
Example: vs.getVersionValue('FileVersion')
'''
return self._version_info.get(key, default)
    def getVersionKeys(self):
        '''
        Return the keys present in this VS_VERSIONINFO struct.

        Example: for keyname in vs.getVersionKeys(): print keyname
        '''
        return self._version_info.keys()
    def getVersionItems(self):
        '''
        Return dictionary style key,val tuples for the version keys
        in this VS_VERSIONINFO structure.

        Example: for vskey,vsdata in vs.getVersionItems(): print vskey,vsdata
        '''
        return self._version_info.items()
def _parseBytes(self, bytes):
offset = 0
mysize, valsize, vstype = struct.unpack('<HHH', bytes[:6])
offset += 6
offset, vinfosig = self._eatStringAndAlign(bytes, offset)
if vinfosig != 'VS_VERSION_INFO':
Exception('Invalid VS_VERSION_INFO signature!: %s' % repr(vinfosig))
if valsize and valsize >= len(vs_pe.VS_FIXEDFILEINFO()):
ffinfo = vs_pe.VS_FIXEDFILEINFO()
ffinfo.vsParse(bytes[offset:offset+valsize])
offset += valsize
offmod = offset % 4
if offmod:
offset += (4 - offmod)
xmax = min(mysize, len(bytes))
i = 0
while offset < xmax and i < 2:
offset = self._stringFileInfo(bytes, offset)
i += 1
    def _eatStringAndAlign(self, bytes, offset):
        # Consume a UTF-16LE, NUL terminated string starting at offset and
        # return (new_offset, text); new_offset is advanced past the
        # terminator and rounded up to 32 bit alignment.
        ret = ''
        blen = len(bytes)
        while bytes[offset:offset+2] != '\x00\x00':
            ret += bytes[offset:offset+2]
            offset += 2
            # Bail if the terminator is missing before the end of the buffer.
            if offset >= blen:
                break
        # Add 2 for the null terminator
        offset += 2
        offmod = offset % 4
        if offmod:
            offset += (4 - offmod)
        return offset, ret.decode('utf-16le')
def _stringFileInfo(self, bytes, offset):
xoffset = offset
mysize, valsize, valtype = struct.unpack('<HHH', bytes[xoffset:xoffset+6])
xoffset += 6
xoffset, sigstr = self._eatStringAndAlign(bytes, xoffset)
#if sigstr not in ('VarFileInfo','StringFileInfo'):
#raise Exception('Invalid StringFileInfo Key!: %s' % repr(sigstr))
xmax = offset + mysize
if sigstr == 'StringFileInfo':
while xoffset < xmax:
xoffset = self._stringTable(bytes, xoffset, mysize - (xoffset-offset))
elif sigstr == 'VarFileInfo':
while xoffset < xmax:
xoffset = self._varTable(bytes, xoffset, mysize - (xoffset-offset))
xmod = xoffset % 4
if xmod:
xoffset += (4 - xmod)
return xoffset
def _varTable(self, bytes, offset, size):
xmax = offset + size
xoffset = offset
mysize, valsize, valtype = struct.unpack('<HHH', bytes[xoffset:xoffset+6])
xoffset += 6
xoffset, varname = self._eatStringAndAlign(bytes, xoffset)
if xoffset + 4 > len(bytes):
return offset+size
varval = struct.unpack('<I', bytes[xoffset:xoffset+4])[0]
xoffset += 4
self._version_info[varname] = varval
return offset + size
def _stringTable(self, bytes, offset, size):
xmax = offset + size
xoffset = offset
mysize, valsize, valtype = struct.unpack('<HHH', bytes[offset:offset+6])
xoffset += 6
xoffset, hexcpage = self._eatStringAndAlign(bytes, xoffset)
while xoffset < xmax:
xoffset = self._stringData(bytes, xoffset)
if xoffset == -1:
break
xmod = xoffset % 4
if xmod:
xoffset += (4 - xmod)
return offset + size
def _stringData(self, bytes, offset):
'''
Parse out a "String" structure...
'''
xoffset = offset
mysize, valsize, stype = struct.unpack('<HHH', bytes[offset:offset+6])
if mysize == 0:
return -1
xoffset += 6
xoffset, strkey = self._eatStringAndAlign(bytes, xoffset)
# valsize is in words...
valsize *= 2
value = bytes[xoffset : xoffset + valsize ]
# Do utf16le decode if we're "textual data"
if stype == 1:
value = value.decode('utf-16le','ignore')
value = value.split('\x00')[0]
#print 'VALSIZE',valsize,'MYSIZE',mysize
#print 'Key: ->%s<-, ->%s<-' % (strkey,repr(value))
self._version_info[strkey] = value
# No matter what we parse, believe the headers...
return offset + mysize
class ResourceDirectory:
    '''
    Node in the PE resource tree.  The hierarchy is type -> name/id ->
    language, where the leaf directories carry the actual data records.
    '''

    def __init__(self, nameid=None):
        self._rsrc_data = []
        self._rsrc_nameid = nameid
        self._rsrc_subdirs = {}

    def addRsrcDirectory(self, nameid):
        '''Create and register a child directory for the given name/id.'''
        child = ResourceDirectory(nameid=nameid)
        self._rsrc_subdirs[nameid] = child
        return child

    def addRsrcData(self, rva, size, langinfo):
        '''Record a data leaf: (rva, size, (codepage, langid, sublangid)).'''
        self._rsrc_data.append((rva, size, langinfo))

    def getDirById(self, name_id):
        '''Return the child directory for name_id, or None.'''
        return self._rsrc_subdirs.get(name_id)

    def getResourceDef(self, restype, name_id):
        '''
        Look up the first data record for (type, name/id).

        Only meaningful on the root node; returns None when either level
        of the tree is missing or the leaf holds no data.
        '''
        typedir = self._rsrc_subdirs.get(restype)
        if typedir is None:
            return None

        datadir = typedir._rsrc_subdirs.get(name_id)
        if datadir is None or not datadir._rsrc_data:
            return None

        # The first entry in the leaf's data is the one
        return datadir._rsrc_data[0]

    def getDataEntries(self):
        '''Return all data records attached to this node.'''
        return self._rsrc_data
class PE(object):
    def __init__(self, fd, inmem=False):
        """
        Construct a PE object.  use inmem=True if you are
        using a MemObjFile or other "memory like" image.
        """
        object.__init__(self)
        self.inmem = inmem
        # File size is only knowable for on-disk images; used later to
        # bound import-table walks.
        self.filesize = None
        if not inmem:
            fd.seek(0, os.SEEK_END)
            self.filesize = fd.tell()
            fd.seek(0)
        self.fd = fd
        # Default to PE32 (32 bit, 4 byte pointers); corrected below when
        # the machine type indicates PE32+.
        self.pe32p = False
        self.psize = 4
        self.high_bit_mask = 0x80000000
        self.IMAGE_DOS_HEADER = vstruct.getStructure("pe.IMAGE_DOS_HEADER")
        dosbytes = self.readAtOffset(0, len(self.IMAGE_DOS_HEADER))
        self.IMAGE_DOS_HEADER.vsParse(dosbytes)
        nt = self.readStructAtOffset(self.IMAGE_DOS_HEADER.e_lfanew,
                                     "pe.IMAGE_NT_HEADERS")
        # Parse in a default 32 bit, and then check for 64...
        if nt.FileHeader.Machine in [ IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_IA64 ]:
            nt = self.readStructAtOffset(self.IMAGE_DOS_HEADER.e_lfanew,
                                         "pe.IMAGE_NT_HEADERS64")
            self.pe32p = True
            self.psize = 8
            self.high_bit_mask = 0x8000000000000000
        self.IMAGE_NT_HEADERS = nt
def getPdataEntries(self):
sec = self.getSectionByName('.pdata')
if sec == None:
return ()
ret = []
rbytes = self.readAtRva(sec.VirtualAddress, sec.VirtualSize)
while len(rbytes):
f = vs_pe.IMAGE_RUNTIME_FUNCTION_ENTRY()
f.vsParse(rbytes)
rbytes = rbytes[len(f):]
ret.append(f)
return ret
def getDllName(self):
'''
Return the "dll name" from the Name field of the IMAGE_EXPORT_DIRECTORY
if one is present. If not, return None.
'''
if self.IMAGE_EXPORT_DIRECTORY != None:
rawname = self.readAtRva(self.IMAGE_EXPORT_DIRECTORY.Name, 32)
return rawname.split('\x00')[0]
return None
def getImports(self):
"""
Return the list of import tuples for this PE. The tuples
are in the format (rva, libname, funcname).
"""
return self.imports
def getExports(self):
"""
Return the list of exports in this PE. The list contains
tuples in the format; (rva, ord, name).
"""
return self.exports
def getForwarders(self):
"""
[ (rva, name, forwardname), ... ]
"""
return self.forwarders
def getSections(self):
return self.sections
def rvaToOffset(self, rva):
if self.inmem:
return rva
for s in self.sections:
sbase = s.VirtualAddress
ssize = max(s.SizeOfRawData, s.VirtualSize)
if rva >= sbase and rva < sbase+ssize:
return s.PointerToRawData + (rva - sbase)
return 0
def offsetToRva(self, offset):
if self.inmem:
return offset
for s in self.sections:
sbase = s.PointerToRawData
ssize = s.SizeOfRawData
if sbase <= offset and offset < sbase + ssize:
return offset - s.PointerToRawData + s.VirtualAddress
return 0
def getSectionByName(self, name):
for s in self.getSections():
if s.Name.split("\x00", 1)[0] == name:
return s
return None
def readStructAtRva(self, rva, structname, check=False):
s = vstruct.getStructure(structname)
slen = len(s)
if check and not self.checkRva(rva, size=slen):
return None
bytes = self.readAtRva(rva, len(s))
if not bytes:
return None
s.vsParse(bytes)
return s
def readStructAtOffset(self, offset, structname):
s = vstruct.getStructure(structname)
sbytes = self.readAtOffset(offset, len(s))
if not sbytes:
return None
s.vsParse(sbytes)
return s
def getDataDirectory(self, idx):
return self.IMAGE_NT_HEADERS.OptionalHeader.DataDirectory[idx]
def getResourceDef(self, rtype, name_id):
'''
Get the (rva, size, (codepage,langid,sublangid)) tuple for the specified
resource type/id combination. Returns None if not found.
'''
return self.ResourceRoot.getResourceDef(rtype, name_id)
def getResources(self):
'''
Get the (rtype, nameid, (rva, size, (codepage,langid,sublangid))) tuples for each
resource in the PE.
'''
ret = []
for rtype,subdir in self.ResourceRoot._rsrc_subdirs.items():
for nameid, subsubdir in subdir._rsrc_subdirs.items():
ret.append( (rtype, nameid, subsubdir._rsrc_data[0]) )
return ret
def readResource(self, rtype, name_id):
'''
Return the bytes which define the specified resource. Returns
None if not found.
'''
rsdef = self.getResourceDef(rtype, name_id)
if rsdef == None:
return None
rsrva, rssize, rscpage = rsdef
return self.readAtRva(rsrva, rssize)
def getPdbPath(self):
'''
Parse and return the Pdb path from the Code View 4.0 data
specified by the IMAGE_DEBUG_DIRECTORY strucutre, or None
if a pdb path is not present.
'''
ddir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_DEBUG)
drva = ddir.VirtualAddress
dsize = ddir.Size
d = self.readStructAtRva(drva, 'pe.IMAGE_DEBUG_DIRECTORY', check=True)
if d == None:
return None
if d.Type != IMAGE_DEBUG_TYPE_CODEVIEW:
return None
if not self.checkRva(d.AddressOfRawData, size=d.SizeOfData):
return None
cv = vs_pe.CV_INFO_PDB70()
cv.vsParse( self.readAtRva(d.AddressOfRawData, d.SizeOfData))
if cv.CvSignature != 0x53445352:
return None
return cv.PdbFileName
def getVS_VERSIONINFO(self):
'''
Get a VS_VERSIONINFO object for this PE.
(returns None if version resource is not found)
'''
vbytes = self.readResource(RT_VERSION, 1)
if vbytes == None:
return None
return VS_VERSIONINFO(vbytes)
def parseResources(self):
self.ResourceRoot = ResourceDirectory()
# RP BUG FIX - Binaries can have a .rsrc section it doesn't mean that the .rsrc section contains the resource data we think it does
# validate .rsrc == RESOURCE Section by checking data directory entries...
dresc = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_RESOURCE)
if not dresc.VirtualAddress:
return
done = {}
rsrc_todo = [ (dresc.VirtualAddress, self.ResourceRoot), ]
while len(rsrc_todo):
rsrva, rsdirobj = rsrc_todo.pop()
rsdir = self.readStructAtRva( rsrva, 'pe.IMAGE_RESOURCE_DIRECTORY', check=True )
if rsdir == None:
continue
totcount = rsdir.NumberOfIdEntries + rsdir.NumberOfNamedEntries
# check if our to do is too many, limit borrowed from pefile
if totcount > 4096:
continue
offset = len(rsdir)
for i in xrange(totcount):
dentrva = rsrva + offset
dirent = self.readStructAtRva( dentrva, 'pe.IMAGE_RESOURCE_DIRECTORY_ENTRY', check=True )
if dirent == None:
break
# We use name/id interchangably in the python dict...
name_id = None
if dirent.Name & 0x80000000: # If high bit is set, it's a string!
namerva = dresc.VirtualAddress + (dirent.Name & 0x7fffffff)
namelen_bytes = self.readAtRva(namerva, 2)
if not namelen_bytes:
continue
namelen = struct.unpack('<H', namelen_bytes)[0]
name_id = self.readAtRva(namerva + 2, namelen * 2).decode('utf-16le', 'ignore')
if not name_id:
name_id = dirent.Name
else:
name_id = dirent.Name
# if OffsetToData & IMAGE_RESOURCE_DATA_IS_DIRECTORY then we have another directory
if dirent.OffsetToData & 0x80000000:
# This points to a subdirectory
subdir = rsdirobj.addRsrcDirectory(name_id)
doffset = dirent.OffsetToData & 0x7fffffff
drva = dresc.VirtualAddress + doffset
# XXX - prevent infinite loop by making sure the RVA isnt in our list to visit
# and we aren't currently examining it.
if doffset and rsrva != drva and not done.get(drva):
rsrc_todo.append( (drva, subdir) )
done[drva] = 1
else:
subdata = self.readStructAtRva( dresc.VirtualAddress + dirent.OffsetToData, 'pe.IMAGE_RESOURCE_DATA_ENTRY')
# RP BUG FIX - sanity check the subdata
if subdata and self.checkRva(subdata.OffsetToData, size=subdata.Size):
langid = name_id & 0x3ff
sublangid = name_id >> 10
langinfo = (subdata.CodePage, langid, sublangid )
rsdirobj.addRsrcData(subdata.OffsetToData, subdata.Size, langinfo )
#print 'Data %s : 0x%.8x (%d)' % (name_id, sec.VirtualAddress + subdata.OffsetToData, subdata.Size)
#print repr(self.readAtRva(subdata.OffsetToData, min(subdata.Size, 40) ))
offset += len(dirent)
#print dirent.tree()
def parseSections(self):
self.sections = []
off = self.IMAGE_DOS_HEADER.e_lfanew + len(self.IMAGE_NT_HEADERS)
secsize = len(vstruct.getStructure("pe.IMAGE_SECTION_HEADER"))
sbytes = self.readAtOffset(off, secsize * self.IMAGE_NT_HEADERS.FileHeader.NumberOfSections)
while sbytes:
s = vstruct.getStructure("pe.IMAGE_SECTION_HEADER")
s.vsParse(sbytes[:secsize])
self.sections.append(s)
sbytes = sbytes[secsize:]
def readRvaFormat(self, fmt, rva):
size = struct.calcsize(fmt)
fbytes = self.readAtRva(rva, size)
return struct.unpack(fmt, fbytes)
def readAtRva(self, rva, size, shortok=False):
offset = self.rvaToOffset(rva)
return self.readAtOffset(offset, size, shortok)
def readAtOffset(self, offset, size, shortok=False):
ret = ""
self.fd.seek(offset)
while len(ret) != size:
rlen = size - len(ret)
x = self.fd.read(rlen)
if x == "":
if not shortok:
return None
return ret
ret += x
return ret
def parseLoadConfig(self):
self.IMAGE_LOAD_CONFIG = None
cdir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG)
rva = cdir.VirtualAddress
# RP BUG FIX - validate config directory
if self.checkRva(rva, size=cdir.Size):
self.IMAGE_LOAD_CONFIG = self.readStructAtRva(rva, "pe.IMAGE_LOAD_CONFIG_DIRECTORY")
def readPointerAtOffset(self, off):
fmt = "<L"
if self.psize == 8:
fmt = "<Q"
return struct.unpack(fmt, self.readAtOffset(off, self.psize))[0]
def readPointerAtRva(self, rva):
off = self.rvaToOffset(rva)
return self.readPointerAtOffset(off)
    def getMaxRva(self):
        # SizeOfImage from the optional header bounds every valid RVA.
        return self.IMAGE_NT_HEADERS.OptionalHeader.SizeOfImage
def checkRva(self, rva, size=None):
'''
Make sure an RVA falls inside the valid mapped range
for the file. (also make sure it's not 0...)
'''
if rva == 0:
return False
isize = self.getMaxRva()
if rva > isize:
#raise Exception('too high! %d > %d' % (rva, isize))
return False
if size != None and (rva + size) > isize:
#raise Exception('too big! %d > %d' % (rva+size, isize))
return False
return True
def readStringAtRva(self, rva, maxsize=None):
ret = ''
while True:
if maxsize and maxsize <= len(ret):
break
x = self.readAtRva(rva, 1)
if x == '\x00' or x == None:
break
ret += x
rva += 1
return ret
def parseImports(self):
self.imports = []
idir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_IMPORT)
# RP BUG FIX - invalid IAT entry will point of range of file
irva = idir.VirtualAddress
x = self.readStructAtRva(irva, 'pe.IMAGE_IMPORT_DIRECTORY', check=True)
if x == None:
return
isize = len(x)
while self.checkRva(x.Name):
# RP BUG FIX - we can't assume that we have 256 bytes to read
libname = self.readStringAtRva(x.Name, maxsize=256)
idx = 0
imp_by_name = x.OriginalFirstThunk
if imp_by_name == 0:
imp_by_name = x.FirstThunk
if not self.checkRva(imp_by_name):
break
while True:
arrayoff = self.psize * idx
if self.filesize != None and arrayoff > self.filesize:
self.imports = [] # we probably put grabage in here..
return
ibn_rva = self.readPointerAtRva(imp_by_name+arrayoff)
if ibn_rva == 0:
break
if ibn_rva & self.high_bit_mask:
funcname = ordlookup.ordLookup(libname, ibn_rva & 0x7fffffff)
else:
# RP BUG FIX - we can't use this API on this call because we can have binaries that put their import table
# right at the end of the file, statically saying the imported function name is 128 will cause use to potentially
# over run our read and traceback...
diff = self.getMaxRva() - ibn_rva - 2
ibn = vstruct.getStructure("pe.IMAGE_IMPORT_BY_NAME")
ibn.vsGetField('Name').vsSetLength( min(diff, 128) )
bytes = self.readAtRva(ibn_rva, len(ibn), shortok=True)
if not bytes:
break
try:
ibn.vsParse(bytes)
except:
idx+=1
continue
funcname = ibn.Name
self.imports.append((x.FirstThunk+arrayoff,libname,funcname))
idx += 1
irva += isize
# RP BUG FIX - if the import table is at the end of the file we can't count on the ending to be null
if not self.checkRva(irva, size=isize):
break
x.vsParse(self.readAtRva(irva, isize))
def getRelocations(self):
"""
Return the list of RVA base-relocations in this PE.
"""
return self.relocations
def parseRelocations(self):
self.relocations = []
edir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_BASERELOC)
rva = edir.VirtualAddress
rsize = edir.Size
# RP BUG FIX - don't watn to read past the end of the file
if not self.checkRva(rva):
return
reloff = self.rvaToOffset(rva)
relbytes = self.readAtOffset(reloff, rsize)
while relbytes:
# bounce if we have less than 8 bytes to unpack
if len(relbytes) < 8:
return
pageva, chunksize = struct.unpack("<II", relbytes[:8])
relcnt = (chunksize - 8) / 2
# if chunksize == 0 bail
if not chunksize:
return
# RP BUG FIX - sometimes the chunksize is invalid we do a quick check to make sure we dont overrun the buffer
if chunksize > len(relbytes):
return
rels = struct.unpack("<%dH" % relcnt, relbytes[8:chunksize])
for r in rels:
rtype = r >> 12
roff = r & 0xfff
self.relocations.append((pageva+roff, rtype))
relbytes = relbytes[chunksize:]
def getExportName(self):
'''
Return the name of this file acording to it's export entry.
(if there are no exports, return None)
'''
e = self.IMAGE_EXPORT_DIRECTORY
if e == None:
return None
return self.readAtRva(e.Name, 128).split('\x00')[0]
def parseExports(self):
# Initialize our required locals.
self.exports = []
self.forwarders = []
self.IMAGE_EXPORT_DIRECTORY = None
edir = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_EXPORT)
poff = self.rvaToOffset(edir.VirtualAddress)
if poff == 0: # No exports...
return
self.IMAGE_EXPORT_DIRECTORY = self.readStructAtOffset(poff, "pe.IMAGE_EXPORT_DIRECTORY")
if not self.IMAGE_EXPORT_DIRECTORY:
return
funcoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfFunctions)
funcsize = 4 * self.IMAGE_EXPORT_DIRECTORY.NumberOfFunctions
nameoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfNames)
namesize = 4 * self.IMAGE_EXPORT_DIRECTORY.NumberOfNames
ordoff = self.rvaToOffset(self.IMAGE_EXPORT_DIRECTORY.AddressOfOrdinals)
ordsize = 2 * self.IMAGE_EXPORT_DIRECTORY.NumberOfNames
# RP BUG FIX - sanity check the exports before reading
if not funcoff or not ordoff and not nameoff or funcsize > 0x7FFF:
self.IMAGE_EXPORT_DIRECTORY = None
return
funcbytes = self.readAtOffset(funcoff, funcsize)
namebytes = self.readAtOffset(nameoff, namesize)
ordbytes = self.readAtOffset(ordoff, ordsize)
funclist = struct.unpack("%dI" % (len(funcbytes) / 4), funcbytes)
namelist = struct.unpack("%dI" % (len(namebytes) / 4), namebytes)
ordlist = struct.unpack("%dH" % (len(ordbytes) / 2), ordbytes)
#for i in range(len(funclist)):
for i in range(len(namelist)):
ord = ordlist[i]
nameoff = self.rvaToOffset(namelist[i])
if ord > len(funclist):
self.IMAGE_EXPORT_DIRECTORY = None
return
funcoff = funclist[ord]
ffoff = self.rvaToOffset(funcoff)
name = None
if nameoff != 0:
name = self.readAtOffset(nameoff, 256, shortok=True).split("\x00", 1)[0]
else:
name = "ord_%.4x" % ord
# RP BUG FIX - Export forwarding range check is done using RVA's
if funcoff >= edir.VirtualAddress and funcoff < edir.VirtualAddress + edir.Size:
fwdname = self.readAtRva(funcoff, 260, shortok=True).split("\x00", 1)[0]
self.forwarders.append((funclist[ord],name,fwdname))
else:
self.exports.append((funclist[ord], ord, name))
def getSignature(self):
'''
Returns the SignatureEntry vstruct if the pe has an embedded
certificate, None if the magic bytes are NOT set in the security
directory entry AND the size of the signature entry is less than 0.
'''
ds = self.getDataDirectory(IMAGE_DIRECTORY_ENTRY_SECURITY)
va = ds.VirtualAddress
size = ds.Size
if size <= 0:
return None
bytez = self.readAtOffset(va, size)
if not bytez:
return None
se = vstruct.getStructure('pe.SignatureEntry')
se.vsParse(bytez)
if se.magic != "\x00\x02\x02\x00":
return None
return se
def getSignCertInfo(self):
sig = self.getSignature()
if sig == None:
return ()
# Runtime import these so they are optional dependancies
import pyasn1.type.univ
import pyasn1.type.namedtype
import pyasn1.codec.der.decoder
import pyasn1.codec.der.encoder
import pyasn1_modules.rfc2315
substrate = sig.pkcs7
contentInfo, rest = pyasn1.codec.der.decoder.decode(substrate, asn1Spec=pyasn1_modules.rfc2315.ContentInfo())
if rest: substrate = substrate[:-len(rest)]
contentType = contentInfo.getComponentByName('contentType')
contentInfoMap = {
(1, 2, 840, 113549, 1, 7, 1): pyasn1_modules.rfc2315.Data(),
(1, 2, 840, 113549, 1, 7, 2): pyasn1_modules.rfc2315.SignedData(),
(1, 2, 840, 113549, 1, 7, 3): pyasn1_modules.rfc2315.EnvelopedData(),
(1, 2, 840, 113549, 1, 7, 4): pyasn1_modules.rfc2315.SignedAndEnvelopedData(),
(1, 2, 840, 113549, 1, 7, 5): pyasn1_modules.rfc2315.DigestedData(),
(1, 2, 840, 113549, 1, 7, 6): pyasn1_modules.rfc2315.EncryptedData()
}
seqTypeMap = {
(2,5,4,3): 'CN',
(2,5,4,7): 'L',
(2,5,4,10): 'O',
(2,5,4,11): 'OU',
(1,2,840,113549,1,9,1): 'E',
(2,5,4,6): 'C',
(2,5,4,8): 'ST',
(2,5,4,9): 'STREET',
(2,5,4,12): 'TITLE',
(2,5,4,42): 'G',
(2,5,4,43): 'I',
(2,5,4,4): 'SN',
(0,9,2342,19200300,100,1,25): 'DC',
}
content, _ = pyasn1.codec.der.decoder.decode(
contentInfo.getComponentByName('content'),
asn1Spec=contentInfoMap[contentType]
)
a = content.getComponentByName('certificates')
certs = []
for i in a:
cbytes = pyasn1.codec.der.encoder.encode( i['certificate'] )
iparts = []
for rdnsequence in i["certificate"]["tbsCertificate"]["issuer"]:
for rdn in rdnsequence:
rtype = rdn[0]["type"]
rvalue = rdn[0]["value"][2:]
iparts.append('%s=%s' % ( seqTypeMap.get( rtype, 'UNK'), rvalue))
issuer = ','.join( iparts )
sparts = []
for rdnsequence in i["certificate"]["tbsCertificate"]["subject"]:
for rdn in rdnsequence:
rtype = rdn[0]["type"]
rvalue = rdn[0]["value"][2:]
sparts.append('%s=%s' % ( seqTypeMap.get( rtype, 'UNK'), rvalue))
subject = ','.join(sparts)
serial = int(i["certificate"]["tbsCertificate"]["serialNumber"])
cert = { 'subject':subject, 'issuer':issuer, 'serial':serial, 'bytes':cbytes }
certs.append( cert )
return certs
    def __getattr__(self, name):
        """
        Use a getattr over-ride to allow "on demand" parsing of particular sections.
        """
        # Each lazily parsed attribute triggers its parse pass the first
        # time it is touched; the parse method caches the result on the
        # instance, so this hook only fires once per attribute.
        if name == "exports":
            self.parseExports()
            return self.exports
        elif name == "IMAGE_IMPORT_DIRECTORY":
            self.parseImports()
            return self.IMAGE_IMPORT_DIRECTORY
        elif name == "imports":
            self.parseImports()
            return self.imports
        elif name == "IMAGE_EXPORT_DIRECTORY":
            self.parseExports()
            return self.IMAGE_EXPORT_DIRECTORY
        elif name == "forwarders":
            self.parseExports()
            return self.forwarders
        elif name == "sections":
            self.parseSections()
            return self.sections
        elif name == "ResourceRoot":
            self.parseResources()
            return self.ResourceRoot
        elif name == "relocations":
            self.parseRelocations()
            return self.relocations
        elif name == "IMAGE_LOAD_CONFIG":
            self.parseLoadConfig()
            return self.IMAGE_LOAD_CONFIG
        else:
            raise AttributeError
class MemObjFile:
    """
    A file like object that wraps a MemoryObject (envi) compatable
    object with a file-like object where seek == VA.
    """

    def __init__(self, memobj, baseaddr):
        # Offsets handed to seek() are relative to baseaddr, so seek(0)
        # positions the cursor at the module's base virtual address.
        self.baseaddr = baseaddr
        self.offset = baseaddr
        self.memobj = memobj

    def seek(self, offset):
        """Position the cursor at baseaddr + offset (file offset -> VA)."""
        self.offset = self.baseaddr + offset

    def read(self, size):
        """Read *size* bytes at the cursor and advance past them."""
        data = self.memobj.readMemory(self.offset, size)
        self.offset += size
        return data

    def write(self, bytes):
        """Write *bytes* at the cursor and advance past them."""
        self.memobj.writeMemory(self.offset, bytes)
        self.offset += len(bytes)
def peFromMemoryObject(memobj, baseaddr):
    """Construct a PE from memory mapped at *baseaddr* in *memobj*."""
    return PE(MemObjFile(memobj, baseaddr), inmem=True)
def peFromFileName(fname):
    """
    Utility helper that assures that the file is opened in
    binary mode which is required for proper functioning.
    """
    # Use open() rather than the Python-2-only file() builtin, which was
    # removed in Python 3 (open() is equivalent on Python 2 as well).
    f = open(fname, "rb")
    return PE(f)
def peFromBytes(fbytes):
    """Construct a PE from an in-memory byte string."""
    return PE(StringIO(fbytes))
| HackerTool/vivisect | PE/__init__.py | Python | apache-2.0 | 38,449 | [
"VisIt"
] | f886b5d0c436259a5a3351adc0073d303477f4530b3c47d583e46ada2c72b56a |
# Import Google TransitFeed
import transitfeed
from transitfeed import ServicePeriod
# Version of the generated feed; bumped for each published revision.
mjts_version = "0.0.2"
# New Schedule
schedule = transitfeed.Schedule()
# Create Agency (Moose Jaw, SK: America/Regina is CST year-round, no DST)
schedule.AddAgency("Moose Jaw Transit Service", "http://www.moosejaw.ca/?service=city-of-moose-jaw-transit-division",
                   "America/Regina")
# Calendars
service_periods = []
## Weekday: Monday-Friday service, valid for one year from 2015-10-05.
service_periods.append(ServicePeriod(id="weekday"))
service_periods[0].SetWeekdayService()
service_periods[0].SetStartDate("20151005")
service_periods[0].SetEndDate("20161005")
### Holidays/No Service Days (Christmas Eve/Day/Boxing Day 2015)
service_periods[0].SetDateHasService('20151224', False)
service_periods[0].SetDateHasService('20151225', False)
service_periods[0].SetDateHasService('20151226', False)
## Saturday: day-of-week index 5 is Saturday (transitfeed counts Monday as 0).
service_periods.append(ServicePeriod(id="saturday"))
service_periods[1].SetDayOfWeekHasService(5)
service_periods[1].SetStartDate("20151005")
service_periods[1].SetEndDate("20161005")
### Holidays/No Service Days (Christmas Eve/Day/Boxing Day 2015)
service_periods[1].SetDateHasService('20151224', False)
service_periods[1].SetDateHasService('20151225', False)
service_periods[1].SetDateHasService('20151226', False)
#Add all service period objects to the schedule
schedule.SetDefaultServicePeriod(service_periods[0], validate=True)
schedule.AddServicePeriodObject(service_periods[1], validate=True)
# Fares (none defined yet)
# Routes — route_type "Bus" maps to GTFS route_type 3.
# The four route objects below are referenced when building trips later.
## 1- Athabasca East
ABE = schedule.AddRoute(short_name="1", long_name="Athabasca East",
                        route_type="Bus")
## 2- Sunningdale
SUN = schedule.AddRoute(short_name="2", long_name="Sunningdale",
                        route_type="Bus")
## 3- Athabasca West
ABW = schedule.AddRoute(short_name="3", long_name="Athabasca West",
                        route_type="Bus")
## 4- Westmount
WES = schedule.AddRoute(short_name="4", long_name="Westmount",
                        route_type="Bus")
# Stops — one AddStop call per physical stop.  Variable names encode the
# route prefix and a route-local 4-digit sequence number (e.g. abe_1001 is
# the first stop on Athabasca East); these variables are referenced when
# the trips are assembled further down.
## Athabasca East Stops
abe_1001 = schedule.AddStop(lng=-105.535112, lat=50.391708, name = "Main St. N @ High St. W")
abe_1002 = schedule.AddStop(lng=-105.537392, lat=50.391613, name = "High St. W @ 1st Ave. NW")
abe_1003 = schedule.AddStop(lng=-105.537371, lat=50.396825, name = "1st Ave. NW @ Caribou St. W")
abe_1004 = schedule.AddStop(lng=-105.534774, lat=50.396845, name = "Caribou St. E @ Main St. N")
abe_1005 = schedule.AddStop(lng=-105.531893, lat=50.396877, name = "Caribou St. E @ 1st Ave. NE")
abe_1006 = schedule.AddStop(lng=-105.528183, lat=50.396880, name = "Caribou St. E @ Ross Cres.@3rd Ave. NE")
abe_1007 = schedule.AddStop(lng=-105.524750, lat=50.396858, name = "Caribou St. E @ 4th Ave. NE")
abe_1008 = schedule.AddStop(lng=-105.524557, lat=50.398884, name = "4th Ave. NE @ Oxford St. E")
abe_1009 = schedule.AddStop(lng=-105.524633, lat=50.400889, name = "4th Ave. NE @ Hall St. E")
abe_1010 = schedule.AddStop(lng=-105.524658, lat=50.402656, name = "4th Ave. NE @ Saskatchewan St. E")
abe_1011 = schedule.AddStop(lng=-105.527507, lat=50.402627, name = "Saskatchewan St. E @ 3rd Ave. NE")
abe_1012 = schedule.AddStop(lng=-105.531771, lat=50.402648, name = "Saskatchewan St. E @ 1st Ave. NE")
abe_1013 = schedule.AddStop(lng=-105.534698, lat=50.406387, name = "Town & Country Dr. @ Main St. N")
abe_1014 = schedule.AddStop(lng=-105.533552, lat=50.412926, name = "Main St. East Service Road @ Thatcher Dr. E")
abe_1015 = schedule.AddStop(lng=-105.527029, lat=50.419106, name = "Dr. F.H. Wigmore Regional Hospital")
abe_1016 = schedule.AddStop(lng=-105.521458, lat=50.411943, name = "Highland Rd. @ Thatcher Dr. E")
abe_1017 = schedule.AddStop(lng=-105.515950, lat=50.411921, name = "Thatcher Dr. E @ Chester Rd")
abe_1018 = schedule.AddStop(lng=-105.511780, lat=50.411853, name = "Thatcher Dr. E @ 9th Ave. NE")
abe_1019 = schedule.AddStop(lng=-105.511762, lat=50.407610, name = "9th Ave. NE @ Prairie Oasis Trailer Court")
abe_1020 = schedule.AddStop(lng=-105.511820, lat=50.404814, name = "9th Ave. NE @ Lakeview Trailer Court")
abe_1021 = schedule.AddStop(lng=-105.511825, lat=50.396861, name = "9th Ave. NE @ Caribou St. E")
abe_1022 = schedule.AddStop(lng=-105.508979, lat=50.396846, name = "Caribou St. E @ 10th Ave. NE")
abe_1023 = schedule.AddStop(lng=-105.505852, lat=50.395337, name = "11th Ave. NE @ Athabasca St. E")
abe_1024 = schedule.AddStop(lng=-105.508771, lat=50.395345, name = "Athabasca St. E @ 10th Ave. NE")
abe_1025 = schedule.AddStop(lng=-105.511806, lat=50.395353, name = "Athabasca St. E @ 9th Ave. NE")
abe_1026 = schedule.AddStop(lng=-105.515020, lat=50.395332, name = "Athabasca St. E @ 8th Ave. N")
abe_1027 = schedule.AddStop(lng=-105.517468, lat=50.395330, name = "Athabasca St. E @ 7th Ave. NE")
abe_1028 = schedule.AddStop(lng=-105.517468, lat=50.393499, name = "7th Ave. NE @ Ominica St. E")
abe_1029 = schedule.AddStop(lng=-105.517431, lat=50.392565, name = "7th Ave. NE @ Fairford St. E")
abe_1030 = schedule.AddStop(lng=-105.519853, lat=50.392563, name = "Fairford St. E @ 6th Ave. NE")
abe_1031 = schedule.AddStop(lng=-105.524809, lat=50.392491, name = "Fairford St. E @ 4th Ave. NE")
abe_1032 = schedule.AddStop(lng=-105.527268, lat=50.392518, name = "Fairford St. E @ 3rd Ave. NE")
abe_1033 = schedule.AddStop(lng=-105.529674, lat=50.391562, name = "High St. E @ 2nd Ave. NE")
abe_1034 = schedule.AddStop(lng=-105.532080, lat=50.391564, name = "High St. E @ 1st Ave. N")
## Sunningdale Stops
sun_2001 = schedule.AddStop(lng=-105.534554, lat=50.391816, name = "Main St. N @ High St. W")
sun_2002 = schedule.AddStop(lng=-105.534547, lat=50.395128, name = "Main St. N @ Athabasca St. E")
sun_2003 = schedule.AddStop(lng=-105.534606, lat=50.397097, name = "Main St. N @ Caribou St. E")
sun_2004 = schedule.AddStop(lng=-105.534628, lat=50.400801, name = "Main St. N @ Hall St. E")
sun_2005 = schedule.AddStop(lng=-105.532141, lat=50.404088, name = "Town & Country Dr. @ Civic Centre Dr.")
sun_2006 = schedule.AddStop(lng=-105.534698, lat=50.406387, name = "Town & Country Dr. @ Main St. N")
sun_2007 = schedule.AddStop(lng=-105.536385, lat=50.412762, name = "Thatcher Dr. W @ Main St. West Service Road")
sun_2008 = schedule.AddStop(lng=-105.538747, lat=50.413823, name = "Woodlilly Dr. @ Arrowhead Rd.")
sun_2009 = schedule.AddStop(lng=-105.538403, lat=50.415286, name = "Woodlilly Dr. @ Aster Cres.")
sun_2010 = schedule.AddStop(lng=-105.538396, lat=50.416217, name = "Woodlilly Dr. @ Buttercup Cres.")
sun_2011 = schedule.AddStop(lng=-105.538564, lat=50.418213, name = "Woodlilly Dr. @ Crocus Rd.")
sun_2012 = schedule.AddStop(lng=-105.539069, lat=50.419862, name = "Woodlilly Dr. @ Dahlia Cres. S")
sun_2013 = schedule.AddStop(lng=-105.541255, lat=50.422278, name = "Woodlilly Dr. @ Dahlia Cres. N")
sun_2014 = schedule.AddStop(lng=-105.541936, lat=50.422491, name = "Woodlilly Dr. @ Flax Rd.")
sun_2015 = schedule.AddStop(lng=-105.550880, lat=50.423054, name = "Woodlilly Dr. @ Calypso Dr.")
sun_2016 = schedule.AddStop(lng=-105.554598, lat=50.420152, name = "Woodlilly Dr. @ Iris Dr.")
sun_2017 = schedule.AddStop(lng=-105.554614, lat=50.417789, name = "Woodlilly Dr. @ Lewry Cres.")
sun_2018 = schedule.AddStop(lng=-105.554574, lat=50.413746, name = "Woodlilly Dr. @ Thorn Cres.")
sun_2019 = schedule.AddStop(lng=-105.565848, lat=50.413261, name = "Thatcher Dr. W @ 11th Ave. NW")
sun_2020 = schedule.AddStop(lng=-105.565889, lat=50.411244, name = "11th Ave. NW")
sun_2021 = schedule.AddStop(lng=-105.565857, lat=50.409446, name = "11th Ave. NW @ Normandy Dr.")
sun_2022 = schedule.AddStop(lng=-105.562648, lat=50.409395, name = "Normandy Dr. @ General Cres.")
sun_2023 = schedule.AddStop(lng=-105.561139, lat=50.409373, name = "Normandy Dr. @ Marshall Cres. E")
sun_2024 = schedule.AddStop(lng=-105.557632, lat=50.404846, name = "9th Ave. NW @ MacDonald St. W")
sun_2025 = schedule.AddStop(lng=-105.552479, lat=50.404839, name = "MacDonald St. W @ 7th Ave. NW")
sun_2026 = schedule.AddStop(lng=-105.549693, lat=50.404847, name = "MacDonald St. W @ 6th Ave. NW")
sun_2027 = schedule.AddStop(lng=-105.545519, lat=50.404871, name = "MacDonald St. W @ 4th Ave. NW")
sun_2028 = schedule.AddStop(lng=-105.542706, lat=50.404874, name = "MacDonald St. W @ 3rd Ave. NW")
sun_2029 = schedule.AddStop(lng=-105.538524, lat=50.404879, name = "MacDonald St. W @ Redland Ave. NW")
sun_2030 = schedule.AddStop(lng=-105.534626, lat=50.403219, name = "Main St. N @ Saskatchewan St. W")
sun_2031 = schedule.AddStop(lng=-105.534832, lat=50.400466, name = "Main St. N @ Hall St. W")
sun_2032 = schedule.AddStop(lng=-105.534842, lat=50.398715, name = "Main St. N @ Oxford St. W")
sun_2033 = schedule.AddStop(lng=-105.534848, lat=50.397655, name = "Main St. N @ Ross St. W")
sun_2034 = schedule.AddStop(lng=-105.534821, lat=50.395524, name = "Main St. N @ Athabasca St. W")
sun_2035 = schedule.AddStop(lng=-105.534837, lat=50.394191, name = "Main St. N @ Stadacona St. W")
sun_2036 = schedule.AddStop(lng=-105.534783, lat=50.391786, name = "Main St. N @ High St. W")
## Athabasca West Stops
abw_3001 = schedule.AddStop(lng=-105.534557, lat=50.391898, name = "Main St. N @ High St. W")
abw_3002 = schedule.AddStop(lng=-105.534583, lat=50.393453, name = "Main St. N @ Ominica St. E")
abw_3003 = schedule.AddStop(lng=-105.534585, lat=50.394379, name = "Main St. N @ Stadacona St. E")
abw_3004 = schedule.AddStop(lng=-105.534759, lat=50.395276, name = "Main St. N @ Athabasca St. E")
abw_3005 = schedule.AddStop(lng=-105.534775, lat=50.397846, name = "Main St. N @ Ross. St. E")
abw_3006 = schedule.AddStop(lng=-105.535097, lat=50.398949, name = "Oxford St. W @ Main St. N")
abw_3007 = schedule.AddStop(lng=-105.538723, lat=50.398908, name = "Oxford St. W @ Redland Ave. N")
abw_3008 = schedule.AddStop(lng=-105.544121, lat=50.398931, name = "Oxford St. W @ Henleaze Ave. N")
abw_3009 = schedule.AddStop(lng=-105.549653, lat=50.398924, name = "Oxford St. W @ 6th Ave. NW")
abw_3010 = schedule.AddStop(lng=-105.553907, lat=50.398914, name = "Oxford St. W @ Monk Ave. N")
abw_3011 = schedule.AddStop(lng=-105.557577, lat=50.398907, name = "Oxford St. W @ 9th Ave. NW")
abw_3012 = schedule.AddStop(lng=-105.561411, lat=50.398498, name = "Albert St. N @ 10th Ave. NW")
abw_3013 = schedule.AddStop(lng=-105.565239, lat=50.398485, name = "Albert St. N @ 11th Ave. NW")
abw_3014 = schedule.AddStop(lng=-105.565233, lat=50.400107, name = "11th Ave. NW @ Carleton St. W")
abw_3015 = schedule.AddStop(lng=-105.565290, lat=50.403271, name = "11th Ave. NW @ Grace St. W")
abw_3016 = schedule.AddStop(lng=-105.568479, lat=50.403016, name = "Grace St. W @ Gordon Rd. N")
abw_3017 = schedule.AddStop(lng=-105.573261, lat=50.401748, name = "Grace St. W @ 13th Ave. NW")
abw_3018 = schedule.AddStop(lng=-105.577126, lat=50.399929, name = "Grace St. W @ Prince Charles Pl.")
abw_3019 = schedule.AddStop(lng=-105.578960, lat=50.399818, name = "Grace St. W @ Holdsworth Cres.")
abw_3020 = schedule.AddStop(lng=-105.582893, lat=50.399815, name = "Grace St. W @ Corman Cres.")
abw_3021 = schedule.AddStop(lng=-105.584158, lat=50.402874, name = "Thatcher Dr. W @ Rutherford St. W")
abw_3022 = schedule.AddStop(lng=-105.582415, lat=50.406247, name = "Thatcher Dr. W @ 13th Ave. NW")
abw_3023 = schedule.AddStop(lng=-105.581528, lat=50.405857, name = "13th Ave. NW @ Pascoe Dr. W")
abw_3024 = schedule.AddStop(lng=-105.579708, lat=50.404858, name = "13th Ave. NW @ Mayberry Cres.")
abw_3025 = schedule.AddStop(lng=-105.577642, lat=50.403978, name = "13th Ave. NW @ Regal Cres.")
abw_3026 = schedule.AddStop(lng=-105.574382, lat=50.402371, name = "13th Ave. NW @ Gordon Rd. N")
abw_3027 = schedule.AddStop(lng=-105.571680, lat=50.400022, name = "13th Ave. NW @ Carleton St. W")
abw_3028 = schedule.AddStop(lng=-105.571753, lat=50.397651, name = "13th Ave. NW @ Montgomery St. W")
abw_3029 = schedule.AddStop(lng=-105.574652, lat=50.396813, name = "Caribou St. W @ 14th Ave. NW")
abw_3030 = schedule.AddStop(lng=-105.574611, lat=50.395229, name = "Athabasca St. W @ 14th Ave. NW")
abw_3031 = schedule.AddStop(lng=-105.571741, lat=50.395244, name = "Athabasca St. W @ 13th Ave. NW")
abw_3032 = schedule.AddStop(lng=-105.568876, lat=50.395249, name = "Athabasca St. W @ 12th Ave. NW")
abw_3033 = schedule.AddStop(lng=-105.565239, lat=50.395252, name = "Athabasca St. W @ 11th Ave. NW")
abw_3034 = schedule.AddStop(lng=-105.561386, lat=50.395247, name = "Athabasca St. W @ 10th Ave. NW")
abw_3035 = schedule.AddStop(lng=-105.557595, lat=50.395267, name = "Athabasca St. W @ 9th Ave. NW")
abw_3036 = schedule.AddStop(lng=-105.554396, lat=50.395343, name = "Athabasca St. W @ 8th Ave. NW")
abw_3037 = schedule.AddStop(lng=-105.549513, lat=50.395326, name = "Athabasca St. W @ 6th Ave. NW")
abw_3038 = schedule.AddStop(lng=-105.544721, lat=50.395335, name = "Athabasca St. W @ 4th Ave. NW")
abw_3039 = schedule.AddStop(lng=-105.539796, lat=50.395328, name = "Athabasca St. W @ 2nd Ave. NW")
abw_3040 = schedule.AddStop(lng=-105.534759, lat=50.395276, name = "Athabasca St. W @ Main St. N")
abw_3041 = schedule.AddStop(lng=-105.534585, lat=50.394379, name = "Main St. N @ Stadacona St. W")
abw_3042 = schedule.AddStop(lng=-105.534583, lat=50.393453, name = "Main St. N @ Ominica St. W")
## Westmount Stops
wes_4001 = schedule.AddStop(lng=-105.534838, lat=50.391840, name = "Main St. N @ High St. W")
wes_4002 = schedule.AddStop(lng=-105.534796, lat=50.389692, name = "Main St. N @ Manitoba St. E")
wes_4003 = schedule.AddStop(lng=-105.531977, lat=50.389588, name = "Manitoba St. E @ 1st Ave. NE")
wes_4004 = schedule.AddStop(lng=-105.534579, lat=50.385164, name = "Main St. S @ Home St. W")
wes_4005 = schedule.AddStop(lng=-105.534661, lat=50.384244, name = "Lillooet St. W @ Main St. S")
wes_4006 = schedule.AddStop(lng=-105.539767, lat=50.384217, name = "Lillooet St. W @ 2nd Ave. SW")
wes_4007 = schedule.AddStop(lng=-105.542239, lat=50.384208, name = "Lillooet St. W @ 3rd Ave. SW")
wes_4008 = schedule.AddStop(lng=-105.544732, lat=50.383928, name = "4th Ave. SW @ Lillooet St. W")
wes_4009 = schedule.AddStop(lng=-105.545133, lat=50.382475, name = "Coteau St. W @ 4th Ave. SW")
wes_4010 = schedule.AddStop(lng=-105.549483, lat=50.382410, name = "Coteau St. W @ 6th Ave. SW")
wes_4011 = schedule.AddStop(lng=-105.551911, lat=50.382406, name = "Coteau St. W @ 7th Ave. SW")
wes_4012 = schedule.AddStop(lng=-105.557565, lat=50.382390, name = "Coteau St. W @ 9th Ave. SW")
wes_4013 = schedule.AddStop(lng=-105.561365, lat=50.382375, name = "Coteau St. W @ 10th Ave. SW")
wes_4014 = schedule.AddStop(lng=-105.565221, lat=50.382406, name = "Coteau St. W @ 11th Ave. SW")
wes_4015 = schedule.AddStop(lng=-105.568985, lat=50.382233, name = "Coteau St. W @ 12th Ave. SW")
wes_4016 = schedule.AddStop(lng=-105.572540, lat=50.382258, name = "Coteau St. W @ Iroquois Dr.")
wes_4017 = schedule.AddStop(lng=-105.573745, lat=50.382241, name = "Coteau St. W @ Manitou Cres.")
wes_4018 = schedule.AddStop(lng=-105.576257, lat=50.382249, name = "Coteau St. W @ Souix Cres.")
wes_4019 = schedule.AddStop(lng=-105.578646, lat=50.382265, name = "Coteau St. W @ Cree Cres.")
wes_4020 = schedule.AddStop(lng=-105.580592, lat=50.380492, name = "16th Ave. SW @ Grandview Pl.")
wes_4021 = schedule.AddStop(lng=-105.580552, lat=50.378086, name = "16th Ave. SW @ Hastings St. W")
wes_4022 = schedule.AddStop(lng=-105.580547, lat=50.377234, name = "Warner St. W @ 16th Ave. SW")
wes_4023 = schedule.AddStop(lng=-105.577552, lat=50.377240, name = "Warner St. W @ 15th Ave. SW")
wes_4024 = schedule.AddStop(lng=-105.574645, lat=50.377239, name = "Warner St. W @ 14th Ave. SW")
wes_4025 = schedule.AddStop(lng=-105.571804, lat=50.377236, name = "Warner St. W @ 13th Ave. SW")
wes_4026 = schedule.AddStop(lng=-105.568900, lat=50.377231, name = "Warner St. W @ 12th Ave. SW")
wes_4027 = schedule.AddStop(lng=-105.568807, lat=50.379043, name = "Vaughan St. W @ 12th Ave. SW")
wes_4028 = schedule.AddStop(lng=-105.565020, lat=50.379014, name = "Vaughan St. W @ 11th Ave. SW")
wes_4029 = schedule.AddStop(lng=-105.561514, lat=50.379021, name = "Vaughan St. W @ 10th Ave. SW")
wes_4030 = schedule.AddStop(lng=-105.560026, lat=50.376368, name = "Bradley St.")
wes_4031 = schedule.AddStop(lng=-105.557551, lat=50.378756, name = "9th Ave SW @ Keith St. W")
wes_4032 = schedule.AddStop(lng=-105.557554, lat=50.379603, name = "Vaughan St. W @ 9th Ave. SW")
wes_4033 = schedule.AddStop(lng=-105.554339, lat=50.379635, name = "Vaughan St. W @ 8th Ave. SW")
wes_4034 = schedule.AddStop(lng=-105.551891, lat=50.379588, name = "Vaughan St. W @ 7th Ave. SW")
wes_4035 = schedule.AddStop(lng=-105.551908, lat=50.381473, name = "7th Ave. SW @ Duffield St. W")
wes_4036 = schedule.AddStop(lng=-105.549454, lat=50.381451, name = "Duffield St. W @ 6th Ave. SW")
wes_4037 = schedule.AddStop(lng=-105.547078, lat=50.381470, name = "Duffield St. W @ 5th Ave. SW")
wes_4038 = schedule.AddStop(lng=-105.544654, lat=50.381472, name = "Duffield St. W @ 4th Ave. SW")
wes_4039 = schedule.AddStop(lng=-105.544352, lat=50.382358, name = "4th Ave. SW @ Coteau St. W")
wes_4040 = schedule.AddStop(lng=-105.541103, lat=50.382374, name = "Coteau St. W @ Tapley Ave.")
wes_4041 = schedule.AddStop(lng=-105.538289, lat=50.382384, name = "Coteau St. W @ Scott St.")
wes_4042 = schedule.AddStop(lng=-105.534662, lat=50.382365, name = "Coteau St. W @ Main St. S")
wes_4043 = schedule.AddStop(lng=-105.532026, lat=50.382365, name = "Coteau St. W @ 1st Ave. SE")
wes_4044 = schedule.AddStop(lng=-105.529560, lat=50.382381, name = "2nd Ave. SE @ Coteau St. E")
wes_4045 = schedule.AddStop(lng=-105.531974, lat=50.384236, name = "Lillooet St. E @ 1st Ave. SE")
wes_4046 = schedule.AddStop(lng=-105.531973, lat=50.385158, name = "1st Ave. SE @ Home St. E")
wes_4047 = schedule.AddStop(lng=-105.529648, lat=50.389584, name = "2nd Ave. NE @ Manitoba St. E")
wes_4048 = schedule.AddStop(lng=-105.529674, lat=50.391562, name = "High St. E @ 2nd Ave. NE")
wes_4049 = schedule.AddStop(lng=-105.531760, lat=50.391626, name = "High St. E @ 1st Ave. NE")
# Trips — one AddTrip per route followed by its timed stop sequence.
# NOTE: only the first (7:15) departure of each route is encoded; the
# commented times below each block are later departures still to be added.
## ABE - 7:15:00
trip = ABE.AddTrip(schedule, headsign="To Downtown via Athabasca East")
trip.AddStopTime(abe_1001, stop_time='7:15:00')
trip.AddStopTime(abe_1002, stop_time='7:17:00')
trip.AddStopTime(abe_1003, stop_time='7:17:30')
trip.AddStopTime(abe_1004, stop_time='7:18:00')
trip.AddStopTime(abe_1005, stop_time='7:18:30')
trip.AddStopTime(abe_1006, stop_time='7:19:00')
trip.AddStopTime(abe_1007, stop_time='7:20:00')
trip.AddStopTime(abe_1008, stop_time='7:21:00')
trip.AddStopTime(abe_1009, stop_time='7:22:00')
trip.AddStopTime(abe_1010, stop_time='7:23:00')
trip.AddStopTime(abe_1011, stop_time='7:24:00')
trip.AddStopTime(abe_1012, stop_time='7:25:00')
trip.AddStopTime(abe_1013, stop_time='7:27:00')
trip.AddStopTime(abe_1014, stop_time='7:29:00')
trip.AddStopTime(abe_1015, stop_time='7:35:00')
trip.AddStopTime(abe_1016, stop_time='7:40:00')
trip.AddStopTime(abe_1017, stop_time='7:41:00')
trip.AddStopTime(abe_1018, stop_time='7:42:00')
trip.AddStopTime(abe_1019, stop_time='7:42:30')
trip.AddStopTime(abe_1020, stop_time='7:43:00')
trip.AddStopTime(abe_1021, stop_time='7:43:30')
trip.AddStopTime(abe_1022, stop_time='7:44:00')
trip.AddStopTime(abe_1023, stop_time='7:44:30')
trip.AddStopTime(abe_1024, stop_time='7:45:00')
trip.AddStopTime(abe_1025, stop_time='7:45:30')
trip.AddStopTime(abe_1026, stop_time='7:46:00')
trip.AddStopTime(abe_1027, stop_time='7:46:30')
trip.AddStopTime(abe_1028, stop_time='7:47:00')
trip.AddStopTime(abe_1029, stop_time='7:47:30')
trip.AddStopTime(abe_1030, stop_time='7:48:00')
trip.AddStopTime(abe_1031, stop_time='7:48:30')
trip.AddStopTime(abe_1032, stop_time='7:49:00')
trip.AddStopTime(abe_1033, stop_time='7:49:30')
trip.AddStopTime(abe_1034, stop_time='7:50:00')
#7:55:00
#8:35:00
#9:15:00
#9:55:00
#10:35:00
# ... add every trip throughout the day
#SUN - 7:15:00
trip = SUN.AddTrip(schedule, headsign="To Downtown via Sunningdale")
trip.AddStopTime(sun_2001, stop_time='7:15:00')
trip.AddStopTime(sun_2002, stop_time='7:16:00')
trip.AddStopTime(sun_2003, stop_time='7:17:00')
trip.AddStopTime(sun_2004, stop_time='7:18:00')
trip.AddStopTime(sun_2005, stop_time='7:20:00')
trip.AddStopTime(sun_2006, stop_time='7:20:30')
trip.AddStopTime(sun_2007, stop_time='7:21:00')
trip.AddStopTime(sun_2008, stop_time='7:22:00')
trip.AddStopTime(sun_2009, stop_time='7:22:30')
trip.AddStopTime(sun_2010, stop_time='7:23:00')
trip.AddStopTime(sun_2011, stop_time='7:24:30')
trip.AddStopTime(sun_2012, stop_time='7:25:00')
trip.AddStopTime(sun_2013, stop_time='7:26:00')
trip.AddStopTime(sun_2014, stop_time='7:27:00')
trip.AddStopTime(sun_2015, stop_time='7:28:00')
trip.AddStopTime(sun_2016, stop_time='7:28:30')
trip.AddStopTime(sun_2017, stop_time='7:29:00')
trip.AddStopTime(sun_2018, stop_time='7:29:30')
trip.AddStopTime(sun_2019, stop_time='7:30:00')
trip.AddStopTime(sun_2020, stop_time='7:35:00')
trip.AddStopTime(sun_2021, stop_time='7:38:00')
trip.AddStopTime(sun_2022, stop_time='7:39:00')
trip.AddStopTime(sun_2023, stop_time='7:40:00')
trip.AddStopTime(sun_2024, stop_time='7:41:00')
trip.AddStopTime(sun_2025, stop_time='7:41:30')
trip.AddStopTime(sun_2026, stop_time='7:42:00')
trip.AddStopTime(sun_2027, stop_time='7:42:30')
trip.AddStopTime(sun_2028, stop_time='7:43:00')
trip.AddStopTime(sun_2029, stop_time='7:43:30')
trip.AddStopTime(sun_2030, stop_time='7:44:00')
trip.AddStopTime(sun_2031, stop_time='7:45:00')
trip.AddStopTime(sun_2032, stop_time='7:46:00')
trip.AddStopTime(sun_2033, stop_time='7:47:00')
trip.AddStopTime(sun_2034, stop_time='7:48:00')
trip.AddStopTime(sun_2035, stop_time='7:49:00')
trip.AddStopTime(sun_2036, stop_time='7:50:00')
#7:55:00
#8:35:00
#9:15:00
#9:55:00
#10:35:00
# ... add every trip throughout the day
#ABW- 7:15:00
trip = ABW.AddTrip(schedule, headsign="To Downtown via Athabasca West")
trip.AddStopTime(abw_3001, stop_time='7:15:00')
trip.AddStopTime(abw_3002, stop_time='7:15:30')
trip.AddStopTime(abw_3003, stop_time='7:16:00')
trip.AddStopTime(abw_3004, stop_time='7:16:30')
trip.AddStopTime(abw_3005, stop_time='7:17:00')
trip.AddStopTime(abw_3006, stop_time='7:17:30')
trip.AddStopTime(abw_3007, stop_time='7:18:00')
trip.AddStopTime(abw_3008, stop_time='7:18:30')
trip.AddStopTime(abw_3009, stop_time='7:19:00')
trip.AddStopTime(abw_3010, stop_time='7:20:00')
trip.AddStopTime(abw_3011, stop_time='7:21:00')
trip.AddStopTime(abw_3012, stop_time='7:21:30')
trip.AddStopTime(abw_3013, stop_time='7:22:00')
trip.AddStopTime(abw_3014, stop_time='7:22:30')
trip.AddStopTime(abw_3015, stop_time='7:23:00')
trip.AddStopTime(abw_3016, stop_time='7:23:30')
trip.AddStopTime(abw_3017, stop_time='7:24:00')
trip.AddStopTime(abw_3018, stop_time='7:25:00')
trip.AddStopTime(abw_3019, stop_time='7:25:30')
trip.AddStopTime(abw_3020, stop_time='7:26:00')
trip.AddStopTime(abw_3021, stop_time='7:27:00')
trip.AddStopTime(abw_3022, stop_time='7:28:00')
trip.AddStopTime(abw_3023, stop_time='7:29:00')
trip.AddStopTime(abw_3024, stop_time='7:30:00')
trip.AddStopTime(abw_3025, stop_time='7:32:00')
trip.AddStopTime(abw_3026, stop_time='7:33:00')
trip.AddStopTime(abw_3027, stop_time='7:35:00')
trip.AddStopTime(abw_3028, stop_time='7:36:00')
trip.AddStopTime(abw_3029, stop_time='7:38:00')
trip.AddStopTime(abw_3030, stop_time='7:39:00')
trip.AddStopTime(abw_3031, stop_time='7:39:30')
trip.AddStopTime(abw_3032, stop_time='7:40:00')
trip.AddStopTime(abw_3033, stop_time='7:41:00')
trip.AddStopTime(abw_3034, stop_time='7:42:00')
trip.AddStopTime(abw_3035, stop_time='7:43:00')
trip.AddStopTime(abw_3036, stop_time='7:44:00')
trip.AddStopTime(abw_3037, stop_time='7:45:00')
trip.AddStopTime(abw_3038, stop_time='7:46:00')
trip.AddStopTime(abw_3039, stop_time='7:47:00')
trip.AddStopTime(abw_3040, stop_time='7:48:00')
trip.AddStopTime(abw_3041, stop_time='7:49:00')
trip.AddStopTime(abw_3042, stop_time='7:50:00')
#7:55:00
#8:35:00
#9:15:00
#9:55:00
#10:35:00
# ... add every trip throughout the day
#WES - 7:15:00
trip = WES.AddTrip(schedule, headsign="To Downtown via Westmount")
trip.AddStopTime(wes_4001, stop_time='7:15:00')
trip.AddStopTime(wes_4002, stop_time='7:16:00')
trip.AddStopTime(wes_4003, stop_time='7:17:00')
trip.AddStopTime(wes_4004, stop_time='7:18:00')
trip.AddStopTime(wes_4005, stop_time='7:19:00')
trip.AddStopTime(wes_4006, stop_time='7:20:00')
trip.AddStopTime(wes_4007, stop_time='7:21:00')
trip.AddStopTime(wes_4008, stop_time='7:22:00')
trip.AddStopTime(wes_4009, stop_time='7:23:30')
trip.AddStopTime(wes_4010, stop_time='7:24:00')
trip.AddStopTime(wes_4011, stop_time='7:24:30')
trip.AddStopTime(wes_4012, stop_time='7:25:00')
trip.AddStopTime(wes_4013, stop_time='7:25:30')
trip.AddStopTime(wes_4014, stop_time='7:26:00')
trip.AddStopTime(wes_4015, stop_time='7:27:00')
trip.AddStopTime(wes_4016, stop_time='7:27:30')
trip.AddStopTime(wes_4017, stop_time='7:28:00')
trip.AddStopTime(wes_4018, stop_time='7:28:30')
trip.AddStopTime(wes_4019, stop_time='7:29:00')
trip.AddStopTime(wes_4020, stop_time='7:29:30')
trip.AddStopTime(wes_4021, stop_time='7:30:00')
trip.AddStopTime(wes_4022, stop_time='7:30:30')
trip.AddStopTime(wes_4023, stop_time='7:31:00')
trip.AddStopTime(wes_4024, stop_time='7:32:00')
trip.AddStopTime(wes_4025, stop_time='7:32:30')
trip.AddStopTime(wes_4026, stop_time='7:33:00')
trip.AddStopTime(wes_4027, stop_time='7:34:30')
trip.AddStopTime(wes_4028, stop_time='7:36:00')
trip.AddStopTime(wes_4029, stop_time='7:37:30')
trip.AddStopTime(wes_4030, stop_time='7:39:00')
trip.AddStopTime(wes_4031, stop_time='7:39:30')
trip.AddStopTime(wes_4032, stop_time='7:40:00')
trip.AddStopTime(wes_4033, stop_time='7:40:30')
trip.AddStopTime(wes_4034, stop_time='7:41:00')
trip.AddStopTime(wes_4035, stop_time='7:41:30')
trip.AddStopTime(wes_4036, stop_time='7:42:00')
trip.AddStopTime(wes_4037, stop_time='7:42:30')
trip.AddStopTime(wes_4038, stop_time='7:43:00')
trip.AddStopTime(wes_4039, stop_time='7:43:30')
trip.AddStopTime(wes_4040, stop_time='7:44:00')
trip.AddStopTime(wes_4041, stop_time='7:44:30')
trip.AddStopTime(wes_4042, stop_time='7:45:00')
trip.AddStopTime(wes_4043, stop_time='7:46:00')
trip.AddStopTime(wes_4044, stop_time='7:46:30')
trip.AddStopTime(wes_4045, stop_time='7:47:00')
trip.AddStopTime(wes_4046, stop_time='7:47:30')
trip.AddStopTime(wes_4047, stop_time='7:48:00')
trip.AddStopTime(wes_4048, stop_time='7:49:00')
trip.AddStopTime(wes_4049, stop_time='7:50:00')
#7:55:00
#8:35:00
#9:15:00
#9:55:00
#10:35:00
# ... add every trip throughout the day
# Validate The Feed (raises/reports problems found by transitfeed's checker)
schedule.Validate()
# Output Feed as a versioned GTFS zip
schedule.WriteGoogleTransitFeed('output/mjts_gtfs_' + mjts_version + '.zip')
| theshka/MJTS_GTFS | src/generate_feed.py | Python | mit | 26,420 | [
"MOOSE"
] | 9b66dce408a1cab9c6351688c6bbec1e28abc0ddb5f1c05a9f0bfebfde8b68b5 |
from __future__ import absolute_import
import sys,os
import numpy as np
import pandas as pd
# Make the shared NERSC anaconda site-packages importable; this must run
# before the rdkit imports below, which are only installed there.
sys.path.append('/global/project/projectdirs/openmsi/jupyterhub_libs/anaconda/lib/python2.7/site-packages')
from rdkit import Chem
from rdkit.Chem import PandasTools
# Parse the full ChEMBL 21 gzipped SDF into a DataFrame (one row per
# compound, with an RDKit Mol column) and cache it as a pickle for reuse.
sdf_file = '/project/projectdirs/openmsi/projects/compound_data/chembl/chembl_21.sdf.gz'
df = PandasTools.LoadSDF(sdf_file)
df.to_pickle('/project/projectdirs/openmsi/projects/compound_data/chembl/chembl.pkl')
| metabolite-atlas/metatlas | metatlas/interfaces/compounds/load_chembl.py | Python | bsd-3-clause | 468 | [
"RDKit"
] | 39a6074c3940e5abe2627d9e0289fd761290de2fb5d8671e4c7a340ade78fd03 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMsnbase(RPackage):
    """Base Functions and Classes for Mass Spectrometry and Proteomics.

    MSnbase provides infrastructure for manipulation, processing and
    visualisation of mass spectrometry and proteomics data, ranging from raw
    to quantitative and annotated data."""

    homepage = "https://bioconductor.org/packages/MSnbase"
    git      = "https://git.bioconductor.org/packages/MSnbase.git"

    # Bioconductor releases are pinned by git commit rather than tag.
    version('2.16.1', commit='4d88b4edd1af59474462b1b06ad0ec5831f3a878')
    version('2.10.1', commit='4d5899bc9c714f0b1a70cddd537cd4621b2b53b0')
    version('2.8.3', commit='ef883752c5e92d445647bc5b5d23d5df320db415')
    version('2.6.4', commit='46836860ce0281eef135303f2e2948303d67f68c')
    version('2.4.2', commit='c045d65daa730c7837852e6343a05cae9644ab5e')
    version('2.2.0', commit='d6e8fb7f106d05096fa9074da0f829ac8f02c197')

    # Minimum R / dependency versions rise with newer MSnbase releases;
    # the when= clauses mirror each release's DESCRIPTION file.
    depends_on('r@3.1:', type=('build', 'run'))
    depends_on('r@3.5:', when='@2.16.1:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.7.1:', type=('build', 'run'))
    depends_on('r-biobase@2.15.2:', type=('build', 'run'))
    depends_on('r-mzr@2.7.6:', type=('build', 'run'))
    depends_on('r-mzr@2.11.11:', when='@2.4.2:', type=('build', 'run'))
    depends_on('r-mzr@2.13.6:', when='@2.6.4:', type=('build', 'run'))
    depends_on('r-mzr@2.15.1:', when='@2.8.3:', type=('build', 'run'))
    depends_on('r-mzr@2.17.3:', when='@2.10.1:', type=('build', 'run'))
    depends_on('r-mzr@2.19.6:', when='@2.16.1:', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-protgenerics@1.5.1:', type=('build', 'run'))
    depends_on('r-protgenerics@1.19.3:', when='@2.16.1:', type=('build', 'run'))
    depends_on('r-biocparallel', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-iranges@2.13.28:', when='@2.6.4:', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-vsn', type=('build', 'run'))
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-impute', type=('build', 'run'))
    depends_on('r-pcamethods', type=('build', 'run'))
    depends_on('r-maldiquant@1.16:', type=('build', 'run'))
    depends_on('r-mzid@1.5.2:', type=('build', 'run'))
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-scales', when='@2.6.4:', type=('build', 'run'))
    depends_on('r-mass', when='@2.6.4:', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-msnbase/package.py | Python | lgpl-2.1 | 2,903 | [
"Bioconductor"
] | d576bc36609eda824b94239efc7e9d4822cad8a308937f81dee6f81664d6fd1f |
from SimpleCV.base import np, warnings
from SimpleCV.ImageClass import Image
class DFT:
    """
    **SUMMARY**

    The DFT class is the refactored class to create DFT filters which can
    be used to filter images by applying Digital Fourier Transform. This
    is a factory class to create various DFT filters.

    **PARAMETERS**

    Any of the following parameters can be supplied to create
    a simple DFT object.

    * *width*        - width of the filter
    * *height*       - height of the filter
    * *channels*     - number of channels of the filter
    * *size*         - size of the filter (width, height)
    * *_numpy*       - numpy array of the filter
    * *_image*       - SimpleCV.Image of the filter
    * *_dia*         - diameter of the filter
                       (applicable for gaussian, butterworth, notch)
    * *_type*        - Type of the filter
    * *_order*       - order of the butterworth filter
    * *_freqpass*    - frequency of the filter (lowpass, highpass, bandpass)
    * *_xCutoffLow*  - Lower horizontal cut off frequency for lowpass filter
    * *_yCutoffLow*  - Lower vertical cut off frequency for lowpass filter
    * *_xCutoffHigh* - Upper horizontal cut off frequency for highpass filter
    * *_yCutoffHigh* - Upper vertical cut off frequency for highpass filter

    **EXAMPLE**

    >>> gauss = DFT.createGaussianFilter(dia=40, size=(512, 512))
    >>> dft = DFT()
    >>> butterworth = dft.createButterworthFilter(dia=300, order=2, size=(300, 300))
    """
    # Class-level defaults; instances override them via **kwargs in __init__.
    width = 0
    height = 0
    channels = 1
    _numpy = None      # numpy array holding the raw filter values
    _image = None      # cached SimpleCV.Image representation of the filter
    _dia = 0           # diameter (gaussian / butterworth / notch filters)
    _type = ""         # e.g. "Gaussian", "Butterworth", "Lowpass", "Notch"
    _order = 0         # butterworth filter order
    _freqpass = ""     # "lowpass", "highpass" or "bandpass"
    _xCutoffLow = 0
    _yCutoffLow = 0
    _xCutoffHigh = 0
    _yCutoffHigh = 0

    def __init__(self, **kwargs):
        """Populate filter attributes from keyword arguments.

        Unknown keywords are silently ignored (matching the original
        behavior, which callers rely on — e.g. they pass image=...).
        """
        for key in kwargs:
            if key == 'width':
                self.width = kwargs[key]
            elif key == 'height':
                self.height = kwargs[key]
            elif key == 'channels':
                self.channels = kwargs[key]
            elif key == 'size':
                self.width, self.height = kwargs[key]
            elif key == 'numpyarray':
                self._numpy = kwargs[key]
            # NOTE(review): the 'image' kwarg is passed by most factory
            # methods but was deliberately(?) left disabled here; getImage()
            # regenerates the image from _numpy instead. Confirm intent
            # before enabling.
            #elif key == 'image':
            #    self._image = kwargs[key]
            elif key == 'dia':
                self._dia = kwargs[key]
            elif key == 'type':
                self._type = kwargs[key]
            elif key == 'order':
                self._order = kwargs[key]
            elif key == 'frequency':
                self._freqpass = kwargs[key]
            elif key == 'xCutoffLow':
                self._xCutoffLow = kwargs[key]
            elif key == 'yCutoffLow':
                self._yCutoffLow = kwargs[key]
            elif key == 'xCutoffHigh':
                self._xCutoffHigh = kwargs[key]
            elif key == 'yCutoffHigh':
                self._yCutoffHigh = kwargs[key]

    def __repr__(self):
        return "<SimpleCV.DFT Object: %s %s filter of size:(%d, %d) and channels: %d>" %(self._type, self._freqpass, self.width, self.height, self.channels)

    def __add__(self, flt):
        """Add two filters of the same size element-wise; returns a new DFT."""
        if not isinstance(flt, type(self)):
            warnings.warn("Provide SimpleCV.DFT object")
            return None
        if self.size() != flt.size():
            warnings.warn("Both SimpleCV.DFT object must have the same size")
            return None
        flt_numpy = self._numpy + flt._numpy
        flt_image = Image(flt_numpy)
        retVal = DFT(numpyarray=flt_numpy, image=flt_image, size=flt_image.size())
        return retVal

    def __invert__(self):
        # BUG FIX: the unary ~ protocol takes only self; the original
        # signature __invert__(self, flt) made `~flt` raise a TypeError.
        return self.invert()

    def _updateParams(self, flt):
        """Copy the descriptive parameters (not the data) from another filter."""
        self.channels = flt.channels
        self._dia = flt._dia
        self._type = flt._type
        self._order = flt._order
        self._freqpass = flt._freqpass
        self._xCutoffLow = flt._xCutoffLow
        self._yCutoffLow = flt._yCutoffLow
        self._xCutoffHigh = flt._xCutoffHigh
        self._yCutoffHigh = flt._yCutoffHigh

    def invert(self):
        """
        **SUMMARY**

        Invert the filter. All values will be subtracted from 255.

        **RETURNS**

        Inverted Filter

        **EXAMPLE**

        >>> flt = DFT.createGaussianFilter()
        >>> invertflt = flt.invert()
        """
        flt = self._numpy
        flt = 255 - flt
        img = Image(flt)
        invertedfilter = DFT(numpyarray=flt, image=img,
                             size=self.size(), type=self._type)
        invertedfilter._updateParams(self)
        return invertedfilter

    @classmethod
    def createGaussianFilter(self, dia=400, size=(64, 64), highpass=False):
        """
        **SUMMARY**

        Creates a gaussian filter of given size.

        **PARAMETERS**

        * *dia* - int - diameter of Gaussian filter
                - list - provide a list of three diameters to create
                  a 3 channel filter
        * *size* - size of the filter (width, height)
        * *highpass*: - bool
                      True: highpass filter
                      False: lowpass filter

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> gauss = DFT.createGaussianFilter(200, (512, 512), highpass=True)
        >>> gauss = DFT.createGaussianFilter([100, 120, 140], (512, 512),
                                             highpass=False)
        >>> img = Image('lenna')
        >>> gauss.applyFilter(img).show()
        """
        if isinstance(dia, list):
            if len(dia) != 3 and len(dia) != 1:
                warnings.warn("diameter list must be of size 1 or 3")
                return None
            # Stack one single-channel gaussian per diameter.
            stackedfilter = DFT()
            for d in dia:
                stackedfilter = stackedfilter._stackFilters(self.createGaussianFilter(d, size, highpass))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy, image=image,
                         dia=dia, channels=len(dia), size=size,
                         type="Gaussian", frequency=stackedfilter._freqpass)
            return retVal
        freqpass = "lowpass"
        sz_x, sz_y = size
        x0 = sz_x/2
        y0 = sz_y/2
        X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
        # D is the distance of each pixel from the filter center.
        D = np.sqrt((X-x0)**2+(Y-y0)**2)
        flt = 255*np.exp(-0.5*(D/dia)**2)
        if highpass:
            # Highpass gaussian is simply the inverted lowpass.
            flt = 255 - flt
            freqpass = "highpass"
        img = Image(flt)
        retVal = DFT(size=size, numpyarray=flt, image=img, dia=dia,
                     type="Gaussian", frequency=freqpass)
        return retVal

    @classmethod
    def createButterworthFilter(self, dia=400, size=(64, 64), order=2, highpass=False):
        """
        **SUMMARY**

        Creates a butterworth filter of given size and order.

        **PARAMETERS**

        * *dia* - int - diameter of Butterworth filter
                - list - provide a list of three diameters to create
                  a 3 channel filter
        * *size* - size of the filter (width, height)
        * *order* - order of the filter
        * *highpass*: - bool
                      True: highpass filter
                      False: lowpass filter

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createButterworthFilter(100, (512, 512), order=3,
                                              highpass=True)
        >>> flt = DFT.createButterworthFilter([100, 120, 140], (512, 512),
                                              order=3, highpass=False)
        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        if isinstance(dia, list):
            if len(dia) != 3 and len(dia) != 1:
                warnings.warn("diameter list must be of size 1 or 3")
                return None
            stackedfilter = DFT()
            for d in dia:
                stackedfilter = stackedfilter._stackFilters(self.createButterworthFilter(d, size, order, highpass))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy, image=image,
                         dia=dia, channels=len(dia), size=size,
                         type=stackedfilter._type, order=order,
                         frequency=stackedfilter._freqpass)
            return retVal
        freqpass = "lowpass"
        sz_x, sz_y = size
        x0 = sz_x/2
        y0 = sz_y/2
        X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
        D = np.sqrt((X-x0)**2+(Y-y0)**2)
        # Standard Butterworth transfer function scaled to 0..255.
        flt = 255/(1.0 + (D/dia)**(order*2))
        if highpass:
            # BUG FIX: the original assigned the unused name 'frequency',
            # so highpass butterworth filters were reported as "lowpass".
            freqpass = "highpass"
            flt = 255 - flt
        img = Image(flt)
        retVal = DFT(size=size, numpyarray=flt, image=img, dia=dia,
                     type="Butterworth", frequency=freqpass)
        return retVal

    @classmethod
    def createLowpassFilter(self, xCutoff, yCutoff=None, size=(64, 64)):
        """
        **SUMMARY**

        Creates a lowpass filter of given size and cut off frequencies.

        **PARAMETERS**

        * *xCutoff* - int - horizontal cut off frequency
                    - list - provide a list of three cut off frequencies
                      to create a 3 channel filter
        * *yCutoff* - int - vertical cut off frequency
                    - list - provide a list of three cut off frequencies
                      to create a 3 channel filter
        * *size* - size of the filter (width, height)

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createLowpassFilter(xCutoff=75, size=(320, 280))
        >>> flt = DFT.createLowpassFilter(xCutoff=[75], size=(320, 280))
        >>> flt = DFT.createLowpassFilter(xCutoff=[75, 100, 120],
                                          size=(320, 280))
        >>> flt = DFT.createLowpassFilter(xCutoff=75, yCutoff=35,
                                          size=(320, 280))
        >>> flt = DFT.createLowpassFilter(xCutoff=[75], yCutoff=[35],
                                          size=(320, 280))
        >>> flt = DFT.createLowpassFilter(xCutoff=[75, 100, 125], yCutoff=35,
                                          size=(320, 280))
        >>> # yCutoff will be [35, 35, 35]
        >>> flt = DFT.createLowpassFilter(xCutoff=[75, 113, 124],
                                          yCutoff=[35, 45, 90],
                                          size=(320, 280))
        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        if isinstance(xCutoff, list):
            if len(xCutoff) != 3 and len(xCutoff) != 1:
                warnings.warn("xCutoff list must be of size 3 or 1")
                return None
            # Normalize yCutoff to a list matching xCutoff's length.
            if isinstance(yCutoff, list):
                if len(yCutoff) != 3 and len(yCutoff) != 1:
                    warnings.warn("yCutoff list must be of size 3 or 1")
                    return None
                if len(yCutoff) == 1:
                    yCutoff = [yCutoff[0]]*len(xCutoff)
            else:
                yCutoff = [yCutoff]*len(xCutoff)
            stackedfilter = DFT()
            for xfreq, yfreq in zip(xCutoff, yCutoff):
                stackedfilter = stackedfilter._stackFilters(self.createLowpassFilter(xfreq, yfreq, size))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy, image=image,
                         xCutoffLow=xCutoff, yCutoffLow=yCutoff,
                         channels=len(xCutoff), size=size,
                         type=stackedfilter._type, order=self._order,
                         frequency=stackedfilter._freqpass)
            return retVal
        w, h = size
        # Clamp cutoffs to the Nyquist limit (half the filter dimension).
        xCutoff = np.clip(int(xCutoff), 0, w/2)
        if yCutoff is None:
            yCutoff = xCutoff
        yCutoff = np.clip(int(yCutoff), 0, h/2)
        flt = np.zeros((w, h))
        # Pass the four corners of the (unshifted) frequency plane, which
        # correspond to the low frequencies.
        flt[0:xCutoff, 0:yCutoff] = 255
        flt[0:xCutoff, h-yCutoff:h] = 255
        flt[w-xCutoff:w, 0:yCutoff] = 255
        flt[w-xCutoff:w, h-yCutoff:h] = 255
        img = Image(flt)
        lowpassFilter = DFT(size=size, numpyarray=flt, image=img,
                            type="Lowpass", xCutoffLow=xCutoff,
                            yCutoffLow=yCutoff, frequency="lowpass")
        return lowpassFilter

    @classmethod
    def createHighpassFilter(self, xCutoff, yCutoff=None, size=(64, 64)):
        """
        **SUMMARY**

        Creates a highpass filter of given size and cut off frequencies.

        **PARAMETERS**

        * *xCutoff* - int - horizontal cut off frequency
                    - list - provide a list of three cut off frequencies
                      to create a 3 channel filter
        * *yCutoff* - int - vertical cut off frequency
                    - list - provide a list of three cut off frequencies
                      to create a 3 channel filter
        * *size* - size of the filter (width, height)

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createHighpassFilter(xCutoff=75, size=(320, 280))
        >>> flt = DFT.createHighpassFilter(xCutoff=[75], size=(320, 280))
        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 100, 120],
                                           size=(320, 280))
        >>> flt = DFT.createHighpassFilter(xCutoff=75, yCutoff=35,
                                           size=(320, 280))
        >>> flt = DFT.createHighpassFilter(xCutoff=[75], yCutoff=[35],
                                           size=(320, 280))
        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 100, 125], yCutoff=35,
                                           size=(320, 280))
        >>> # yCutoff will be [35, 35, 35]
        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 113, 124],
                                           yCutoff=[35, 45, 90],
                                           size=(320, 280))
        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        if isinstance(xCutoff, list):
            if len(xCutoff) != 3 and len(xCutoff) != 1:
                warnings.warn("xCutoff list must be of size 3 or 1")
                return None
            # Normalize yCutoff to a list matching xCutoff's length.
            if isinstance(yCutoff, list):
                if len(yCutoff) != 3 and len(yCutoff) != 1:
                    warnings.warn("yCutoff list must be of size 3 or 1")
                    return None
                if len(yCutoff) == 1:
                    yCutoff = [yCutoff[0]]*len(xCutoff)
            else:
                yCutoff = [yCutoff]*len(xCutoff)
            stackedfilter = DFT()
            for xfreq, yfreq in zip(xCutoff, yCutoff):
                stackedfilter = stackedfilter._stackFilters(
                    self.createHighpassFilter(xfreq, yfreq, size))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy, image=image,
                         xCutoffHigh=xCutoff, yCutoffHigh=yCutoff,
                         channels=len(xCutoff), size=size,
                         type=stackedfilter._type, order=self._order,
                         frequency=stackedfilter._freqpass)
            return retVal
        # A highpass filter is the complement of the matching lowpass.
        lowpass = self.createLowpassFilter(xCutoff, yCutoff, size)
        w, h = lowpass.size()
        flt = lowpass._numpy
        flt = 255 - flt
        img = Image(flt)
        highpassFilter = DFT(size=size, numpyarray=flt, image=img,
                             type="Highpass", xCutoffHigh=xCutoff,
                             yCutoffHigh=yCutoff, frequency="highpass")
        return highpassFilter

    @classmethod
    def createBandpassFilter(self, xCutoffLow, xCutoffHigh, yCutoffLow=None,
                             yCutoffHigh=None, size=(64, 64)):
        """
        **SUMMARY**

        Creates a bandpass filter of given size and cut off frequencies.

        **PARAMETERS**

        * *xCutoffLow*  - int - horizontal lower cut off frequency
                        - list - provide a list of three cut off frequencies
        * *xCutoffHigh* - int - horizontal higher cut off frequency
                        - list - provide a list of three cut off frequencies
        * *yCutoffLow*  - int - vertical lower cut off frequency
                        - list - provide a list of three cut off frequencies
        * *yCutoffHigh* - int - vertical higher cut off frequency
                        - list - provide a list of three cut off frequencies
                          to create a 3 channel filter
        * *size* - size of the filter (width, height)

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createBandpassFilter(xCutoffLow=75,
                                           xCutoffHigh=190, size=(320, 280))
        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75],
                                           xCutoffHigh=[190], size=(320, 280))
        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75, 120, 132],
                                           xCutoffHigh=[190, 210, 234],
                                           size=(320, 280))
        >>> flt = DFT.createBandpassFilter(xCutoffLow=75, xCutoffHigh=190,
                                           yCutoffLow=60, yCutoffHigh=210,
                                           size=(320, 280))
        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75], xCutoffHigh=[190],
                                           yCutoffLow=[60], yCutoffHigh=[210],
                                           size=(320, 280))
        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75, 120, 132],
                                           xCutoffHigh=[190, 210, 234],
                                           yCutoffLow=[70, 110, 112],
                                           yCutoffHigh=[180, 220, 220],
                                           size=(320, 280))
        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        # Band = union of a lowpass (below the low cutoff) and a highpass
        # (above the high cutoff), clipped back into the 0..255 range.
        lowpass = self.createLowpassFilter(xCutoffLow, yCutoffLow, size)
        highpass = self.createHighpassFilter(xCutoffHigh, yCutoffHigh, size)
        lowpassnumpy = lowpass._numpy
        highpassnumpy = highpass._numpy
        bandpassnumpy = lowpassnumpy + highpassnumpy
        bandpassnumpy = np.clip(bandpassnumpy, 0, 255)
        img = Image(bandpassnumpy)
        bandpassFilter = DFT(size=size, image=img,
                             numpyarray=bandpassnumpy, type="bandpass",
                             xCutoffLow=xCutoffLow, yCutoffLow=yCutoffLow,
                             xCutoffHigh=xCutoffHigh, yCutoffHigh=yCutoffHigh,
                             frequency="bandpass", channels=lowpass.channels)
        return bandpassFilter

    @classmethod
    def createNotchFilter(self, dia1, dia2=None, cen=None, size=(64, 64), type="lowpass"):
        """
        **SUMMARY**

        Creates a disk shaped notch filter of given diameter at given center.

        **PARAMETERS**

        * *dia1* - int - diameter of the disk shaped notch
                 - list - provide a list of three diameters to create
                   a 3 channel filter
        * *dia2* - int - outer diameter of the disk shaped notch
                   used for bandpass filter
                 - list - provide a list of three diameters to create
                   a 3 channel filter
        * *cen* - tuple (x, y) center of the disk shaped notch
                  if not provided, it will be at the center of the
                  filter
        * *size* - size of the filter (width, height)
        * *type*: - lowpass or highpass filter

        **RETURNS**

        DFT notch filter

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> notch = DFT.createNotchFilter(dia1=200, dia2=300, cen=(200, 200),
                                          size=(512, 512))
        >>> img = Image('lenna')
        >>> notch.applyFilter(img).show()
        """
        if isinstance(dia1, list):
            if len(dia1) != 3 and len(dia1) != 1:
                warnings.warn("diameter list must be of size 1 or 3")
                return None
            # Normalize dia2 and cen to lists matching dia1's length.
            if isinstance(dia2, list):
                if len(dia2) != 3 and len(dia2) != 1:
                    warnings.warn("diameter list must be of size 3 or 1")
                    return None
                if len(dia2) == 1:
                    dia2 = [dia2[0]]*len(dia1)
            else:
                dia2 = [dia2]*len(dia1)
            if isinstance(cen, list):
                if len(cen) != 3 and len(cen) != 1:
                    warnings.warn("center list must be of size 3 or 1")
                    return None
                if len(cen) == 1:
                    cen = [cen[0]]*len(dia1)
            else:
                cen = [cen]*len(dia1)
            stackedfilter = DFT()
            for d1, d2, c in zip(dia1, dia2, cen):
                stackedfilter = stackedfilter._stackFilters(self.createNotchFilter(d1, d2, c, size, type))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy, image=image,
                         dia=dia1+dia2, channels=len(dia1), size=size,
                         type=stackedfilter._type,
                         frequency=stackedfilter._freqpass)
            return retVal
        w, h = size
        if cen is None:
            cen = (w/2, h/2)
        a, b = cen
        # Boolean disk mask of radius dia1/2 around the center.
        y, x = np.ogrid[-a:w-a, -b:h-b]
        r = dia1/2
        mask = x*x + y*y <= r*r
        flt = np.ones((w, h))
        flt[mask] = 255
        if type == "highpass":
            flt = 255-flt
        if dia2 is not None:
            # Second, larger disk turns the notch into a band (ring) filter.
            a, b = cen
            y, x = np.ogrid[-a:w-a, -b:h-b]
            r = dia2/2
            mask = x*x + y*y <= r*r
            flt1 = np.ones((w, h))
            flt1[mask] = 255
            flt1 = 255 - flt1
            flt = flt + flt1
            # BUG FIX: np.clip returns a new array; the original discarded
            # the result, leaving values above 255 in the filter.
            flt = np.clip(flt, 0, 255)
            type = "bandpass"
        img = Image(flt)
        notchfilter = DFT(size=size, numpyarray=flt, image=img, dia=dia1,
                          type="Notch", frequency=type)
        return notchfilter

    def applyFilter(self, image, grayscale=False):
        """
        **SUMMARY**

        Apply the DFT filter to given image.

        **PARAMETERS**

        * *image* - SimpleCV.Image image
        * *grayscale* - if this value is True we perform the operation on the
                        DFT of the gray version of the image and the result is
                        gray image. If grayscale is False we perform the
                        operation on each channel and then recombine them to
                        create the result.

        **RETURNS**

        Filtered Image.

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> img = Image('lenna')
        >>> notch.applyFilter(img).show()
        """
        if self.width == 0 or self.height == 0:
            warnings.warn("Empty Filter. Returning the image.")
            return image
        w, h = image.size()
        if grayscale:
            image = image.toGray()
        # (leftover debug print removed)
        fltImg = Image(self._numpy)
        # Resize the filter to match the image before applying it.
        if fltImg.size() != image.size():
            fltImg = fltImg.resize(w, h)
        filteredImage = image.applyDFTFilter(fltImg, grayscale)
        return filteredImage

    def getImage(self):
        """
        **SUMMARY**

        Get the SimpleCV Image of the filter

        **RETURNS**

        Image of the filter.

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> notch.getImage().show()
        """
        if isinstance(self._image, type(None)):
            if isinstance(self._numpy, type(None)):
                warnings.warn("Filter doesn't contain any image")
                # BUG FIX: previously fell through and crashed converting
                # None; return None explicitly after warning.
                return None
            # Lazily build (and cache) the image from the numpy data.
            self._image = Image(self._numpy.astype(np.uint8))
        return self._image

    def getNumpy(self):
        """
        **SUMMARY**

        Get the numpy array of the filter

        **RETURNS**

        numpy array of the filter.

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> notch.getNumpy()
        """
        if isinstance(self._numpy, type(None)):
            if isinstance(self._image, type(None)):
                warnings.warn("Filter doesn't contain any image")
                # BUG FIX: previously fell through and crashed calling a
                # method on None; return None explicitly after warning.
                return None
            self._numpy = self._image.getNumpy()
        return self._numpy

    def getOrder(self):
        """
        **SUMMARY**

        Get order of the butterworth filter

        **RETURNS**

        order of the butterworth filter

        **EXAMPLE**

        >>> flt = DFT.createButterworthFilter(order=4)
        >>> print flt.getOrder()
        """
        return self._order

    def size(self):
        """
        **SUMMARY**

        Get size of the filter

        **RETURNS**

        tuple of (width, height)

        **EXAMPLE**

        >>> flt = DFT.createGaussianFilter(size=(380, 240))
        >>> print flt.size()
        """
        return (self.width, self.height)

    def getDia(self):
        """
        **SUMMARY**

        Get diameter of the filter

        **RETURNS**

        diameter of the filter

        **EXAMPLE**

        >>> flt = DFT.createGaussianFilter(dia=200, size=(380, 240))
        >>> print flt.getDia()
        """
        return self._dia

    def getType(self):
        """
        **SUMMARY**

        Get type of the filter

        **RETURNS**

        type of the filter

        **EXAMPLE**

        >>> flt = DFT.createGaussianFilter(dia=200, size=(380, 240))
        >>> print flt.getType() # Gaussian
        """
        return self._type

    def stackFilters(self, flt1, flt2):
        """
        **SUMMARY**

        Stack three single channel filters of the same size to create
        a 3 channel filter.

        **PARAMETERS**

        * *flt1* - second filter to be stacked
        * *flt2* - third filter to be stacked

        **RETURNS**

        DFT filter

        **EXAMPLE**

        >>> flt1 = DFT.createGaussianFilter(dia=200, size=(380, 240))
        >>> flt2 = DFT.createGaussianFilter(dia=100, size=(380, 240))
        >>> flt3 = DFT.createGaussianFilter(dia=70, size=(380, 240))
        >>> flt = flt1.stackFilters(flt2, flt3) # 3 channel filter
        """
        if not(self.channels == 1 and flt1.channels == 1 and flt2.channels == 1):
            warnings.warn("Filters must have only 1 channel")
            return None
        if not (self.size() == flt1.size() and self.size() == flt2.size()):
            warnings.warn("All the filters must be of same size")
            return None
        numpyflt = self._numpy
        numpyflt1 = flt1._numpy
        numpyflt2 = flt2._numpy
        flt = np.dstack((numpyflt, numpyflt1, numpyflt2))
        img = Image(flt)
        stackedfilter = DFT(size=self.size(), numpyarray=flt, image=img, channels=3)
        return stackedfilter

    def _stackFilters(self, flt1):
        """
        **SUMMARY**

        stack two filters of same size. channels don't matter.

        **PARAMETERS**

        * *flt1* - second filter to be stacked

        **RETURNS**

        DFT filter
        """
        # An empty accumulator (no data yet) just adopts the incoming filter;
        # this lets callers fold a list of filters starting from DFT().
        if isinstance(self._numpy, type(None)):
            return flt1
        if not self.size() == flt1.size():
            warnings.warn("All the filters must be of same size")
            return None
        numpyflt = self._numpy
        numpyflt1 = flt1._numpy
        flt = np.dstack((numpyflt, numpyflt1))
        stackedfilter = DFT(size=self.size(), numpyarray=flt,
                            channels=self.channels+flt1.channels,
                            type=self._type, frequency=self._freqpass)
        return stackedfilter
| jayrambhia/SimpleCV2 | SimpleCV/DFT.py | Python | bsd-3-clause | 28,461 | [
"Gaussian"
] | 28302604b51608b377e56bf216c7c8e6ce0ecae3052bb5afe1dcdab2537dd132 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 Report Emailed To BigQuery
Pulls a DV360 Report from a gMail email into BigQuery.
- The person executing this recipe must be the recipient of the email.
- Schedule a DV360 report to be sent to an email address you control.
- Or set up a redirect rule to forward a report you already receive.
- The report can be sent as an attachment or a link.
- Ensure this recipe runs after the report is emailed daily.
- Give a regular expression to match the email subject.
- Configure the destination in BigQuery to write the data.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-editable recipe parameters. Each key becomes a field the operator can
# override when deploying the DAG; the inline comments describe each field.
INPUTS = {
'auth_read':'user', # Credentials used for reading data.
'email':'', # Email address report was sent to.
'subject':'.*', # Regular expression to match subject. Double escape backslashes.
'dataset':'', # Existing dataset in BigQuery.
'table':'', # Name of table to be written to.
'dbm_schema':'[]', # Schema provided in JSON list format or empty list.
'is_incremental_load':False, # Append report data to table based on date column, de-duplicates.
}

# StarThinker recipe definition: a single 'email' task that reads a DV360
# report delivered by noreply-dv360@google.com (as a link or attachment) and
# writes it to a BigQuery table. The {'field': ...} dicts are placeholders
# that DAG_Factory resolves against INPUTS at generation time.
RECIPE = {
'tasks':[
{
'email':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'read':{
'from':'noreply-dv360@google.com',
'to':{'field':{'name':'email','kind':'string','order':1,'default':'','description':'Email address report was sent to.'}},
'subject':{'field':{'name':'subject','kind':'string','order':2,'default':'.*','description':'Regular expression to match subject. Double escape backslashes.'}},
'link':'https://storage.googleapis.com/.*',
'attachment':'.*'
},
'write':{
'bigquery':{
'dataset':{'field':{'name':'dataset','kind':'string','order':3,'default':'','description':'Existing dataset in BigQuery.'}},
'table':{'field':{'name':'table','kind':'string','order':4,'default':'','description':'Name of table to be written to.'}},
'schema':{'field':{'name':'dbm_schema','kind':'json','order':5,'default':'[]','description':'Schema provided in JSON list format or empty list.'}},
'header':True,
'is_incremental_load':{'field':{'name':'is_incremental_load','kind':'boolean','order':6,'default':False,'description':'Append report data to table based on date column, de-duplicates.'}}
}
}
}
}
]
}

# Build the Airflow DAG from the recipe. 'dag' must exist at module level so
# Airflow's DagBag discovers it on import.
dag_maker = DAG_Factory('email_dv360_to_bigquery', RECIPE, INPUTS)
dag = dag_maker.generate()

# When run as a script, print the equivalent StarThinker command line
# instead of registering with Airflow.
if __name__ == "__main__":
dag_maker.print_commandline()
| google/starthinker | dags/email_dv360_to_bigquery_dag.py | Python | apache-2.0 | 5,664 | [
"VisIt"
] | 55710ee201ae973383cfa26f85feecb591114bfcc5167391f9dcfc43d530d7c1 |
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
class BenchmarksVariantGenerator(testsuite.VariantGenerator):
  # Both --noopt and --stressopt are very slow. Add TF but without
  # always opt to match the way the benchmarks are run for performance
  # testing.
  def FilterVariantsByTest(self, testcase):
    # Tests explicitly marked as standard-variant-only keep the standard
    # variant; everything else runs with the fast variant set.
    outcomes = testcase.outcomes
    if not outcomes:
      return self.fast_variants
    if statusfile.OnlyStandardVariant(outcomes):
      return self.standard_variant
    return self.fast_variants

  def GetFlagSets(self, testcase, variant):
    # Always use the fast-variant flag definitions, regardless of variant.
    flag_sets = testsuite.FAST_VARIANT_FLAGS
    return flag_sets[variant]
class BenchmarksTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(BenchmarksTestSuite, self).__init__(name, root)
self.testroot = os.path.join(root, "data")
def ListTests(self, context):
tests = []
for test in [
"kraken/ai-astar",
"kraken/audio-beat-detection",
"kraken/audio-dft",
"kraken/audio-fft",
"kraken/audio-oscillator",
"kraken/imaging-darkroom",
"kraken/imaging-desaturate",
"kraken/imaging-gaussian-blur",
"kraken/json-parse-financial",
"kraken/json-stringify-tinderbox",
"kraken/stanford-crypto-aes",
"kraken/stanford-crypto-ccm",
"kraken/stanford-crypto-pbkdf2",
"kraken/stanford-crypto-sha256-iterative",
"octane/box2d",
"octane/code-load",
"octane/crypto",
"octane/deltablue",
"octane/earley-boyer",
"octane/gbemu-part1",
"octane/mandreel",
"octane/navier-stokes",
"octane/pdfjs",
"octane/raytrace",
"octane/regexp",
"octane/richards",
"octane/splay",
"octane/typescript",
"octane/zlib",
"sunspider/3d-cube",
"sunspider/3d-morph",
"sunspider/3d-raytrace",
"sunspider/access-binary-trees",
"sunspider/access-fannkuch",
"sunspider/access-nbody",
"sunspider/access-nsieve",
"sunspider/bitops-3bit-bits-in-byte",
"sunspider/bitops-bits-in-byte",
"sunspider/bitops-bitwise-and",
"sunspider/bitops-nsieve-bits",
"sunspider/controlflow-recursive",
"sunspider/crypto-aes",
"sunspider/crypto-md5",
"sunspider/crypto-sha1",
"sunspider/date-format-tofte",
"sunspider/date-format-xparb",
"sunspider/math-cordic",
"sunspider/math-partial-sums",
"sunspider/math-spectral-norm",
"sunspider/regexp-dna",
"sunspider/string-base64",
"sunspider/string-fasta",
"sunspider/string-tagcloud",
"sunspider/string-unpack-code",
"sunspider/string-validate-input"]:
tests.append(testcase.TestCase(self, test))
return tests
def GetFlagsForTestCase(self, testcase, context):
result = []
result += context.mode_flags
if testcase.path.startswith("kraken"):
result.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
elif testcase.path.startswith("octane"):
result.append(os.path.join(self.testroot, "octane/base.js"))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
if testcase.path.startswith("octane/gbemu"):
result.append(os.path.join(self.testroot, "octane/gbemu-part2.js"))
elif testcase.path.startswith("octane/typescript"):
result.append(os.path.join(self.testroot,
"octane/typescript-compiler.js"))
result.append(os.path.join(self.testroot, "octane/typescript-input.js"))
elif testcase.path.startswith("octane/zlib"):
result.append(os.path.join(self.testroot, "octane/zlib-data.js"))
result += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
return testcase.flags + result
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def DownloadData(self):
print "Benchmarks download is deprecated. It's part of DEPS."
def rm_dir(directory):
directory_name = os.path.join(self.root, directory)
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
# Clean up old directories and archive files.
rm_dir('kraken')
rm_dir('octane')
rm_dir('sunspider')
archive_files = [f for f in os.listdir(self.root)
if f.startswith("downloaded_") or
f.startswith("CHECKED_OUT_")]
if len(archive_files) > 0:
print "Clobber outdated test archives ..."
for f in archive_files:
os.remove(os.path.join(self.root, f))
def _VariantGeneratorFactory(self):
return BenchmarksVariantGenerator
def GetSuite(name, root):
  # Entry point used by the test runner to instantiate this suite.
  suite = BenchmarksTestSuite(name, root)
  return suite
| macchina-io/macchina.io | platform/JS/V8/v8/test/benchmarks/testcfg.py | Python | apache-2.0 | 6,637 | [
"Gaussian"
] | 42d55b3e7681d8912a38c805764c57758d1b45bfe0dccc3427d69c3fb68db063 |
import numpy as np
import ase.units as units
from gpaw import restart, GPAW
from gpaw.poisson import PoissonSolver
from gpaw.dipole_correction import DipoleCorrection
# Regression check for the dipole-correction tutorial: compute the total
# energy of three precomputed GPAW calculations ('zero', 'periodic',
# 'corrected') and verify the energy shifts and the work functions on the
# two sides of the slab against reference values.
energies = []
for name in ['zero', 'periodic', 'corrected']:
if name == 'corrected':
# The 'corrected' run uses a Poisson solver wrapped with a dipole
# correction along axis 2 (the surface normal).
calc = GPAW(name, txt=None,
poissonsolver=DipoleCorrection(PoissonSolver(), 2))
else:
calc = GPAW(name, txt=None)
energies.append(calc.get_potential_energy())
print energies
# Zero-boundary and periodic energies must agree; the dipole correction is
# expected to shift the energy by ~0.0409 eV relative to 'zero'.
assert abs(energies[1] - energies[0]) < 0.003
assert abs(energies[2] - energies[0] - 0.0409) < 0.003
efermi = calc.get_fermi_level()
calc.restore_state()
# Plane-averaged electrostatic potential (eV) along the slab normal;
# 'calc' here is the last ('corrected') calculation from the loop.
v = (calc.hamiltonian.vHt_g * units.Hartree).mean(0).mean(0)
# Work functions on the two faces of the slab: potential at each boundary
# minus the Fermi level. They differ because of the slab's dipole.
w1 = v[0] - efermi
w2 = v[-1] - efermi
print w1, w2
assert abs(w1 - 4.359) < 0.01
assert abs(w2 - 2.556) < 0.01
| robwarm/gpaw-symm | doc/tutorials/dipole_correction/check.py | Python | gpl-3.0 | 805 | [
"ASE",
"GPAW"
] | 7e6c8927dbf365d9f71fa2ed6e75e6728fb20d4d13065eaf609caeb2235ce04f |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import copy
import json

import numpy as np

from ..util import unnp
from ..physconst import psi_bohr2angstroms
from ..exceptions import *
from .to_string import formula_generator
def to_schema(molrec, dtype, units='Angstrom', return_type='json'):
    """Translate Psi4 json Molecule spec into json from other schemas.

    Parameters
    ----------
    molrec : dict
        Psi4 json Molecule spec.
    dtype : {'psi4', 1}
        Molecule schema format.
    units : {'Angstrom', 'Bohr'}
        Units in which to write string. There is not an option to write in
        intrinsic/input units. Some `dtype` may not allow all units.
    return_type : {'json', 'yaml'}
        Serialization format string to return.

    Returns
    -------
    qcschema : str
        String of the `return_type` repr of `molrec`.

    Raises
    ------
    ValidationError
        If `units`/`return_type` is unrecognized or disallowed for `dtype`.
    TypeError
        If `dtype` is unrecognized.
    """
    qcschema = {}

    # Geometry conversion factor between the record's stored units and the
    # requested output units.
    if molrec['units'] == 'Angstrom' and units == 'Angstrom':
        factor = 1.
    elif molrec['units'] == 'Angstrom' and units == 'Bohr':
        # Prefer the exact user-supplied conversion when present.
        if 'input_units_to_au' in molrec:
            factor = molrec['input_units_to_au']
        else:
            factor = 1. / psi_bohr2angstroms
    elif molrec['units'] == 'Bohr' and units == 'Angstrom':
        factor = psi_bohr2angstroms
    elif molrec['units'] == 'Bohr' and units == 'Bohr':
        factor = 1.
    else:
        raise ValidationError("""units must be 'Angstrom'/'Bohr', not {}""".format(units))
    # `geom` is flat (3 * nat,), so the atom count divides by three.
    geom = np.array(molrec['geom']) * factor
    nat = geom.shape[0] // 3
    name = molrec.get('name', formula_generator(molrec['elem']))
    # tagline = """auto-generated by qcdb from molecule {}""".format(name)

    if dtype == 'psi4':
        # BUG FIX: `copy.deepcopy` was called without importing `copy`,
        # so this branch always raised NameError; `copy` is now imported.
        qcschema = copy.deepcopy(molrec)
        qcschema['geom'] = geom
        qcschema['units'] = units
        qcschema['name'] = name
    elif dtype == 1:
        if units != 'Bohr':
            raise ValidationError("""QC_JSON_Schema {} allows only 'Bohr' coordinates, not {}.""".format(dtype, units))
        qcschema['symbols'] = np.array(molrec['elem'])
        qcschema['geometry'] = geom
        qcschema['masses'] = np.array(molrec['mass'])
        qcschema['name'] = name
        qcschema['molecular_charge'] = molrec['molecular_charge']
        qcschema['molecular_multiplicity'] = molrec['molecular_multiplicity']
        qcschema['real'] = np.array(molrec['real'])
        # Atom indices partitioned at the fragment separators.
        fidx = np.split(np.arange(nat), molrec['fragment_separators'])
        qcschema['fragments'] = [fr.tolist() for fr in fidx]
        qcschema['fragment_charges'] = np.array(molrec['fragment_charges'])
        qcschema['fragment_multiplicities'] = np.array(molrec['fragment_multiplicities'])
        qcschema['fix_com'] = molrec['fix_com']
        qcschema['fix_orientation'] = molrec['fix_orientation']
    else:
        raise TypeError("dtype not understood, valid options are {'psi4', 1}. Found {}.".format(dtype))

    # hmm, psi4/qcdb for provenance or does psi molrec need a passthrough field to preserve?
    #qcschema['provenance'] creator, version, routine

    # Strip numpy types so the payload serializes cleanly.
    qcschema = unnp(qcschema)
    if return_type == 'json':
        return json.dumps(qcschema)
    elif return_type == 'yaml':
        import yaml
        return yaml.dump(qcschema)
    else:
        raise ValidationError("""Return type ({}) not recognized.""".format(return_type))
| amjames/psi4 | psi4/driver/qcdb/molparse/to_schema.py | Python | lgpl-3.0 | 4,197 | [
"Psi4"
] | 7c8be9b20ef3b75dd7b31ba5274923e91b100dbd04cbb1e03a58261c87d03c57 |
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from common import create_studio_user
from django.contrib.auth.models import Group
from auth.authz import get_course_groupname_for_role, get_user_by_email
from nose.tools import assert_true
# Credentials shared by every user the acceptance-test steps create.
PASSWORD = 'test'
EMAIL_EXTENSION = '@edx.org'
@step(u'(I am viewing|s?he views) the course team settings')
def view_grading_settings(_step, whom):
    """Open Course Settings and follow the link to the course team page."""
    world.click_course_settings()
    world.css_click('li.nav-course-settings-team a')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
    """Create a Studio user, optionally granting course staff/admin roles."""
    user = create_studio_user(
        uname=name, password=PASSWORD, email=name + EMAIL_EXTENSION)
    if not has_extra_perms:
        return
    location = world.scenario_dict["COURSE"].location
    # Admins carry the staff role in addition to instructor.
    roles = ("staff", "instructor") if role_name == "admin" else ("staff",)
    for role in roles:
        group, __ = Group.objects.get_or_create(
            name=get_course_groupname_for_role(location, role))
        user.groups.add(group)
    user.save()
@step(u'I add "([^"]*)" to the course team')
def add_other_user(_step, name):
    """Fill in and submit the new-team-member form for the named user."""
    world.css_click('a.create-user-button')
    world.wait(0.5)
    email_field = 'input#user-email-input'
    world.css_fill(email_field, name + EMAIL_EXTENSION)
    # Firefox needs the change event raised explicitly after css_fill.
    if world.is_firefox():
        world.trigger_event(email_field)
    world.css_click('form.create-user button.action-primary')
@step(u'I delete "([^"]*)" from the course team')
def delete_other_user(_step, name):
    """Remove the named user from the team, then confirm the warning prompt."""
    email = name + EMAIL_EXTENSION
    world.css_click(
        '.user-item .item-actions a.remove-user[data-id="{email}"]'.format(email=email))
    # Wait out the prompt animation; there is no reliable success condition
    # that works on both latest Chrome and jenkins.
    world.wait(.5)
    world.css_click(".wrapper-prompt-warning .action-primary")
@step(u's?he deletes me from the course team')
def other_delete_self(_step):
    """As the other user, remove the default test user from the team."""
    world.css_click(
        '.user-item .item-actions a.remove-user[data-id="{email}"]'.format(
            email="robot+studio@edx.org"))
    # Let the confirmation prompt finish animating before clicking it.
    world.wait(.5)
    world.css_click(".wrapper-prompt-warning .action-primary")
@step(u'I make "([^"]*)" a course team admin')
def make_course_team_admin(_step, name):
    """Click the add-admin-role control for the named user."""
    email = name + EMAIL_EXTENSION
    world.css_click(
        '.user-item[data-email="{email}"] .user-actions .add-admin-role'.format(
            email=email))
@step(u'I remove admin rights from ("([^"]*)"|myself)')
def remove_course_team_admin(_step, outer_capture, name):
    """Click the remove-admin-role control for the named user or myself."""
    email = (world.scenario_dict["USER"].email
             if outer_capture == "myself"
             else name + EMAIL_EXTENSION)
    world.css_click(
        '.user-item[data-email="{email}"] .user-actions .remove-admin-role'.format(
            email=email))
@step(u'"([^"]*)" logs in$')
def other_user_login(_step, name):
    """End the current browser session and sign in as the named user."""
    # Dropping cookies forces a fresh, unauthenticated session.
    world.browser.cookies.delete()
    world.visit('/')
    signin_css = 'a.action-signin'
    world.is_css_present(signin_css)
    world.css_click(signin_css)

    def fill_login_form():
        login_form = world.browser.find_by_css('form#login_form')
        login_form.find_by_name('email').fill(name + EMAIL_EXTENSION)
        login_form.find_by_name('password').fill(PASSWORD)
        login_form.find_by_name('submit').click()
    # The form may not be attached/interactable yet; retry until it succeeds.
    world.retry_on_exception(fill_login_form)
    # Landing on the dashboard (new-course button) confirms the login worked.
    assert_true(world.is_css_present('.new-course-button'))
    world.scenario_dict['USER'] = get_user_by_email(name + EMAIL_EXTENSION)
@step(u'I( do not)? see the course on my page')
@step(u's?he does( not)? see the course on (his|her) page')
def see_course(_step, inverted, gender='self'):
    """Assert the scenario course is (or is not) listed on the dashboard."""
    titles = [elem.html for elem in world.css_find('h3.course-title', wait_time=1)]
    expected = world.scenario_dict['COURSE'].display_name
    if inverted:
        assert expected not in titles
    else:
        assert expected in titles
@step(u'"([^"]*)" should( not)? be marked as an admin')
def marked_as_admin(_step, name, inverted):
    """Assert the admin flag is shown (or hidden) beside the named user."""
    flag_css = '.user-item[data-email="{email}"] .flag-role.flag-role-admin'.format(
        email=name + EMAIL_EXTENSION)
    check = world.is_css_not_present if inverted else world.is_css_present
    assert check(flag_css)
@step(u'I should( not)? be marked as an admin')
def self_marked_as_admin(_step, inverted):
    # Delegate to the named-user assertion using the default test user.
    return marked_as_admin(_step, "robot+studio", inverted)
@step(u'I can(not)? delete users')
@step(u's?he can(not)? delete users')
def can_delete_users(_step, inverted):
    """Assert the remove-user controls are present (or absent) on the page."""
    check = world.is_css_not_present if inverted else world.is_css_present
    assert check('a.remove-user')
@step(u'I can(not)? add users')
@step(u's?he can(not)? add users')
def can_add_users(_step, inverted):
    """Assert the add-user control is present (or absent) on the page."""
    check = world.is_css_not_present if inverted else world.is_css_present
    assert check('a.create-user-button')
@step(u'I can(not)? make ("([^"]*)"|myself) a course team admin')
@step(u's?he can(not)? make ("([^"]*)"|me) a course team admin')
def can_make_course_admin(_step, inverted, outer_capture, name):
    """Assert the add-admin button is present (or absent) for a user."""
    email = (world.scenario_dict["USER"].email
             if outer_capture == "myself"
             else name + EMAIL_EXTENSION)
    button_css = '.user-item[data-email="{email}"] .add-admin-role'.format(email=email)
    check = world.is_css_not_present if inverted else world.is_css_present
    assert check(button_css)
| wwj718/edx-video | cms/djangoapps/contentstore/features/course-team.py | Python | agpl-3.0 | 5,945 | [
"VisIt"
] | 7e4ea27b54295b3b93b451b2810c2f7367974f32a1094f13b0133ae252c8217e |
#!/usr/bin/env python
# -*- coding: utf-8
"""
Example generation modified from the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import re
import urllib2
import gzip
import posixpath
# Pillow installs the Image module under the PIL package; fall back to the
# legacy standalone Image module when PIL is unavailable.
try:
    from PIL import Image
except:
    import Image
import matplotlib
# Non-interactive backend so figures render without a display (CI builds).
matplotlib.use('Agg')
import token
import tokenize
# Captured script output embedded in the rst is truncated after this many lines.
MAX_NB_LINES_STDOUT = 20
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """File-like object that duplicates every write to two underlying streams."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Mirror the payload to both sinks.
        for sink in (self.file1, self.file2):
            sink.write(data)

    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
    """Helper function to get data over http or from a local file"""
    if url.startswith('http://'):
        try:
            resp = urllib2.urlopen(url)
            # The server may send gzip-compressed content; honor the header.
            encoding = resp.headers.dict.get('content-encoding', 'plain')
            data = resp.read()
            if encoding == 'plain':
                pass
            elif encoding == 'gzip':
                data = StringIO(data)
                data = gzip.GzipFile(fileobj=data).read()
            else:
                raise RuntimeError('unknown encoding')
        except urllib2.HTTPError as err:
            # Best effort: a failed download degrades to empty content
            # rather than aborting the whole documentation build.
            print 'Error downloading %s: %s' % (url, str(err))
            return ''
    else:
        with open(url, 'r') as fid:
            data = fid.read()
        fid.close()  # redundant: the with-block already closed the file
    return data
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index

    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)

    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Scan forward keeping a nesting depth so inner blocks are skipped;
        # tags are single characters ('{' / '}').
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        sel = str_in[start_pos + 1:pos]
        return sel

    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary; recurse into the nested block
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            # Advance past the value to the separator before the next key.
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out

    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)

    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx

    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """

    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=[], relative=False):
        # NOTE(review): mutable default for extra_modules_test — it is only
        # read here, but a None default would be safer.
        self.doc_url = doc_url
        self.relative = relative
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                # NOTE(review): adjacent literals yield "...localpackage..." —
                # the message is missing a space between the two fragments.
                raise ValueError('You have to use relative=True for the local'
                                 'package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)

    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        # Try the shortened module first, then any extra modules configured.
        modules_test = [cobj['module_short']] + self.extra_modules_test
        for module in modules_test:
            full_name = module + '.' + cobj['name']
            if full_name in self._searchindex['objects']:
                value = self._searchindex['objects'][full_name]
                if isinstance(value, dict):
                    value = value[value.keys()[0]]
                fname_idx = value[0]
            elif module in self._searchindex['objects']:
                value = self._searchindex['objects'][module]
                if cobj['name'] in value.keys():
                    fname_idx = value[cobj['name']][0]
            if fname_idx is not None:
                break
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            # Fetch (and cache) the page to confirm the anchor really exists.
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page
            url = False
            for comb_name in ['%s.%s' % (module, cobj['name']) for module
                              in modules_test]:
                if html.find(comb_name) >= 0:
                    url = link + '#' + comb_name
            link = url
        else:
            link = False
        return link

    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found

        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobi['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).

        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) 4i seconds
.. raw:: html
<div class="social-button-container">
<div class="social-button">
<a href="https://twitter.com/share" class="twitter-share-button">Tweet</a>
</div>
<div class="social-button">
<g:plusone annotation="inline" width="120" size="medium"></g:plusone>
</div>
<div class="social-button">
<div id="fb-root"></div>
<script>(function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0];
if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = "//connect.facebook.net/en_US/all.js#xfbml=1";
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));
</script>
<div class="fb-like" data-send="false" data-width="450" data-show-faces="false"></div>
</div>
</div>
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
    """ Extract a module-level docstring, if any

    Returns (docstring, first paragraph, 1-based row after the docstring).
    """
    lines = file(filename).readlines()
    start_row = 0
    # Skip a shebang line so the tokenizer starts at real code; remember the
    # offset so the reported row stays relative to the original file.
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(iter(lines).next)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        # Skip layout tokens; the first significant token decides the outcome.
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(line.rstrip() for line in
                                   docstring.split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(app.builder.srcdir + '/../../' + 'examples')
    # plot_gallery may arrive as a string ("True"/"False") from the command
    # line or as a bool from conf.py; eval handles the former, bool the latter.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    # we create an index.rst with all examples
    fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
    #Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html

    <style type="text/css">
    div#sidebarbutton {
        display: none;
    }

    .figure {
        float: left;
        margin: 10px;
        width: auto;
        height: 200px;
        width: 180px;
    }

    .figure img {
        display: inline;
    }

    .figure .caption {
        width: 180px;
        text-align: center !important;
    }
    </style>

Examples
========

.. _examples-index:
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
    for dir in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, dir)):
            generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
    fhindex.flush()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
    """ Generate the rst file for an example directory.

    Writes one gallery entry per plot_*.py file into the open index handle
    `fhindex` and generates the per-example rst pages under root_dir.
    """
    if not dir == '.':
        target_dir = os.path.join(root_dir, dir)
        src_dir = os.path.join(example_dir, dir)
    else:
        target_dir = root_dir
        src_dir = example_dir
    # A README.txt is required; it becomes the section header in the index.
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        print 80 * '_'
        print ('Example directory %s does not have a README.txt file'
               % src_dir)
        print 'Skipping this directory'
        print 80 * '_'
        return
    fhindex.write("""

%s

""" % file(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    def sort_key(a):
        # put last elements without a plot
        if not a.startswith('plot') and a.endswith('.py'):
            return 'zz' + a
        return a
    for fname in sorted(os.listdir(src_dir), key=sort_key):
        if not os.path.split(fname)[-1].startswith('plot_'):
            continue
        if fname.endswith('py'):
            generate_file_rst(fname, target_dir, src_dir, plot_gallery)
            thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
            link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
            fhindex.write('.. figure:: %s\n' % thumb)
            if link_name.startswith('._'):
                link_name = link_name[2:]
            if dir != '.':
                fhindex.write('   :target: ./%s/%s.html\n\n' % (dir,
                              fname[:-3]))
            else:
                fhindex.write('   :target: ./%s.html\n\n' % link_name[:-3])
            fhindex.write("""   :ref:`example_%s`

.. toctree::
   :hidden:

   %s/%s
""" % (link_name, dir, fname[:-3]))
    fhindex.write("""
.. raw:: html

    <div style="clear: both"></div>
""")  # clear at the end of the section
# modules for which we embed links into example code (top-level package
# names checked against each object's __module__)
DOCMODULES = ['mne', 'matplotlib', 'numpy', 'scipy', 'mayavi']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height
    """
    img = Image.open(in_fname)
    width_in, height_in = img.size
    # Uniform scale factor that makes the image fit inside the box.
    scale = min(width / float(width_in), height / float(height_in))
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # Shrink in place while preserving aspect ratio.
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
    # Paste the scaled image centered on a white canvas of the target size.
    # (note: py2 integer division keeps the paste offsets integral)
    canvas = Image.new('RGB', (width, height), (255, 255, 255))
    offset = ((width - width_sc) / 2, (height - height_sc) / 2)
    canvas.paste(img, offset)
    canvas.save(out_fname)
def scale_image(in_fname, max_width):
    """Scale image such that width <= max_width
    """
    img = Image.open(in_fname)
    width_in, height_in = img.size
    if width_in <= max_width:
        # Already narrow enough; leave the file untouched.
        return
    scale = max_width / float(width_in)
    new_size = (int(round(scale * width_in)), int(round(scale * height_in)))
    # Resize in place, preserving aspect ratio, then overwrite the file.
    img.thumbnail(new_size, Image.ANTIALIAS)
    img.save(in_fname)
def get_short_module_name(module_name, obj_name):
    """ Get the shortest possible module name """
    parts = module_name.split('.')
    best = module_name
    i = len(parts) - 1
    # Try progressively shorter dotted prefixes; stop at the first one that
    # no longer exposes obj_name and keep the last prefix that worked.
    while i > 0:
        candidate = '.'.join(parts[:i])
        try:
            exec('from %s import %s' % (candidate, obj_name))
        except ImportError:
            best = '.'.join(parts[:i + 1])
            break
        best = candidate
        i -= 1
    return best
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.

    When plot_gallery is true the example is executed (if its images are
    stale), its stdout is captured, matplotlib/mayavi figures are saved, and
    a thumbnail is produced; otherwise only the rst page is written.
    """
    base_image_name = os.path.splitext(fname)[0]
    # '%%s' keeps a literal %s placeholder for the figure number.
    image_fname = '%s_%%s.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery:
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        # Reuse previously captured stdout/timing when the example is fresh.
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime
                <= os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            # mayavi moved out of the enthought namespace; support both.
            try:
                from mayavi import mlab
            except Exception, e:
                from enthought.mayavi import mlab
            mlab.close(all=True)
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                # Tee stdout so the example's output is both shown live and
                # captured for embedding into the rst page.
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                # get variables so we can later add links to the documentation
                example_code_obj = {}
                for var_name, var in my_globals.iteritems():
                    if not hasattr(var, '__module__'):
                        continue
                    if not isinstance(var.__module__, basestring):
                        continue
                    if var.__module__.split('.')[0] not in DOCMODULES:
                        continue

                    # get the type as a string with other things stripped
                    tstr = str(type(var))
                    tstr = (tstr[tstr.find('\'')
                            + 1:tstr.rfind('\'')].split('.')[-1])
                    # get shortened module name
                    module_short = get_short_module_name(var.__module__,
                                                         tstr)
                    cobj = {'name': tstr, 'module': var.__module__,
                            'module_short': module_short,
                            'obj_type': 'object'}
                    example_code_obj[var_name] = cobj

                # find functions so we can later add links to the documentation
                funregex = re.compile('[\w.]+\(')
                fun_exclude = ['print']
                with open(src_file, 'rt') as fid:
                    for line in fid.readlines():
                        if line.startswith('#'):
                            continue
                        for match in funregex.findall(line):
                            fun_name = match[:-1]
                            if fun_name in fun_exclude:
                                continue
                            # Evaluate the dotted name in the example's own
                            # namespace to resolve the actual callable.
                            try:
                                exec('this_fun = %s' % fun_name, my_globals)
                            except Exception as err:
                                print ('Error: extracting function %s failed: '
                                       '%s' % (fun_name, str(err)))
                                continue
                            this_fun = my_globals['this_fun']
                            if not callable(this_fun):
                                continue
                            if not hasattr(this_fun, '__module__'):
                                continue
                            if not isinstance(this_fun.__module__, basestring):
                                continue
                            if (this_fun.__module__.split('.')[0]
                                    not in DOCMODULES):
                                continue

                            # get shortened module name
                            fun_name_short = fun_name.split('.')[-1]
                            module_short = get_short_module_name(
                                this_fun.__module__, fun_name_short)
                            cobj = {'name': fun_name_short,
                                    'module': this_fun.__module__,
                                    'module_short': module_short,
                                    'obj_type': 'function'}
                            example_code_obj[fun_name] = cobj
                fid.close()  # redundant: the with-block already closed fid

                if len(example_code_obj) > 0:
                    # save the dictionary, so we can later add hyperlinks
                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
                    with open(codeobj_fname, 'wb') as fid:
                        cPickle.dump(example_code_obj, fid,
                                     cPickle.HIGHEST_PROTOCOL)
                    fid.close()  # redundant: with-block already closed fid
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't with to echo it
                    my_stdout = my_stdout.replace(my_globals['__doc__'],
                                                  '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    output_lines = my_stdout.split('\n')
                    # Truncate long output so rst pages stay readable.
                    if len(output_lines) > MAX_NB_LINES_STDOUT:
                        output_lines = output_lines[:MAX_NB_LINES_STDOUT]
                        output_lines.append('...')
                    stdout = ('**Script output**::\n\n %s\n\n'
                              % ('\n '.join(output_lines)))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                last_fig_num = 0
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    # hack to keep black bg
                    facecolor = plt.gcf().get_facecolor()
                    if facecolor == (0.0, 0.0, 0.0, 1.0):
                        plt.savefig(image_path % fig_num, facecolor='black')
                    else:
                        plt.savefig(image_path % fig_num)
                    # make sure the image is not too large
                    scale_image(image_path % fig_num, 850)
                    figure_list.append(image_fname % fig_num)
                    last_fig_num = fig_num

                # Mayavi scenes continue the figure numbering after pyplot.
                e = mlab.get_engine()
                for scene in e.scenes:
                    last_fig_num += 1
                    mlab.savefig(image_path % last_fig_num)
                    # make sure the image is not too large
                    scale_image(image_path % last_fig_num, 850)
                    figure_list.append(image_fname % last_fig_num)
                    mlab.close(scene)
            except:
                # Broad on purpose: a broken example must not kill the whole
                # documentation build; report it and move on.
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            # Up to date: recover the figure list from files on disk.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path % '[1-9]')]

        # generate thumb file
        this_template = plot_rst_template
        if os.path.exists(first_image_file):
            make_thumbnail(first_image_file, thumb_file, 180, 120)

    if not os.path.exists(thumb_file):
        # use the default thumbnail
        make_thumbnail('source/_images/mne_helmet.png', thumb_file, 180, 120)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code"""
    # Skip entirely if the build failed earlier.
    if exception is not None:
        return
    print 'Embedding documentation hyperlinks in examples..'

    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['mne'] = SphinxDocLinkResolver(app.builder.outdir,
                                                 relative=True)
    doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
        'http://matplotlib.org')
    doc_resolvers['numpy'] = SphinxDocLinkResolver(
        'http://docs.scipy.org/doc/numpy-1.6.0')
    doc_resolvers['scipy'] = SphinxDocLinkResolver(
        'http://docs.scipy.org/doc/scipy-0.11.0/reference')
    doc_resolvers['mayavi'] = SphinxDocLinkResolver(
        'http://docs.enthought.com/mayavi/mayavi',
        extra_modules_test=['mayavi.mlab'])

    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))

    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'

    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print '\tprocessing: %s' % fname
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            # Pickle written by generate_file_rst; fname[:-5] strips '.html'.
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = cPickle.load(fid)
                fid.close()  # redundant: with-block already closed fid
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.iteritems():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    link = doc_resolvers[this_module].resolve(cobj,
                                                              full_fname)
                    if link is not None:
                        # Rebuild the Pygments-highlighted dotted name so we
                        # match the html exactly before wrapping it in <a>.
                        parts = name.split('.')
                        name_html = orig_pattern % parts[0]
                        for part in parts[1:]:
                            name_html += period + orig_pattern % part
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                if len(str_repl) > 0:
                    with open(full_fname, 'rt') as fid:
                        lines_in = fid.readlines()
                    fid.close()  # redundant: with-block already closed fid
                    with open(full_fname, 'wt') as fid:
                        for line in lines_in:
                            for name, link in str_repl.iteritems():
                                line = line.replace(name.encode('utf-8'),
                                                    link.encode('utf-8'))
                            fid.write(line)
                    fid.close()  # redundant: with-block already closed fid
    print '[done]'
def setup(app):
    """Hook the example-gallery machinery into the Sphinx build.

    Registers gallery generation at ``builder-inited``, declares the
    ``plot_gallery`` config value, attaches documentation-link embedding
    to ``build-finished``, and clears stale generated PNG images.
    """
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # Links can only be embedded once the HTML output actually exists.
    app.connect('build-finished', embed_code_links)
    # Sphinx hack: Sphinx re-copies generated images into the build
    # directory on every run and, if a target name already exists, appends
    # a digit instead of overwriting.  The directory is never cleared, so
    # the number of images grows with every build.
    #
    # This was raised on the sphinx-dev list with no response:
    # http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # Work around it by wiping the image build directory before each build.
    # This depends on Sphinx's output layout (tested on Sphinx 1.0.7); if
    # the layout changes this should degrade to a harmless no-op.
    build_image_dir = 'build/html/_images'
    if os.path.exists(build_image_dir):
        for entry in os.listdir(build_image_dir):
            if entry.endswith('png'):
                os.remove(os.path.join(build_image_dir, entry))
| effigies/mne-python | doc/sphinxext/gen_rst.py | Python | bsd-3-clause | 33,312 | [
"Mayavi"
] | a987ac2236849293fdc573e7db4aa2624c75cede7407fed6a67bdc30cbd13358 |
"""
=================================================
Deterministic Tracking with EuDX on Tensor Fields
=================================================
In this example we do deterministic fiber tracking on Tensor fields with EuDX
[Garyfallidis12]_.
This example requires to import example `reconst_dti.py` to run. EuDX was
primarily made with cpu efficiency in mind. Therefore, it should be useful to
give you a quick overview of your reconstruction results with the help of
tracking.
"""
import os
import sys
import numpy as np
import nibabel as nib
if not os.path.exists('tensor_fa.nii.gz'):
import reconst_dti
"""
EuDX will use the directions (eigen vectors) of the Tensors to propagate
streamlines from voxel to voxel and fractional anisotropy to stop tracking.
"""
fa_img = nib.load('tensor_fa.nii.gz')
FA = fa_img.get_data()
evecs_img = nib.load('tensor_evecs.nii.gz')
evecs = evecs_img.get_data()
"""
In the background of the image the fitting will not be accurate because there all
measured signal is mostly noise and possibly we will find FA values with nans
(not a number). We can easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
EuDX takes as input discretized voxel directions on a unit sphere. Therefore,
it is necessary to discretize the eigen vectors before feeding them in EuDX.
For the discretization procedure we use an evenly distributed sphere of 724
points which we can access using the get_sphere function.
"""
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
We use quantize_evecs (evecs here stands for eigen vectors) to apply the
discretization.
"""
from dipy.reconst.dti import quantize_evecs
peak_indices = quantize_evecs(evecs, sphere.vertices)
"""
EuDX is the fiber tracking algorithm that we use in this example.
The most important parameters are the first one which represents the
magnitude of the peak of a scalar anisotropic function, the
second which represents the indices of the discretized directions of
the peaks and odf_vertices are the vertices of the input sphere.
"""
from dipy.tracking.eudx import EuDX
eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000, odf_vertices = sphere.vertices, a_low=0.2)
tensor_streamlines = [streamline for streamline in eu]
"""
We can now save the results in the disk. For this purpose we can use the
TrackVis format (``*.trk``). First, we need to create a header.
"""
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = fa_img.get_header().get_zooms()[:3]
hdr['voxel_order'] = 'LAS'
hdr['dim'] = FA.shape
"""
Then we need to input the streamlines in the way that Trackvis format expects them.
"""
tensor_streamlines_trk = ((sl, None, None) for sl in tensor_streamlines)
ten_sl_fname = 'tensor_streamlines.trk'
"""
Save the streamlines.
"""
nib.trackvis.write(ten_sl_fname, tensor_streamlines_trk, hdr, points_space='voxel')
"""
If you don't want to use Trackvis to visualize the file you can use our
lightweight `fvtk` module.
"""
try:
from dipy.viz import fvtk
except ImportError:
raise ImportError('Python vtk module is not installed')
sys.exit()
"""
Create a scene.
"""
ren = fvtk.ren()
"""
Every streamline will be coloured according to its orientation
"""
from dipy.viz.colormap import line_colors
"""
fvtk.line adds a streamline actor for streamline visualization
and fvtk.add adds this actor in the scene
"""
fvtk.add(ren, fvtk.streamtube(tensor_streamlines, line_colors(tensor_streamlines)))
print('Saving illustration as tensor_tracks.png')
ren.SetBackground(1, 1, 1)
fvtk.record(ren, n_frames=1, out_path='tensor_tracks.png', size=(600, 600))
"""
.. figure:: tensor_tracks.png
:align: center
**Deterministic streamlines with EuDX on a Tensor Field**.
.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography", PhD thesis, University of Cambridge, 2012.
.. include:: ../links_names.inc
"""
| StongeEtienne/dipy | doc/examples/tracking_eudx_tensor.py | Python | bsd-3-clause | 3,897 | [
"VTK"
] | d352f9123da3613e9930159f4271019beb4610e9655f50e41dcd75bcedc319b4 |
from __future__ import absolute_import
from .core import MaskedLayer
from .. import backend as K
class GaussianNoise(MaskedLayer):
    '''Inject additive zero-centered Gaussian noise while training.

    At test time, or when ``sigma`` is 0, the layer passes its input
    through unchanged, so the noise acts purely as a training-time
    corruption process / regularizer.
    '''
    def __init__(self, sigma, **kwargs):
        super(GaussianNoise, self).__init__(**kwargs)
        # Standard deviation of the injected noise.
        self.sigma = sigma

    def get_output(self, train=False):
        X = self.get_input(train)
        if train and self.sigma != 0:
            noise = K.random_normal(shape=K.shape(X),
                                    mean=0.,
                                    std=self.sigma)
            X = X + noise
        return X

    def get_config(self):
        base_config = super(GaussianNoise, self).get_config()
        base_config.update({"name": self.__class__.__name__,
                            "sigma": self.sigma})
        return base_config
class GaussianDropout(MaskedLayer):
    '''Apply multiplicative one-centered Gaussian noise during training.

    As with classic dropout the layer is the identity at test time; during
    training each activation is multiplied by a draw from
    N(1, p / (1 - p)), i.e. standard deviation sqrt(p / (1 - p)).

    # Arguments
        p: float, drop probability (as with `Dropout`; the paper
            parameterizes by the retain probability instead).

    Reference:
        Dropout: A Simple Way to Prevent Neural Networks from Overfitting
        Srivastava, Hinton, et al. 2014
        http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf
    '''
    def __init__(self, p, **kwargs):
        super(GaussianDropout, self).__init__(**kwargs)
        self.p = p

    def get_output(self, train=False):
        # `train` defaults to False for consistency with GaussianNoise:
        # noise is only injected during training.
        X = self.get_input(train)
        if train:
            # self.p refers to drop probability rather than
            # retain probability (as in paper), for consistency.
            # The paper's noise *variance* is p / (1 - p); K.random_normal
            # expects the standard deviation, hence the square root.
            X *= K.random_normal(shape=K.shape(X), mean=1.0,
                                 std=(self.p / (1.0 - self.p)) ** 0.5)
        return X

    def get_config(self):
        config = {"name": self.__class__.__name__,
                  "p": self.p}
        base_config = super(GaussianDropout, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/keras/layers/noise.py | Python | gpl-2.0 | 1,887 | [
"Gaussian"
] | 5305d87e803dbd03dd44050246452ceb8dba64903a80d6206ffbedc5d2a0df75 |
from tests import base
from tests import factories as f
# Baseline number of messages expected in the mail outbox fixture.
# NOTE(review): not referenced by the test visible in this module -- verify
# whether it is still needed.
outbox_len = 0
# Plaintext password shared by all users created in these browser tests.
password = '123123'
def test_workshop_create(base_url, browser, outbox):
    """End-to-end check of the workshop creation flow.

    Builds a POC user with a fully populated profile, verifies that
    workshop creation is blocked until the user belongs to an
    organisation, then submits the create form once invalidly (missing
    required fields) and once validly.
    """
    f.create_usertype(slug='tutor', display_name='tutor')
    poc_type = f.create_usertype(slug='poc', display_name='poc')
    state = f.create_state()
    user = base.create_user(password)
    url = base_url + '/workshop/'
    base.login_and_confirm(browser, url, outbox, user, password)
    user.save()
    # (sic) 'create_locaiton' is the factory helper's actual name in
    # tests.factories -- do not "fix" the spelling here alone.
    location = f.create_locaiton(name='location1')
    section1 = f.create_workshop_section(name='section1')
    # Flesh out the profile so the user qualifies as a POC.
    user.profile.location = location
    user.profile.usertype.clear()
    user.profile.usertype.add(poc_type)
    user.profile.interested_states.add(state)
    user.profile.mobile = '1234567890'
    # browser.select('usertype', poc_type.id)
    user.profile.interested_sections.add(section1)
    user.profile.occupation = 'occupation'
    user.profile.work_location = 'work_location'
    user.profile.save()
    url = base_url + '/workshop/'
    base.login(browser, url, user, password)
    # validate if user belongs to organisation
    url = base_url + '/workshop/create/'
    browser.visit(url)
    # Without an organisation the page should prompt the user to create one.
    # (The asserted text mirrors the application's own wording, typo and all.)
    assert browser.is_text_present("create organisaiton.")
    # Create org
    org = f.create_organisation(location=location)
    org.user.add(user)
    user.profile.interested_locations.add(org.location)
    # user.profile.location = org.location
    # user.profile.save()
    org.save()
    # section1 = f.create_workshop_section(name='section1')
    # invalid form: submit with required fields left blank
    url = base_url + '/workshop/create/'
    browser.visit(url)
    browser.select('no_of_participants', 10)
    browser.fill('expected_date', '11/12/2018')
    browser.find_by_css('[type=submit]')[0].click()
    assert browser.is_text_present('This field is required.')
    # valid form
    url = base_url + '/workshop/create/'
    base.workshop_create(browser, url, org, section1)
| pythonindia/wye | tests/functional/workshop/test_create_workshop.py | Python | mit | 1,933 | [
"VisIt"
] | 25922a988fde708e2e7b5312e24ff24c434576166c1939bbe33db21313773718 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/grossmj/PycharmProjects/gns3-gui/gns3/ui/about_dialog.ui'
#
# Created: Mon Mar 23 22:22:17 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with the QString class available (sip API v1): use Qt's own
    # UTF-8 conversion helper.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString is absent (Python 3 / sip API v2): strings are already
    # unicode, so conversion is a no-op.
    def _fromUtf8(s):
        return s
try:
    # Older Qt exposes UnicodeUTF8 and translate() takes an encoding.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer Qt dropped the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_AboutDialog(object):
    def setupUi(self, AboutDialog):
        """Build the About dialog's widget tree (pyuic4-generated).

        Creates a tab widget with About / Team / Thanks / License tabs,
        an OK button box, and wires the accept/reject signals.  This file
        is regenerated from the .ui source -- edit the .ui file, not this.
        """
        # Dialog shell and its top-level grid layout.
        AboutDialog.setObjectName(_fromUtf8("AboutDialog"))
        AboutDialog.setWindowModality(QtCore.Qt.WindowModal)
        AboutDialog.resize(378, 234)
        self.gridLayout_2 = QtGui.QGridLayout(AboutDialog)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.tabWidget = QtGui.QTabWidget(AboutDialog)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        # "About" tab: logo, expanding spacer, and version/license text.
        self.tab = QtGui.QWidget()
        self.tab.setObjectName(_fromUtf8("tab"))
        self.gridLayout = QtGui.QGridLayout(self.tab)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.uiLogoLabel = QtGui.QLabel(self.tab)
        self.uiLogoLabel.setText(_fromUtf8(""))
        self.uiLogoLabel.setPixmap(QtGui.QPixmap(_fromUtf8(":/images/gns3_logo.png")))
        self.uiLogoLabel.setObjectName(_fromUtf8("uiLogoLabel"))
        self.horizontalLayout.addWidget(self.uiLogoLabel)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.uiAboutTextLabel = QtGui.QLabel(self.tab)
        self.uiAboutTextLabel.setOpenExternalLinks(True)
        self.uiAboutTextLabel.setObjectName(_fromUtf8("uiAboutTextLabel"))
        self.horizontalLayout.addWidget(self.uiAboutTextLabel)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab, _fromUtf8(""))
        # "Team" tab: read-only rich-text credits.
        self.tab_4 = QtGui.QWidget()
        self.tab_4.setObjectName(_fromUtf8("tab_4"))
        self.verticalLayout = QtGui.QVBoxLayout(self.tab_4)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.uiTeamTextEdit = QtGui.QTextEdit(self.tab_4)
        self.uiTeamTextEdit.setReadOnly(True)
        self.uiTeamTextEdit.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
        self.uiTeamTextEdit.setObjectName(_fromUtf8("uiTeamTextEdit"))
        self.verticalLayout.addWidget(self.uiTeamTextEdit)
        self.tabWidget.addTab(self.tab_4, _fromUtf8(""))
        # "Thanks" tab: read-only plain-text contributor list.
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName(_fromUtf8("tab_2"))
        self.vboxlayout = QtGui.QVBoxLayout(self.tab_2)
        self.vboxlayout.setObjectName(_fromUtf8("vboxlayout"))
        self.uiThanksPlainTextEdit = QtGui.QPlainTextEdit(self.tab_2)
        self.uiThanksPlainTextEdit.setReadOnly(True)
        self.uiThanksPlainTextEdit.setObjectName(_fromUtf8("uiThanksPlainTextEdit"))
        self.vboxlayout.addWidget(self.uiThanksPlainTextEdit)
        self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
        # "License" tab: read-only plain-text license.
        self.tab_3 = QtGui.QWidget()
        self.tab_3.setObjectName(_fromUtf8("tab_3"))
        self.vboxlayout1 = QtGui.QVBoxLayout(self.tab_3)
        self.vboxlayout1.setObjectName(_fromUtf8("vboxlayout1"))
        self.uiLicensePlainTextEdit = QtGui.QPlainTextEdit(self.tab_3)
        self.uiLicensePlainTextEdit.setReadOnly(True)
        self.uiLicensePlainTextEdit.setObjectName(_fromUtf8("uiLicensePlainTextEdit"))
        self.vboxlayout1.addWidget(self.uiLicensePlainTextEdit)
        self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
        self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
        # OK button box wired to the dialog's accept/reject slots.
        self.buttonBox = QtGui.QDialogButtonBox(AboutDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.retranslateUi(AboutDialog)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), AboutDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), AboutDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(AboutDialog)
        AboutDialog.setTabOrder(self.uiThanksPlainTextEdit, self.uiLicensePlainTextEdit)
def retranslateUi(self, AboutDialog):
AboutDialog.setWindowTitle(_translate("AboutDialog", "About", None))
self.uiAboutTextLabel.setText(_translate("AboutDialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:14pt; font-weight:600;\">GNS3 %VERSION%</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt; font-weight:600;\">Under GPL v3 license</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:9pt; font-weight:600;\"><br /></p></body></html>", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("AboutDialog", "&About", None))
self.uiTeamTextEdit.setHtml(_translate("AboutDialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Developers</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Jeremy Grossmann</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Julien Duponchelle</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Contributors</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-weight:600;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">James E. Carpenter (IOU support)</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Daniel Lintott (NET file import)</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Marc Weisel (Mac OS X packaging)</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Alexey Eromenko (VirtualBox support)</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Contractors</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><a href=\"https://github.com/planctechnologies\"><span style=\" text-decoration: underline; color:#0000ff;\">Plan C Technologies</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; text-decoration: underline; color:#0000ff;\"><br /></p></body></html>", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("AboutDialog", "&Team", None))
self.uiThanksPlainTextEdit.setPlainText(_translate("AboutDialog", "James Borden\n"
"Tenzin Rigdol Oshoe\n"
"Brian Jacobson\n"
"Chad Hoevenaars\n"
"Jesus Espinoza\n"
"Des Kharisma Tarigan\n"
"David Bigerstaff\n"
"Christopher Good\n"
"Brian Farmer\n"
"Jimmy Bruneel\n"
"Rob Edwards\n"
"BEGQTh34\n"
"Nebulis01\n"
"The JWag (James Wagner)\n"
"JONATHAN BALEVA VIRAY\n"
"Jamie Hoyt\n"
"Jake Guffey\n"
"Enis Byci\n"
"Avery Wright\n"
"Maxim Klimanov\n"
"Salomon ADJOVI\n"
"Keith McCulloch\n"
"Guillermoi A Pierluissi\n"
"MiraliN\n"
"ALexandr Ismagilov\n"
"mistabatu\n"
"AFIF JABADO\n"
"Allen Hadder \n"
"Jonathan M. Olivier\n"
"Muhammad Adeel Malik\n"
"PhatWheZ\n"
"Christopher D Perrett\n"
"James Clifford\n"
"David Henderson\n"
"Andrea Florio\n"
"Michael Eva\n"
"Abdul Wahid\n"
"SebastianHelmer\n"
"Eric Maxwell\n"
"Bradly Leiws\n"
"Pavel Afanasjev\n"
"Jan Jessen\n"
"Brandon Raikes\n"
"Geron\n"
"Joel-Brook-Leeds-yay!\n"
"Robert Novak\n"
"Barry Weiss\n"
"Sami Alsubhi\n"
"Packetboss\n"
"Ionut Vasile\n"
"Nick Yeager\n"
"Pioneer Network Solutions, LLC.\n"
"Randy Timmermans\n"
"Pablo Lucena\n"
"Michal Dulovic\n"
"Ramon Pinyol\n"
"Roberto Taccon\n"
"Andras Dosztal\n"
"Matthieu Dalcourt\n"
"Matt Dominey\n"
"Wanderson Cristony Tinti\n"
"Alexandre Maia Vlahos\n"
"DarronDazlinSmith\n"
"JOHN BAPTIST OBILIL\n"
"Jimmy Copeland\n"
"Roy Benoy\n"
"Henrik Kruse Larsen\n"
"cpt2040\n"
"Martin Varga\n"
"Jonathan Gonzalez\n"
"Luke Withers\n"
"Mahir Ali Ahmed\n"
"Dean Ryan Perrine\n"
"Robert F. Dias Jr\n"
"Ashenafi\n"
"Justin L. Spencer\n"
"Carlo Gagliardi\n"
"Sean Barmettler\n"
"focaccio\n"
"rhein021\n"
"Frederick Poirier\n"
"Dmitri Fedotov\n"
"Ernie Maestas\n"
"Colin Taylor \n"
"HugoAmaro\n"
"Solim\n"
"Volodymyr Kharytonov\n"
"Max \"Big Hoss\" Clise\n"
"Alex_Gutierrez\n"
"Saad Najaf Khan\n"
"dahveed311\n"
"Michael Loft Mikkelsen - DK\n"
"HyperSCSI\n"
"Enrico Floris\n"
"Grigoris Papalexis\n"
"Prithvi Mandava\n"
"YOUNIS BADAWI\n"
"SelvaagIT\n"
"steve brokenshire\n"
"Brian Adelson\n"
"Michael A Hern\n"
"Umair Ali Soomro\n"
"Matt_Earp\n"
"Martin L Lora\n"
"Diego Monasterios\n"
"Chris T Sims\n"
"Jeff Wagner\n"
"Harinder Singh\n"
"David Gaytan\n"
"Jonathan Moulton\n"
"OGUZHAN VEYSEL ATALAR\n"
"Bent Mathiesen\n"
"GuruSue\n"
"mmetaphor\n"
"Paul Stoner\n"
"Ryan Corcoran\n"
"OmarS\n"
"Dudiefa Tonye\n"
"Christian Elsen (San Francisco)\n"
"Victor Venturelli Mosconi\n"
"Barry Jones\n"
"Syed Zayed Bukhari\n"
"Kareo\n"
"Urs \"Whity\" Weiss\n"
"Jesus Hernando Lopez\n"
"Mike McCauley\n"
"Carl Francis AKA tEeJ\n"
"Imad Daou\n"
"orangejam72\n"
"Paulo \"elTechno\" Alvarado\n"
"Don Coulson\n"
"Jeff Nierman\n"
"Brian Whelton\n"
"Garang Deng\n"
"Marcus Watson\n"
"Paul@GGR\n"
"Anthony Branchaud\n"
"Rameez-C\n"
"Patrick Williams\n"
"Jonathan Madore\n"
"Robert Gene Horn\n"
"Jaime\n"
"Ken Richardson\n"
"Eduard Ch.\n"
"Ionut Tartau\n"
"Fred Ellis Jr\n"
"Ferenc Kuris\n"
"David Tinsley\n"
"Jeroen Roos\n"
"Peter Palasti\n"
"Dave Hardy\n"
"Herceg Andras\n"
"Bryan Scarbrough\n"
"Filipe Matias\n"
"Muelly\n"
"tr3quart1sta\n"
"Ian C Scarborough\n"
"Robert B Romel\n"
"Pio P. Prado\n"
"zasplas\n"
"Pantelis Stoufis\n"
"Daniel R Moore\n"
"Daniel Tinsley\n"
"Derek Ivey\n"
"Alessandro Piva\n"
"Tony DelGreco\n"
"Tyler Carlisle\n"
"Christopher Buckley\n"
"Korstiaan Kooijman\n"
"Paul Schriever\n"
"Mawat\n"
"Ivo Bottenheft\n"
"Soundwave\n"
"Markus Karlsson\n"
"molan1\n"
"Nikola Sambolic\n"
"Joachim Boadu Kwaku\n"
"Adi Wong\n"
"Joao Teixeira\n"
"Alexandru Badea\n"
"Bryan Hubbard\n"
"Rajesh Anne\n"
"Bartosz Durma\n"
"Vladusha\n"
"HasanTheWise\n"
"Tejinder Panesar\n"
"Alexandru Stefan Marinescu\n"
"Giovanni Augusto\n"
"Kyryl Tumanov\n"
"Rashiem Mims\n"
"Maky Robert\n"
"Timur Mezentsev\n"
"Matt Blackwell\n"
"James Moore\n"
"elhatab\n"
"Ruben Lihet\n"
"Paul Kiela\n"
"Jeffrey Kraemer\n"
"Andrew K.\n"
"Comein Arnaud\n"
"Riccardo Bruzzone\n"
"Ulrich Kempken\n"
"Simon Wilkinson\n"
"Susan Chatterjee\n"
"Jose Isidro Cantu Alvarez\n"
"Mcglon\n"
"Asimios Kiropoulos\n"
"slobodan simic\n"
"Jacob Adlon\n"
"arkSyne\n"
"Rakesh M. Thakkar\n"
"Tchitchi\n"
"Mohamed Rezard Meeralebbe\n"
"JaeHyeong Jeon\n"
"enderst\n"
"Yves Kerbens\n"
"Ramon Kidd\n"
"Frank Olshansky\n"
"Steve Miller\n"
"Curt Wheeler\n"
"Tony M Nguyen\n"
"Chris Kuhn\n"
"John K Williamson\n"
"Swinster\n"
"Jared Heinrichs\n"
"macbroadcast\n"
"Cyrinojuca\n"
"Sean Cardus\n"
"Lazaro Rene Ortega Rodriguez\n"
"Andrew Peter Kirkby\n"
"Luis Ramos\n"
"Sean McGrath\n"
"Manuel Neuner (Austria)\n"
"Matt Siegel\n"
"J. Neil Marlowe\n"
"Shibata\n"
"Michael Maguire\n"
"Harmeet Singh Sian\n"
"Britt Adams\n"
"MuslimSubmittedToGod\n"
"Samuel\n"
"Steve Housego\n"
"Pablo Hauptmann\n"
"Stevan Sheely\n"
"Brian \"Red\" Williamson\n"
"Mark Fietje\n"
"Muhammet Said Temelli\n"
"Skeeve Stevens\n"
"JEFF OLIVER\n"
"Frederik Bjerggaard Nielsen\n"
"Anar R. Iskandarli - Azerbaijan\n"
"Anthony_Oliverio\n"
"Jason Cook\n"
"Kalamoukos Dimitris\n"
"Francis Ruddy\n"
"roxtonuk\n"
"Przemyslaw \'UWillC\' Snowacki\n"
"Chipa\n"
"M Sawatzky\n"
"dzap\n"
"Shaun Carlin\n"
"Jegan\n"
"Enrique Gómez Monreal\n"
"Marco Simontacchi\n"
"Christopher Robert Lee\n"
"esreverse\n"
"milan peyrac\n"
"Robin Wood - Digininja\n"
"Dan Nelson\n"
"Gareth Mitchelson\n"
"Michael V King, Sr.\n"
"Brent A Walter\n"
"smcghie\n"
"Jose Carvalho\n"
"IPrgomet\n"
"Richard Harris 1\n"
"Wilson Mendes\n"
"Jovito Cueva\n"
"Conrad Jedynak\n"
"Martin Gursky\n"
"paul wynne\n"
"Oyama Hall\n"
"Giles Mathew Broom\n"
"Akwasi Adu\n"
"Dan Roering\n"
"Sal Amico\n"
"Jakso Laszlo 06101963\n"
"Engelbert Luechinger\n"
"MWenglikowski\n"
"Waqas Bashir\n"
"Garrett Skjelstad\n"
"Renato de Pierri - SP Brazil\n"
"Ebo Semenhyia Thompson\n"
"Marcos Rodrigo Santos de Souza\n"
"OGINNI OLUWASEYI\n"
"Jordan Foster(Jez2cool)\n"
"Bluca\n"
"Earl Bovell\n"
"Steve Stewart\n"
"Mike Boughton\n"
"Adrian Z.\n"
"Phillip White\n"
"Mustafa Ameen Al-Khateeb\n"
"Vikas Sharma\n"
"Noel Mulryan\n"
"Johan2004\n"
"James Ramsay\n"
"Brian Lehr\n"
"ecium\n"
"Yatin K Shah\n"
"Neil Sheridan\n"
"Francis Pena (JigaX)\n"
"Hank Yeomans\n"
"Peter Stanczak\n"
"jhrcariazo\n"
"Matthew Gosling\n"
"Danni Nissen Bardino\n"
"Duane McNulty\n"
"Adam Wysockyj\n"
"Benjamin Donald Byers \n"
"Terrance Richards\n"
"Erik Jacobson II\n"
"DIAS\n"
"ALDO LEIVA-MARCHISSANO (DECO5)\n"
"dedycahyadi-unmul\n"
"Robert Trow\n"
"Adam Johnston\n"
"Mace\n"
"Alexander Lubenski\n"
"John Braswell\n"
"Brian Sanders\n"
"David Vierling\n"
"André Jan Smit\n"
"RedNectar Chris Welsh\n"
"Edmarc Vitz Oliveros\n"
"Vivek Bannore\n"
"Emilio Sanchez Parbul\n"
"Luke Jay Cooper\n"
"Bill Laubsch\n"
"Luke Cooper\n"
"Chris Cook\n"
"KloudFyre Networks\n"
"Jason Miller\n"
"Chase Wright\n"
"Joseph Paveglio\n"
"Shing Hei Yee\n"
"MARSHG\n"
"Rowan Brandon Lee\n"
"AbdulHayee\n"
"Wanxue Hai\n"
"Net9 Telecom\n"
"Dean Lofts\n"
"Gordon Duncan\n"
"Daniel Gurgel\n"
"Bill Scully\n"
"Scott Dalton\n"
"brato\n"
"Peter Hansen\n"
"Jose E. Silva\n"
"reySkata\n"
"Francis Xavier Todd II\n"
"Nick Cochrane\n"
"Geoff Kuchera\n"
"Geoff Ladwig\n"
"Murilo R. Esplugues\n"
"Srinivasa R Emani\n"
"Terrence Battlehunt\n"
"Muhammad Bhatti\n"
"John Miller\n"
"Tiago Lima (TiLima)\n"
"Luis Carlos Gomes Alves\n"
"Giuseppe De Luca\n"
"Alex Kieser\n"
"Adam Steele\n"
"Joey Caldwell\n"
"Aaron Oliver\n"
"Adil Nasser\n"
"Marcelo Chicralla\n"
"Chris Smolen\n"
"Oleg Ivin\n"
"davidwthomas\n"
"pingwarrior\n"
"Yared Demissie\n"
"SIM Kwang Meng\n"
"Thomas Irwan\n"
"Jordan Dalton\n"
"Debasish\n"
"Ernest Saadat\n"
"Dr Bankim Jani\n"
"Nasir Bilal\n"
"Tran Minh Luan\n"
"Brad Haynes\n"
"Gerald Gonzalez Santoni\n"
"Campesi\n"
"Richard Danu\n"
"Olugbenga Adara\n"
"Emad Salahuddin\n"
"Ezra Reang\n"
"Serge Cherestal\n"
"Mike Shafer\n"
"Vikram Sokhi\n"
"Arliton Martiniano Rocha\n"
"Faith Solutions pvt ltd \n"
"Arnold A. Madulin\n"
"cqaigy\n"
"Ti-Chris\n"
"Sergey Goffert\n"
"Chris Higgins\n"
"George P. Burdell\n"
"Todd Graham\n"
"Barry Middleton\n"
"Pdw8\n"
"Lê Việt Thanh\n"
"Pavel Glushkov\n"
"Alan Rizgar Ali\n"
"Aung Ye Thu\n"
"Khurshid Hassan Khan\n"
"Kamran Mehdi\n"
"Richard Mantilla\n"
"David R. Burgess\n"
"Thomas Whitmore\n"
"Lee Kong Wah\n"
"SUNANDA DAS\n"
"Irvan Tambunan\n"
"Vasanthkumar\n"
"TJ West\n"
"Kanthi Kiran Narisetti\n"
"DarwinLabs\n"
"Weston Brown\n"
"Manikant Thakur\n"
"Intoy23_\n"
"Muhammad Hussain\n"
"jakimm\n"
"Gengxiaojun\n"
"Dilan Perera\n"
"Yagnesh Chauhan\n"
"James R. Gross Jr.\n"
"Abraxas\n"
"Patrick Silwimba\n"
"Rajavel\n"
"Idris Adesanya\n"
"John Bluhdorn\n"
"Xulnoor Syed\n"
"Gerard Sharpe\n"
"Warren Sullivan\n"
"Greg Gephart\n"
"Ian Verno\n"
"Mark Odette II\n"
"Christopher Megyeri\n"
"Vladimir Kuchar\n"
"Yazeed Fataar\n"
"Rajesh Kakkanatt Jolly\n"
"Radovan Brezula\n"
"Russell W Wood\n"
"Cameron Rooke\n"
"Michael Dawson\n"
"Ronald Boestfleisch Jr\n"
"Jade Rampulla\n"
"Mohammed Aftab\n"
"Orkhan Farajov\n"
"sumitava chatterjee\n"
"Hugh Mann\n"
"Rasto Szaszak\n"
"Name, last name and country.\n"
"Milan Zapletal\n"
"MJM Boyens\n"
"Ernesto Hernandez, Finland.\n"
"Filippos Kolovos\n"
"David Julson\n"
"Net Center, Inc.\n"
"Robert Rittenhouse\n"
"asdfghjkl\n"
"Derek Michael Neves\n"
"GLB\n"
"Nadeem Rafi\n"
"Steve De Jongh\n"
"Rich Gibbons\n"
"Krishna Kumaran GK\n"
"Sebastian Gosenheimer\n"
"dante\n"
"Frank Hopkins\n"
"Wyld Stallyn\n"
"George Paa Kwesi Williams\n"
"Andrzej Krzyżak\n"
"TheMasonFamily\n"
"Neill Wilkinson - Aeonvista Ltd\n"
"Guillano L\'intelligent\n"
"Tiago Sousa\n"
"Ingo Bothe\n"
"Guillano L\n"
"Dingxia Li\n"
"Bongie\n"
"Orlov Vladimir\n"
"Dmitry Tyutikov\n"
"Jon Walker\n"
"Theofanis Katsiaounis\n"
"praneel prasad\n"
"norwizzle\n"
"tmcraig08\n"
"Simon Huber\n"
"Pedro Duarte ESTCB\n"
"Akov\n"
"Mihai Alexandru Vasiliu\n"
"ronnietheengineer\n"
"Timothy Manito\n"
"Dmitry Seliverstov\n"
"Matija Grabar\n"
"mckrsta\n"
"magulito\n"
"GazandKim\n"
"Chris Davies\n"
"Zack Manriquez\n"
"Adnan Pajalic\n"
"Anton Ovsyannikov\n"
"RushenYYZ\n"
"Alejandro Nolla\n"
"Igor Plokhikh\n"
"Ilya Kupriyanov\n"
"zahid yasin\n"
"joey fernandez mirador\n"
"Orlando Amador\n"
"Christiano Lucas\n"
"Timothy Call-Buckeyes#1\n"
"Chris Graves\n"
"Roman Florea\n"
"Andrew Banwell\n"
"Randy Brizuela\n"
"Tibor Torma\n"
"KIOUSIS THANASIS\n"
"Paulo Vaz\n"
"primmus\n"
"Ovidu Catrina\n"
"Emi.Tiamo.\n"
"Willian Kassem\n"
"Xianli Huang\n"
"karanarora192006\n"
"Jens THIES\n"
"Emberlight\n"
"Luis Knob\n"
"WarrenHayhurst\n"
"Richard Karus\n"
"Pablo Olveira\n"
"ghisisan\n"
"Antonio Gerbino\n"
"Bijay Swain\n"
"Simon Minery\n"
"Matt du Jardin\n"
"Ian Blaney\n"
"Nallanathan Sivatharzan\n"
"Paulo Guerreiro\n"
"Joe Audet\n"
"Noman Choudhury\n"
"Silvyse\n"
"Azam Saddiq\n"
"Rodrigo Henrique Castro da Silva\n"
"Rickard Körkkö\n"
"Thomas quero\n"
"Matus Lenhart\n"
"Frank Bailey\n"
"herdoc2005\n"
"Kurt Holm-Andersen\n"
"Dimitar Leskov\n"
"David Abreu de Sousa Junior\n"
"Anthony R Junk\n"
"Aung Zaw Latt\n"
"Fred Bisel\n"
"khaisar\n"
"MOHAMMAD FAIZAL BIN MOHAMED SALLEHIN\n"
"Svindler\n"
"Marcel Slagter\n"
"Brandon.nmi.Johnson\n"
"Farias\n"
"Quinton Bakies\n"
"Yoshihiro Ano\n"
"Alex K\n"
"Yohannes Iyob\n"
"JPETALLAR\n"
"William Gruesbeck Jr.\n"
"Yollande S Chang\n"
"Chandler B.\n"
"Roberto Ríos Camilo\n"
"poolisa\n"
"Luke Grant-Jones\n"
"Bhuiya\n"
"Kenneth Vargas Vargas\n"
"Shivam Agarwal\n"
"Alexander Slipak\n"
"Gary W. McCullors\n"
"HD_37432\n"
"Huy Tran\n"
"David Krantz \n"
"Tony Davis\n"
"Richie Hor\n"
"Luis Diego Flores\n"
"Felipe Lima (Sheriff)\n"
"John Merry\n"
"Syed Jamil Hussain Jafri\n"
"Randy Llandelar\n"
"Fahim Ahmed\n"
"Dan Ryan\n"
"Kyle Bauer\n"
"Michael Campbell\n"
"Matthew Gillespie\n"
"Khuram Rafique\n"
"Sean Salvadalena\n"
"steven richardson\n"
"Jarod Mohlmaster\n"
"Loc Vinh Nguyen\n"
"Rick Vaquera\n"
"Pritam Banerjee\n"
"claytondf\n"
"Andrew Chapman\n"
"Filip Štirn\n"
"Emeka Ezekoka\n"
"Steve of the family McGonigle\n"
"Eliott Stidd\n"
"Tim Peel\n"
"williamhotel\n"
"Thomas Kager\n"
"Purushotham G\n"
"Karthik Bonthu\n"
"Nathan Howard\n"
"Slobodan Nastevski\n"
"DustyWeatherby\n"
"Andrew Kotska\n"
"Wesly Inbaraj\n"
"Frantzcy Paisible\n"
"Emmett G\n"
"Finn Mann\n"
"Tariq Raza\n"
"Darick W. LaSelle\n"
"jwiggins\n"
"Jonathan Marson\n"
"Thiago Takayama\n"
"Jason Gibbs\n"
"\"THE\" James Greene\n"
"Micah Stanley\n"
"Benjamin Lutz\n"
"David Correa\n"
"Knack Black\n"
"MJ DeFilippis\n"
"Michael Dunn\n"
"jaypo\n"
"Artemio\n"
"Mike Johnson\n"
"Nic Bumgart\n"
"Randall Bradford\n"
"Wilfried Olthoff\n"
"Ben Arblaster\n"
"Mark F. Boules\n"
"Tommy \"BigDaddy\" Grigsby\n"
"Michael Jones\n"
"Krynos18\n"
"Ken Clifford\n"
"mast nishant\n"
"Furqan\n"
"Douglas R. Carirngton, Excelibrium Inc\n"
"tulley\n"
"Felix A Leiva\n"
"abrhim\n"
"Ed Vanderpool\n"
"neo_renshaw\n"
"Anders Berggren\n"
"Morgan Zahra\n"
"Grillmon\n"
"Jorgen Rhode Jensen\n"
"jerrysimila\n"
"Louis Gonzales\n"
"j.d.wegner\n"
"Jared J. Fernandez, E.I.T.\n"
"KQuinn\n"
"Felipe Alvarez Diaz\n"
"SecretServiceOO\n"
"NOSaturn\n"
"Clayton Coelho\n"
"CJCCIE25938\n"
"Anton Antony Arockiasamy\n"
"Nicholas Alessi\n"
"George Young\n"
"Shaheer Osman\n"
"Mahmud Rahman Jami\n"
"Yassine EL Jazouli\n"
"IAmBuldog\n"
"Imtiyazuddin.Shaikh\n"
"Les\n"
"Xavier Alexander Gilmet\n"
"Hikmet Terzioglu\n"
"Daniel Wichlacz\n"
"Karl Newell\n"
"Nanda Kyaw\n"
"Rafael Bianco Nacif\n"
"Nick O\'Neill\n"
"BigRhino\n"
"jeff pliska\n"
"Richard Vincent Perez\n"
"Gilberto Cloud Galvan\n"
"Sid Lorenzana\n"
"David Kumar\n"
"Ellert Hardarson\n"
"Peter Prusinowski\n"
"Pacerfan9\n"
"Jason Alan Barretta\n"
"Tyler Pell\n"
"Dan Devlin\n"
"Travis Eberhardt\n"
"Jeremy Frush\n"
"Cisc0kid\n"
"Joe A. Lourenco\n"
"JeffA\n"
"Jacqueco Peenz\n"
"Siggi Bjarnason\n"
"Samuel Ajakaiye\n"
"Georgios Ntib\n"
"Bartosz Miklaszewski \n"
"pepso100\n"
"Digit Oktavianto\n"
"Vineet Bulbule\n"
"Rui Ruivo\n"
"DWPerry\n"
"Frantisek Pupik\n"
"Christopher Robert Sutton\n"
"Casey Ajalat\n"
"Riku Pessinen\n"
"g.montoanelli\n"
"Dave Thompson\n"
"Paul Knight\n"
"Schisme\n"
"Christopher Edwards\n"
"Karel Perez Castillo\n"
"ErrorCode67\n"
"John Perry\n"
"Nick Bonifacio CCIE #38473\n"
"Matt Humphries\n"
"Nathan Miller - Youngstown, OH\n"
"Kevin D\'Arcy\n"
"CelsoAraujo\n"
"Paul Sherratt\n"
"Francis Illuzzi\n"
"Lupe SIlva\n"
"Sukhdeep Kahlon\n"
"Scott K. Bridges\n"
"Demetrius Cagampan\n"
"Jon \"The Man\" Marte\n"
"Farhan Qazi\n"
"Craig Zambra\n"
"Alexey Zhigaltsov\n"
"Josh R Blaylock\n"
"davidbarclay@nlets\n"
"Katherine McNamara\n"
"T.J. Patterson\n"
"Cash_97\n"
"David Undernehr\n"
"Andrew Conway\n"
"David Earney\n"
"Joel Bergmark\n"
"Chris DiPaola\n"
"copeland\n"
"Blair&BenedictRepuyan\n"
"Titus\n"
"Damian Mainwaring Davies\n"
"Nathanael Kenyon\n"
"Ilia Korsakov\n"
"Michael Dubery\n"
"Alex Giunta\n"
"RSaleh\n"
"Tassos Chatzithomaoglou\n"
"Franklin Valdez\n"
"loop\n"
"Ulf Bye\n"
"giantbullfrog\n"
"Luke Russell\n"
"voke\n"
"CoCoHimself\n"
"Nicholas Andrew Critten\n"
"Mahrazi Mohd Kamal\n"
"Dmitry Stashkov\n"
"Bilal Issa\n"
"Momodou Sarr\n"
"Ryan Barrett\n"
"Robin Harteveld\n"
"DMSwope\n"
"Dave Noles\n"
"Jason Salomons\n"
"Luis\n"
"LIZHAO DING\n"
"Douglas R. Musser\n"
"Jason Vizmanos\n"
"Prakash Purohit\n"
"Rafique Ahmed Narejo\n"
"Rustam Davletshin\n"
"Krzysztof Andrzej Szukała\n"
"Dimitris Armenatzoglou\n"
"Torstein Mauseth\n"
"Marius Landsem\n"
"Caio Vianna\n"
"Nauman Tariq\n"
"ZAW THANT KYAW\n"
"Graham Redish\n"
"Kinopcs\n"
"Suren Tsaturyan\n"
"Dustin Max Calderon\n"
"Man Gao\n"
"Phillip A Camera\n"
"José Luis Berlanga V.\n"
"einfachgust\n"
"Paul Lampron\n"
"Alex Presse\n"
"Jacob Johnson\n"
"Adam Troxel\n"
"Tom Dastrup\n"
"Chad Abbs\n"
"John Rubin\n"
"Cory Light\n"
"Jason Rearick\n"
"Shamikh Irshad\n"
"Kier Fretenborough\n"
"Saravana Thoppay\n"
"Roulston\n"
"Axel Siöland\n"
"Zhang Liang\n"
"Pritpal Bisla\n"
"Thien-Tam TRAN\n"
"Philippe Tang\n"
"Muawia Yasin\n"
"ErikJWells\n"
"Alan Esterman\n"
"Aju Jose\n"
"Jason R. Rokeach\n"
"Yeyo\n"
"Daniel Maxwell\n"
"Mark C Bernier\n"
"Luis Francisco Rivera Ospina\n"
"In Memory of Ben Gibbs\n"
"Mirza Zeshan Baig\n"
"Marcel Loesberg\n"
"Santoshi Rajani Kumar\n"
"Sathursan Kantharajah\n"
"Richard Harris \n"
"Jon Kolman\n"
"Robert Magrath\n"
"suguita\n"
"Sergey Solodov\n"
"Juan Gabriel Delgado Picado\n"
"Andrei Matei\n"
"Andit Tjahjo\n"
"Ian Wandless\n"
"Ismael Mamadou\n"
"Patrick Donald Bostwick\n"
"BDG\n"
"Aouachria Nassin\n"
"vdeluca-BR\n"
"Tyler Milam\n"
"Carsten Ellermann\n"
"Stunod7\n"
"TrueNie\n"
"Brody Murfin\n"
"Steven Howard Kalupson\n"
"Woodard\n"
"tr4ck3r\n"
"Andrew Walding\n"
"Eric Geib\n"
"Mark D Rametta\n"
"Adam Pinkerton\n"
"Evan Fox\n"
"Richmond Porath\n"
"Bestone Kasoloka\n"
"Sergey Kuzyurov\n"
"Warren\n"
"Conrad J. Moore\n"
"Michael Molina Lasprilla\n"
"Erik White\n"
"Michael Torres\n"
"Omrelliug A Senoj III\n"
"Jacob Bolm\n"
"Beau Daniel Jones \n"
"Maikel Mantilla\n"
"Oscar Quinonez\n"
"Xavier Lario\n"
"Stefanio Lourenço\n"
"Aaron Robinson\n"
"Adrian Gibbons\n"
"Milan P\n"
"Kristian J Francisco\n"
"Daniel Gos\n"
"Joel Lawless\n"
"Sumit Kumar\n"
"CodyPorter\n"
"Chris Gauthier\n"
"Krishna Varma\n"
"Aidan Burnett\n"
"Matthew Clarke Gordon\n"
"Ali Youssef Ahmad\n"
"Lauren Brittany Taylor\n"
"Avery Abbott\n"
"WANG KAI\n"
"gila\n"
"Michael Molina Lasprilla Jr\n"
"Thomas J Tablada\n"
"Valentin Vanguelov\n"
"Mario Kool - Netherlands\n"
"Leigh Bogardis\n"
"Johnny T\n"
"RDB\n"
"Aaron P. Clark\n"
"najouaimtiyaz shaikh\n"
"Durmus -dprogrammer- Celep\n"
"Marc van Lier\n"
"Felipe Soares de Souza\n"
"ccie4451\n"
"Dustin L. \'Stonewolf\' Derry\n"
"Rich Johnson\n"
"Infrastructure Solutions LLC\n"
"Niklas Riddarlo\n"
"Waleed Alamodi\n"
"Adolfo Aksel\n"
"Schladi\n"
"Tomasz Kacprzynski\n"
"SHU2\n"
"Nathan Hitchcock\n"
"RefAndante\n"
"Ade Alder\n"
"Raphael THIERRY\n"
"Mr Morgan\n"
"AurieP\n"
"Jason Bishop\n"
"Pavel Kilipko\n"
"Chris Albert Beltran\n"
"Manny Franqueira\n"
"foolproofalibi\n"
"gregwards1987\n"
"Aaron Day\n"
"kmaslashdevslashnull\n"
"Leon Cassidy (NZ)\n"
"John Dowling\n"
"Ramon Jr Garcia Yu\n"
"zdenotim\n"
"Prasad Kunjeer\n"
"Felipe.Ferrugem\n"
"Simone Groce\n"
"Sinan Sulaiman\n"
"JEFF HANSEN\n"
"Dániel Okos\n"
"Phillip D Larson\n"
"Dickson Wong \n"
"Gaurav\n"
"becakgurun\n"
"Jun Liu\n"
"Mohamed Faisal CCIE#36553\n"
"Rose Parnsoonthorn\n"
"Jeremiah Land\n"
"Joh4xx0rn Andersson\n"
"Mike \"ChimChim\" Pritchard\n"
"Dan W\n"
"Dhanushka Chandrasiri\n"
"Steve Redford\n"
"Keith Shum\n"
"Sean McQuilling\n"
"Simon Given\n"
"Colm Kilmartin\n"
"Marius Geonea\n"
"SteveBeans\n"
"Kwaggaken\n"
"Ben Murray (@Cache22)\n"
"LaKellyEllaDustyMath\n"
"PotatoGim\n"
"Pikador*Hopto.hu\n"
"Mustafa\n"
"Liqua_Thrushbane\n"
"GNS34624\n"
"juan\n"
"Michael Klose\n"
"Tuan Huynh\n"
"Peter E Gregg\n"
"thosi\n"
"Salmanees\n"
"Renjith G Nair\n"
"Saqib Raza\n"
"Jose Ignacio Jorquera\n"
"Showpanda\n"
"Syed Mohd Mohsein\n"
"Eric Yoakum\n"
"Alex Beal\n"
"Ronald Boestfleisch Jr.\n"
"Moussa BRAHIMI\n"
"Lee Gardner\n"
"Robbins\n"
"PaulEyre\n"
"Geir Hogberg\n"
"Dimitrios Misichronis\n"
"Boris Bozicevic\n"
"Saleh Batouq\n"
"William Walton\n"
"wolly\n"
"Gopinath v\n"
"Hamad Khalaf\n"
"David Dietrich\n"
"Jericho Gutierrez\n"
"Cameron Exley\n"
"Daniel Grubbs\n"
"Stephen Savaso\n"
"Marcus Morais\n"
"Roilen Palmer\n"
"David Bradwell\n"
"Adrian Lizuniga\n"
"Thomas Meinhardt\n"
"Mohammed Arshad Irfan\n"
"Kevin R. Owens II\n"
"Xiaodong Zhang\n"
"Suman Saha\n"
"GoldenBough\n"
"Jeff Nagel\n"
"Paul Regan\n"
"Andrew Haase\n"
"Gregory Bruccoleri\n"
"Ian Tuten\n"
"Joshua Senft\n"
"Pierre \"Ghostdog\" Blanchet\n"
"Chris Brown\n"
"Luis Felipe Sass\n"
"Dimitar L. Yosifov\n"
"Matt Heim\n"
"Codey Oxley\n"
"J.D. Wegner\n"
"AJ\n"
"Brannen Taylor\n"
"Billy Calderon\n"
"Carl Thompson\n"
"Alexandre Paradis\n"
"Fahad Hamid Q Aljuhani\n"
"Bruno HAON\n"
"AerOne\n"
"Manuel Rojas\n"
"Jason Brammer\n"
"Jason M Heath\n"
"Christopher Bellman\n"
"Jay Drennen\n"
"Adam Born\n"
"Bootcamp20\n"
"Fabrício Pimenta de Avila\n"
"Stephen Lynch\n"
"Olivia Frances Potter\n"
"Mitch Dolan\n"
"Seer Snively\n"
"Taígo\n"
"zerny\n"
"Kevin Sheahan\n"
"khurshid hassan khan\n"
"Marcellus Hunt\n"
"Fabricio Neves\n"
"Hany Garass\n"
"Bryce Nicholls\n"
"Hristo Neykov\n"
"Leart Sapun\n"
"Tony M Taylor\n"
"Scott Holwerda\n"
"Troy Perkins\n"
"Mason Feuhrer\n"
"Jonathan W Cole\n"
"Pat Golob\n"
"Pendragon\n"
"Stuart(big Stu) Cameron\n"
"FernandoLuisJanuario\n"
"Tim Davis\n"
"Eric John Fiedler\n"
"ciscopotato\n"
"TJ Plummer\n"
"Rawhide\n"
"Brent \"B\" Stevenson\n"
"Kyle Byrne\n"
"Hutch Theriault\n"
"Tyler Robertson\n"
"sharifraaf\n"
"thepacketgeek\n"
"Matt Hill\n"
"Zayed Syed Bukhari\n"
"Joe Uhl\n"
"Eric Sundquist\n"
"Jason D. Ladd\n"
"Brooke Lexi\n"
"Dave Kretzmer\n"
"Mike Gatti\n"
"G. David Malner\n"
"Evelyn Bowman\n"
"Stephen \"Mr. Poker\" Schroeder\n"
"Christian Fernandez Herrero\n"
"Sten-Erik Winborg\n"
"Zhengzhong\n"
"jose olivo\n"
"Julian West\n"
"Marek Kupsta\n"
"Albert PR\n"
"Lucian Cretu\n"
"dhuzen\n"
"nycsilvaj\n"
"Alex Nieves\n"
"Anthony Luis Cirillo\n"
"A318A\n"
"Brian Bristlin\n"
"Judd\n"
"D. Paul Hemphill\n"
"Trevor Roberts Jr\n"
"Genaro \n"
"Justin Seabrook-Rocha\n"
"Ramogpal Reddy M\n"
"Ramgopal Reddy M\n"
"Anthony Smith\n"
"Amhar Saputra\n"
"Fred W Schulze\n"
"Mojoman\n"
"Edwinrg00\n"
"Bruno Fagioli\n"
"Diego Morandini\n"
"Garcia Rice\n"
"davenaisbitt\n"
"Bharath Kumar Sole\n"
"Chad Routh\n"
"Daniel Dias\n"
"Robert Ankeny\n"
"Dean Stamler\n"
"Ofer Ben Zvi\n"
"Marcus Beaman\n"
"Jakub Pullmann\n"
"Larry Nicholson\n"
"chekoceronn\n"
"GeorgeTrip\n"
"Leighton Andrews\n"
"James P Feely\n"
"Ed Walsh, Jr.\n"
"Dennis Janak\n"
"Sylvain B\n"
"Eli Haiby\n"
"CiscoCowKiller\n"
"Wong Khai Loon\n"
"Mike Pierce\n"
"Rodrigo de Paula Cordeiro\n"
"steveo\n"
"droeseler\n"
"Michael Callahan\n"
"christy b mathai\n"
"Nicholas Nelson\n"
"Shawn Cunningham\n"
"Randy L. Betke\n"
"Nuno Cruz\n"
"Hamid Ben Zaina\n"
"Janet Knotts\n"
"Eduardo Acuna Salas\n"
"Shamal Weerakoon\n"
"Scott McAndrew\n"
"ajmatson\n"
"Ramin Asghari Ardebili\n"
"Joel Jones\n"
"True St. Thomas\n"
"Joshua Baker\n"
"Feix A Leiva\n"
"sirclicksalot\n"
"Hovik Mosessi\n"
"NguyenTan512\n"
"John M. McCreight\n"
"Philip Weekly\n"
"Dusty Roy\n"
"CFESH\n"
"iWasimAbbas\n"
"cstizza\n"
"Hans Ramdien\n"
"Itay Shem-tov\n"
"dakidfresh1\n"
"Marcelo Pires de Souza - MAPIS\n"
"Daniel A. Zoquier\n"
"Mihael Hutton\n"
"RobPrescott\n"
"Vinicius Guimaraes\n"
"Kaylee May Fox\n"
"DannyRodriguez79\n"
"Nicholas Wysocki\n"
"Tyler Potenberg\n"
"Gabe Rivas\n"
"Keunwoo Nam\n"
"Aldo Francescon\n"
"NetJimB\n"
"Jihan\n"
"Bryan Cordoba Sanchez\n"
"Anton Lindman\n"
"Thomas Siczek (Poland)\n"
"lblakesley\n"
"Mike W. Burks\n"
"James Stephens\n"
"@MacBachi\n"
"Michael Higgins\n"
"Rajendra Thuvarakan\n"
"Robert Lund\n"
"Christopher Lee Shanks\n"
"Brian30024\n"
"Dan Tedrick\n"
"James Bothe\n"
"lewiryan\n"
"jecrane\n"
"Titus Warui\n"
"Aditya Limbu\n"
"Erik Szlaur\n"
"Bishop Betteridge\n"
"Garth \"Stogyman\" Fish\n"
"Dr Vivek Bannore\n"
"Tony Bushong\n"
"Szymon Kufel\n"
"Muzeyen Hassen\n"
"Blacho\n"
"ErmiasBekele\n"
"Jose Miguel Montoya Sierra\n"
"Greg Murphy\n"
"Robin Williams \n"
"RichieB\n"
"RFK786\n"
"Edson Raya\n"
"Nathan J Downes\n"
"Eduardo Cardoso\n"
"Julian Ben\n"
"Ethan William Hussong\n"
"Richard Rigby\n"
"Gage Brewer\n"
"Adeola Adegbolagun\n"
"Faith Solutions pvt ltd, Maldives\n"
"Steve Cameron\n"
"Kevin Le\n"
"hnamanh \n"
"Jesse Stilwell\n"
"Chandrasekar Ramachandran\n"
"Lee Shouse\n"
"Kyle \"Mr. CrossFit\" Moschetto\n"
"Engr. Jeffrey B. Anarna\n"
"Nicholas D. Yates\n"
"Stefan Nagy\n"
"Jason DeGroote\n"
"Jeremy Corkery\n"
"Harry Lewins\n"
"Ashwinkumar CS\n"
"Rahul Long\n"
"Cloyd Crisostomo\n"
"Ivan Hollins\n"
"Eslam T. Mohamed\n"
"Roger Morehouse\n"
"Darrel Perucho\n"
"Trent Scholl\n"
"Dinesh Seyyadri\n"
"Joe Frixon\n"
"Thineshan\n"
"Gabriel Ciocan\n"
"Eric Stratton \"OTTER\"\n"
"ALEKSANDR ZASADYUK\n"
"RStuivenbergNL\n"
"MattE\n"
"Peter Joseph\n"
"Silviu Romonti\n"
"Haidar Noah \n"
"Alexandre Gnutzmann\n"
"Robert Mikołajczyk\n"
"Francesco Benucci\n"
"Eduardo Luditza Quintal\n"
"Nirav Dave\n"
"Luciano Barros\n"
"Brewhound\n"
"Tedel Baca\n"
"Odenike Taiye\n"
"Myash\n"
"DreamVision Networks LLC\n"
"Pablo Lues\n"
"Rayhiyaan\n"
"Stephen R Catto\n"
"Petr Bonbon Adamec\n"
"Arek Wrobel\n"
"Jasper Brouwer\n"
"Fyase\n"
"Richard Rebman\n"
"bgardner15\n"
"LishanMirando\n"
"Paul Vila\n"
"Juan David Ruiz\n"
"John_Shoffeitt\n"
"Jason Punda\n"
"Radoslaw Gorski\n"
"AJ NOURI\n"
"Mauro de Sousa\n"
"Alan Young\n"
"Marcin Kapturski\n"
"Ban Wong\n"
"AndresZ\n"
"configbytes\n"
"Edwin Campbell\n"
"Omberai Dzingirai\n"
"Felippe Dias\n"
"SOSSOUGAH ALEXIS\n"
"Achiraf CHARIF\n"
"Junho Lee\n"
"Bavo Seesink\n"
"Brent Sieling\n"
"Nicholas G Zerwig\n"
"Renato de Pierri SP Brazil\n"
"EdWillson\n"
"Bladerunner\n"
"Rob Boyko\n"
"Jonathan Isley\n"
"Tyler Ruckinger\n"
"Lachlan Holmes\n"
"Ariel Pablo Roza\n"
"Shahin Ahmadi\n"
"zuozhenqing\n"
"Ratanajantra\n"
"Irfan Zaffarulla\n"
"David P. Schuetz\n"
"Tonycarreno\n"
"Bruno Watt\n"
"Guy Winfrey\n"
"PHAung\n"
"Berny Ramirez Gonzalez\n"
"Liao Hsueh Hung\n"
"Derrick Nguyen\n"
"Dave Powers\n"
"Milap Joshi\n"
"Noel Bravo\n"
"Ayoob Eshaq\n"
"Binuraj Rajappan\n"
"Richard Wambua\n"
"mocotel\n"
"Paul S Payne\n"
"Anand Shah\n"
"Dawid Mitura\n"
"Kryssar approves of this software!\n"
"Bruno Galhoufa\n"
"David Poulton\n"
"Che Ge\n"
"Saed Zahedi\n"
"thejapa\n"
"PJ Archer\n"
"Marcos Yanase\n"
"cwmdavies\n"
"Wellington Ferrraz\n"
"Derek Nückel\n"
"alexandros\n"
"Jonathan Stevenson\n"
"Bolanu Andrei\n"
"M.Elhassan\n"
"EBRAHIM DARYA\n"
"COLTAR\n"
"Eduardo Honorato\n"
"Marcelo Lyra Bastos\n"
"George G\n"
"Clint Collins - Oziboys\n"
"Pradeep George\n"
"Steve Kittinger\n"
"cmbellgardt\n"
"Ramesh\n"
"Nicki Kristensen\n"
"Jose D. Mora\n"
"Seydou Belemvire\n"
"J. Pell\n"
"Javed Rehman\n"
"lwdbos\n"
"Chun K Lam\n"
"J. David Gregg\n"
"Chris Lääng\n"
"Armstrong Ndemalia\n"
"Shoaib Zahidi\n"
"Rogerio Mota\n"
"Ignacio Hernandez\n"
"Marcin Krajnik\n"
"ChettiandNatti\n"
"Elias Sisya\n"
"Jacob Henson\n"
"Mike McPhee\n"
"Joe Kraxner\n"
"A Witherspoon\n"
"Rasmus Elmholt\n"
"Tom Penrose\n"
"Ivan Velasquez\n"
"Michael Quill\n"
"carlitosP\n"
"larrybpsu\n"
"Gabriel John\n"
"Blake Barnett\n"
"robeson\n"
"Robert Ramoutar \n"
"Anthony J Wathen\n"
"Jayir Mansuclal\n"
"Luke Evans\n"
"VahidRezaMehrasaGNS38826\n"
"Damian Roche\n"
"Gerry\n"
"Jan-Erik Svendsen\n"
"YuRJ\n"
"Michael Crouch\n"
"NightmareFH\n"
"Clemens Dubbick\n"
"HishamHowege\n"
"Tome Spirit Petkovski\n"
"Osmo\n"
"Japd\n"
"Dane Freeman\n"
"netwrklab\n"
"Simon Boam\n"
"ComputerRick\n"
"Gabriel DeMarco\n"
"GigaRacer\n"
"Cat Rodery\n"
"Jeff Breon\n"
"mikynik\n"
"Stefanio Lourenco da Silva\n"
"Denny Höglund\n"
"Steven Vlahakis\n"
"Przemek Rogala\n"
"Harlen Zuluaga\n"
"Ronaldo Echevenguá\n"
"Christian Leonardelli\n"
"高渐星\n"
"earth\n"
"KyungRyul\n"
"Dmitry MBR Sokolov\n"
"mohammed fouad shat\n"
"Josh Gomez\n"
"James Packard\n"
"Ben Millington-Drake\n"
"rluisem\n"
"Michael McNamara\n"
"Tubejockey\n"
"Nick Henderson\n"
"esc0\n"
"Bradford Wright\n"
"Frank S\n"
"Vitaliy Zinatov\n"
"Peter Tavenier\n"
"Guillermo Rodriguez\n"
"Charles Cecela\n"
"ali abdulsalam\n"
"NSandone\n"
"Ismael Mustapha\n"
"Jacob Uecker\n"
"plex\n"
"ciscoraz\n"
"Mehul Patel \n"
"Abbas Khalil\n"
"JAVID IBRAHIMOV\n"
"Brandon Porter\n"
"dragon\n"
"Thor\n"
"Lukasz Teodorowski\n"
"John Montgomery\n"
"Erin Clay Smith\n"
"James David Schell\n"
"Antonello Moneta\n"
"IE-Guru\n"
"Alex \"P.h.o.t.o\" Drozdov\n"
"Amelie Haley\n"
"Leonardo Migliorini\n"
"Ole Morten Aaslund\n"
"Anishkumar Kurup\n"
"Joseph I Gilbert\n"
"Mike Scioscia\n"
"Christopher David Keith Garrett\n"
"Leonardo Magbuhos Jr\n"
"Francisco Alfaro\n"
"Jude Kenneth Bickerton Lymn\n"
"ir0ck\n"
"BrettWCook\n"
"Ayokunle Adekanle\n"
" HsinHong,Huang\n"
"Tomi Olamigoke\n"
"IrishFighter\n"
"Dennis Dumont\n"
"Josh Micone\n"
"Antonio B Gerbino\n"
"F7EE\n"
"Jon Sawyer\n"
"Brian Matson\n"
"Erik Burdeaux\n"
"Andre Jones\n"
"Karl Fukushima\n"
"Rajesh.R\n"
"Mohamed Furqan Firdous\n"
"Sean B Crites\n"
"Eulic McGee\n"
"Noel Rivera\n"
"Paul \'Twig\' Firth\n"
"Rob Michel\n"
"BrockSouza808\n"
"Kravnik\n"
"jajulius\n"
"Marcin Zablocki\n"
"Gian Karlo Andrade\n"
"Aaron Picht\n"
"XIAiYIP\n"
"Maciej sk84life Smolak\n"
"Ashleigh Wilson\n"
"Robin Harper\n"
"Keith Barker\n"
"Cristian Sima\n"
"AKAHAW\n"
"Rodrigo Dominguez\n"
"Joejaders971\n"
"Joe Buemi III\n"
"McAnder\n"
"Sauceboss24\n"
"POORNA SHASHANK PARVATALA\n"
"Rutvij Brahmbhatt\n"
"Axis\n"
"V O\' Rourke\n"
"Karim Al-Azzawi\n"
"Eduardo Romero Peña\n"
"Vandeth Nop\n"
"Mortch\n"
"Moidin Kunhi\n"
"Wei Shu\n"
"Patrick den Hoed\n"
"Kurauone Mahachi\n"
"Kurauone B. Mahachi\n"
"Catalin Butiseaca\n"
"Debayan\n"
"Gordon Fountain\n"
"Saikeo Kavhanxay\n"
"Danilo Bottino\n"
"Viliam Schrojf\n"
"Kumaralingam\n"
"LeighHarrison\n"
"Eduardo Campos\n"
"John Voirol\n"
"John Ellison\n"
"Juan Larriega\n"
"Steven Perée\n"
"Aivis Purvinsh\n"
"Rizwan Hanif\n"
"Reynaldo Gonzalez II\n"
"Pipeiden\n"
"Michael Aird\n"
"@espennilsen\n"
"Denilsod\n"
"gizmo\n"
"Josh1984\n"
"Tobi Adesanoye\n"
"Randy Damron\n"
"Pedro Collado\n"
"Arwa\n"
"Nazish Ahmed\n"
"Nicolas POIRIER\n"
"Kostas Gerakaris\n"
"Peter\n"
"John Jeremy Skog Sullivan\n"
"Todd Snyder\n"
"Tony Virath\n"
"Oscar Reyes Garcia\n"
"Arturas Zalenekas\n"
"javalogicuser\n"
"Andrew Baughan\n"
"P. JH. Pelletier\n"
"MES\n"
"Marlon Zackery\n"
"Loyd W. Altebaumer\n"
"Josh Hitt\n"
"GNS LLC\n"
"Brent Andrews of Trinidad & Tobago\n"
"Marek Macovsky\n"
"Stumper\n"
"Jan Sidlo\n"
"Vinesh Raniga\n"
"Denis \"GRinch\" Karpushin\n"
"Christos Cambanellis\n"
"Sylvester Kolakowski\n"
"Liam and Quinn Densmore\n"
"Jacco Rorman\n"
"Rüdiger\n"
"Rc7200\n"
"Nicholas McManamy\n"
"Elvin Baez\n"
"Dai Yiru\n"
"Matt Engelbrecht\n"
"gerald greer\n"
"DR MAHESH KUMAR\n"
"Matthew Longwell\n"
"Gordon Murray\n"
"Steven A. Wilson\n"
"Jose Bracero\n"
"ireDave\n"
"Torgny Holmlund\n"
"Craig Timchak\n"
"John Mathews\n"
"Joshua P Roybal\n"
"scoty1\n"
"Oscar Dotel\n"
"Aaron Bauman\n"
"CIOBY\n"
"Alexey Zhukov CCIE#36822\n"
"Omar Coeto Hernandez\n"
"Jonathan King\n"
"Carlos Pata Oi! Munoz\n"
"Tiffany A. Whittington\n"
"Martin Steele\n"
"Dizzydonut\n"
"gutonjusa\n"
"Jay Shepherd\n"
"Song Kim\n"
"Charles A Moore\n"
"Kevin Louie\n"
"Shane Walton\n"
"Jan Barkawitz\n"
"Chris Stewart\n"
"Cole Two Bears\n"
"Taz\n"
"Chris Adams\n"
"Brian Rodriguez\n"
"Johnoel Stafford\n"
"Janmesh Jani`\n"
"Jeff Grimm\n"
"John L. Ricketts, Ph.D.\n"
"George Watkins\n"
"Abdul Bajaber\n"
"Brandon Bennett\n"
"Daniil Yanov\n"
"Al Reid\n"
"Rob the animal Steers\n"
"cargostud\n"
"cjf de kievith\n"
"Aaminah H Rashid\n"
"Eric Park\n"
"Muhammad Shehab\n"
"Amit\n"
"Aki Pap\n"
"Michael Doe\n"
"Todor Bozhinov\n"
"CraigThomsonSCOTLAND\n"
"MarkThomas \n"
"Eric Davis\n"
"Yuta G. Shimizu\n"
"Arthit Chinnachot\n"
"5UCC355\n"
"Patrick van Bennekom\n"
"Ricardo Oto Engelmann\n"
"Marcus B. Ullrich\n"
"Douglas Wagner\n"
"Artiom Lichtenstein\n"
"Chad Pembleton\n"
"Zhandos Shalmanov\n"
"Miguel Fuentes Gonzales\n"
"JamesCawte\n"
"Marcello71\n"
"Ali Fadhl\n"
"ALI FADHL\n"
"DAlicea\n"
"Dion.I.Seuss\n"
"Antoine Reid\n"
"Richard Kok\n"
"Soe Naing \n"
"Simon Jack\n"
"Nick Morrison\n"
"Dennis R. Miranda\n"
"Rameshbabu\n"
"Bryan Kosten\n"
"Tim Potticary\n"
"Thomas Knowles\n"
"el3ctron\n"
"Gerard.v.Geffen\n"
"lombers\n"
"waqar\n"
"kakalo\n"
"Michael Magnus\n"
"Brent Walter\n"
"Gnani\n"
"Ishan Arora\n"
"Dennis Pham\n"
"Emanuel Jose Hernandez\n"
"Alex Dumitrescu\n"
"Jonathan Els\n"
"Modathir Bashir\n"
"Todd Albiston\n"
"Daniel Morales (RCL)\n"
"Flaviano Reis\n"
"Mike H.\n"
"Elijah S. Rodriguez\n"
"imperorr\n"
"Phill Johntony\n"
"ChulhansOfTrinidad\n"
"Celia\n"
"Christopher Fata\n"
"Vince Ward\n"
"Ricky Innes\n"
"Nizar Hamoudah\n"
"Salman Chougle\n"
"Darpan Gadhiya\n"
"Watashi No Kamei\n"
"CCIE#18444\n"
"Kevin Eze\n"
"strontiumdog\n"
"David Alpizar\n"
"Suelzwurst\n"
"Victor M Hanna\n"
"Abdullah\n"
"Sahir Algharibih\n"
"Jimmy\n"
"Andrew Victor Godfrey\n"
"tom.meadows\n"
"laljohnson\n"
"ShawayneD\n"
"Jason Huseman\n"
"Kristian L Brown\n"
"Syed Abbas Ali\n"
"RMasuda\n"
"Hooman.Abbasi\n"
"Marty Nelson\n"
"Harjinder Singh\n"
"rjbanker\n"
"Josh Barenthin\n"
"TheSorrow\n"
"Mark Ducote\n"
"CedarLee\n"
"Mayank Nauni\n"
"Richard (Tricky) Town\n"
"Mathieu Poussin\n"
"Constantin Mihuta \n"
"LikeIT\n"
"Olrik Lenstra\n"
"Georgios Pelekanos\n"
"FONTAINE Aurélien\n"
"Mohamad Yehia Siblini\n"
"Bryan Lurer\n"
"Gianremo Smisek\n"
"Mark (Noof) Ducote\n"
"Michael Routledge\n"
"d0gbert\n"
"HollisDarby\n"
"Bert\n"
"Mohammed Kamal\n"
"Brandon Glaze\n"
"FlonMaloku\n"
"Tameem ALjanabi\n"
"Fabrizio Micucci\n"
"Paul Burmeister\n"
"Ian Bartels\n"
"kalo.evang\n"
"Yan Cote\n"
"Nicholas Vogtman\n"
"Edgar Fernando Monroy\n"
"HIM\n"
"igat78\n"
"Alfredo Pasigan Salipot\n"
"Artem N Kuznetsov\n"
"Siem Hermans\n"
"Tony Marrazzo\n"
"Josh Lowe @ UOIT\n"
"ziaul\n"
"Bahman Arbabzadeh\n"
"Hussain A.Aziz Al Alwani\n"
"githure\n"
"Stephen Snively\n"
"David Gierke\n"
"James Julier\n"
"David Bell\n"
"Ashkan\n"
"Kamaleldin Sadik\n"
"Mike Dupuis\n"
"Jacques Baribeault\n"
"Ait bennour\n"
"Abrhim\n"
"N1029676\n"
"Major Dallas\n"
"Pedro A. Collado\n"
"Luca Zanin8\n"
"Karl Billington\n"
"Jared Murrell\n"
"RhoninXPowers\n"
"Ali Abed\n"
"Lee Hutchings\n"
"Mesikoo Kamali\n"
"Kanwar Muhammad Zeeshan\n"
"Dan West\n"
"Joshua Hublar\n"
"Hameed Alzahrani\n"
"Kev Ward UK\n"
"Jon Waters\n"
"Mika Ilvesmaki\n"
"Patrick McMichael\n"
"Ron Cannella\n"
"James McCutcheon\n"
"Russel Crozier\n"
"ValMor\n"
"Maximilian Lehrbaum\n"
"RiCor874\n"
"Russell Cassidy\n"
"Sherwin A. Crown\n"
"Jim \"The Great Mullet\" Rethmann\n"
"Ovunc Alper Mert\n"
"Glenn Tobey\n"
"kjsrandhawa\n"
"LeeJBurton\n"
"Zygimantas Vrubliauskas\n"
"Mike Manning\n"
"Andrew C Bodine\n"
"Edward Beheler\n"
"David Monteclaro\n"
"yasir\n"
"Matt Wardle\n"
"Kevin Menzel\n"
"Jude Wu\n"
"Richard Alvarez\n"
"Jay Shah\n"
"Alexander E. Deems AKA TSDOI\n"
"SOMANDAC\n"
"Mark Degner\n"
"Paul McParland\n"
"cham00ko\n"
"Jeffrey Charlite\n"
"bootcamp20\n"
"Nilson Medrano\n"
"LoveKatyaPa\n"
"Sanjay Kumar Patel\n"
"Kirk J Iles\n"
"Munyaradzi Manonose\n"
"Munyaradzi Ncube\n"
"Afrasinei Doru Mihai\n"
"patrez\n"
"Tsepo Lesibe\n"
"Curtis Stanley\n"
"Alex Guzzardo\n"
"WILLIAN PEREIRA VIEIRA DA CUNHA\n"
"Michael Camara\n"
"Prankit Jain\n"
"Michael Field\n"
"Edward Fahner\n"
"Tom Jehn\n"
"Thomas Tablada\n"
"faizkhanzada\n"
"Lonardoni\n"
"Hugo Klein Nagelvoort\n"
"MarkP\n"
"Michigan J. Frog\n"
"Cyberbird\n"
"pingaq\n"
"Christopher Michael Murray\n"
"Shailesh R Patel\n"
"WTRoss\n"
"Chad Stambaugh\n"
"Daniel Toland\n"
"Damian Baran\n"
"Ashley Georgeson\n"
"DaveNaisbitt \n"
"Anthony Burke\n"
"Kevin Denton\n"
"Fordo\n"
"Julios Ñahuero Balbin\n"
"Richard Sias\n"
"Ibon Arretxe\n"
"EODChief\n"
"Garrett W Taylor\n"
"Sulaywan Benoit F. TUFFERY\n"
"Nionios\n"
"obsidianlock\n"
"Kamal Osman\n"
"Sachin\n"
"woodard\n"
"SCarriveau\n"
"Jason Brake\n"
"Kelvin BigEasy Stallings\n"
"Oufaki_Boulaki\n"
"Paulo Pinto da Silva\n"
"Angus Lepper\n"
"Pawel Sniechowski\n"
"BCQuest\n"
"Robin - Digininja\n"
"networker\n"
"Chris O\'Shea\n"
"Andrés Rosales Luna\n"
"Alvarito\n"
"John Tobin\n"
"Jeff Cameron\n"
"Gauntlet Smith\n"
"Catpuking\n"
"Voujon L.\n"
"Deez\n"
"Alex DeAraujo\n"
"Manny Garcia\n"
"Garry Baker\n"
"Silvio Pinheiro\n"
"Faisal\n"
"doncompu\n"
"Happy Hanna\n"
"dcsmwy\n"
"LGSalsero\n"
"Liudas Duoba\n"
"Jamie Wells\n"
"Alfredo Vera\n"
"eiasan\n"
"Mohammad Ali Naghval\n"
"Vinicius Machado\n"
"Mike & Cindy M\n"
"MikeCarel\n"
"barkusdawg\n"
"Robert McKee\n"
"Charles Ferguson\n"
"Tonicarreno\n"
"Steve Blizzard\n"
"Osamuyi imade\n"
"McNeelyYoung\n"
"Adolfo Cabrera\n"
"Megajove\n"
"Francisco R. Butera\n"
"Carl Baccus\n"
"Dallas Bishop\n"
"Alexei Nikolaev\n"
"Houssam Chahine\n"
"Charlie Gombert\n"
"Praveen Kumar \n"
"Mahmood Naiser\n"
"tudor evil1\n"
"ak2766\n"
"Alex Clipper\n"
"Bob Benson\n"
"Kakos\n"
"Andre Jonker\n"
"LeeWray-CCIEtoBe\n"
"GMcCray@Orange\n"
"Phil Chapman\n"
"Ze \'Qzx\' Qronique\n"
"Trevor Conkle\n"
"Mike Mikeson\n"
"Krzysztof Miłek\n"
"Joseph Becker\n"
"Don Floyd\n"
"Andrew Lemin\n"
"Kimmer\n"
"Cotomacio\n"
"MarkF\n"
"Ronak Maniar\n"
"Deepak Bysani\n"
"Mohau Nchake\n"
"Roderick Williams Sr.\n"
"Bob Lyons\n"
"Kieran123\n"
"Blaine G\n"
"ikehoff\n"
"Tony Dennis\n"
"AbdulWahabSoomro\n"
"ILIAS BATSILAS\n"
"Omar Bounoun\n"
"Leandro Pina\n"
"ZorprimeSDF1\n"
"Kevin Fisher\n"
"Andre Heyliger\n"
"jethrocruz3\n"
"Nnamdi Anyanwu\n"
"nazir8121\n"
"Karim Souissi\n"
"Ramon Jr Yu\n"
"Peter Hinds\n"
"MarkMalone\n"
"Francivaldo B. Cavalcante\n"
"Roberto Otero\n"
"GersonEx\n"
"Mark Horsfield\n"
"dsgm\n"
"New Generation Network NGN\n"
"Derrick Lowder\n"
"Daniel López\n"
"ilya zindman\n"
"jerryjmcconnell\n"
"Craig Munro\n"
"WeirdLilMidget\n"
"Jonathan Miller\n"
"MEHDI BASRI\n"
"Andy Burnett\n"
"Doug George\n"
"Faheem\n"
"Mike Rozman\n"
"Maz Shah\n"
"Bulost\n"
"Dennis Bailey\n"
"Craine Runton\n"
"AJMottola\n"
"irwandi.guritno\n"
"ManuelR\n"
"tchilljr\n"
"Fernando Mauro\n"
"Rafael Meira Salomao\n"
"William A Engel\n"
"Kevin Seales\n"
"Sérgio\n"
"Sergio Pereira\n"
"JerryCakes\n"
"Mukom Akong TAMON\n"
"Roger M. Jones\n"
"WaltDoGG\n"
"Ashish Batajoo\n"
"gianghip\n"
"Hakan \"deegan\" Bjorklund\n"
"satmatzidis\n"
"PanosK\n"
"Frantisek Cmuchar\n"
"MJPollard\n"
"Jace Thompson\n"
"Shawn Nay\n"
"Aaron Paul Clark\n"
"Jeremy Furr\n"
"kcriscuolo\n"
"jaberzadeh\n"
"Haridralingam\n"
"Brett W. Cook\n"
"Michael-Vogler\n"
"pkaldani\n"
"James Grace\n"
"Andres Bolaños Rojas\n"
"Bradley Stevens\n"
"Renaud Pellerin Levesque\n"
"Bizkitcan\n"
"Roberto Velez\n"
"912m0fr34k(Digvijay Singh)\n"
"TRAVIS K\n"
"Jamal Mohamed Ahmed Aly\n"
"Ashwin Ramdin\n"
"MarcoEnrique\n"
"Ray Manriquez\n"
"Jason \"jabreity\" Breitwieser\n"
"Ruben Genaro Villanueva\n"
"Robert Bowen\n"
"Brad Peterson\n"
"Renaud Pellerin-Levesque\n"
"Johnathan Andrew Greeley\n"
"dbamurray\n"
"EMAD ABID\n"
"Dustin Sammut\n"
"Alexander Bauer\n"
"irvinleovillanueva\n"
"Munir B. Abdullahi\n"
"Dag H. Richards\n"
"ShamYavagal\n"
"Fraser Hugh Munro\n"
"shankerbm\n"
"Andrew Dashner\n"
"Nathan Gautrey\n"
"Brian Eckblad\n"
"Riley C. Robertson\n"
"TuRaNiK\n"
"Sergio Martinez\n"
"Brandon Gonzalez\n"
"amrinder kamboj\n"
"Shaun Parrish\n"
"Damian Wikkeling\n"
"Justin Neisler\n"
"Ian J Adams\n"
"NHH\n"
"NAHH\n"
"Walshburg\n"
"Jamie Cresdee\n"
"Nash\n"
"Jeff Taylor\n"
"SomeScaryGirl\n"
"Mike Melo\n"
"Vlad \"Rivera\" Ivanov\n"
"racole\n"
"VDR\n"
"Christopher H. George\n"
"Colby Collier\n"
"Patrick Ley\n"
"JHBrashear\n"
"Lochocki\n"
"Alberto González Rosales\n"
"HaOsLsE\n"
"Rich Cortes\n"
"2affa7\n"
"Dustin Goode\n"
"Daniel Huber\n"
"Cory Thomas\n"
"Michelle Laydon\n"
"Jason-Liang\n"
"fangfangjt\n"
"Lateef Law\n"
"Andy Goodley\n"
"Nicholas Zerwig\n"
"Matt Long\n"
"Matthew Allen Weber\n"
"Chukwuemeka Ezekoka\n"
"Neil A. Briscoe\n"
"Roman Chikunov\n"
"Jesse Barto\n"
"Alexander Shivers\n"
"HSIEH CHANG MING\n"
"Alfred B. Dullano\n"
"Jez2cool&Bebo\n"
"Tom Huynh\n"
"Will Kerr\n"
"Granite Ledford\n"
"Nicholas Strov Alexander\n"
"Sajjad Ahmed\n"
"Theo Hill\n"
"scb\n"
"Andrew Roderos\n"
"Tofig Ahmed\n"
"Mark Buono\n"
"Eric Coleman\n"
"Jessie Carabajal\n"
"Marcel _N:L:\n"
"Moshe.Itzhaki\n"
"Carl Joseph Ellement\n"
"aelso.silva\n"
"gtz\n"
"Adam Knutson\n"
"Augusto Castelan Carlson\n"
"Erik\n"
"Michael Patton\n"
"Thomas Anderson\n"
"Geoff Thornton\n"
"Tzalas Konstantinos\n"
"Miguel Rosa\n"
"Daniel Demers\n"
"Jeremy Goyette\n"
"N. van Zwieten\n"
"Windsor\n"
"Brendan Mason\n"
"David J Smith\n"
"Syed Ahmed Quadri\n"
"Anders Cai\n"
"Bonno\n"
"Rob Looby\n"
"Olivia the Cat\n"
"Travis Heinz\n"
"pranay solanki\n"
"skennedy\n"
"ChaXueHer\n"
"Even Hernandez\n"
"Mark Shield\n"
"POPQUIZZZ\n"
"Thomas Pedersen\n"
"James T. Walston Jr.\n"
"James Rosenthal\n"
"Jesus Garcia Chavez\n"
"Matthew Songy\n"
"Kevin Groves\n"
"Warakorn Sae-Tang\n"
"Roberto Carratala\n"
"Paul Harris\n"
"Hasan Aljumaily\n"
"skHosting.eu s.r.o.\n"
"EJAZ MUNIR\n"
"Luke Tidd\n"
"Jason Roth\n"
"Ali Fard\n"
"RechieCebreros\n"
"Samaan AlKhaldi\n"
"Abhijeet Kunde\n"
"Aldin Ringor\n"
"Shine Sandhu\n"
"Øystein Gulliksen\n"
"Nick Travalini\n"
"Joon Park\n"
"M.Philip\n"
"Kamron\n"
"MikeCindyAkinaAlek\n"
"Bjoern Drewes (Verfriemelt)\n"
"Spencer Onsongo\n"
"Corey_age_00110000\n"
"Jegan-Malaysia\n"
"David Okeyode\n"
"Md Rasel Mia\n"
"Jesse Loggins CCIE#14661\n"
"Jacob Bennefield\n"
"Mohamed Nizwan\n"
"Ali\n"
"yes\n"
"Andrey Bozhko\n"
"GHaKK\n"
"Jubal Gil Vivas\n"
"Julio Maura\n"
"CARLOS ALBERTO PEREZ RAMIREZ\n"
"sidney berkenbrock\n"
"Richard Wilson\n"
"Chase Raptor Wilson\n"
"Rakee\n"
"Felipe Solís\n"
"escartinis\n"
"Erick Parsons\n"
"Alexander O Chard\n"
"Davi Junior\n"
"Zachary McLemore\n"
"Marc Simcox\n"
"Juan José Castro\n"
"Dharnesh\n"
"Mostafa Hassan\n"
"Zaheid Iqbal\n"
"Samarth Chidanand\n"
"Ping Petchged\n"
"Joe Sanchez\n"
"Alan Dunne\n"
"David Scolamiero\n"
"Wade Edwards\n"
"Teng Lee\n"
"Voramit Y.\n"
"Harshit Namdeo\n"
"Michael A. Martino\n"
"Ahmad Maher Che Mohd Adib\n"
"h.msahbin\n"
"tataroktay\n"
"Tony Kenneth\n"
"Adrien Demma\n"
"Thiago R. Fanfoni\n"
"vladys\n"
"Joe Polak\n"
"Eric Belinsky\n"
"linuxdancer\n"
"Ian Woods UK\n"
"Michael Ciacco\n"
"Suradech Pornsomboonkit\n"
"Gauravdeep Singh \n"
"resoldab\n"
"Keith Gardiner\n"
"JayWifi\n"
"PrabhuWorld & Nishanth\n"
"Prabhu & Agalya\n"
"Prabhu & Duraikannan & Vanitha\n"
"Nick Young\n"
"yurezplace\n"
"Seth Eshun\n"
"TLACKI\n"
"University of Texas at Dallas\n"
"Richard Antiabong\n"
"Aaminah Haniyah Rashid\n"
"Michael Gilton\n"
"emil\n"
"LynnH\n"
"Emanuel Facundo Campos\n"
"Leandro Rudolph Araujo\n"
"Arabi\n"
"chekoceron\n"
"Sonny\"Criminal\"Wigmore\n"
"Hervey Allen\n"
"Steve Brosseau\n"
"Fishlogic\n"
"Alex Kirby\n"
"Kyle Steinkamp\n"
"Louis Lloyd\n"
"cblessed\n"
"Jason Biscuit Williard\n"
"Steven H. Kalupson\n"
"Oshoe\n"
"John E. Durrett\n"
"WALA\n"
"Adrian Huston\n"
"Chad Hart\n"
"Blessing Matore\n"
"Julien BERTON\n"
"Michel Askenfeldt\n"
"Stephen Neary\n"
"WavetechSystemsLLC\n"
"CDeHoust\n"
"James P Hollingsworth\n"
"MMPD\n"
"KJ Iqbal\n"
"Ray Belshaw\n"
"Cipriano\n"
"Chris Fucking Johnson\n"
"Mondilla1985\n"
"Drew Mallett\n"
"Kevin Taylor\n"
"Chen Huangdong\n"
"Dan Brito\n"
"Charles Moore\n"
"Rowan George\n"
"Simon Joiner\n"
"Ebnöther Christian\n"
"Reza Hossain Khan\n"
"John Regan\n"
"Daniel D Patrick\n"
"Gared\n"
"George Solorzano \n"
"Nathan Bender\n"
"AJ Cochenour\n"
"Noel R\n"
"Tseggai\n"
"sSkulltrail\n"
"VegasRatt\n"
"Loek Canisius\n"
"Ismael A. Torres\n"
"Carlos Gobea\n"
"Patricio Salmeron\n"
"Alex Viman\n"
"Aram Nazarin\n"
"KhaisarMasoodAhmed\n"
"CameronRake\n"
"Adnan Shaikh\n"
"John Huston\n"
"Rob Stoop\n"
"route_bub\n"
"medtemo\n"
"Mark Mcfarland\n"
"slaxative\n"
"Warren Evans\n"
"Herme\n"
"Krzysztof Milek\n"
"Gobinath Chandrasekaran\n"
"julius gilmore\n"
"jeremyjenkins5\n"
"junj2121\n"
"Eduard Coll Pascual\n"
"SwampRabbit\n"
"Chintan Patel\n"
"Chad Monroe\n"
"Benjamin A Albee\n"
"S2MFH\n"
"Rajan Thanki\n"
"Dr. Christopher DesMarteau\n"
"Antonio Molina Cádiz\n"
"csegovia\n"
"Kamaleldin M Sadik\n"
"Tzu-Che HUANG\n"
"Zandro Bacani\n"
"Gabriele Di Benedetto\n"
"ARGCCIE2013\n"
"Tyler Kor\n"
"petenugent\n"
"Will Sim \n"
"wam\n"
"Chris Aldridge\n"
"Dwarfmage\n"
"Glenn \"Moose\" McHenry\n"
"Peter Baffoe\n"
"John and Crystal Vaughan\n"
"JAPD\n"
"Henrique Roma\n"
"Joseph Webb\n"
"Anton Denisevich\n"
"Brennan Nehemiah Jones\n"
"Justin Reagan\n"
"Stewart Beam\n"
"AlexBransome\n"
"Dwayne Towns\n"
"Derek Smiley\n"
"Ferran Orsola\n"
"Jason Craft\n"
"devesh\n"
"Luis A. Rocca Vazquez\n"
"Borja López Montilla\n"
"Adib Amrani\n"
"Debasish Gharami\n"
"Alessandro Lovati\n"
"Cristian Vendemiati\n"
"somescarygirl\n"
"Neil Shankar\n"
"Manjunath S Chickmath\n"
"Phill Proud\n"
"RolandVogels\n"
"Sam Mackenzie\n"
"Anthony Fuentes\n"
"Rich N\n"
"Meneertjes\n"
"darma96\n"
"Daniel Stamatov\n"
"Jakub Peterek\n"
"David G Boyle\n"
"Chad Carrington\n"
"CCIE28839\n"
"Lilliana \n"
"Raffaele \n"
"Brad Powlison\n"
"Bharath Ramakrishna\n"
"MCyagli\n"
"Scott Maderitz\n"
"Eugeniu Babin\n"
"William M Zambrano\n"
"Elton Hubner\n"
"Marek Macovsky\n"
"Luccas Aguiar\n"
"Keith Nowosielski\n"
"Dr Bankim Jani\n"
"JS\n"
"Tuxufologo\n"
"kishiro\n"
"Eric Gamess\n"
"Nguyen Thai Nguyen\n"
"Dmitry Pavlov\n"
"AS97\n"
"Axel Sioland\n"
"Louis Bartay\n"
"yokota shinichi\n"
"medtemo\n"
"Fekadu Berhane\n"
"Dmitriy E. Koshkin\n"
"amrinder kamboj\n"
"Muyeen Parveez\n"
"Peter Tavenier\n"
"kalpana\n"
"DlteC do Brasil\n"
"Tim Wilkes\n"
"Dave Malner\n"
"Ali Fadhl Hussein\n"
"elgrecos\n"
"Brandon Fields\n"
"Richard Jenniss\n"
"mikemick\n"
"Hitesh Panchal\n"
"W3FTM\n"
"TxetxuMC\n"
"Rotero82\n"
"JD Keith\n"
"MichaelDHays\n"
"yeyo\n"
"Mohamed Mohamud\n"
"Tarik houmaiza\n"
"Alessandro Piva\n"
"Arabi\n"
"Victor Salas\n"
"Santosh Nair\n"
"Shashank Bhargava\n"
"Andrew Tagliani\n"
"Shamal Weerakoon\n"
"Matt \"The Beard\" Powell\n"
"Douglas Edward Luce\n"
"John the PawWow\n"
"Dave Persuhn\n"
"Peter Daniel Bartyik\n"
"Richard P. Mauer\n"
"Juan C. Guerrero\n"
"Randall Nieland\n"
"Carlos Cabrera\n"
"Jason Giles\n"
"Robert Pribanic\n"
"Renato Pinheiro de Souza\n"
"Antonio J Remedios\n"
"Adam Reblitz\n"
"Jose Rodriguez\n"
"Glenn Eberhard\n"
"EricWibowo\n"
"Peter M Nikodem\n"
"Sergio Ryan La Torre\n"
"Tommie James\n"
"Sabine Reis\n"
"Skyler Hayes\n"
"Yoruba\n"
"Jens Stark\n"
"Dean Lewis\n"
"Recundis\n"
"Yazeed Fataar\n"
"IOLARIU\n"
"Kurt & Ethan Steed\n"
"Paul Slater\n"
"Malick FALL\n"
"eduinho\n"
"Shev\n"
"v0ha\n"
"Bezmalinovic\n"
"Les Bowditch\n"
"Rick Sattler\n"
"PGX\n"
"Abhinandan Juyal\n"
"Ernest Edwards\n"
"Paul Woodhouse\n"
"Ian Benoit\n"
"Sasha Dee Robbins\n"
"Ferrel\n"
"Marc Snelgrove\n"
"Nicolás A. Escudero\n"
"Marcus Auman\n"
"sides14\n"
"albokillers\n"
"Juan C. Espinoza\n"
"Filipe Torres\n"
"J.C. Cederboom BICT\n"
"Jovanni\n"
"Matthew C Hickey\n"
"Michael Anthony Cummisky\n"
"Dmitry Figol\n"
"Brian Wusu\n"
"ZEULEO\n"
"SeongJaeYang\n"
"Jesus Alirio Diaz Castro\n"
"C Dennis Ferguson\n"
"Viktor Pakin\n"
"Shelbie_Fann\n"
"Jay Canfield\n"
"Andrei Bolanu\n"
"Juan Felipe Palacios\n"
"Igor Boyko\n"
"hadinhphu\n"
"Esvi Molina G.\n"
"Shawn McHenry\n"
"korjjj\n"
"Abdul Jawad Shakoor\n"
"Edmarc Vitz Oliveros\n"
"Mike Melo\n"
"Walter Haeffner\n"
"TKH\n"
"Matthew Weber\n"
"H4K3R\n"
"Shawn Hanff\n"
"Ateeb Ahmed\n"
"Jerry Kanoholani\n"
"Anton Björkstrand\n"
"Ryan Corcoran\n"
"Eric Persch\n"
"Dane Straub\n"
"Syed Hashmi\n"
"Muhammad Furqan Butt\n"
"Jima\n"
"Bill Scheirer\n"
"Mikael H.\n"
"Internetworkerz\n"
"Mike Ferguson\n"
"Matthew Mason\n"
"Sujith TK\n"
"TheSource\n"
"Eduardo Romero Peña\n"
"Andrei Niamtu\n"
"Shafaq Ali\n"
"Harold H Hall III\n"
"Dion Rupert\n"
"Iavarone\n"
"Eric Geib\n"
"Prakash\n"
"Tom S Pedersen\n"
"bash550\n"
"Mukendi Zamba\n"
"Daniel Gheorghe Luca\n"
"Phillip B. deChantal III\n"
"Aatif Nawaz\n"
"Yuri (aka itAvgur) Melnikov\n"
"iKaruS\n"
"Jon Christopher Matthews\n"
"Stuart Fordham\n"
"olga1126\n"
"Ashwani Patel\n"
"Ronald White\n"
"Rob VanHooren\n"
"Ifti Hussain\n"
"Jan Král\n"
"pramod mangatha\n"
"Franklin Davis\n"
"Kendrick Som\n"
"Joseph Amen\n"
"Michael Lee Mosher\n"
"Abel G. Aberra\n"
"jbrake\n"
"joso\n"
"Renzo Tovar Ledesma\n"
"Rodrigo Lube\n"
"Emad Abid\n"
"Joshua St.Clair\n"
"Gabor Keri\n"
"Chris Ortiz\n"
"Michael P Goodwins\n"
"Adrian Oden\n"
"Paul Eugene Smth\n"
"Sahil Pujani\n"
"manokum6\n"
"Ruslan Foutorianski\n"
"Sinan Sulaiman\n"
"Philip Weekly\n"
"hernan.cruz.iii\n"
"Torgny Holmlund\n"
"A.Autricque\n"
"LeeJBurton\n"
"Daniel Elkins\n"
"Ahmed Shetta\n"
"Mihai Mirita\n"
"AsdfghjkL\n"
"Anton Zinchenko\n"
"Anders Låstad\n"
"Asimios Kiropoulos\n"
"cdljel\n"
"Meeralebbe Mohamed Rezard\n"
"Chris Weber\n"
"Shawn Nay\n"
"Gabe Rivas\n"
"Andrea Dainese\n"
"Eric SAUGNAC\n"
"vahid rezaabadi\n"
"Don Kanicki\n"
"Joe Mendola\n"
"Stanislav_CCIE_NextGen\n"
"Steven Coutts\n"
"Ahmad Amran Ahmad\n"
"ChewableFritter\n"
"Torgny Tonna Holmlund\n"
"Taiwo Awoyinfa\n"
"Aju Jose\n"
"Cory Light\n"
"Daniel de Morais Gurgel\n"
"pkillur\n"
"Alex D\n"
"Felipe Solis\n"
"Vijayendra Shetty\n"
"Frédéric \"Strall\" C.\n"
"JP Scholten\n"
"Olugbenga Adara\n"
"José Ignacio Jorquera G.\n"
"RICHMAC\n"
"Timi Shoyele\n"
"Andrew Roderos\n"
"Sathursan Kantharajah\n"
"Edj1963\n"
"Lasse Haugen\n"
"Thiago Bastos (TBastos)\n"
"Diogo Mendes\n"
"Eric Villeneuve\n"
"ARNOLD KWAME MARKSON\n"
"Tomek Szulczynski\n"
"Achiraf\n"
"Brent O\'Keeffe - University of Chicago\n"
"Trenton John Skoog\n"
"Mark Hicks\n"
"A Davis\n"
"VASANT\n"
"Dat Nguyen Thanh\n"
"Robert W Goguen\n"
"Noel Sergio\n"
"Peter Clemenko III\n"
"Happy Hanna\n"
"Alexander Atanasow\n"
"Nkosinathi Mntambo\n"
"Jeff Oliver\n"
"Sean Wauchop\n"
"Simon Priest\n"
"oliverdiese\n"
"Simon Macpherson\n"
"Dennis Weijenberg\n"
"Jonathan Worth\n"
"Pauly Comtois\n"
"Dynamic Network Security Ltd\n"
"siggjen\n"
"TimmyK\n"
"Hernan \"IP\" Marquez\n"
"Ryan Young\n"
"Abdiweli Haji\n"
"Thomas Kager\n"
"ElaineJoyDelaCruz\n"
"Jeff Smith\n"
"Lionel PONCELET\n"
"Frode Figenschou\n"
"EvelynBowman\n"
"Dr Barra Touray\n"
"Irvin Leo Villanueva\n"
"Steven Bowman\n"
"GurcharanSingh\n"
"Shane Bradley\n"
"Alex Kruch\n"
"Andrew Quinton\n"
"Jeremiah Sholes\n"
"David E. Cohen\n"
"Michael Ciacco\n"
"Ziggy\n"
"Abrhim\n"
"Des Kharisma\n"
"yusyd\n"
"Tomasz Grzelak\n"
"mosipd\n"
"J.D. Wegner\n"
"Fred O Asante\n"
"George C. Dean\n"
"Jose Ricardo Feliz de Oliveira\n"
"Muhammad Nouman\n"
"Hamid Reza Farahani Farid\n"
"Kaizad Anklesaria\n"
"Lonie Packer\n"
"aelso.silva\n"
"Erik de Wildt\n"
"thepcdoctor\n"
"Ben Murray (Cache22)\n"
"kuji\n"
"Bobbie Edwards\n"
"AugurJ\n"
"Rodrigo de Paula Cordeiro\n"
"Edson Siqueira, RJ - Brazil\n"
"JMB^3\n"
"cgbfish\n"
"zdenotim\n"
"Jose Barrantes\n"
"Gordon Howard Hannan\n"
"Aboubacar Ballo\n"
"Matt \'Jacktooth\' Allen\n"
"Gabe\n"
"Junaid Khan\n"
"Markus Schaufler\n"
"Terry Rawleigh\n"
"Abdulla Solutions.net\n"
"Luis Carlos Salazar\n"
"Charles Boston\n"
"Tyrone Penn\n"
"Paul Denning\n"
"GSingh14\n"
"preston\n"
"Stephen Wilson\n"
"JamesBernardVallespin\n"
"Mircea Ion Nedelea\n"
"Sung Jae Park\n"
"Khaled Alghazi\n"
"Ewart Duncan\n"
"Nov Alpha Kilo\n"
"Billy J Bryant \"Diomenas\"\n"
"Tom_Evan_Alexa_Tif_Tippen\n"
"abcdefgh1234\n"
"Florian Ohnemüller\n"
"gnos\n"
"ankur singh\n"
"Donna Murphy\n"
"Joey Lamb\n"
"Satyajit Mandal\n"
"Jean Guandalini\n"
"Mohammed Elqazzaz\n"
"NWN Dan Hellwig\n"
"Raghav Gurung\n"
"Jon Halar\n"
"Daniel G Tootell\n"
"Graham Shaw\n"
"Lando Thomas\n"
"Joe Sanchez\n"
"Chris Corbin\n"
"Samuel E. Brown\n"
"Dmitri\n"
"Omar Fawaz Thoaib Al-Sammarraie\n"
"nyasha muzwidziwa\n"
"snuthall\n"
"Craig Coffey\n"
"Ramgopal Reddy M\n"
"Guilherme Montoanelli\n"
"l3y3t\n"
"Tony.Davis\n"
"Pipeiden\n"
"Thomas Tablada\n"
"Matthew Piechotta\n"
"David Michael Alan Daniel Gross\n"
"Robert Bezerra\n"
"Sergey Polski\n"
"Ivan Haralamov\n"
"Harley Hopkins\n"
"Dave Kretzmer\n"
"Timur Mezentsev\n"
"David Bianchi-Pastori\n"
"Przemyslaw \'UWillC\' Snowacki\n"
"Samuel Oppong\n"
"Joshua Riesenweber\n"
"Vlad Ivanovic\n"
"bryanamv\n"
"Tomasz Łęgowiak\n"
"M. Dustin Brimberry\n"
"Monthon Thunboonma\n"
"Parambir Singh Bhullar\n"
"Fahad\n"
"Thomas Wing\n"
"Devan Hardwick\n"
"Bahman Arbab\n"
"Jared M.\n"
"Philip Wong\n"
"Paulo Galluzzi\n"
"Gregory L. Johnson, Jr.\n"
"Mario Mariscal\n"
"KYLE LYNCH\n"
"Ross Anderson\n"
"John Littler\n"
"Kingsley Tambe-Ebot\n"
"Muhammed Ali Bulut\n"
"Sanjay Kumar Patel\n"
"MUJTABA HAIDARY\n"
"juliox\n"
"Robert Morris\n"
"Nick Duff\n"
"Andrew Krist\n"
"GNS41292\n"
"Vishnu Nagepally\n"
"Prithvi\n"
"Jomar Vincent Miller\n"
"Gildas Pambo\n"
"Michalis Polyadis\n"
"A.W. van Bart\n"
"Erwyn Tadong\n"
"Robert Laidlaw\n"
"Lance Heckerman\n"
"Tzu-Che HUANG\n"
"S.Gerry\n"
"Kevin R. Owens II\n"
"Ahmed Alayyoubi\n"
"GNS40116\n"
"KeshavS\n"
"Panagiotis Dasouras\n"
"Francesc Lumbierres\n"
"Brandon Benchley\n"
"Rusu Mihai\n"
"Imran Yousaf\n"
"bsdam\n"
"Curtis Bunch\n"
"Diego Antonio Quintana Solano\n"
"Kendell-Lee\n"
"Scott Pickles\n"
"Ahmed AlGherbawi\n"
"Unzagi\n"
"Tim Sedlmeyer\n"
"Ethan Tan YW\n"
"Bad Mother Fucker\n"
"Gurpreet Grewal\n"
"veers\n"
"Ehsan Hzare\n"
"Dennis Pham\n"
"Edsil Welch\n"
"yarrrr\n"
"Alexey Eromenko \"Technologov\"\n"
"Junior J George\n"
"Q Ozzman\n"
"Jakso Ozi Laszlo\n"
"Origosis\n"
"Dave Martin\n"
"Matthew A. Kurowski\n"
"FosNA\n"
"Md.Rafiqul Islam\n"
"Wuggles\n"
"Jason Marley\n"
"Adnan Kolakovic\n"
"Thomas A York\n"
"mikedesanto\n"
"Ramadevu Ramkumar\n"
"Joe Audet\n"
"Barz Dove\n"
"BremerH0\n"
"Karwan\n"
"Mark McGrady\n"
"Herbie\n"
"Adefisayo Adegoke\n"
"Luke Dominy\n"
"Gareth Rhys Williams\n"
"Harley Thomas Jones\n"
"Elvis Lunga\n"
"elinathan\n"
"Chris Dent\n"
"Omeata Ifeanyi\n"
"Sanyi Wakgari\n"
"gizmo\n"
"routenull0\n"
"Simon Wilkinson\n"
"Andy Britten 59 Commando RE\n"
"THEIS\n"
"F Graham\n"
"Graham MacGregor\n"
"wgamper\n"
"Mark Buono\n"
"John Li\n"
"Daniel David\n"
"Ahmed Thabet\n"
"Prithvi Das\n"
"Mike Mcleod Jr.\n"
"Michael Higgins\n"
"Praveen Kumar\n"
"Rick Mills\n"
"Miguel Sama\n"
"Jim Aurouze\n"
"Pramod Mangatha\n"
"Ghiyas Haider\n"
"frank hopkins\n"
"Bizkitcan\n"
"John Rockwell\n"
"Patrick Joseph A Evangelista\n"
"Tom Deamer\n"
"TiagoWR\n"
"Iman Ebrahimi Tajadod\n"
"Somone77 was here\n"
"mckrsta@hotmail.com\n"
"Joseph Munyongi\n"
"Chris Palmer\n"
"Himanshu Bhatt\n"
"Phil Henson\n"
"Rfernandes\n"
"Eze\n"
"Ezenwa\n"
"DaRyan Horn\n"
"Waisudin Farzam\n"
"Vyacheslav Sobchenko\n"
"Robert Gorbul\n"
"MrBultitude\n"
"Dave Massam\n"
"Khumi\n"
"Francisco \"TuKoX\" Briano\n"
"Aijaz\n"
"rittam\n"
"Seweryn Obieglo\n"
"Tessius\n"
"Hans Lossman\n"
"Libi Pappachen\n"
"TSAV\n"
"Deskha\n"
"Mohammad Al Amaireh\n"
"Megajove\n"
"Nico Heijnen\n"
"Jeff Nagel\n"
"Travis Baker\n"
"Bryan Tabb\n"
"MNL1960\n"
"LsTRA\n"
"GarethHall\n"
"Vanquish1986\n"
"JAMES SNEW\n"
"Muhammad Bhatti\n"
"Tiago Marques\n"
"Jason Pietrzak\n"
"Ping Petchged\n"
"Herman\n"
"Nikul Patel\n"
"Alfred Eric Jones III\n"
"Josh Barenthin\n"
"Kanuj Behl KjB\n"
"philbeau\n"
"YALEW TAKELE\n"
"phletchmattic\n"
"Jayir Mansuclal\n"
"Phil Beaudoin\n"
"Steve Minnick\n"
"Eugenio de la TorreGNS30928\n"
"Eugenio de la Torre\n"
"Aelso.silva\n"
"Jodoval Luiz dos Santos Junior\n"
"Zack Manriquez\n"
"WOS\n"
"Chris Good\n"
"ALI QASSEM ALMHDI\n"
"Clifton Bissick\n"
"Edward Cohen\n"
"Driton B\n"
"Brian Keifer\n"
"Henry Dsouza\n"
"Garrett Haynie\n"
"Bob Lyons\n"
"David E Soto JR\n"
"Rob Jeff\n"
"Cicero Avila\n"
"fulopa7\n"
"Arnaud Helin\n"
"Dan Alongi\n"
"JamesDanielMorris\n"
"Vladimir Novakovic\n"
"Daniel Baeza\n"
"Manolo164\n"
"Mike Simkins\n"
"Craig Ellegood\n"
"Angel Serrano\n"
"Jude Lymn\n"
"Robert Jordan\n"
"Karl Struss\n"
"Curt Kellum\n"
"Paul Gunter Bravo Ch.\n"
"Guy Verdegem\n"
"Walter Johnstone-Breen\n"
"Maged Atef\n"
"Morgan Chea\n"
"Microland Limited\n"
"Saed Zahedi\n"
"Reynaldo Bilan\n"
"Josh Lumahan\n"
"Nima Javidi\n"
"CraftedPacket\n"
"Mike McPhee\n"
"Christopher Beyer (CT)\n"
"lishanmirando\n"
"Christian Chavez\n"
"Shawn H4K3R Hanff\n"
"Marcin Markowski\n"
"Ibrahim El-Ali\n"
"Muhammad Agung Nugroho\n"
"Don Taylor II\n"
"Ian Davidson\n"
"Henry Trombley II\n"
"Nestor Rubio\n"
"Fritters\n"
"Korish\n"
"Humaiun\n"
"Bilal Issa\n"
"Francisco Alfaro Espinoza\n"
"Travis Kreikemeier\n"
"Chee Vooi Lew\n"
"Ovidiu Constantinescu\n"
"Sergio Martinez Leon\n"
"saran\n"
"SPUCKETT\n"
"Krishna Kumaran GK\n"
"StefanoLaguardia\n"
"Patrick McGirr\n"
"Anderson vakaoBR Leite\n"
"Felippe Dias\n"
"Vladimír Vladys Jančich\n"
"Johnboy3\n"
"Shane Froebel\n"
"Cushgod\n"
"Craig Zambra\n"
"tsk700\n"
"Erik Pettersson\n"
"Daniel Leece\n"
"Tim Ingalls\n"
"Matti Huotari\n"
"Duminda Wehalle\n"
"MACE529\n"
"Mohamed Furqan Firdous\n"
"Dinesh Rupan\n"
"Chad Stambaugh\n"
"Thomas Marchsteiner\n"
"Luis Gustavo Fernandes\n"
"Aouachria Nassim\n"
"Gerardo Estrada Gutierrez\n"
"Erik A Bodholt\n"
"Ali Khuram\n"
"SysWiz\n"
"Guilherme Ladvocat\n"
"Ezy\n"
"Chris Bell\n"
"Paul Heck\n"
"Bjørn Petter Kysnes\n"
"Andre Jonker\n"
"BARY\n"
"Grillmon\n"
"Hisham Jaghloul\n"
"Straube\n"
"Ian Patterson\n"
"Andrew J. Thompson\n"
"Damian Barlow\n"
"Marcus\n"
"Kevindenton\n"
"KoolFadil\n"
"Fadil Kadrat\n"
"Jacob McCoy\n"
"Penny Yeung\n"
"SOE NAING OO\n"
"Victor Reyes II\n"
"Ronnie van Eecke\n"
"Derek Fries\n"
"ZAIM Lotfi\n"
"Larry Peterson\n"
"Stephen Arogbonlo\n"
"Cristian Silva Guerra\n"
"Bill Miller\n"
"jaberzadeh\n"
"Peyton Quast\n"
"Ola\n"
"D O\'Raghallaigh\n"
"Shaik Mahamood\n"
"ZardozTrampoline\n"
"Jason Zimmerman\n"
"ALI Q ALMHDI\n"
"Maxim Klimanov\n"
"DBQ1969\n"
"Drew Gough\n"
"Aamir Ismail\n"
"Ejdayid\n"
"Charles Maze\n"
"GunnarHakonarson\n"
"Mark Wyss\n"
"nicku\n"
"Adam Norman\n"
"Ahmad Rateb\n"
"Kingsley Tako\n"
"Saminder Sandhu\n"
"Ahmed (Sigey)\n"
"Rémy POUPPEVILLE\n"
"JeroenPeeters\n"
"d3vild0g\n"
"Stefan Stoyanov\n"
"Anthony Hopkins\n"
"Paul Jeffrey\n"
"Tommy Faucher\n"
"Gessesse Eshetu\n"
"Christopher Pratt\n"
"Jani Laakkonen\n"
"Marios Nicou Louca\n"
"Aaron Schmierer\n"
"Chris Luke\n"
"Preston Taylor\n"
"Chris \"Sasquatch\" Saxton\n"
"Brian Dalhover\n"
"Aaron Mayfield\n"
"Engjell R. Pllana\n"
"KalSarai\n"
"Mike Nathan\n"
"Le Viet Thanh\n"
"Stumper\n"
"Brian Ladd\n"
"Brian Binion\n"
"Marcos Umino\n"
"Alex Martins\n"
"Petrollese\n"
"Marion Bogdanov\n"
"Tom Jehn\n"
"Kevin \"MadDOG\" Livingston\n"
"Mike Profitt\n"
"Sean Treschen Pillay\n"
"adunselman\n"
"Andre Dunselman\n"
"Don McDonald\n"
"Jeff Cummings\n"
"Carlos M Cabrera Vargas\n"
"Dilip Ratna\n"
"Viet Nguyen\n"
"Nelson Lee\n"
"John Anthony Jones\n"
"Jack and Mia\n"
"JamesANDDanielMorris\n"
"Giuliano Barros\n"
"Ingo Bothe\n"
"Malik Lolonga\n"
"NAIF - ALthubaiti\n"
"Faisal Rehman\n"
"Juergen Morgenstern\n"
"Adli Hajarat\n"
"SONIDA\n"
"Dan Morgan - VisuMAX\n"
"Joe Sarah Vivien Brantley\n"
"Joe Sarah Vivien Sam Brantley\n"
"Alex Kiwerski\n"
"Christian Hansen\n"
"Digital Design Networks, LLC\n"
"TorentZachary\n"
"RobinM\n"
"Michael Amador\n"
"claytondf\n"
"James W.A. Albert\n"
"Mike Carty , Alliance\n"
"Damian Zaremba\n"
"Sol Birnbaum\n"
"Mark Butler\n"
"Sam Alletto\n"
"Ron Cannella\n"
"RIP Benjamin Gibbs\n"
"Joshua Settle\n"
"rotimi gbadamosi\n"
"rivmont\n"
"CCIE28826\n"
"Miltiades Hadjioannou\n"
"Psyche607\n"
"Yohan\n"
"Denilson Dejesus\n"
"Allan Que\n"
"Rudy R Guerra\n"
"Palle Christoffersen\n"
"Kishor\n"
"Vinicius, O Caixeta\n"
"Mohamed Rezard Meeralebbe\n"
"EmreR\n"
"Alex Hubery\n"
"Sol Huebner\n"
"alepo\n"
"Andrés oRtA eLiZalde\n"
"Frank Garrison\n"
"Vincent van der Sluijs\n"
"NelsonNetworks\n"
"Marc Weisel\n"
"wobe\n"
"Kev Ward,UK\n"
"Mitch Vaughan\n"
"Mujtaba Mir\n"
"Arni Birgisson\n"
"Abel Aberra\n"
"Jeff Nierman\n"
"godZilla\n"
"gingerpower121\n"
"cyrinojuca\n"
"Peter Joseph\n"
"Davisteraz\n"
"Eduardo Cardoso dos Santos\n"
"MARK \"BIG MAC\" MCGRADY\n"
"chekoceron\n"
"GuillermoJimenez\n"
"Alvin Phillip\n"
"jerrysimila\n"
"Patrick den Hoed\n"
"Ciro Centro Strico Salerno\n"
"Emilio Morla\n"
"Mike Shafer\n"
"agho\n"
"Fernando Requena\n"
"Dovydas Stepanavicius\n"
"Joseph W. Anderson\n"
"Nathan Chisholm\n"
"Yahngel\n"
"rolf1974\n"
"Joe Polak\n"
"Stephen Moore\n"
"Van Michael\n"
"Ondrej Köver\n"
"Brian Adelson\n"
"Vasco Ferraz\n"
"CHONG YU NAM\n"
"Jody L. Whitlock\n"
"Kanangu\n"
"Godwin Nsubuga\n"
"John Hennessy\n"
"Michael A. Little\n"
"stibibby\n"
"Shane K Hunt\n"
"Adroit Tutoring\n"
"JamesBernardBenjaminVallespin\n"
"Ken B.\n"
"Asib Yussuf\n"
"Miguel Edison Santos - CAF\n"
"Robson Luis Sgai\n"
"Leon Deguenon\n"
"Gabriel Urra\n"
"Rob Coote\n"
"Wickedkuul_Beehotsch\n"
"Robert Mikołajczyk\n"
"Yasser A. Alluhaidan\n"
"Gessesse ESHETU\n"
"Joe Kukis\n"
"rgb943\n"
"Joe Wills\n"
"Robert Rittenhouse\n"
"Reinaldo Benitez\n"
"vrhvenkatesh\n"
"Nirav Bhatt\n"
"David Perry\n"
"Carsten Ellermann\n"
"Rufus Methu Igoro\n"
"Jeff Pooley\n"
"Andrew B. Shipton\n"
"YeowKee YK@TECHNICALES.COM\n"
"naveed4k143\n"
"Nathan Ash #12148057\n"
"Robert Marmo\n"
"George Vanburgh\n"
"Tudor Davies\n"
"Jeremy Bowen\n"
"Andrew Fox\n"
"Junior Taitt\n"
"Tropizm\n"
"Potcholo \"Nick\" Nicolas\n"
"Brandon Bennett\n"
"ntwrks\n"
"Hjalti Pálmason\n"
"Zachary Hill\n"
"Ian Verno\n"
"Jean-Christophe Baptiste\n"
"chandima Ediriweera\n"
"GunnarH\n"
"Kenrick Wong\n"
"David Afro Lezama Castro\n"
"Asad Munir\n"
"mohammed hashim\n"
"José Luis Berlanga Villlarreal\n"
"aaden\n"
"Martin Schumacher\n"
"Ðrizz\n"
"Daniel Lintott\n"
"tekdork\n"
"Sergio Pereira\n"
"James Wamburi\n"
"Mark Wecker\n"
"umar.hanafi86@gmail.com\n"
"Edson Tadeu Almeida da Silveira\n"
"Christopher Kilger\n"
"Alex Beal\n"
"Johnny Uribe\n"
"Kevin P Sheahan\n"
"Alex C. Rodich\n"
"Joao Teixeira\n"
"Caio Vianna Mello\n"
"fierceg37s\n"
"Doug Lardo\n"
"Conrad Jedynak\n"
"Andy Kelsall Jr.\n"
"Marco Basurco\n"
"Julio Delgado Jr\n"
"Ken Hallstrom-Meade\n"
"Nathan Loop\n"
"daxm\n"
"Kevin Pryce\n"
"Skipdog\n"
"Jean D Ulysse\n"
"Richard Kwame Peasah\n"
"Peter Musolino\n"
"Eric Andrews\n"
"Essam M Mahmod Ahmed\n"
"Mir Ali\n"
"JesurajAP\n"
"David Darryl Pitre\n"
"Vladimir Zalles\n"
"C_S_Ginn\n"
"Gustaf Hyllested Serve\n"
"Matt Blackwell\n"
"Skilldibop\n"
"Paul \"Skilldibop\" Jerome\n"
"Gera74\n"
"AAronC\n"
"Rob Wilkes\n"
"steve brokenshire\n"
"Mardie Kartosoewito\n"
"Ryan Bagley\n"
"Mario Seoane\n"
"James Vickery\n"
"Peter Scheele\n"
"pdxDavid\n"
"Pedro Sobral\n"
"Bryan E. Pratt Jr.\n"
"Eddy Loffeld\n"
"Joshua Guillory\n"
"ClarenceCaldwell\n"
"Afrim Kwarteng\n"
"Chika\n"
"Marco Caruso\n"
"Abubakr A Binafif\n"
"chuck russell\n"
"Jake Vallejo\n"
"Kostia\n"
"Biswajit\n"
"Marcelo da Silva Conterato\n"
"Ellert Hardarson\n"
"Rowan Lee\n"
"Mossyrik78\n"
"Mikhail Shpak\n"
"Gianremo Smisek\n"
"xtify21\n"
"Travis Newton\n"
"Stephen Turnbull\n"
"James Grace\n"
"Eric \"Frenchie\" Villeneuve\n"
"Rick Brady\n"
"Bodizzle\n"
"rbailey-NTAI\n"
"Asamoto\n"
"Allen Taylor\n"
"NetJimB\n"
"kyoungyong lee\n"
"Damian Marcrum\n"
"Nicolas Bautista\n"
"Brad Allatt\n"
"Salman Ahmad\n"
"Gerardo Guzman\n"
"Kovan Azeez\n"
"Alkhamisy Adel Hamad\n"
"RajJyothi\n"
"Leonardo S Jeronimo\n"
"Xcratburma\n"
"Bjørn-Inge Haga\n"
"Tristian Howard\n"
"Robert Wilson\n"
"BattyTheJedi\n"
"Kosala\n"
"Nazmus Sakib\n"
"Alfred Eric Jones\n"
"J Grant Bellchamber\n"
"Lekan Aje\n"
"Jacob Winkle\n"
"Maybe translate to Norwegian?\n"
"Anthony R Junk\n"
"Amplex Electric, Inc.\n"
"Sue Chatterjee\n"
"Glen Stadig\n"
"Christian Menz\n"
"Billy Nix\n"
"JoeyL.\n"
"Alessandro.Veras\n"
"Steven Vlahakis\n"
"Kevin Bowen\n"
"NG11geek\n"
"Matvey Gubanov\n"
"frano-dalmatinac\n"
"OrceDimitrovski\n"
"boogsalmighty\n"
"Maky Robert\n"
"Stuart Walker\n"
"Vipin Kumar\n"
"N.Sivatharzan\n"
"Dannie Norman\n"
"Sylvester Metieh\n"
"Tyler Pruess\n"
"DanielMB\n"
"Ryan Douglass Milton\n"
"Ahmad Amran - Webcore\n"
"Benjamin Feld\n"
"GWSchweickert\n"
"Dirk Fettke\n"
"AJ NOURI\n"
"Stefan_Basson_Sydney_AUS\n"
"Tang Choong Whye\n"
"Ofer Ben Zvi\n"
"5UCC355\n"
"Rikard Borginger\n"
"Robert K. Saydee Sr.\n"
"D.Coty\n"
"Michael G. Taylor Sr.\n"
"Erich Schommarz\n"
"Cassius Thomas\n"
"Dominic Metzger\n"
"Mike Manning\n"
"José M. Castro Jr.\n"
"Justin Bridgman\n"
"Francis Enmanuel Baez Metz\n"
"Ievgen Morskyi\n"
"itAvgur-Irkutsk-Russia\n"
"Padraic D. Hallinan\n"
"Thomas Deamer\n"
"Marco Paulo Ferreira\n"
"Nicolas Pagano\n"
"JR Garcia\n"
"SIAGHY\n"
"Naj Qazi\n"
"reynaldo bilan\n"
"Matt Egan\n"
"Chris Beach\n"
"Fco. Jose Santos\n"
"Johnny Miller\n"
"Muhammad Tahir Munir\n"
"BJoslin\n"
"Nick Fouts\n"
"Pete Kowalsky\n"
"developer\n"
"Tyler Conrad\n"
"Michael Weaver Johnson\n"
"s2mfh\n"
"Scotty2mfh\n"
"Nicolas Dangeon\n"
"Michial Cantrell\n"
"Sébastien Larivière\n"
"Primmus\n"
"Wonder Jones\n"
"Weston L Myers\n"
"Steve Brosseau\n"
"Ryan Mortier\n"
"Steve Snavely\n"
"Kirk J Iles\n"
"Tiago Sousa\n"
"Jesuraj Amaladas\n"
"Mark C Bernier\n"
"Jonathan David Rees\n"
"David Bigerstaff\n"
"Widmo\n"
"Karl Billington\n"
"naner2k\n"
"Jose Daniel\n"
"Mark.Foutch\n"
"Bootcamp20\n"
"Peter Hansen\n"
"Dharnesh\n"
"EnglishRob\n"
"Jyotirmoy Lahkar\n"
"Guggilam Guru Vasudeva\n"
"Chris Higgins\n"
"Blas Díaz (Colombia)\n"
"Ahmed Albadree\n"
"Jens Holtmann\n"
"TJ Kwentus\n"
"Jacob Smith\n"
"Damian Baran\n"
"phocean\n"
"Mark \"BigMac\" McGrady\n"
"teav-sovandara\n"
"Dinesh Galani\n"
"Raman Thapa\n"
"RechieCebreros\n"
"Jade Rampulla\n"
"Fred Quan\n"
"Martin Wall\n"
"Louis Duzant\n"
"SebasNati\n"
"Erik Fairbanks\n"
"Simon Boadi\n"
"Darren Smurphin Murphy\n"
"Stephanos Christou\n"
"Rob Hinst\n"
"Carlos A Salas\n"
"Snehal Patel\n"
"S. O\'Reilly\n"
"John Volter\n"
"dkmahajan\n"
"mansoor\n"
"Charles Crosland\n"
"Thiha Soe\n"
"Ram K Bista\n"
"toyosiolabs\n"
"Shardul Ingle\n"
"KhurramArif\n"
"AaronMcKenna\n"
"FaustoSampaio\n"
"Justin Lemme\n"
"Jquest\n"
"Michael Brister\n"
"Nicholas Donathan\n"
"Slawomir Babicz\n"
"Bradley Grein\n"
"Roy\n"
"RoyD\n"
"Adam Bailey\n"
"Daniel Neculai\n"
"Kevin Vogt\n"
"ALFREDO PASIGAN SALIPOT\n"
"Giovanni Lojica\n"
"Nicolas A. Escudero\n"
"The Big Dirty\n"
"Arun Joshi\n"
"John Michael Santiago - boogs\n"
"Gian-Luca Casella\n"
"Geron Craig\n"
"Christopher Young\n"
"John Buelk\n"
"Julio Moraes\n"
"Aaron Meade\n"
"Paulo Guerreiro (Portugal)\n"
"CR Thompson\n"
"Timár Zsolt\n"
"Leobis\n"
"Alan Matson\n"
"Eric Park\n"
"Pavel Tishkov\n"
"L@mine S@lhi\n"
"Per-Erik Brask\n"
"Matt Williams\n"
"Favre was here.\n"
"Sergey Zheleznyak\n"
"Alongi Daniel\n"
"Lee Jeong Goo\n"
"Chris Swinney\n"
"Ayyappan Ramanan\n"
"GSEC4959\n"
"Cesar Honores\n"
"Ravinderpal Singh\n"
"Lee Haynes\n"
"Jose Luis Bosquez Echevers\n"
"Yevgeniy Orman\n"
"Ankur Singh\n"
"Marcos de Jesus Magagalhaes\n"
"Dave Heinz\n"
"Emberly\n"
"Chris Gauthier\n"
"Blair&BenedictRepuyan\n"
"Alex Recupero\n"
"Victor Knell\n"
"Travis Yates\n"
"John Yates III\n"
"Ian_C\n"
"Jesse Jensen\n"
"Drew Ludwick\n"
"Chris Barlow\n"
"Alessandro Veras\n"
"Alan Matos\n"
"Harold Alden A. Sanchez\n"
"Munir B. Abdullahi\n"
"FARTXaler\n"
"DEWTEK\n"
"DewTech\n"
"Daniel Darby\n"
"Tishkov Pavel [64]\n"
"Wilhelm Jonker", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("AboutDialog", "&Thanks to", None))
self.uiLicensePlainTextEdit.setPlainText(_translate("AboutDialog", " GNU GENERAL PUBLIC LICENSE\n"
" Version 3, 29 June 2007\n"
"\n"
" Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n"
" Everyone is permitted to copy and distribute verbatim copies\n"
" of this license document, but changing it is not allowed.\n"
"\n"
" Preamble\n"
"\n"
" The GNU General Public License is a free, copyleft license for\n"
"software and other kinds of works.\n"
"\n"
" The licenses for most software and other practical works are designed\n"
"to take away your freedom to share and change the works. By contrast,\n"
"the GNU General Public License is intended to guarantee your freedom to\n"
"share and change all versions of a program--to make sure it remains free\n"
"software for all its users. We, the Free Software Foundation, use the\n"
"GNU General Public License for most of our software; it applies also to\n"
"any other work released this way by its authors. You can apply it to\n"
"your programs, too.\n"
"\n"
" When we speak of free software, we are referring to freedom, not\n"
"price. Our General Public Licenses are designed to make sure that you\n"
"have the freedom to distribute copies of free software (and charge for\n"
"them if you wish), that you receive source code or can get it if you\n"
"want it, that you can change the software or use pieces of it in new\n"
"free programs, and that you know you can do these things.\n"
"\n"
" To protect your rights, we need to prevent others from denying you\n"
"these rights or asking you to surrender the rights. Therefore, you have\n"
"certain responsibilities if you distribute copies of the software, or if\n"
"you modify it: responsibilities to respect the freedom of others.\n"
"\n"
" For example, if you distribute copies of such a program, whether\n"
"gratis or for a fee, you must pass on to the recipients the same\n"
"freedoms that you received. You must make sure that they, too, receive\n"
"or can get the source code. And you must show them these terms so they\n"
"know their rights.\n"
"\n"
" Developers that use the GNU GPL protect your rights with two steps:\n"
"(1) assert copyright on the software, and (2) offer you this License\n"
"giving you legal permission to copy, distribute and/or modify it.\n"
"\n"
" For the developers\' and authors\' protection, the GPL clearly explains\n"
"that there is no warranty for this free software. For both users\' and\n"
"authors\' sake, the GPL requires that modified versions be marked as\n"
"changed, so that their problems will not be attributed erroneously to\n"
"authors of previous versions.\n"
"\n"
" Some devices are designed to deny users access to install or run\n"
"modified versions of the software inside them, although the manufacturer\n"
"can do so. This is fundamentally incompatible with the aim of\n"
"protecting users\' freedom to change the software. The systematic\n"
"pattern of such abuse occurs in the area of products for individuals to\n"
"use, which is precisely where it is most unacceptable. Therefore, we\n"
"have designed this version of the GPL to prohibit the practice for those\n"
"products. If such problems arise substantially in other domains, we\n"
"stand ready to extend this provision to those domains in future versions\n"
"of the GPL, as needed to protect the freedom of users.\n"
"\n"
" Finally, every program is threatened constantly by software patents.\n"
"States should not allow patents to restrict development and use of\n"
"software on general-purpose computers, but in those that do, we wish to\n"
"avoid the special danger that patents applied to a free program could\n"
"make it effectively proprietary. To prevent this, the GPL assures that\n"
"patents cannot be used to render the program non-free.\n"
"\n"
" The precise terms and conditions for copying, distribution and\n"
"modification follow.\n"
"\n"
" TERMS AND CONDITIONS\n"
"\n"
" 0. Definitions.\n"
"\n"
" \"This License\" refers to version 3 of the GNU General Public License.\n"
"\n"
" \"Copyright\" also means copyright-like laws that apply to other kinds of\n"
"works, such as semiconductor masks.\n"
"\n"
" \"The Program\" refers to any copyrightable work licensed under this\n"
"License. Each licensee is addressed as \"you\". \"Licensees\" and\n"
"\"recipients\" may be individuals or organizations.\n"
"\n"
" To \"modify\" a work means to copy from or adapt all or part of the work\n"
"in a fashion requiring copyright permission, other than the making of an\n"
"exact copy. The resulting work is called a \"modified version\" of the\n"
"earlier work or a work \"based on\" the earlier work.\n"
"\n"
" A \"covered work\" means either the unmodified Program or a work based\n"
"on the Program.\n"
"\n"
" To \"propagate\" a work means to do anything with it that, without\n"
"permission, would make you directly or secondarily liable for\n"
"infringement under applicable copyright law, except executing it on a\n"
"computer or modifying a private copy. Propagation includes copying,\n"
"distribution (with or without modification), making available to the\n"
"public, and in some countries other activities as well.\n"
"\n"
" To \"convey\" a work means any kind of propagation that enables other\n"
"parties to make or receive copies. Mere interaction with a user through\n"
"a computer network, with no transfer of a copy, is not conveying.\n"
"\n"
" An interactive user interface displays \"Appropriate Legal Notices\"\n"
"to the extent that it includes a convenient and prominently visible\n"
"feature that (1) displays an appropriate copyright notice, and (2)\n"
"tells the user that there is no warranty for the work (except to the\n"
"extent that warranties are provided), that licensees may convey the\n"
"work under this License, and how to view a copy of this License. If\n"
"the interface presents a list of user commands or options, such as a\n"
"menu, a prominent item in the list meets this criterion.\n"
"\n"
" 1. Source Code.\n"
"\n"
" The \"source code\" for a work means the preferred form of the work\n"
"for making modifications to it. \"Object code\" means any non-source\n"
"form of a work.\n"
"\n"
" A \"Standard Interface\" means an interface that either is an official\n"
"standard defined by a recognized standards body, or, in the case of\n"
"interfaces specified for a particular programming language, one that\n"
"is widely used among developers working in that language.\n"
"\n"
" The \"System Libraries\" of an executable work include anything, other\n"
"than the work as a whole, that (a) is included in the normal form of\n"
"packaging a Major Component, but which is not part of that Major\n"
"Component, and (b) serves only to enable use of the work with that\n"
"Major Component, or to implement a Standard Interface for which an\n"
"implementation is available to the public in source code form. A\n"
"\"Major Component\", in this context, means a major essential component\n"
"(kernel, window system, and so on) of the specific operating system\n"
"(if any) on which the executable work runs, or a compiler used to\n"
"produce the work, or an object code interpreter used to run it.\n"
"\n"
" The \"Corresponding Source\" for a work in object code form means all\n"
"the source code needed to generate, install, and (for an executable\n"
"work) run the object code and to modify the work, including scripts to\n"
"control those activities. However, it does not include the work\'s\n"
"System Libraries, or general-purpose tools or generally available free\n"
"programs which are used unmodified in performing those activities but\n"
"which are not part of the work. For example, Corresponding Source\n"
"includes interface definition files associated with source files for\n"
"the work, and the source code for shared libraries and dynamically\n"
"linked subprograms that the work is specifically designed to require,\n"
"such as by intimate data communication or control flow between those\n"
"subprograms and other parts of the work.\n"
"\n"
" The Corresponding Source need not include anything that users\n"
"can regenerate automatically from other parts of the Corresponding\n"
"Source.\n"
"\n"
" The Corresponding Source for a work in source code form is that\n"
"same work.\n"
"\n"
" 2. Basic Permissions.\n"
"\n"
" All rights granted under this License are granted for the term of\n"
"copyright on the Program, and are irrevocable provided the stated\n"
"conditions are met. This License explicitly affirms your unlimited\n"
"permission to run the unmodified Program. The output from running a\n"
"covered work is covered by this License only if the output, given its\n"
"content, constitutes a covered work. This License acknowledges your\n"
"rights of fair use or other equivalent, as provided by copyright law.\n"
"\n"
" You may make, run and propagate covered works that you do not\n"
"convey, without conditions so long as your license otherwise remains\n"
"in force. You may convey covered works to others for the sole purpose\n"
"of having them make modifications exclusively for you, or provide you\n"
"with facilities for running those works, provided that you comply with\n"
"the terms of this License in conveying all material for which you do\n"
"not control copyright. Those thus making or running the covered works\n"
"for you must do so exclusively on your behalf, under your direction\n"
"and control, on terms that prohibit them from making any copies of\n"
"your copyrighted material outside their relationship with you.\n"
"\n"
" Conveying under any other circumstances is permitted solely under\n"
"the conditions stated below. Sublicensing is not allowed; section 10\n"
"makes it unnecessary.\n"
"\n"
" 3. Protecting Users\' Legal Rights From Anti-Circumvention Law.\n"
"\n"
" No covered work shall be deemed part of an effective technological\n"
"measure under any applicable law fulfilling obligations under article\n"
"11 of the WIPO copyright treaty adopted on 20 December 1996, or\n"
"similar laws prohibiting or restricting circumvention of such\n"
"measures.\n"
"\n"
" When you convey a covered work, you waive any legal power to forbid\n"
"circumvention of technological measures to the extent such circumvention\n"
"is effected by exercising rights under this License with respect to\n"
"the covered work, and you disclaim any intention to limit operation or\n"
"modification of the work as a means of enforcing, against the work\'s\n"
"users, your or third parties\' legal rights to forbid circumvention of\n"
"technological measures.\n"
"\n"
" 4. Conveying Verbatim Copies.\n"
"\n"
" You may convey verbatim copies of the Program\'s source code as you\n"
"receive it, in any medium, provided that you conspicuously and\n"
"appropriately publish on each copy an appropriate copyright notice;\n"
"keep intact all notices stating that this License and any\n"
"non-permissive terms added in accord with section 7 apply to the code;\n"
"keep intact all notices of the absence of any warranty; and give all\n"
"recipients a copy of this License along with the Program.\n"
"\n"
" You may charge any price or no price for each copy that you convey,\n"
"and you may offer support or warranty protection for a fee.\n"
"\n"
" 5. Conveying Modified Source Versions.\n"
"\n"
" You may convey a work based on the Program, or the modifications to\n"
"produce it from the Program, in the form of source code under the\n"
"terms of section 4, provided that you also meet all of these conditions:\n"
"\n"
" a) The work must carry prominent notices stating that you modified\n"
" it, and giving a relevant date.\n"
"\n"
" b) The work must carry prominent notices stating that it is\n"
" released under this License and any conditions added under section\n"
" 7. This requirement modifies the requirement in section 4 to\n"
" \"keep intact all notices\".\n"
"\n"
" c) You must license the entire work, as a whole, under this\n"
" License to anyone who comes into possession of a copy. This\n"
" License will therefore apply, along with any applicable section 7\n"
" additional terms, to the whole of the work, and all its parts,\n"
" regardless of how they are packaged. This License gives no\n"
" permission to license the work in any other way, but it does not\n"
" invalidate such permission if you have separately received it.\n"
"\n"
" d) If the work has interactive user interfaces, each must display\n"
" Appropriate Legal Notices; however, if the Program has interactive\n"
" interfaces that do not display Appropriate Legal Notices, your\n"
" work need not make them do so.\n"
"\n"
" A compilation of a covered work with other separate and independent\n"
"works, which are not by their nature extensions of the covered work,\n"
"and which are not combined with it such as to form a larger program,\n"
"in or on a volume of a storage or distribution medium, is called an\n"
"\"aggregate\" if the compilation and its resulting copyright are not\n"
"used to limit the access or legal rights of the compilation\'s users\n"
"beyond what the individual works permit. Inclusion of a covered work\n"
"in an aggregate does not cause this License to apply to the other\n"
"parts of the aggregate.\n"
"\n"
" 6. Conveying Non-Source Forms.\n"
"\n"
" You may convey a covered work in object code form under the terms\n"
"of sections 4 and 5, provided that you also convey the\n"
"machine-readable Corresponding Source under the terms of this License,\n"
"in one of these ways:\n"
"\n"
" a) Convey the object code in, or embodied in, a physical product\n"
" (including a physical distribution medium), accompanied by the\n"
" Corresponding Source fixed on a durable physical medium\n"
" customarily used for software interchange.\n"
"\n"
" b) Convey the object code in, or embodied in, a physical product\n"
" (including a physical distribution medium), accompanied by a\n"
" written offer, valid for at least three years and valid for as\n"
" long as you offer spare parts or customer support for that product\n"
" model, to give anyone who possesses the object code either (1) a\n"
" copy of the Corresponding Source for all the software in the\n"
" product that is covered by this License, on a durable physical\n"
" medium customarily used for software interchange, for a price no\n"
" more than your reasonable cost of physically performing this\n"
" conveying of source, or (2) access to copy the\n"
" Corresponding Source from a network server at no charge.\n"
"\n"
" c) Convey individual copies of the object code with a copy of the\n"
" written offer to provide the Corresponding Source. This\n"
" alternative is allowed only occasionally and noncommercially, and\n"
" only if you received the object code with such an offer, in accord\n"
" with subsection 6b.\n"
"\n"
" d) Convey the object code by offering access from a designated\n"
" place (gratis or for a charge), and offer equivalent access to the\n"
" Corresponding Source in the same way through the same place at no\n"
" further charge. You need not require recipients to copy the\n"
" Corresponding Source along with the object code. If the place to\n"
" copy the object code is a network server, the Corresponding Source\n"
" may be on a different server (operated by you or a third party)\n"
" that supports equivalent copying facilities, provided you maintain\n"
" clear directions next to the object code saying where to find the\n"
" Corresponding Source. Regardless of what server hosts the\n"
" Corresponding Source, you remain obligated to ensure that it is\n"
" available for as long as needed to satisfy these requirements.\n"
"\n"
" e) Convey the object code using peer-to-peer transmission, provided\n"
" you inform other peers where the object code and Corresponding\n"
" Source of the work are being offered to the general public at no\n"
" charge under subsection 6d.\n"
"\n"
" A separable portion of the object code, whose source code is excluded\n"
"from the Corresponding Source as a System Library, need not be\n"
"included in conveying the object code work.\n"
"\n"
" A \"User Product\" is either (1) a \"consumer product\", which means any\n"
"tangible personal property which is normally used for personal, family,\n"
"or household purposes, or (2) anything designed or sold for incorporation\n"
"into a dwelling. In determining whether a product is a consumer product,\n"
"doubtful cases shall be resolved in favor of coverage. For a particular\n"
"product received by a particular user, \"normally used\" refers to a\n"
"typical or common use of that class of product, regardless of the status\n"
"of the particular user or of the way in which the particular user\n"
"actually uses, or expects or is expected to use, the product. A product\n"
"is a consumer product regardless of whether the product has substantial\n"
"commercial, industrial or non-consumer uses, unless such uses represent\n"
"the only significant mode of use of the product.\n"
"\n"
" \"Installation Information\" for a User Product means any methods,\n"
"procedures, authorization keys, or other information required to install\n"
"and execute modified versions of a covered work in that User Product from\n"
"a modified version of its Corresponding Source. The information must\n"
"suffice to ensure that the continued functioning of the modified object\n"
"code is in no case prevented or interfered with solely because\n"
"modification has been made.\n"
"\n"
" If you convey an object code work under this section in, or with, or\n"
"specifically for use in, a User Product, and the conveying occurs as\n"
"part of a transaction in which the right of possession and use of the\n"
"User Product is transferred to the recipient in perpetuity or for a\n"
"fixed term (regardless of how the transaction is characterized), the\n"
"Corresponding Source conveyed under this section must be accompanied\n"
"by the Installation Information. But this requirement does not apply\n"
"if neither you nor any third party retains the ability to install\n"
"modified object code on the User Product (for example, the work has\n"
"been installed in ROM).\n"
"\n"
" The requirement to provide Installation Information does not include a\n"
"requirement to continue to provide support service, warranty, or updates\n"
"for a work that has been modified or installed by the recipient, or for\n"
"the User Product in which it has been modified or installed. Access to a\n"
"network may be denied when the modification itself materially and\n"
"adversely affects the operation of the network or violates the rules and\n"
"protocols for communication across the network.\n"
"\n"
" Corresponding Source conveyed, and Installation Information provided,\n"
"in accord with this section must be in a format that is publicly\n"
"documented (and with an implementation available to the public in\n"
"source code form), and must require no special password or key for\n"
"unpacking, reading or copying.\n"
"\n"
" 7. Additional Terms.\n"
"\n"
" \"Additional permissions\" are terms that supplement the terms of this\n"
"License by making exceptions from one or more of its conditions.\n"
"Additional permissions that are applicable to the entire Program shall\n"
"be treated as though they were included in this License, to the extent\n"
"that they are valid under applicable law. If additional permissions\n"
"apply only to part of the Program, that part may be used separately\n"
"under those permissions, but the entire Program remains governed by\n"
"this License without regard to the additional permissions.\n"
"\n"
" When you convey a copy of a covered work, you may at your option\n"
"remove any additional permissions from that copy, or from any part of\n"
"it. (Additional permissions may be written to require their own\n"
"removal in certain cases when you modify the work.) You may place\n"
"additional permissions on material, added by you to a covered work,\n"
"for which you have or can give appropriate copyright permission.\n"
"\n"
" Notwithstanding any other provision of this License, for material you\n"
"add to a covered work, you may (if authorized by the copyright holders of\n"
"that material) supplement the terms of this License with terms:\n"
"\n"
" a) Disclaiming warranty or limiting liability differently from the\n"
" terms of sections 15 and 16 of this License; or\n"
"\n"
" b) Requiring preservation of specified reasonable legal notices or\n"
" author attributions in that material or in the Appropriate Legal\n"
" Notices displayed by works containing it; or\n"
"\n"
" c) Prohibiting misrepresentation of the origin of that material, or\n"
" requiring that modified versions of such material be marked in\n"
" reasonable ways as different from the original version; or\n"
"\n"
" d) Limiting the use for publicity purposes of names of licensors or\n"
" authors of the material; or\n"
"\n"
" e) Declining to grant rights under trademark law for use of some\n"
" trade names, trademarks, or service marks; or\n"
"\n"
" f) Requiring indemnification of licensors and authors of that\n"
" material by anyone who conveys the material (or modified versions of\n"
" it) with contractual assumptions of liability to the recipient, for\n"
" any liability that these contractual assumptions directly impose on\n"
" those licensors and authors.\n"
"\n"
" All other non-permissive additional terms are considered \"further\n"
"restrictions\" within the meaning of section 10. If the Program as you\n"
"received it, or any part of it, contains a notice stating that it is\n"
"governed by this License along with a term that is a further\n"
"restriction, you may remove that term. If a license document contains\n"
"a further restriction but permits relicensing or conveying under this\n"
"License, you may add to a covered work material governed by the terms\n"
"of that license document, provided that the further restriction does\n"
"not survive such relicensing or conveying.\n"
"\n"
" If you add terms to a covered work in accord with this section, you\n"
"must place, in the relevant source files, a statement of the\n"
"additional terms that apply to those files, or a notice indicating\n"
"where to find the applicable terms.\n"
"\n"
" Additional terms, permissive or non-permissive, may be stated in the\n"
"form of a separately written license, or stated as exceptions;\n"
"the above requirements apply either way.\n"
"\n"
" 8. Termination.\n"
"\n"
" You may not propagate or modify a covered work except as expressly\n"
"provided under this License. Any attempt otherwise to propagate or\n"
"modify it is void, and will automatically terminate your rights under\n"
"this License (including any patent licenses granted under the third\n"
"paragraph of section 11).\n"
"\n"
" However, if you cease all violation of this License, then your\n"
"license from a particular copyright holder is reinstated (a)\n"
"provisionally, unless and until the copyright holder explicitly and\n"
"finally terminates your license, and (b) permanently, if the copyright\n"
"holder fails to notify you of the violation by some reasonable means\n"
"prior to 60 days after the cessation.\n"
"\n"
" Moreover, your license from a particular copyright holder is\n"
"reinstated permanently if the copyright holder notifies you of the\n"
"violation by some reasonable means, this is the first time you have\n"
"received notice of violation of this License (for any work) from that\n"
"copyright holder, and you cure the violation prior to 30 days after\n"
"your receipt of the notice.\n"
"\n"
" Termination of your rights under this section does not terminate the\n"
"licenses of parties who have received copies or rights from you under\n"
"this License. If your rights have been terminated and not permanently\n"
"reinstated, you do not qualify to receive new licenses for the same\n"
"material under section 10.\n"
"\n"
" 9. Acceptance Not Required for Having Copies.\n"
"\n"
" You are not required to accept this License in order to receive or\n"
"run a copy of the Program. Ancillary propagation of a covered work\n"
"occurring solely as a consequence of using peer-to-peer transmission\n"
"to receive a copy likewise does not require acceptance. However,\n"
"nothing other than this License grants you permission to propagate or\n"
"modify any covered work. These actions infringe copyright if you do\n"
"not accept this License. Therefore, by modifying or propagating a\n"
"covered work, you indicate your acceptance of this License to do so.\n"
"\n"
" 10. Automatic Licensing of Downstream Recipients.\n"
"\n"
" Each time you convey a covered work, the recipient automatically\n"
"receives a license from the original licensors, to run, modify and\n"
"propagate that work, subject to this License. You are not responsible\n"
"for enforcing compliance by third parties with this License.\n"
"\n"
" An \"entity transaction\" is a transaction transferring control of an\n"
"organization, or substantially all assets of one, or subdividing an\n"
"organization, or merging organizations. If propagation of a covered\n"
"work results from an entity transaction, each party to that\n"
"transaction who receives a copy of the work also receives whatever\n"
"licenses to the work the party\'s predecessor in interest had or could\n"
"give under the previous paragraph, plus a right to possession of the\n"
"Corresponding Source of the work from the predecessor in interest, if\n"
"the predecessor has it or can get it with reasonable efforts.\n"
"\n"
" You may not impose any further restrictions on the exercise of the\n"
"rights granted or affirmed under this License. For example, you may\n"
"not impose a license fee, royalty, or other charge for exercise of\n"
"rights granted under this License, and you may not initiate litigation\n"
"(including a cross-claim or counterclaim in a lawsuit) alleging that\n"
"any patent claim is infringed by making, using, selling, offering for\n"
"sale, or importing the Program or any portion of it.\n"
"\n"
" 11. Patents.\n"
"\n"
" A \"contributor\" is a copyright holder who authorizes use under this\n"
"License of the Program or a work on which the Program is based. The\n"
"work thus licensed is called the contributor\'s \"contributor version\".\n"
"\n"
" A contributor\'s \"essential patent claims\" are all patent claims\n"
"owned or controlled by the contributor, whether already acquired or\n"
"hereafter acquired, that would be infringed by some manner, permitted\n"
"by this License, of making, using, or selling its contributor version,\n"
"but do not include claims that would be infringed only as a\n"
"consequence of further modification of the contributor version. For\n"
"purposes of this definition, \"control\" includes the right to grant\n"
"patent sublicenses in a manner consistent with the requirements of\n"
"this License.\n"
"\n"
" Each contributor grants you a non-exclusive, worldwide, royalty-free\n"
"patent license under the contributor\'s essential patent claims, to\n"
"make, use, sell, offer for sale, import and otherwise run, modify and\n"
"propagate the contents of its contributor version.\n"
"\n"
" In the following three paragraphs, a \"patent license\" is any express\n"
"agreement or commitment, however denominated, not to enforce a patent\n"
"(such as an express permission to practice a patent or covenant not to\n"
"sue for patent infringement). To \"grant\" such a patent license to a\n"
"party means to make such an agreement or commitment not to enforce a\n"
"patent against the party.\n"
"\n"
" If you convey a covered work, knowingly relying on a patent license,\n"
"and the Corresponding Source of the work is not available for anyone\n"
"to copy, free of charge and under the terms of this License, through a\n"
"publicly available network server or other readily accessible means,\n"
"then you must either (1) cause the Corresponding Source to be so\n"
"available, or (2) arrange to deprive yourself of the benefit of the\n"
"patent license for this particular work, or (3) arrange, in a manner\n"
"consistent with the requirements of this License, to extend the patent\n"
"license to downstream recipients. \"Knowingly relying\" means you have\n"
"actual knowledge that, but for the patent license, your conveying the\n"
"covered work in a country, or your recipient\'s use of the covered work\n"
"in a country, would infringe one or more identifiable patents in that\n"
"country that you have reason to believe are valid.\n"
"\n"
" If, pursuant to or in connection with a single transaction or\n"
"arrangement, you convey, or propagate by procuring conveyance of, a\n"
"covered work, and grant a patent license to some of the parties\n"
"receiving the covered work authorizing them to use, propagate, modify\n"
"or convey a specific copy of the covered work, then the patent license\n"
"you grant is automatically extended to all recipients of the covered\n"
"work and works based on it.\n"
"\n"
" A patent license is \"discriminatory\" if it does not include within\n"
"the scope of its coverage, prohibits the exercise of, or is\n"
"conditioned on the non-exercise of one or more of the rights that are\n"
"specifically granted under this License. You may not convey a covered\n"
"work if you are a party to an arrangement with a third party that is\n"
"in the business of distributing software, under which you make payment\n"
"to the third party based on the extent of your activity of conveying\n"
"the work, and under which the third party grants, to any of the\n"
"parties who would receive the covered work from you, a discriminatory\n"
"patent license (a) in connection with copies of the covered work\n"
"conveyed by you (or copies made from those copies), or (b) primarily\n"
"for and in connection with specific products or compilations that\n"
"contain the covered work, unless you entered into that arrangement,\n"
"or that patent license was granted, prior to 28 March 2007.\n"
"\n"
" Nothing in this License shall be construed as excluding or limiting\n"
"any implied license or other defenses to infringement that may\n"
"otherwise be available to you under applicable patent law.\n"
"\n"
" 12. No Surrender of Others\' Freedom.\n"
"\n"
" If conditions are imposed on you (whether by court order, agreement or\n"
"otherwise) that contradict the conditions of this License, they do not\n"
"excuse you from the conditions of this License. If you cannot convey a\n"
"covered work so as to satisfy simultaneously your obligations under this\n"
"License and any other pertinent obligations, then as a consequence you may\n"
"not convey it at all. For example, if you agree to terms that obligate you\n"
"to collect a royalty for further conveying from those to whom you convey\n"
"the Program, the only way you could satisfy both those terms and this\n"
"License would be to refrain entirely from conveying the Program.\n"
"\n"
" 13. Use with the GNU Affero General Public License.\n"
"\n"
" Notwithstanding any other provision of this License, you have\n"
"permission to link or combine any covered work with a work licensed\n"
"under version 3 of the GNU Affero General Public License into a single\n"
"combined work, and to convey the resulting work. The terms of this\n"
"License will continue to apply to the part which is the covered work,\n"
"but the special requirements of the GNU Affero General Public License,\n"
"section 13, concerning interaction through a network will apply to the\n"
"combination as such.\n"
"\n"
" 14. Revised Versions of this License.\n"
"\n"
" The Free Software Foundation may publish revised and/or new versions of\n"
"the GNU General Public License from time to time. Such new versions will\n"
"be similar in spirit to the present version, but may differ in detail to\n"
"address new problems or concerns.\n"
"\n"
" Each version is given a distinguishing version number. If the\n"
"Program specifies that a certain numbered version of the GNU General\n"
"Public License \"or any later version\" applies to it, you have the\n"
"option of following the terms and conditions either of that numbered\n"
"version or of any later version published by the Free Software\n"
"Foundation. If the Program does not specify a version number of the\n"
"GNU General Public License, you may choose any version ever published\n"
"by the Free Software Foundation.\n"
"\n"
" If the Program specifies that a proxy can decide which future\n"
"versions of the GNU General Public License can be used, that proxy\'s\n"
"public statement of acceptance of a version permanently authorizes you\n"
"to choose that version for the Program.\n"
"\n"
" Later license versions may give you additional or different\n"
"permissions. However, no additional obligations are imposed on any\n"
"author or copyright holder as a result of your choosing to follow a\n"
"later version.\n"
"\n"
" 15. Disclaimer of Warranty.\n"
"\n"
" THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\n"
"APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\n"
"HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\n"
"OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\n"
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n"
"PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\n"
"IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\n"
"ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n"
"\n"
" 16. Limitation of Liability.\n"
"\n"
" IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\n"
"WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\n"
"THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\n"
"GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\n"
"USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\n"
"DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\n"
"PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\n"
"EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\n"
"SUCH DAMAGES.\n"
"\n"
" 17. Interpretation of Sections 15 and 16.\n"
"\n"
" If the disclaimer of warranty and limitation of liability provided\n"
"above cannot be given local legal effect according to their terms,\n"
"reviewing courts shall apply local law that most closely approximates\n"
"an absolute waiver of all civil liability in connection with the\n"
"Program, unless a warranty or assumption of liability accompanies a\n"
"copy of the Program in return for a fee.\n"
"\n"
" END OF TERMS AND CONDITIONS\n"
"\n"
" How to Apply These Terms to Your New Programs\n"
"\n"
" If you develop a new program, and you want it to be of the greatest\n"
"possible use to the public, the best way to achieve this is to make it\n"
"free software which everyone can redistribute and change under these terms.\n"
"\n"
" To do so, attach the following notices to the program. It is safest\n"
"to attach them to the start of each source file to most effectively\n"
"state the exclusion of warranty; and each file should have at least\n"
"the \"copyright\" line and a pointer to where the full notice is found.\n"
"\n"
" <one line to give the program\'s name and a brief idea of what it does.>\n"
" Copyright (C) <year> <name of author>\n"
"\n"
" This program is free software: you can redistribute it and/or modify\n"
" it under the terms of the GNU General Public License as published by\n"
" the Free Software Foundation, either version 3 of the License, or\n"
" (at your option) any later version.\n"
"\n"
" This program is distributed in the hope that it will be useful,\n"
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
" GNU General Public License for more details.\n"
"\n"
" You should have received a copy of the GNU General Public License\n"
" along with this program. If not, see <http://www.gnu.org/licenses/>.\n"
"\n"
"Also add information on how to contact you by electronic and paper mail.\n"
"\n"
" If the program does terminal interaction, make it output a short\n"
"notice like this when it starts in an interactive mode:\n"
"\n"
" <program> Copyright (C) <year> <name of author>\n"
" This program comes with ABSOLUTELY NO WARRANTY; for details type `show w\'.\n"
" This is free software, and you are welcome to redistribute it\n"
" under certain conditions; type `show c\' for details.\n"
"\n"
"The hypothetical commands `show w\' and `show c\' should show the appropriate\n"
"parts of the General Public License. Of course, your program\'s commands\n"
"might be different; for a GUI interface, you would use an \"about box\".\n"
"\n"
" You should also get your employer (if you work as a programmer) or school,\n"
"if any, to sign a \"copyright disclaimer\" for the program, if necessary.\n"
"For more information on this, and how to apply and follow the GNU GPL, see\n"
"<http://www.gnu.org/licenses/>.\n"
"\n"
" The GNU General Public License does not permit incorporating your program\n"
"into proprietary programs. If your program is a subroutine library, you\n"
"may consider it more useful to permit linking proprietary applications with\n"
"the library. If this is what you want to do, use the GNU Lesser General\n"
"Public License instead of this License. But first, please read\n"
"<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"
"", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("AboutDialog", "&License Agreement", None))
from . import resources_rc
| noplay/gns3-gui | gns3/ui/about_dialog_ui.py | Python | gpl-3.0 | 112,795 | [
"Brian",
"CRYSTAL",
"Dalton",
"GAMESS",
"MOOSE"
] | f198432deff9cf2c919fd580bce3f407b18770c6496e622cd1e083e0f2ef1a01 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""A facade for interfacing with multiple Galaxy instances."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Dict, Iterable, Iterator, Tuple, List
from ansible.galaxy.api import CollectionVersionMetadata
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate, Requirement,
)
from ansible.galaxy.api import GalaxyAPI, GalaxyError
from ansible.module_utils._text import to_text
from ansible.utils.display import Display
display = Display()  # module-level CLI display used for the proxy's warning output
class MultiGalaxyAPIProxy:
    """A proxy that abstracts talking to multiple Galaxy instances.

    Each lookup honours a per-requirement pinned server: when the
    requirement's ``src`` is itself a ``GalaxyAPI``, only that server is
    queried; otherwise every configured API is tried in order.
    """

    def __init__(self, apis, concrete_artifacts_manager):
        # type: (Iterable[GalaxyAPI], ConcreteArtifactsManager) -> None
        """Initialize the target APIs list.

        :param apis: Galaxy API instances to query, in priority order.
        :param concrete_artifacts_manager: manager used to resolve metadata
            for concrete (local/URL) artifacts and to record download info.
        """
        self._apis = apis
        self._concrete_art_mgr = concrete_artifacts_manager

    def _get_collection_versions(self, requirement):
        # type: (Requirement) -> Iterator[Tuple[GalaxyAPI, str]]
        """Helper for get_collection_versions.

        Yield api, version pairs for all APIs,
        and reraise the last error if no valid API was found.
        """
        found_api = False
        last_error = None

        # A requirement pinned to a specific Galaxy server is only looked up
        # there; otherwise fall back to every configured server.
        api_lookup_order = (
            (requirement.src, )
            if isinstance(requirement.src, GalaxyAPI)
            else self._apis
        )

        for api in api_lookup_order:
            try:
                versions = api.get_collection_versions(requirement.namespace, requirement.name)
            except GalaxyError as api_err:
                last_error = api_err
            except Exception as unknown_err:
                # Non-Galaxy errors (network, parsing, ...) are logged but do
                # not abort the scan of the remaining servers.
                display.warning(
                    "Skipping Galaxy server {server!s}. "
                    "Got an unexpected error when getting "
                    "available versions of collection {fqcn!s}: {err!s}".
                    format(
                        server=api.api_server,
                        fqcn=requirement.fqcn,
                        err=to_text(unknown_err),
                    )
                )
                last_error = unknown_err
            else:
                found_api = True
                for version in versions:
                    yield api, version

        # Only fail when *every* server errored out; a single healthy server
        # (even one returning no versions) is considered a successful lookup.
        if not found_api and last_error is not None:
            raise last_error

    def get_collection_versions(self, requirement):
        # type: (Requirement) -> Iterable[Tuple[str, GalaxyAPI]]
        """Get a set of unique versions for FQCN on Galaxy servers."""
        if requirement.is_concrete_artifact:
            # Concrete artifacts (paths/URLs) carry exactly one version,
            # resolved locally by the artifacts manager.
            return {
                (
                    self._concrete_art_mgr.
                    get_direct_collection_version(requirement),
                    requirement.src,
                ),
            }

        # NOTE: the helper already applies the pinned-server lookup order,
        # so no additional filtering is needed here.
        return set(
            (version, api)
            for api, version in self._get_collection_versions(
                requirement,
            )
        )

    def get_collection_version_metadata(self, collection_candidate):
        # type: (Candidate) -> CollectionVersionMetadata
        """Retrieve collection metadata of a given candidate.

        Tries each API in lookup order and returns the first successful
        result, recording the download source with the artifacts manager.
        Raises the last error seen when all servers fail.
        """
        # Initialize so an empty API list cannot cause an unbound-name error
        # at the final ``raise`` (mirrors get_signatures below).
        last_err = None

        api_lookup_order = (
            (collection_candidate.src, )
            if isinstance(collection_candidate.src, GalaxyAPI)
            else self._apis
        )
        for api in api_lookup_order:
            try:
                version_metadata = api.get_collection_version_metadata(
                    collection_candidate.namespace,
                    collection_candidate.name,
                    collection_candidate.ver,
                )
            except GalaxyError as api_err:
                last_err = api_err
            except Exception as unknown_err:
                # `verify` doesn't use `get_collection_versions` since the version is already known.
                # Do the same as `install` and `download` by trying all APIs before failing.
                # Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
                last_err = unknown_err
                display.warning(
                    "Skipping Galaxy server {server!s}. "
                    "Got an unexpected error when getting "
                    "available versions of collection {fqcn!s}: {err!s}".
                    format(
                        server=api.api_server,
                        fqcn=collection_candidate.fqcn,
                        err=to_text(unknown_err),
                    )
                )
            else:
                # Remember where this candidate can be downloaded from so the
                # install step does not have to query the server again.
                self._concrete_art_mgr.save_collection_source(
                    collection_candidate,
                    version_metadata.download_url,
                    version_metadata.artifact_sha256,
                    api.token,
                    version_metadata.signatures_url,
                    version_metadata.signatures,
                )
                return version_metadata

        if last_err is not None:
            raise last_err
        # Only reachable when no Galaxy APIs were configured at all; fail
        # with a clear message instead of an unbound-variable error.
        raise RuntimeError('no Galaxy API instances available to query')

    def get_collection_dependencies(self, collection_candidate):
        # type: (Candidate) -> Dict[str, str]
        # FIXME: return Requirement instances instead?
        """Retrieve collection dependencies of a given candidate.

        Concrete artifacts are inspected locally; everything else goes
        through the version-metadata lookup above.
        """
        if collection_candidate.is_concrete_artifact:
            return (
                self.
                _concrete_art_mgr.
                get_direct_collection_dependencies
            )(collection_candidate)

        return (
            self.
            get_collection_version_metadata(collection_candidate).
            dependencies
        )

    def get_signatures(self, collection_candidate):
        # type: (Candidate) -> List[Dict[str, str]]
        """Return detached signatures for a candidate, trying each API.

        Unlike the metadata lookup, total failure to *find* signatures is
        not fatal: an empty list is returned when no server errored.
        """
        namespace = collection_candidate.namespace
        name = collection_candidate.name
        version = collection_candidate.ver
        last_err = None

        api_lookup_order = (
            (collection_candidate.src, )
            if isinstance(collection_candidate.src, GalaxyAPI)
            else self._apis
        )

        for api in api_lookup_order:
            try:
                return api.get_collection_signatures(namespace, name, version)
            except GalaxyError as api_err:
                last_err = api_err
            except Exception as unknown_err:
                # Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
                last_err = unknown_err
                display.warning(
                    "Skipping Galaxy server {server!s}. "
                    "Got an unexpected error when getting "
                    "available versions of collection {fqcn!s}: {err!s}".
                    format(
                        server=api.api_server,
                        fqcn=collection_candidate.fqcn,
                        err=to_text(unknown_err),
                    )
                )
        if last_err:
            raise last_err

        return []
| renard/ansible | lib/ansible/galaxy/collection/galaxy_api_proxy.py | Python | gpl-3.0 | 7,459 | [
"Galaxy"
] | a5605688b5452a754f3e08e22869480ac492db754e80cc6032f9fe13045bc6a9 |
# coding=utf-8
"""Auto pull request migrations information plugin"""
import re
from git import Repo
from . import MASTER_BRANCH
from .base import AutoPullRequestPluginInterface, section_order
from ..nodes import DescriptionNode, NodeList, NumberedList, SectionNode
# Directory (relative to the repository root) that holds migration scripts.
MIGRATION_FOLDER = 'migration_scripts'
# Matches any .py/.sql/.sh/.rb file under MIGRATION_FOLDER, excluding
# __init__.py (via the negative lookbehind).
MIGRATION_SCRIPT_MATCHER = re.compile(r'^%s/.*\.(?:py|sql|sh|rb)(?<!__init__.py)$'
                                      % MIGRATION_FOLDER)
# Intended to recognise test files/classes by name so they are skipped.
# NOTE(review): inside a character class ``\b`` is the backspace character
# (0x08), not a word boundary -- confirm whether a word boundary was meant
# before relying on the ``[\b_./-]`` alternative here.
TESTS_MATCHER = re.compile(r'(?:(?:^|[\b_./-])[Tt]est(?:[Cc]ase)?|[Tt]est[Cc]ase$)')
# Sub-folder names that mark a script as a rollback (reverse) migration.
MIGRATION_FOLDERS = ['revert', 'rollback', 'reverse']
class MigrationsInfoPlugin(AutoPullRequestPluginInterface):
    """Report the migration scripts touched by the current branch."""

    def _get_migration_scripts(self):
        """Return migration-script paths added on this branch vs. master."""
        repo = Repo('.git')
        diff_entries = repo.index.diff(MASTER_BRANCH, 'migration_scripts')
        scripts = []
        for entry in diff_entries:
            # The diff runs from master back to the index, so a file that is
            # new on this branch shows up here as "deleted".
            path = entry.a_blob.path
            if entry.deleted_file and self._is_migration_script(path):
                scripts.append(path)
        return scripts

    def _is_migration_script(self, file_name):
        """True when *file_name* is a migration script and not a test file."""
        if TESTS_MATCHER.match(file_name):
            return False
        return MIGRATION_SCRIPT_MATCHER.match(file_name) is not None

    def _separate_forward_and_back_scripts(self, migration_scripts):
        """Split scripts into (forward, rollback) lists, preserving order."""
        def is_rollback(path):
            return any(('/%s/' % folder) in path for folder in MIGRATION_FOLDERS)

        backward = [path for path in migration_scripts if is_rollback(path)]
        forward = [path for path in migration_scripts if not is_rollback(path)]
        return forward, backward

    @section_order(-1)
    def section_migration_scripts(self):
        """Build the pull-request section listing migration scripts."""
        scripts = self._get_migration_scripts()
        if not scripts:
            return DescriptionNode('No migration scripts.')
        forward, backward = self._separate_forward_and_back_scripts(scripts)
        return NodeList([
            NumberedList(forward),
            SectionNode('Rollback Scripts', NumberedList(backward), level=2),
        ])
| gxx/auto_pull_request | auto_pull_request/plugins/migrations_info.py | Python | gpl-2.0 | 2,145 | [
"ASE"
] | e72baf2ad678897478eb9a9b053a390a024938355462155e666d8e8ce899b539 |
#!/usr/bin/python
# http://www.deheus.net/petrik/blog/2005/11/20/creating-a-wikipedia-watchlist-rss-feed-with-python-and-twill/
import sys, string, datetime, time, os, re, stat
import twill
import twill.commands as t
import gd
# Scratch file where fetched HTML pages can be dumped for debugging.
temp_html = "/tmp/wikipedia.html"
# Feed metadata (left over from the watchlist-RSS origin of this script).
rss_title = "Wikipedia watchlist"
rss_link = "http://en.wikipedia.org"
# Base URL of the MediaWiki instance under test.
host = "http://ersch.wikimedia.org/"
#host = "http://127.0.0.1/wiki/"
def login(username, password):
    """Log in to the wiki via Special:UserLogin using twill."""
    # Identify ourselves to the server with a contactable User-Agent.
    t.add_extra_header("User-Agent", "python-twill-russnelson@gmail.com")
    t.go(host+"index.php/Special:UserLogin")
    # Fill form #1 on the login page and submit.
    t.fv("1", "wpName", username)
    t.fv("1", "wpPassword", password)
    t.submit("wpLoginAttempt")
def upload_list(browser, pagename, uploads):
    """Upload *uploads* under one wiki file name, then exercise the
    delete / undelete / revert cycle and verify thumbnail handling.

    Args:
      browser:  twill browser, used to scrape URLs out of returned HTML.
      pagename: destination File: page name on the wiki.
      uploads:  local file paths; the first is the initial upload, the
                rest are uploaded as replacements of the same page.
    """
    # get the file sizes for later comparison.
    filesizes = []
    for fn in uploads:
        filesizes.append(os.stat(fn)[stat.ST_SIZE])
    filesizes.reverse() # because they get listed newest first.
    # Upload copy #1.
    t.go(host+"index.php/Special:Upload")
    t.formfile("1", "wpUploadFile", uploads[0])
    t.fv("1", "wpDestFile", pagename)
    t.fv("1", "wpUploadDescription", "Uploading %s" % pagename)
    t.submit("wpUpload")
    # Verify that we succeeded.
    t.find("File:%s" % pagename)
    for fn in uploads[1:]:
        # propose that we upload a replacement
        t.go(host+"index.php?title=Special:Upload&wpDestFile=%s&wpForReUpload=1" % pagename)
        t.formfile("1", "wpUploadFile", fn)
        t.fv("1", "wpUploadDescription", "Uploading %s as %s" % (fn, pagename))
        t.submit("wpUpload")
    # get the URLs for the thumbnails
    # urls alternates per version row: [file URL, thumbnail URL, ...].
    urls = []
    for url in re.finditer(r'<td><a href="([^"]*?)"><img alt="Thumbnail for version .*?" src="(.*?)"', browser.get_html()):
        urls.append(url.group(1))
        urls.append(url.group(2))
    print filesizes
    for i, url in enumerate(urls):
        t.go(url)
        # Even indices are the full files: their byte length must match the
        # size recorded before upload.
        if i % 2 == 0 and len(browser.get_html()) != filesizes[i / 2]:
            print i,len(browser.get_html()), filesizes[i / 2]
            t.find("Files differ in size")
        t.code("200")
        t.back()
    # delete all versions
    t.go(host+"index.php?title=File:%s&action=delete" % pagename)
    # after we get the confirmation page, commit to the action.
    t.fv("1", "wpReason", "Test Deleting...")
    t.submit("mw-filedelete-submit")
    # make sure that we can't visit their URLs.
    for i, url in enumerate(urls):
        t.go(url)
        # 'if 0 and ...' disables this special case; kept for reference.
        if 0 and i % 2 == 1 and i > 0 and browser.get_code() == 200:
            # bug 30192: the archived file's thumbnail doesn't get deleted.
            print "special-casing the last URL"
            continue
        t.code("404")
    # restore the current and archived version.
    t.go(host+"index.php/Special:Undelete/File:%s" % pagename)
    t.fv("1", "wpComment", "Test Restore")
    t.submit("restore")
    # visit the page to make sure that the thumbs get re-rendered properly.
    # when we get the 404 handler working correctly, this won't be needed.
    t.go(host+"index.php?title=File:%s" % pagename)
    # make sure that they got restored correctly.
    for i, url in enumerate(urls):
        t.go(url)
        if i % 2 == 0 and len(browser.get_html()) != filesizes[i / 2]:
            t.find("Files differ in size")
        t.code("200")
        t.back()
    # The revert test only applies to the two-upload scenario.
    if len(uploads) != 2:
        return
    match = re.search(r'"([^"]+?)" title="[^"]+?">revert', browser.get_html())
    if not match:
        t.find('revert')
    # NOTE(review): replace('&', '&') is a no-op -- presumably it was meant
    # to decode '&amp;' back to '&' and got garbled; confirm upstream.
    t.go(match.group(1).replace('&', '&'))
def make_files(pagename):
    """Create red/green/blue labelled test PNGs in /tmp via libgd.

    Also verifies that no File: page of that name already exists on the
    wiki (the delete attempt must fail).

    Returns:
      A 3-tuple of the generated file paths (red, green, blue).
    """
    redfilename = "/tmp/Red-%s" % pagename
    greenfilename = "/tmp/Green-%s" % pagename
    bluefilename = "/tmp/Blue-%s" % pagename
    # create a small test image.
    gd.gdMaxColors = 256
    i = gd.image((200,100))
    black = i.colorAllocate((0,0,0))
    white = i.colorAllocate((255,255,255))
    red = i.colorAllocate((255,55,55))
    green = i.colorAllocate((55,255,55))
    blue = i.colorAllocate((55,55,255))
    # now write a red version
    i.rectangle((0,0),(199,99),red, red)
    i.line((0,0),(199,99),black)
    i.string(gd.gdFontLarge, (5,50), pagename, white)
    i.writePng(redfilename)
    # now write a green version (different diagonal so the images differ)
    i.rectangle((0,0),(199,99),green, green)
    i.line((0,0),(99,99),black)
    i.string(gd.gdFontLarge, (5,50), pagename, white)
    i.writePng(greenfilename)
    # write a blue version
    i.rectangle((0,0),(199,99),blue,blue)
    i.line((0,0),(99,199),black)
    i.string(gd.gdFontLarge, (5,50), pagename, white)
    i.writePng(bluefilename)
    # propose that we delete it (in case it exists)
    t.go(host+"index.php?title=File:%s&action=delete" % pagename)
    # make sure that we've NOT gotten the wrong page and HAVE gotten the right one.
    t.notfind('You are about to delete the file')
    t.find("could not be deleted")
    return (redfilename, greenfilename, bluefilename )
def main():
    """Run one end-to-end image upload/delete/undelete test cycle."""
    try:
        username, password = sys.argv[1], sys.argv[2]
    except IndexError:
        print("Please supply username password")
        sys.exit(1)
    browser = twill.get_browser()
    login(username, password)
    # A timestamp keeps every run's page name unique on the wiki.
    serial = time.time()
    pagename = "Test-%s.png" % serial
    filenames = make_files(pagename)
    # Exercise the upload/replace/delete/restore cycle with two files.
    upload_list(browser, pagename, filenames[0:2])
    # try it again with two replacement files.
    # pagename = "Test-%sA.png" % serial
    # filenames = make_files(pagename)
    # upload_list(browser, pagename, filenames)
    t.showforms()
    t.save_html("/tmp/testabcd")
# Run the test only when executed as a script.
if __name__ == "__main__":
    main()
| SuriyaaKudoIsc/wikia-app-test | extensions/SwiftMedia/smtest.py | Python | gpl-2.0 | 5,519 | [
"VisIt"
] | 24b80fc52d7890ca4ff61159c0ec585782e03dd4234bc5ad3b6c1b8a4de9f75c |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Checks for SDK updates."""
import datetime
import logging
import os
import socket
import sys
import time
import urllib2
import google
import yaml
from google.appengine.api import validation
from google.appengine.api import yaml_object
# Path of the SDK VERSION file, relative to the google.appengine package.
VERSION_FILE = '../../VERSION'
# Seconds to wait for the update-check RPC before giving up.
UPDATE_CHECK_TIMEOUT = 3
# Name of the per-user file (in $HOME) that records nag preferences.
NAG_FILE = '.appcfg_nag'
class NagFile(validation.Validated):
  """A validated YAML class to represent the user's nag preferences.

  Attributes:
    timestamp: The timestamp of the last nag.
    opt_in: True if the user wants to check for updates on dev_appserver
      start.  False if not.  May be None if we have not asked the user yet.
  """

  # Schema enforced by the validation framework when the YAML is loaded.
  ATTRIBUTES = {
      'timestamp': validation.TYPE_FLOAT,
      'opt_in': validation.Optional(validation.TYPE_BOOL),
  }

  @staticmethod
  def Load(nag_file):
    """Load a single NagFile object where one and only one is expected.

    Args:
      nag_file: A file-like object or string containing the yaml data to parse.

    Returns:
      A NagFile instance.
    """
    return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
  """Gets the version of the SDK by parsing the VERSION file.

  Args:
    isfile: used for testing.
    open_fn: Used for testing.

  Returns:
    A Yaml object or None if the VERSION file does not exist.
  """
  # VERSION lives at a fixed location relative to the google.appengine
  # package directory.
  sdk_dir = os.path.dirname(google.appengine.__file__)
  version_path = os.path.join(sdk_dir, VERSION_FILE)
  if not isfile(version_path):
    logging.error('Could not find version file at %s', version_path)
    return None
  fh = open_fn(version_path, 'r')
  try:
    return yaml.safe_load(fh)
  finally:
    fh.close()
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
class SDKUpdateChecker(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
rpcserver,
configs,
isdir=os.path.isdir,
isfile=os.path.isfile,
open_fn=open):
"""Create a new SDKUpdateChecker.
Args:
rpcserver: The AbstractRpcServer to use.
configs: A list of yaml objects or a single yaml object that specify the
configuration of this application.
isdir: Replacement for os.path.isdir (for testing).
isfile: Replacement for os.path.isfile (for testing).
open_fn: Replacement for the open builtin (for testing).
"""
if not isinstance(configs, list):
configs = [configs]
self.rpcserver = rpcserver
self.isdir = isdir
self.isfile = isfile
self.open = open_fn
self.runtimes = set(config.runtime for config in configs)
self.runtime_to_api_version = {}
for config in configs:
self.runtime_to_api_version.setdefault(
config.runtime, set()).add(config.api_version)
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser('~/')
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ['HOMEDRIVE'] = drive
return os.path.expanduser('~/' + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject(isfile=self.isfile, open_fn=self.open)
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
sys.exit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error('Could not determine if the SDK supports the api_version '
'requested in app.yaml.')
return
unsupported_api_versions_found = False
for runtime, api_versions in self.runtime_to_api_version.items():
if 'supported_api_versions' in version:
supported_api_versions = version['supported_api_versions'].get(
runtime, version)['api_versions']
else:
supported_api_versions = version['api_versions']
unsupported_api_versions = sorted(api_versions -
set(supported_api_versions))
if unsupported_api_versions:
unsupported_api_versions_found = True
if len(unsupported_api_versions) == 1:
logging.critical('The requested api_version (%s) is not supported by '
'the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions[0], runtime,
supported_api_versions)
else:
logging.critical('The requested api_versions (%s) are not supported '
'by the %s runtime in this release of the SDK. The '
'supported api_versions are %s.',
unsupported_api_versions, runtime,
supported_api_versions)
if unsupported_api_versions_found:
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
Queries the server for the latest SDK version at the same time reporting
the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support an api_version named in
a configuration in self.configs.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
responses = {}
try:
for runtime in self.runtimes:
responses[runtime] = yaml.safe_load(self.rpcserver.Send(
'/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'],
runtime=runtime))
except (urllib2.URLError, socket.error), e:
logging.info('Update check failed: %s', e)
return
try:
latest = sorted(responses.values(), reverse=True,
key=lambda release: _VersionList(release['release']))[0]
except ValueError:
logging.warn('Could not parse this release version')
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
for runtime, response in responses.items():
api_versions = response['api_versions']
obsolete_versions = sorted(
self.runtime_to_api_version[runtime] - set(api_versions))
if len(obsolete_versions) == 1:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions[0],
response, version, force=True)
elif obsolete_versions:
self._Nag(
'The api versions you are using (%s) are obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % obsolete_versions,
response, version, force=True)
deprecated_versions = sorted(
self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
if len(deprecated_versions) == 1:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions[0], response, version)
elif deprecated_versions:
self._Nag(
'The api versions you are using (%s) are deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
deprecated_versions, response, version)
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = SDKUpdateChecker.MakeNagFilename()
if self.isfile(nag_filename):
fh = self.open(nag_filename, 'r')
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
return None
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
If the destination path does not exist, this method will log an error
and fail silently.
Args:
nag: The NagFile to write.
"""
nagfilename = SDKUpdateChecker.MakeNagFilename()
try:
fh = self.open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
Because we don't want to nag the user everytime, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug('Skipping nag message')
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print '****************************************************************'
print msg
print '-----------'
print 'Latest SDK:'
print yaml.dump(latest)
print '-----------'
print 'Your SDK:'
print yaml.dump(version)
print '-----------'
print 'Please visit https://developers.google.com/appengine/downloads'
print 'for the latest SDK'
print '****************************************************************'
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = 0.0
if nag.opt_in is None:
answer = input_fn('Allow dev_appserver to check for updates on startup? '
'(Y/n): ')
answer = answer.strip().lower()
if answer == 'n' or answer == 'no':
print ('dev_appserver will not check for updates on startup. To '
'change this setting, edit %s' %
SDKUpdateChecker.MakeNagFilename())
nag.opt_in = False
else:
print ('dev_appserver will check for updates on startup. To change '
'this setting, edit %s' % SDKUpdateChecker.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
| yencarnacion/jaikuengine | .google_appengine/google/appengine/tools/sdk_update_checker.py | Python | apache-2.0 | 14,498 | [
"VisIt"
] | 1b14c6c0859ff3ef935b6cd975090fe0aa4d9c885a34bdd6f014eaaa6b3d3f83 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTopgo(RPackage):
    """topGO package provides tools for testing GO terms while accounting
    for the topology of the GO graph. Different test statistics and
    different methods for eliminating local similarities and dependencies
    between GO terms can be implemented and applied."""

    homepage = "https://www.bioconductor.org/packages/topGO/"
    url = "https://git.bioconductor.org/packages/topGO"

    # Bioconductor packages are pinned to git commits rather than tarballs.
    version('2.30.1', git='https://git.bioconductor.org/packages/topGO', commit='b1469ce1d198ccb73ef79ca22cab81659e16dbaa')
    version('2.28.0', git='https://git.bioconductor.org/packages/topGO', commit='066a975d460046cce33fb27e74e6a0ebc33fd716')

    # These topGO releases target the R 3.4 series.
    depends_on('r@3.4.0:3.4.9')
    depends_on('r-dbi', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-sparsem@0.73:', type=('build', 'run'))
    depends_on('r-annotationdbi@1.7.19:', type=('build', 'run'))
    depends_on('r-go-db@2.3.0:', type=('build', 'run'))
    depends_on('r-biobase@2.0.0:', type=('build', 'run'))
    depends_on('r-graph@1.14.0:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.13.6:', type=('build', 'run'))
"Bioconductor"
] | d4a0dc142efabb88a06ce2130cbbe6f25f0ff6d1084aca79e39da635935274d5 |
# -*- coding: utf-8 -*-
"""
@author: c.zambaldi
"""
import os, sys
import math
try:
import numpy as np
except:
np = None
import time
from sketch import Sketch
#from msc.tools import MSC_TOOLS
from tools import Tools
#class Proc(Sketch, MSC_TOOLS):
class Proc(Sketch, Tools):
""" This class defines the common bits needed for writing
MSC.Marc/Mentat procedure files.
Previously all preprocessing was done from one "class".
Now (March 2013) it is more modular and a little bit tidier,
thus more presentable.
"""
import getpass
author = 'python_package (C. Zambaldi) used by ' + getpass.getuser()
title = 'model'
affiliation = 'MPI fuer Eisenforschung, www.mpie.de'
initConds = []
CODE = 'GENMAT'
#CODE = 'DAMASK'
FEMSOFTWAREVERSION = 2010 # default
FEMSOFTWARE = 'Mentat'
header_line_mark = '|+++++++++++++++++++++++++++++++++++++++++++++\n'
def __init__(self):
proc = [] # empty list to hold the procedure file content
    def get_proc(self):
        """Return the list of accumulated procedure-file fragments."""
        return self.proc
def header(self, label):
"""Visually separate the sections in the Mentat procedure file
"""
assert(label is not None)
return '\n' + self.header_line_mark + \
'| %s\n' % label + \
self.header_line_mark
    def start(self,
              title=None,
              author=None,
              affiliation=None,
              FEMSOFTWARE=None):
        """Append the procedure-file preamble (title, author, usage notes).

        Any argument left as None falls back to the class-level default.
        """
        if title is None: title = self.title
        if author is None: author = self.author
        if affiliation is None: affiliation = self.affiliation
        if FEMSOFTWARE is None: FEMSOFTWARE = self.FEMSOFTWARE
        self.proc.append("""
|+++++++++++++++++++++++++++++++++++++++++++++
| PROCEDURE FILE
| FOR USE WITH MSC.%s""" % FEMSOFTWARE +"""
|=============================================
| TITLE: %s\n""" % title + """
|=============================================
| AUTHOR: %s""" % author + """
| DATE: %s""" % str(time.ctime()) + """
| GENERATED WITH: msc package by C. Zambaldi, http://github.com/czambaldi
| %s""" % affiliation + """
|+++++++++++++++++++++++++++++++++++++++++++++
| USAGE IN MENTAT:
| /!\ Save current model /!\, then
| UTILS > PROCEDURES > LOAD > START/CONT
|+++++++++++++++++++++++++++++++++++++++++++++
""")
    def procNewModel(self):
        """Append commands that reset Mentat and start a fresh model."""
        self.proc.append(self.header('NEW MODEL'))
        self.proc.append("""
*new_model yes\n*select_reset\n*plot_reset\n*expand_reset\n*move_reset
*set_sweep_tolerance
0.0001 | the MSC.Marc default value
""")
    def procIndentDocCall(self):
        """Document (as proc-file comments) the preIndentation() call args.

        Reads the argument dict from self.callerDict; emits them as '|'
        comment lines so the generating call is traceable in the output.
        """
        self.proc.append('''
|=== procIndentDocCall
|The parameters with which preIndentation was called were:
|m.preIndentation(
''')
        #P=self.IndentParameters
        P = self.callerDict
        callString = 'preIndentation('
        for k in iter(P):
            callString += '%s=%s, ' % (k, str(P[k]))
        self.proc.append('| %s)' % callString)
    def procParameters(self):
        """Append the parameter-definition section header."""
        self.proc.append(self.header('PARAMETER-DEFINITION'))
    def procParametersUniax(self,
                            smv=0.01,
                            eps_max=0.25,
                            def_time=100.,
                            nr_incr=100):
        """Append *define parameters for a uniaxial deformation model.

        Args:
          smv: small tolerance value used when building node sets.
          eps_max: maximal engineering strain.
          def_time: loadcase time.
          nr_incr: number of increments.

        Geometry comes from self.modelDim and self.divi.
        """
        # az = 12 for tessel666d2 (translated German note from original)
        self.proc.append('''
| generated by procParameterUniax
|+++++++++++++++++++++++++++++++++++++++++++++
|++ GEOMETRY ++++++
*define smv %f | small_value wegen node_sets''' % (smv) + '''
*define ax %f | Element''' % self.modelDim[0] + '''
*define ay %f''' % self.modelDim[1] + '''
*define az %f''' % self.modelDim[2] + '''
*define divix %i''' % self.divi[0] + '''
*define diviy %i''' % self.divi[1] + '''
*define diviz %i''' % self.divi[2] + '''
\n|++ DEFORMATION +++\n
*define eps_max %f | Maximal engineering strain''' % (eps_max) + '''
*define eps_dot 0.001 | Strain rate (for engineering strain)
*define def_time %f | old:eps_max/eps_dot | time for loadcase''' % (def_time) + '''
*define nr_incr %i | Number of increments''' % (nr_incr) + '''
*renumber_all\n*sweep_all\n
''')
def procSample(self):
self.header('SAMPLE-MODELING AND MESHING')
def proc_points(self, p_list):
p_str = '*add_points\n'
for n, p in enumerate(p_list):
p_str += '%e %e %e | %i \n' % (p[0], p[1], p[2], n)
return p_str
def proc_nodes(self, n_list):
n_str = '*add_nodes\n'
for i, n in enumerate(n_list):
n_str += '%e %e %e | %i \n' % (n[0], n[1], n[2], i)
return n_str
    def procNodeSets(self):
        """Append node-set definitions for all box faces.

        Builds: surface/prescribed sets, one set per cube face
        (*minf_nds/*maxf_nds), reduced face sets for periodic boundary
        conditions (*min_nds/*max_nds), and evaluation node/element sets.
        Relies on the *define'd parameters ax/ay/az, smv, h and d.
        """
        self.proc.append('''
|++++++++++++++++++++
| NODE SET DEFINITION
*select_clear
*select_filter_surface
*select_nodes
all_existing
*select_reset
*store_nodes prescribed_nodes
all_selected
*store_nodes surf_nodes
all_selected
*select_clear
| Nodes on Cube faces
|## faces xmin
*select_reset
*select_method_box
*select_nodes
-smv smv
-smv ay+smv
-smv az+smv
*store_nodes xminf_nds
all_selected
*select_clear
|## faces xmax
*select_nodes
ax-smv ax+smv
-smv ay+smv
-smv az+smv
*store_nodes xmaxf_nds
all_selected
*select_clear
|## faces ymin
*select_nodes
-smv ax+smv
-smv +smv
-smv az+smv
*store_nodes yminf_nds
all_selected
*select_clear
|## faces ymax
*select_nodes
-smv ax+smv
ay-smv ay+smv
-smv az+smv
*store_nodes ymaxf_nds
all_selected
*select_clear
|## faces zmin
*select_nodes
-smv ax+smv
-smv ay+smv
-smv smv
*store_nodes zminf_nds
all_selected
*select_clear
|## faces zmax
*select_nodes
-smv ax+smv
-smv ay+smv
az-smv az+smv
*store_nodes zmaxf_nds
all_selected
*select_clear
| Nodes on cube faces, but not intersecting on the boundaries (for periodic boundary conditions)
|## periodic xmin
*select_reset
*select_method_box
*select_nodes
-smv smv
-smv ay+smv
-smv az+smv
*store_nodes xmin_nds
all_selected
*select_clear
|## periodic xmax
*select_nodes
ax-smv ax+smv
-smv ay+smv
-smv az+smv |z
|-smv az-smv |z D.
*store_nodes xmax_nds
all_selected
*select_clear
|## periodic ymin
*select_nodes
|-smv ax+smv |x
-smv ax-smv |x D.
-smv smv
-smv az+smv
*store_nodes ymin_nds
all_selected
*select_clear
|## periodic ymax
*select_nodes
|-smv ax+smv
-smv ax-smv | L.Delannay
ay-smv ay+smv
-smv az+smv
*store_nodes ymax_nds
all_selected
*select_clear
|## periodic zmin
*select_nodes
|-smv ax+smv |x
-smv ax-smv |x D.
|-smv ay+smv |y
-smv ay-smv |y D.
-smv smv
*store_nodes zmin_nds
all_selected
*select_clear
|## periodic zmax
*select_nodes
|-smv ax+smv |x
-smv ax-smv |x D.
|-smv ay+smv |y
-smv ay-smv |y D.
az-smv az+smv
*store_nodes zmax_nds
all_selected
*select_clear
| EVALUATION NODES SET
*set_node_labels on
*select_clear
*select_method_box
*select_nodes
-smv smv
-smv smv
0.2*h 0.8*h
*store_nodes evaluate_nodes
all_selected
*select_clear
*select_method_box
*select_elements
-d smv
-d smv
-smv h+smv
*store_elements quarter_elements
*all_selected
*invisible_selected
''')
def procBoundaryConditions(self):
self.header('BOUNDARY CONDITIONS')
def procNodeFixXYZ(self, name='node1_fix_all',
nodes=[1]):
nodestr = ''
for i in range(0, len(nodes)):
nodestr = nodestr + ' %i' % nodes[i]
self.proc.append('''
*new_apply
*apply_name\n%s''' % name + '''
*apply_dof x *apply_dof_value x
0
*apply_dof y *apply_dof_value y
0
*apply_dof z *apply_dof_value z
0
*add_apply_nodes\n%s''' % nodestr + '''
#
''')
def procLoadCase(self):
self.header('LOADCASES DEFINITION')
    def procTable(self, tablename='displacement',
                  tabletype='time',
                  tablepoints=[(0., 0.), ('def_time', 'eps_max*az')]):
        """Append a Mentat table definition.

        Args:
          tablename: name of the new table.
          tabletype: independent variable of the table (e.g. 'time').
          tablepoints: (x, y) pairs; values may be numbers or *define'd
            parameter expressions as strings.
        """
        self.proc.append('''
|
| TABLE DEFINITION
|
*new_table
*table_name
%s''' % tablename + '''
*set_table_type
%s''' % tabletype + '''
*table_add''')
        for pts in tablepoints:
            self.proc.append('%s\n%s' % (pts[0], pts[1]))
        self.proc.append('''\n*show_table\n*table_fit\n*table_filled\n''')
def procContact(self):
self.header('CONTACT DEFINITION')
    def deg2rad(self, deg):
        """Convert an angle from degrees to radians."""
        return (deg / 180. * math.pi)
    def rad2deg(self, rad):
        """Convert an angle from radians to degrees."""
        return (rad * 180. / math.pi)
    def e1(self):
        """Cartesian unit vector along x."""
        return np.array([1., 0., 0.])

    def e2(self):
        """Cartesian unit vector along y."""
        return np.array([0., 1., 0.])

    def e3(self):
        """Cartesian unit vector along z."""
        return np.array([0., 0., 1.])
    def procInitCond(self, iconds=['icond_mpie'], ic_els=['all_existing']):
        """Append state-variable initial conditions, one per entry of iconds.

        Args:
          iconds: initial-condition names; also recorded in self.initConds.
          ic_els: element set for each icond (parallel to iconds).

        NOTE(review): mutable default arguments and the class-level
        initConds list are shared across calls/instances -- confirm this
        is intended.
        """
        self.proc.append(self.header('INITIAL CONDITIONS'))
        self.initConds.extend(iconds)
        for ic in range(0, len(iconds)):
            # State-variable id 2; the icond value is the 1-based index.
            self.proc.append('''
*new_icond
*icond_name
%s
*icond_type state_variable
*icond_param_value state_var_id
2
*icond_dof var *icond_dof_value var
%i
*add_icond_elements
%s\n''' % (iconds[ic], ic + 1, ic_els[ic]))
    def procInitCondSV(self, label=['icond_mpie'],
                       StateVariableNumber=None,
                       StateVariableValue=None,
                       elements='all_existing',
                       new=True):
        """Append one state-variable initial condition and record its label.

        Delegates string building to init_cond_state_var().

        NOTE(review): despite the list default, *label* is used as a single
        name string -- confirm callers pass a string.
        """
        self.initConds.append(label)
        icond = self.init_cond_state_var(label=label,
                                         StateVariableNumber=StateVariableNumber,
                                         StateVariableValue=StateVariableValue,
                                         elements=elements,
                                         new=new)
        self.proc.append(icond)
def init_cond_state_var(self,
label=['icond_mpie'],
StateVariableNumber=None,
StateVariableValue=None,
elements='all_existing', # set 'None' for don't change
new=True):
icond = ''
if new:
icond += '*new_icond\n'
if label is not None:
icond += "*icond_name\n%s\n" % label
icond += ("*icond_type state_variable\n"
"*icond_param_value state_var_id\n"
"%i\n"
"*icond_dof var *icond_dof_value var\n"
"%i\n") % (StateVariableNumber, StateVariableValue)
if elements is not None:
icond += '*add_icond_elements\n%s\n' % elements
return icond
    def procInitCondDamask(self,
                           T=300, # temperature (K)
                           H=[1], # homogenization
                           M=[1] # microstructure
                           ):
        """Append the DAMASK-style initial conditions.

        State variable 1 is temperature, 2 the homogenization ID and 3 the
        microstructure ID; one icond is emitted per entry of H and M.
        """
        self.procInitCondSV(label='icond_temperature',
                            StateVariableNumber=1,
                            StateVariableValue=T)
        for h in H:
            self.procInitCondSV(label='icond_homogenization_%i' % h,
                                StateVariableNumber=2,
                                StateVariableValue=h)
        for m in M:
            self.procInitCondSV(label='icond_microstructure_%i' % m,
                                StateVariableNumber=3,
                                StateVariableValue=m)
    def procMaterial(self, name='hypela2', els='all_existing'):
        """Append a hypoelastic (HYPELA2 user subroutine) material."""
        self.proc.append(self.header('MATERIAL'))
        self.proc.append('''
*material_name %s''' % name + '''
*material_type mechanical:hypoelastic
*material_option hypoelastic:method:hypela2
*material_option hypoelastic:pass:def_rot
*add_material_elements
%s\n''' % (els))
    def procMaterialElast(self, name='hypela2', els='all_existing'):
        """Append a hypoelastic material.

        NOTE(review): currently byte-identical to procMaterial() except
        for the section header label -- confirm whether an elastic-only
        variant was intended here.
        """
        self.proc.append(self.header('MATERIAL DATA'))
        self.proc.append('''
*material_name %s''' % name + '''
*material_type mechanical:hypoelastic
*material_option hypoelastic:method:hypela2
*material_option hypoelastic:pass:def_rot
*add_material_elements
%s\n''' % (els))
    def procGeometricProperties(self, cdil='on'):
        """Append 3D-solid geometric properties for all elements.

        Args:
          cdil: constant-dilatation option, 'on' or 'off'.
        """
        self.proc.append(self.header('GEOMETRIC PROPERTIES'))
        self.proc.append('''
*geometry_type mech_three_solid
*geometry_option cdilatation:%s''' % cdil + '''
*geometry_option assumedstrn:off
*geometry_option ctemperature:on
*geometry_option red_integ_capacity:off
*add_geometry_elements
all_existing\n''')
    def procJobDef(self, cpfemLoc='mpie_marc_cz.f'):
        """Append the mechanical job definition.

        Registers all initial conditions recorded in self.initConds,
        enables the large-strain analysis options and points the job at
        the CPFEM user-subroutine source file *cpfemLoc*.
        """
        self.proc.append(self.header('JOB DEFINITION'))
        self.proc.append('''
*sweep_all\n*surfaces_wireframe *regen
*job_class mechanical\n
''')
        # Attach every previously defined initial condition to the job.
        for ic in self.initConds:
            self.proc.append('''
*add_job_iconds %s
''' % ic)
        self.proc.append('''
| ANALYSIS OPTIONS
|| Large Displacement
*job_option large:on
|| Plasticity Procedure: Large strain additive
*job_option plasticity:l_strn_mn_add
|| Advanced Options
||| CONSTANT DILATATION (moved to Geometric Properties Section in Marc 2008r1)
*job_option cdilatation:on
||| Updated Lagrange Procedure
*job_option update:on
||| Large Strains
*job_option finite:on
||| Multiplicative Decomposition (large stra =2)
*job_option plas_proc:multiplicative
| JOB PARAMETERS
|| Solver: Nonsymmetrical Solution
*job_option solver_nonsym:on
| SOUBROUTINE DEFINITION
*job_usersub_file %s''' % (cpfemLoc) + '''
|*job_usersub_file only_forcdt.f
*job_option user_source:compile_save
|*job_option user_source:run_saved
\n
''')
    def proc_copy_job(self,
                      jobname=None, # e.g. ori
                      number=None): # e.g. nr of ori
        """Append a *copy_job command, optionally naming the copy.

        NOTE(review): if *number* is given while *jobname* is None, the
        '+=' below raises TypeError -- confirm callers always pass a
        jobname together with a number.
        """
        p = '*copy_job\n'
        if number is not None:
            # Zero-padded suffix, e.g. 'ori007'.
            jobname += '%03i' % number
        if jobname is not None:
            #jobname = 'copied_job'
            p += '*job_name %s\n' % jobname
        self.proc.append(p)
    def copy_jobs_for_oris(self):
        """Placeholder -- not implemented yet (TODO)."""
        pass
    def write_dat(self):
        """Append commands that write the Marc input (.dat) file and then
        copy the job under the name 'postdef'."""
        self.proc.append('*job_write_input yes\n')
        self.proc.append('*copy_job\n')
        self.proc.append('*job_name postdef\n')
    def procAnalysisOptions(self):
        """Append the large-strain analysis options block.

        NOTE(review): duplicates the ANALYSIS OPTIONS portion embedded in
        procJobDef() -- confirm which callers rely on which.
        """
        self.proc.append('''
| ANALYSIS OPTIONS
|| Large Displacement
*job_option large:on
|| Plasticity Procedure: Large strain additive
*job_option plasticity:l_strn_mn_add
|| Advanced Options
||| CONSTANT DILATATION
*job_option cdilatation:on
||| Updated Lagrange Procedure
*job_option update:on
||| Large Strains
*job_option finite:on
||| Multiplicative Decomposition (large stra =2)
*job_option plas_proc:multiplicative\n''')
def procJobResults(self, step=5):
    """Append the JOB RESULTS section: post-file write frequency and the
    requested tensor/scalar post variables; for the GENMAT code an extra
    entry with its user-defined post variables is appended.

    step -- write every step-th increment to the *.t16/*.t19 post file.
    """
    head = '''
| JOB RESULTS
|*job_option post ascii/binary | Write Result File as formatted ASCII or binary
*job_param post %i | write each ith increment to *.t16 (binary) or *.t19 (ascii)''' % step
    tail = '''
*add_post_tensor stress
*add_post_tensor strain
*add_post_tensor cauchy
*add_post_var temperature
*add_post_var state2 homogenization | Homogenization ID of MPIE crystal-plasticity (old material ID)
*add_post_var state3 microstructure | Microstructure ID of MPIE crystal-plasticity
*add_post_var von_mises
*add_post_var eel_strain
*add_post_var ecauchy
*add_post_var te_energy
*add_post_var tepl_strain
*add_post_var thickness
*add_post_var eq/yl_stress
*add_post_var volume | volume (initial)
*add_post_var cur_volume | volume (current)
\n'''
    self.proc.append(head + tail)
    if self.CODE == 'GENMAT':
        # GENMAT exposes Euler angles, misorientation and per-system
        # shears through numbered user post variables.
        user_vars = '''
*add_post_var user1
*edit_post_var user1 Approx. element thickness
*add_post_var user2
*edit_post_var user2 phi1
*add_post_var user3
*edit_post_var user3 PHI
*add_post_var user4
*edit_post_var user4 phi2
*add_post_var user5
*edit_post_var user5 Misorientation angle
*add_post_var user6
*edit_post_var user6 Accumulated slip
|*add_post_var user7 Vol fraction was excluded from subroutine (CZambaldi)
|*edit_post_var user7 Vol fraction
*add_post_var user7
*edit_post_var user7 gam1new
*add_post_var user8
*edit_post_var user8 gam2new
*add_post_var user9
*edit_post_var user9 gam3new
*add_post_var user10
*edit_post_var user10 gam4new
*add_post_var user11
*edit_post_var user11 gam5new
*add_post_var user12
*edit_post_var user12 gam6new
*add_post_var user13
*edit_post_var user13 gam7new
*add_post_var user14
*edit_post_var user14 gam8new
*add_post_var user15
*edit_post_var user15 gam9new
*add_post_var user16
*edit_post_var user16 gam10new
*add_post_var user17
*edit_post_var user17 gam11new
*add_post_var user18
*edit_post_var user18 gam12new
*add_post_var user19
*edit_post_var user19 gam13new
*add_post_var user20
*edit_post_var user20 gam14new
*add_post_var user21
*edit_post_var user21 gam15new
*add_post_var user22
*edit_post_var user22 gam16new
*add_post_var user23
*edit_post_var user23 gam1dot
*add_post_var user24
*edit_post_var user24 gam2dot
*add_post_var user25
*add_post_var user26
*add_post_var user27
*add_post_var user28
*add_post_var user29
*add_post_var user30\n'''
        self.proc.append(user_vars)
def procJobParameters(self):
    """Append the JOB PARAMETERS section: nonsymmetric solver plus the
    user-subroutine file matching the active constitutive code."""
    # The subroutine source file depends on which CP implementation runs.
    usersub = {'GENMAT': 'mpie_marc_cz.f',
               'DAMASK': 'DAMASK_marc.f90'}[self.CODE]
    text = '''
| JOB PARAMETERS
|| Solver: Nonsymmetrical Solution
*job_option solver_nonsym:on
| SOUBROUTINE DEFINITION
*job_usersub_file %s''' % usersub
    text += '''
|*job_usersub_file only_forcdt.f
*job_option user_source:compile_save
|*job_option user_source:run_saved\n'''
    self.proc.append(text)
def proc_usersub_def(self):
    """Placeholder for a dedicated user-subroutine definition section."""
    # TODO: this already exists in procJobParameters and in procJobDef
    pass
def procFriction(self):
    """Append the FRICTION section.

    All friction commands are currently commented out in the emitted
    procedure text; the section only documents the available options.
    """
    section = '''
| FRICTION
|job_option frictype:<none/coul_stick_slip/shear/ coulomb/shear_roll/coulomb_roll>
|*job_option frictype\n'''
    self.proc.append(section)
def procCleanUp(self, sweepTol=0.001):
    """Append model clean-up commands (sweep, remove unused nodes,
    renumber).

    NOTE(review): sweepTol is accepted but currently unused -- the sweep
    runs with Mentat's current tolerance; confirm whether a sweep
    tolerance command should be emitted here.
    """
    commands = '''
| CLEAN UP
*sweep_all
*remove_unused_nodes
*renumber_all\n'''
    self.proc.append(commands)
def procSaveModel(self, modelname='model.mfd'):
    """Append the SAVE MODEL section: sweep/renumber, save under
    modelname, update the job and open the job monitor.

    Job submission commands are left commented out in the emitted text.
    """
    self.proc.append(self.header('SAVE MODEL'))
    body = '''
*sweep_all
*renumber_all
*save_as_model %s yes
*update_job
|*submit_job 1
|*submit_job 2 | nur bei compile_save
|*pause 4
*monitor_job
|@top()
|@push(jobs)
|@popup(job_run_popmenu)\n''' % modelname
    self.proc.append(body)
def norm(self, vec):
    """Return the Euclidean norm of a 3-component vector.

    vec -- sequence of exactly three numbers.
    Raises ValueError (with a message; the original did a bare
    ``raise (ValueError)``) when vec does not have three components.
    """
    # Guard clause first so nothing is computed for invalid input.
    if len(vec) != 3:
        raise ValueError('norm expects a 3-component vector, got %d' % len(vec))
    return math.sqrt(vec[0] ** 2. + vec[1] ** 2. + vec[2] ** 2.)
# def procSetMoveTranslations(self,f,t):
#
def getNodeSets(self):
    """Read node sets from the dummy post file and store them.

    Opens self.post_dummy via opent16(), falling back to the matching
    formatted (*.t19) file, then fills self.nodeSets mapping each set
    name to its item list. Exits the process if no post file opens.

    Fixes over the original: the undefined name ``postname`` is now
    bound to self.post_dummy (the original raised NameError), the broken
    exec() -- which built an unquoted name inside the subscript -- is a
    plain dict assignment, and ``== None`` became ``is None``.
    Marked 'Not used by now' by the original author.
    """
    postname = self.post_dummy
    print('getNodeSets: Trying to open dummy %s/%s ...' % (os.getcwd(), postname))
    self.p = opent16(postname)
    if self.p is None:
        # Retry with the formatted ASCII post file (*.t19).
        self.p = post_open(postname[0:-1] + '9')
    if self.p is None:
        print('Could not open %s. run make_post' % postname)
        sys.exit(1)
    self.p.moveto(0)
    print(self.p.nodes())
    nSets = self.p.sets()
    print(nSets)
    self.nodeSets = {}
    # NOTE(review): range(0, nSets - 1) skips the last set -- kept from
    # the original; confirm against the py_post set indexing convention.
    for i in range(0, nSets - 1):
        s = self.p.set(i)
        print('Set: %s, Type: %s\n%s' % (s.name, s.type, s.items))
        self.nodeSets[s.name] = s.items
def ParticleLinks(self):
    """Prepare particle links; currently this only refreshes the node
    sets -- the servo-link definitions are still unimplemented."""
    self.getNodeSets()
def quit_mentat(self):
    """Queue the command that makes Mentat exit once the model is built."""
    self.proc.append('*quit yes\n')
def proc_draw_update_manual(self):
    """Queue the command that switches Mentat to manual redraws
    (speeds up batch model generation)."""
    self.proc.append('*draw_manual\n')
def proc_draw_update_automatic(self):
    """Queue the command that restores Mentat's automatic redraws."""
    self.proc.append('*draw_automatic\n')
def to_file(self, dst_path=None, dst_name=None):
    """Write the collected procedure commands (self.proc) to a file.

    dst_name defaults to 'msc_procedure_file.proc', dst_path to the
    current directory. Both are stored on the instance before
    delegating the actual write to self.print_commands.
    """
    self.procfilename = dst_name if dst_name is not None else 'msc_procedure_file.proc'
    self.procpath = dst_path if dst_path is not None else './'
    target = os.path.join(self.procpath, self.procfilename)
    print(target)
    self.print_commands(self.proc, filename=target)
"CRYSTAL"
] | d3ec07ea89daa9c2bd3331a8c26f19eba4318383ec73a9c787a2ea8b88e5ad61 |
#!/usr/bin/env python
import os
import sys
import fnmatch
import fileinput
from subprocess import call
import re
def main(Main_Folder):
    """Split every *.count file under Main_Folder into gene vs. other rows.

    Lines whose first field matches the Volvox gene-id pattern
    (Vocar...m.g) are written back to the original file; all other lines
    go to 'HTSeq-Additional.count' in the same directory.

    Fixes over the original: file handles are managed with context
    managers (they leaked on exception before), the regex is a raw
    string, and the Python-2-only ``print fl`` statement is replaced by
    a py2/py3-compatible call.
    """
    geneid = re.compile(r'Vocar[a-zA-Z0-9]*m\.g')
    for root, dirnames, filenames in os.walk(Main_Folder):
        for filename in fnmatch.filter(filenames, '*.count'):
            checkf = os.path.join(root, filename)
            routf = os.path.join(root, 'HTSeq-Additional.count')
            # Read everything first: checkf is rewritten in place below.
            with open(checkf, 'r') as f:
                fl = f.readlines()
            print(fl)
            with open(routf, 'w') as fout, open(checkf, 'w') as rf:
                for line in fl:
                    if geneid.search(line):
                        rf.write(line.rstrip() + '\n')
                    else:
                        fout.write(line.rstrip() + '\n')
if __name__ == '__main__':
    # CLI entry point: first argument is the folder to scan.
    # Fixed the Python-2-only print statements (SyntaxError under
    # python3, which the generic '#!/usr/bin/env python' shebang allows).
    print('input folder is: ' + sys.argv[1])
    main(sys.argv[1])
    print("completed")
| Luminarys/Bioinformatics | Scripts/ExtractStuff.py | Python | gpl-2.0 | 1,040 | [
"HTSeq"
] | 5e0389cc93fc2d3ee2d191d4491bd9c78d007b824efb69891286686bdff12a7c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.