text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thurs Oct 05 07:36:55 2017
@author: michellemorales
"""
#!/usr/bin/python
import sys
import os
from Tkinter import *
import FeatureExtract
import json
import tkMessageBox
def run():
    """
    Run the OpenMM pipeline on the example clip: visual features via
    OpenFace, (optionally) audio features via Covarep, then IBM Watson
    speech-to-text. Reads credentials and tool paths from the module-level
    'pars' config dict (loaded below before the GUI starts).
    """
    global pars
    ibm_pass = str(pars["IBM_PASSWORD"])
    ibm_un = str(pars["IBM_USERNAME"])
    openface = str(pars["OPENFACE"])
    # TODO: perform video analysis on a user-chosen file instead of this
    # hard-coded example path.
    print("Running OpenFace...")
    FeatureExtract.extract_visual("/Users/morales/GitHub/OpenMM/examples/FerrisBuellerClip.mp4", openface)
    # Perform audio analysis
    print("Running Covarep...")
    # FeatureExtract.extract_audio("/Users/morales/GitHub/OpenMM/examples/")
    # Perform speech-to-text
    print("Running speech-to-text...")
    FeatureExtract.ibm_speech2text("/Users/morales/GitHub/OpenMM/examples/FerrisBuellerClip.wav", 'en-US', ibm_un, ibm_pass)
    # Perform linguistic analysis
    # Output prediction

# Get config parameters (module-level so run() can read them via 'global').
config = "/Users/morales/Desktop/config.json"
# BUG FIX: the original open(config, "r").read() leaked the file handle.
with open(config, "r") as config_file:
    json_file = config_file.read()
pars = json.loads(json_file)
# Build the minimal GUI: a label plus a button that kicks off the pipeline.
win = Tk()
win.title("OpenMM")
L = Label(win, text="Please choose a video:")
B = Button(win, text="Run OpenMM", command=run)
L.pack()
B.pack(padx=50, pady=50)
win.mainloop()
|
michellemorales/OpenMM
|
scripts/gui.py
|
Python
|
gpl-2.0
| 1,276
|
[
"OpenMM"
] |
2b19a722b86547f27301d0865e3cdbd3e91b571a09196c29c6d21e35fc03e971
|
#!/usr/bin/env python
# pylint: disable-msg=C0103
##***** BEGIN LICENSE BLOCK *****
##Version: MPL 1.1
##
##The contents of this file are subject to the Mozilla Public License Version
##1.1 (the "License"); you may not use this file except in compliance with
##the License. You may obtain a copy of the License at
##http:##www.mozilla.org/MPL/
##
##Software distributed under the License is distributed on an "AS IS" basis,
##WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
##for the specific language governing rights and limitations under the
##License.
##
##The Original Code is the AllegroGraph Java Client interface.
##
##The Original Code was written by Franz Inc.
##Copyright (C) 2006 Franz Inc. All Rights Reserved.
##
##***** END LICENSE BLOCK *****
from __future__ import absolute_import
from ..exceptions import QuerySyntaxException
from ..model import Value
from ..vocabulary import RDF, XMLSchema
import datetime
import time
XSD = XMLSchema
###########################################################################################################
## RDF Constants
###########################################################################################################
#class XSD:
# NS = "http://www.w3.org/2001/XMLSchema#"
# STRING = NS + "string"
# BOOLEAN = NS + "boolean"
# INTEGER = NS + "integer"
# INT = NS + "int"
# LONG = NS + "long"
# FLOAT = NS + "float"
# DOUBLE = NS + "double"
# DATE = NS + "date"
# DATETIME = NS + "datetime"
# TIME = NS + "time"
# NUMBER = NS + 'number' ## NOT SURE ABOUT THIS
# URL = NS + 'anyURI'
###########################################################################################################
## Exceptions
###########################################################################################################
###########################################################################################################
## Tokenizer
###########################################################################################################
# Keyword introducing the clause that lists named graphs/contexts.
CONTEXTS_OR_DATASET = 'CONTEXTS'
# Alternate spellings accepted from the user, mapped to canonical keywords.
ALIASES = {
    'CONTEXT': 'CONTEXTS',
    'TPL': 'TRIPLE',
    'IN': 'MEMBER',
}
class Token:
    """A single lexical token produced by the CommonLogic tokenizer."""
    VARIABLE = 'VARIABLE'
    STRING = 'STRING'
    QNAME = 'QNAME'
    URI = 'URI'
    BRACKET = 'BRACKET'
    RESERVED_WORD = 'RESERVED_WORD'
    NUMBER = 'NUMBER'
    BRACKET_SET = set(['(', ')', '[', ']',])
    RESERVED_WORD_SET = set(['AND', 'OR', 'NOT', 'NAF', 'OPTIONAL', 'MEMBER', 'IMPLIES',
                             'FORALL', 'EXISTS', 'TRUE', 'FALSE', 'LIST',
                             '=', '<', '>', '<=', '>=', '!=', '+', '-', 'TRIPLE', 'QUAD',
                             'SELECT', 'DISTINCT', 'WHERE', CONTEXTS_OR_DATASET, 'LIMIT', 'ORDER', 'BY',
                             'REGEX',])
    def __init__(self, token_type, value):
        # Reserved words are canonicalized (and validated) up front.
        if token_type == Token.RESERVED_WORD:
            value = OpExpression.parse_operator(value)
        self.value = value
        self.token_type = token_type
        self.offset = -1    # source position; filled in later by the tokenizer
    def __str__(self):
        kind = self.token_type
        if kind == Token.VARIABLE:
            return '?' + self.value
        if kind == Token.STRING:
            return '"%s"' % self.value
        return self.value
    def token_is(self, value):
        """Case-insensitive comparison of this token's text against 'value'."""
        return self.value.upper() == value.upper()
    @staticmethod
    def reserved_type(token, token_types):
        """True iff 'token' is a RESERVED_WORD token whose value is in 'token_types'."""
        if not token or isinstance(token, Term):
            return False
        if token.token_type != Token.RESERVED_WORD:
            return False
        if not isinstance(token_types, (list, tuple, set)):
            token_types = [token_types]
        for candidate in token_types:
            if token.value == candidate:
                return True
        return False
    @staticmethod
    def printem(message, tokens):
        """Debug helper: print 'message' followed by the tokens' string forms."""
        rendered = [str(tok) for tok in tokens]
        print(message + str(rendered))
# Token kinds that can stand alone as atomic terms in an expression.
ATOMIC_TERM_TOKEN_TYPES = set([Token.VARIABLE, Token.STRING, Token.NUMBER, Token.URI, Token.QNAME])
# Punctuation allowed (besides alphanumerics) inside a ?variable name.
LEGAL_VARIABLE_CHARS = set(['.', '_', '-',])
class Tokenizer():
    """
    Splits a CommonLogic query string into a list of Token objects.
    'translator' is used only for error reporting (its syntax_exception).
    """
    ## characters that terminate an undelimited word:
    DELIMITER_CHARS = set([' ', ',', '(', ')', '[', ']'])
    def __init__(self, translator, source_string):
        self.translator = translator
        self.source_string = source_string
        self.tokens = []
    def grab_variable(self, string):
        """
        'string' begins with a question mark, i.e., it begins with a variable.
        Convert it into a variable token, and return the remainder of the string.
        """
        endPos = len(string)   # default: the variable runs to the end
        for i in range(1, len(string)):
            c = string[i]
            if c.isalnum() or c in LEGAL_VARIABLE_CHARS:
                continue
            endPos = i
            break
        self.tokens.append(Token(Token.VARIABLE, string[1:endPos]))
        return string[endPos:]
    def grab_string(self, string, delimiter):
        """
        'string' begins with a single or double quote.
        Convert the quoted portion into a string token, and return the remainder.
        """
        endPos = string.find(delimiter, 1)
        if endPos == -1:
            raise QuerySyntaxException("Unterminated string: %s" % string)
        self.tokens.append(Token(Token.STRING, string[1:endPos]))
        return string[endPos + 1:]
    def grab_uri(self, string):
        """
        'string' begins with a '<'.
        Convert the URI portion into a URI token, and return the remainder.
        """
        endPos = string.find('>')
        if endPos == -1:
            raise QuerySyntaxException("Unterminated URI: %s" % string)
        self.tokens.append(Token(Token.URI, string[1:endPos]))
        return string[endPos + 1:]
    def grab_delimited_word(self, string):
        """
        The first token in 'string' must be delimited by a blank, comma, or
        other delimiter; otherwise the entire string is taken as the word.
        Classify the word (reserved word / number / qname) and append a token.
        """
        endPos = -1
        for i in range(0, len(string)):
            if string[i] in Tokenizer.DELIMITER_CHARS:
                endPos = i
                break
        word = string[:endPos] if endPos >= 0 else string
        word = ALIASES.get(word.upper(), word)
        if word.upper() in Token.RESERVED_WORD_SET:
            self.tokens.append(Token(Token.RESERVED_WORD, word))
        elif word[0].isdigit():
            self.tokens.append(Token(Token.NUMBER, word))
        elif word.find(':') >= 0:
            self.tokens.append(Token(Token.QNAME, word))
        else:
            ## BUG FIX: the original indexed self.tokens[-1] unconditionally,
            ## which raised IndexError when the very first token was bad:
            previous = self.tokens[-1] if self.tokens else None
            self.translator.syntax_exception("Unrecognized term '%s'" % word, previous)
        if endPos == -1:
            ## ran off the end of the string:
            return ''
        return string[endPos:]
    def super_strip(self, string):
        """
        Strip blanks AND leading commas.
        """
        string = string.strip()
        beginPos = 0
        while beginPos < len(string) and string[beginPos] in (' ', ','):
            beginPos += 1
        return string[beginPos:]
    def tokenize_next(self, string):
        """
        Grab one token off the front of 'string' (appending it to
        'self.tokens'), record its source offset, and return the remainder.
        """
        string = self.super_strip(string)
        if not string or string == ' ':
            return ''
        c = string[0]
        if c == '?':
            suffix = self.grab_variable(string)
        elif c in Token.BRACKET_SET:
            self.tokens.append(Token(Token.BRACKET, c))
            suffix = string[1:]
        elif c in ['"', "'"]:
            suffix = self.grab_string(string, c)
        elif c == '<' and len(string) > 1 and string[1].isalpha():
            ## BUG FIX: added the len(string) > 1 guard so a lone trailing '<'
            ## (the comparison operator) no longer raises IndexError:
            suffix = self.grab_uri(string)
        else:
            ## at this point, the first token must be delimited by a blank,
            ## comma, or delimiter:
            suffix = self.grab_delimited_word(string)
        newToken = self.tokens[len(self.tokens) - 1]
        newToken.offset = len(self.source_string) - len(suffix) - len(newToken.value)
        return suffix
    def tokenize(self):
        """Tokenize the entire source string; return the list of tokens."""
        suffix = self.source_string
        while suffix:
            suffix = self.tokenize_next(suffix)
        ## combine 'ORDER BY' tokens into one:
        for i, tok in enumerate(self.tokens):
            if tok.value == 'ORDER' and tok.token_type == Token.RESERVED_WORD:
                ## BUG FIX: the bound must be i + 1, not i (always true);
                ## otherwise a trailing ORDER token raised IndexError:
                if i + 1 < len(self.tokens):
                    nextTok = self.tokens[i + 1]
                    if nextTok.value == 'BY' and nextTok.token_type == Token.RESERVED_WORD:
                        self.tokens[i] = Token(Token.RESERVED_WORD, 'ORDERBY')
                        del self.tokens[i + 1]
                break
        return self.tokens
    @staticmethod
    def tokens_to_string(tokens, comma_delimited=False):
        """Render 'tokens' as a single display string."""
        strings = [str(tok) for tok in tokens]
        return ', '.join(strings) if comma_delimited else ' '.join(strings)
###########################################################################################################
##
###########################################################################################################
## query-section keywords (the lower-cased forms used by the parser):
SELECT = 'select'
DISTINCT = 'distinct'
FROM = 'from'
WHERE = 'where'
LIMIT = 'limit'
ORDER_BY = 'orderby'
class QueryBlock:
    """
    Holds the pieces of one parsed query: the select terms, where clause,
    and the optional contexts / limit / order-by modifiers.
    """
    def __init__(self, query_type=SELECT):
        ## BUG FIX: the original read 'query_type = query_type', which
        ## silently discarded the argument instead of storing it:
        self.query_type = query_type
        self.select_terms = None
        self.where_clause = None
        self.contexts_clause = None
        self.distinct = False
        self.limit = -1           # -1 means "no limit clause"
        self.order_by = None
        self.input_bindings = None
        self.temporary_enumerations = {}
    def stringify(self, newlines=False):
        """Render the query as 'select ... where ...' text."""
        newline = '\n' if newlines else ''
        return """select %(select)s %(newline)swhere %(where)s""" % {
            'select': stringify_terms(self.select_terms), 'where': self.where_clause, 'newline': newline,}
    def __str__(self):
        return self.stringify(newlines=True)
def stringify_terms(terms, comma_delimited=False):
    """Join the string forms of 'terms'; comma-separated when requested."""
    separator = ', ' if comma_delimited else ' '
    return separator.join(str(term) for term in terms)
class Term:
    """A leaf of the parse tree: resource, literal, variable, or artificial marker."""
    RESOURCE = 'RESOURCE'
    LITERAL = 'LITERAL'
    VARIABLE = 'VARIABLE'
    ARTIFICIAL = 'ARTIFICIAL'
    def __init__(self, term_type, value, datatype=None, qname=None):
        self.term_type = term_type
        self.value = value
        self.datatype = datatype
        self.qname = qname
    def clone(self):
        """
        Shallow copy of 'self'.
        """
        return Term(self.term_type, self.value, self.datatype, self.qname)
    def __str__(self):
        kind = self.term_type
        if kind == Term.RESOURCE:
            ## TODO: UPGRADE TO HANDLE PREFIXES
            return self.qname if self.qname else "<%s>" % self.value
        if kind == Term.VARIABLE:
            return '?' + self.value
        if kind == Term.LITERAL:
            ## TODO: UPGRADE TO HANDLE TYPED LITERALS
            if self.datatype == XSD.STRING:
                return '"%s"' % self.value
            if self.datatype in [XSD.INTEGER, XSD.NUMBER]:
                return self.value
            return '"%s"^^<%s>' % (self.value, self.datatype)
        if kind == Term.ARTIFICIAL:
            return self.value
# Reserved words that act as pseudo-terms (predication markers) rather than operators.
ARTIFICIAL_TERMS = set(['TRIPLE', 'QUAD'])
ARITHMETIC_OPERATORS = set(['+', '-'])
COMPARISON_OPERATORS = set(['=', '<', '>', '<=', '>=', '!='])
# Operators written before their parenthesized arguments even in infix mode.
PREFIX_OPERATORS = set(['REGEX'])
UNARY_BOOLEAN_OPERATORS = set(['NOT', 'NAF', 'OPTIONAL'])
# All operators producing a boolean result.
BOOLEAN_OPERATORS = set(['AND', 'OR', 'MEMBER', 'IMPLIES', 'FORALL', 'EXISTS']).union(UNARY_BOOLEAN_OPERATORS).union(COMPARISON_OPERATORS).union(PREFIX_OPERATORS)
# Operators producing a value (currently just arithmetic).
VALUE_OPERATORS = ARITHMETIC_OPERATORS
CONNECTIVE_OPERATORS = BOOLEAN_OPERATORS.union(VALUE_OPERATORS)
# Every operator name accepted by OpExpression.parse_operator.
OPERATOR_EXPRESSIONS = (CONNECTIVE_OPERATORS.union(ARITHMETIC_OPERATORS).union(ARTIFICIAL_TERMS)
                        .union(set(['ENUMERATION', 'TRUE', 'FALSE', 'LIST', ]))) # omits 'PREDICATION'
class OpExpression:
    """
    Interior node of the parse tree: an operator applied to a list of
    argument expressions (Terms or nested OpExpressions).
    """
    AND = 'AND'
    OR = 'OR'
    NOT = 'NOT'
    NAF = 'NAF'
    IN = 'MEMBER'
    OPTIONAL = 'OPTIONAL'
    IMPLIES = 'IMPLIES'
    FORALL = 'FORALL'
    EXISTS = 'EXISTS'
    ENUMERATION = 'ENUMERATION'
    PREDICATION = 'PREDICATION'
    TRUE = 'TRUE'
    FALSE = 'FALSE'
    EQUALITY = '='
    TRIPLE = 'TRIPLE'
    QUAD = 'QUAD'
    ## MAY NOT NEED TO BE EXPLICIT ABOUT THESE:
    # PLUS = '+'
    # MINUS = '-'
    ## normalization introduces addition expression types:
    COMPUTE = 'COMPUTE'
    def __init__(self, operator, arguments):
        self.operator = operator
        self.arguments = arguments
        self.predicate = None    # set for PREDICATION/ENUMERATION nodes
        self.is_spo = False      # True for TRIPLE/QUAD predications
        self.context = None
        self.parent = None
        if not isinstance(arguments, list):
            ## debugging breadcrumb left by the original author; rewritten as a
            ## print() call so the statement also parses under Python 3:
            print("BREAK HERE")
    @staticmethod
    def parse_operator(value):
        """Upper-case 'value' and verify it is a known operator or query keyword."""
        value = value.upper()
        if not value in OPERATOR_EXPRESSIONS and not value in ['SELECT', 'DISTINCT', 'WHERE', 'CONTEXTS', 'LIMIT',
                                                               'ORDER', 'BY', 'ORDERBY']:
            raise Exception("Need to add '%s' to list of OPERATOR_EXPRESSIONS" % value)
        return value
    def clone(self):
        """
        Shallow copy of 'self'
        """
        op = OpExpression(self.operator, self.arguments)
        op.predicate = self.predicate
        op.is_spo = self.is_spo
        op.context = self.context
        return op
    def __str__(self):
        ## NOTE(review): StringsBuffer is not defined in the visible portion of
        ## this module — presumably defined later in the file; confirm.
        return str(StringsBuffer(complain='SILENT').common_logify(self))
INFIX_WITH_PREFIX_TRIPLES = True
class CommonLogicTranslator:
"""
"""
SPARQL = 'SPARQL'
PROLOG = 'PROLOG'
def __init__(self, query=None, subject_comes_first=False):
    """
    Prepare to translate 'query' (a CommonLogic query string, or None to be
    supplied later via set_source_query).
    """
    ## BUG FIX: 'query' defaults to None, but the original called
    ## query.strip() unconditionally, raising AttributeError for the default:
    if query is not None:
        query = query.strip()
    self.set_source_query(query)
    self.parse_tree = None
    ## if 'subject_comes_first' is 'True', it says that all predications are in SPO order,
    ## whether prefixed by 'TRIPLE' or not:
    self.subject_comes_first = subject_comes_first
def set_source_query(self, query):
    """Record 'query' and derive the parsing-mode flags from its first character."""
    self.source_query = query
    # A query that does not open with '(' is taken to be infix syntax.
    self.infix_parse = query and query[0] != '('
    self.infix_with_prefix_triples = self.infix_parse and INFIX_WITH_PREFIX_TRIPLES
SHIM = 0 ## hack until we figure out why offsets are not quite right
def syntax_exception(self, message, token=None):
    """
    Raise QuerySyntaxException for 'message', annotated (when a token is
    available) with the token's offset and a caret pointing into the source.
    """
    if isinstance(token, list):
        token = token[0] if token else None
    if token:
        context = "Error occurred at offset %i in the string \n %s" % (token.offset, self.source_query)
        caret = ' ' + ' ' * (token.offset + 1 + CommonLogicTranslator.SHIM) + '^'
        message = "%s\n%s\n%s" % (message, context, caret)
    raise QuerySyntaxException(message)
def clean_source(self):
self.source_query = self.source_query.replace('\n', ' ').replace('\t', ' ').strip()
def token_to_term(self, token):
    """Convert a lexical Token into the corresponding parse-tree Term."""
    kind = token.token_type
    if kind == Token.VARIABLE:
        return Term(Term.VARIABLE, token.value)
    if kind == Token.URI:
        return Term(Term.RESOURCE, token.value)
    if kind == Token.STRING:
        return Term(Term.LITERAL, token.value, XSD.STRING)
    if kind == Token.NUMBER:
        # Integers and other numerics get different XSD datatypes.
        datatype = XSD.INTEGER if token.value.isdigit() else XSD.NUMBER
        return Term(Term.LITERAL, token.value, datatype)
    if kind == Token.QNAME:
        return Term(Term.RESOURCE, token.value, qname=token.value)
    if kind == Token.RESERVED_WORD and token.value in ARTIFICIAL_TERMS:
        return Term(Term.ARTIFICIAL, token.value)
    raise Exception("Can't convert token %s to a term" % token)
def normalize_resource(self, term, tokens):
    """
    Make sure resource term is legitimate (not sure why tokenizer doesn't do this)
    TODO: Check that the string really is a URI or qname
    """
    if term.term_type == Term.RESOURCE:
        if not term.value:
            self.syntax_exception("Empty string found where resource expected.", tokens)
        ## BUG FIX: the original tested 'self.value[len(self) - 1]', which
        ## raised at runtime; the last character of term.value was intended:
        if term.value[0] == '<' and term.value[-1] == '>':
            # Strip the surrounding angle brackets.
            term.value = term.value[1:-1]
        if term.value.lower().startswith(("http:", "ftp:")):
            term.qname = None
        else:
            term.qname = term.value
def parse_select_clause(self, tokens):
    """
    Parse 'select_string' and return a list of terms.
    TODO: UPGRADE TO ALLOW ARBITRARY EXPRESSIONS HERE
    """
    hasParens = (len(tokens) >= 2 and tokens[0].value == '('
                 and tokens[-1].value == ')')
    # Prefix syntax requires the select arguments to be parenthesized.
    if not self.infix_parse and not hasParens:
        self.syntax_exception("Missing parentheses around select clause arguments")
    if hasParens:
        tokens = tokens[1:-1]
    ## TODO: ALLOW ARBITRARY EXPRESSIONS HERE:
    # if not t.token_type in [Token.URI, Token.STRING, Token.NUMBER, Token.VARIABLE, Token.QNAME]:
    #     self.syntax_exception("Illegal operator '%s' found in select clause" % t.value, t)
    return [self.token_to_term(t) for t in tokens]
def parse_enumeration(self, value, tokens):
    """Parse 'tokens' into an ENUMERATION node tagged with 'value' (e.g. 'LIST')."""
    arguments = []
    self.parse_expressions(tokens, arguments)
    for argument in arguments:
        self.normalize_resource(argument, tokens)
    enumeration = OpExpression(OpExpression.ENUMERATION, arguments)
    enumeration.predicate = value
    return enumeration
def parse_predication(self, tokens, predicate=None):
    """
    'tokens' represent a predicate applied to arguments.
    Returns a PREDICATION OpExpression; TRIPLE/QUAD forms (and ordinary
    predications promoted by 'subject_comes_first') are marked is_spo with
    the predicate collapsed to the string 'TRIPLE' or 'QUAD'.
    """
    #print "PARSE PREDICATION", predicate, [str(t) for t in tokens]
    if not predicate:
        # Prefix form: the first token is the predicate itself.
        predicate = tokens[0]
        tokens = tokens[1:]
    if self.infix_parse and tokens[0].value == '(':
        # Infix functional form 'pred(args...)': re-enter via bracket parsing.
        return self.parse_bracket(tokens, [], predicate=predicate)
    isSPO = Token.reserved_type(predicate, [OpExpression.TRIPLE, OpExpression.QUAD])
    predicate = self.token_to_term(predicate)
    arguments = self.parse_expressions(tokens, [])
    if isSPO:
        pass
    elif self.subject_comes_first and len(arguments) in [2,3]:
        ## this is probably a bad idea; we aren't debugging it:
        # Promote 'subject pred obj' into an explicit TRIPLE/QUAD by pushing
        # the predicate term onto the front of the argument list.
        temp = [predicate]
        temp.extend(arguments)
        arguments = temp
        toq = 'TRIPLE' if len(arguments) == 3 else 'QUAD'
        predicate = self.token_to_term(Token(Token.RESERVED_WORD, toq))
        isSPO = True
    else:
        # Ordinary predication: the predicate must be a resource or variable.
        if not predicate.term_type in [Term.RESOURCE, Term.VARIABLE]:
            self.syntax_exception("Found illegal term '%s' where resource expected" % predicate.value, tokens[0])
        isSPO = False
    if isSPO:
        # For SPO nodes the predicate is just the operator-name string.
        predicate = OpExpression.QUAD if len(arguments) == 4 else OpExpression.TRIPLE
    op = OpExpression(OpExpression.PREDICATION, arguments)
    op.predicate = predicate
    op.is_spo = isSPO
    return op
def parse_bracketed_expression(self, tokens, bracket, predicate=None, is_boolean=None):
    """
    'tokens' are encoded in a bracket beginning with 'bracket'.
    Figure out what kind of expression we have, and parse it.
    """
    # Square brackets always denote a LIST enumeration.
    if bracket.value == '[':
        return self.parse_enumeration('LIST', tokens)
    ## must be parenthesized expression
    if not tokens:
        self.syntax_exception("Found empty set of parentheses", tokens)
    ## check if its a predication:
    allAtomicArguments = True
    for t in tokens:
        if not t.token_type in ATOMIC_TERM_TOKEN_TYPES:
            allAtomicArguments = False
            break
    if allAtomicArguments:
        return self.parse_predication(tokens, predicate=predicate)
    elif Token.reserved_type(tokens[0], [OpExpression.TRIPLE, OpExpression.QUAD]):
        if self.infix_parse:
            ## EXPERIMENT:
            return self.parse_infix_expression(tokens, [], connective=None, needs_another_argument=True, is_boolean=True)
        else:
            return self.parse_predication(tokens)
    elif predicate:
        self.syntax_exception("MAYBE A FUNCTION, BUT NOT LEGAL PREDICATION", tokens)
    ## we don't know yet what we have here:
    beginToken = tokens[0]
    tokenType = beginToken.token_type
    value = beginToken.value
    if not self.infix_parse and tokenType == Token.BRACKET and value == '(':
        ## see if last token is closing bracket; otherwise, its illegal:
        endToken = tokens[len(tokens) - 1]
        if endToken.token_type == Token.BRACKET and endToken.value == ')':
            # Doubly-parenthesized prefix expression: strip one level and recurse.
            return self.parse_bracketed_expression(tokens[1:-1], beginToken, is_boolean=is_boolean)
        else:
            self.syntax_exception("Found parenthesized expression where term expected: '%s'" % Tokenizer.tokens_to_string(tokens), beginToken)
    elif tokenType == Token.RESERVED_WORD:
        if value in CONNECTIVE_OPERATORS:
            # Prefix connective application, e.g. (and ...) or (= ...).
            if self.infix_parse and not value in UNARY_BOOLEAN_OPERATORS:
                self.syntax_exception("Found operator '%s' where term expected" % Tokenizer.tokens_to_string(tokens), beginToken)
            if value in BOOLEAN_OPERATORS and not is_boolean:
                self.syntax_exception("Found boolean expression where value expression expected '%s'" % Tokenizer.tokens_to_string(tokens), beginToken)
            if value in VALUE_OPERATORS and is_boolean:
                self.syntax_exception("Found value expression where boolean expression expected '%s'" % Tokenizer.tokens_to_string(tokens), beginToken)
            ## NOT SURE ABOUT THIS (ESPECIALLY FOR INFIX):
            isBoolean = (value in BOOLEAN_OPERATORS and not value in COMPARISON_OPERATORS and not value in PREFIX_OPERATORS)
            arguments = self.parse_expressions(tokens[1:], [], is_boolean=isBoolean)
            return OpExpression(value, arguments)
        elif value in [OpExpression.TRUE, OpExpression.FALSE]:
            if not is_boolean:
                self.syntax_exception("Found boolean expression where term expected '%s'" % Tokenizer.tokens_to_string(tokens), beginToken)
            elif len(tokens) > 1:
                self.syntax_exception("Found bizarre expression '%s'" % Tokenizer.tokens_to_string(tokens), beginToken)
            return OpExpression(value, [])
        elif value in ['LIST', 'SET', 'BAG']:
            return self.parse_enumeration(value, tokens[1:])
        else:
            raise Exception("Unimplemented reserved word '%s'" % value)
    if self.infix_parse:
        return self.parse_infix_expression(tokens, [], connective=None, needs_another_argument=True, is_boolean='UNKNOWN')
    elif is_boolean is True:
        ## THIS ERROR MESSAGE WORKED ONCE. NEED TO FIND OUT IF IT'S TOO SPECIFIC:
        self.syntax_exception("Illegal expression %s where prefix expression expected" % Tokenizer.tokens_to_string(tokens), tokens)
    else:
        ## THIS IS BOGUS SO FAR.
        ## NOTE(review): parse_function_expression is not defined in the
        ## visible portion of this module — confirm it exists elsewhere.
        return self.parse_function_expression(tokens)
def parse_unary_connective(self, tokens, expressions, connective=None):
    """
    Infix mode needs special handling for the NOT operator (because it is a
    prefix operator).
    TODO: FIGURE OUT IF 'OPTIONAL' NEEDS SIMILAR HANDLING HERE
    TODO: GENERALIZE TO ALSO HANDLE 'MEMBER'???
    """
    opName = tokens[0].value
    if len(tokens) < 2:
        self.syntax_exception("Insufficient arguments to '%s' operator" % opName.lower(), tokens)
    if tokens[1].value == '(':
        return self.parse_bracket(tokens[1:], expressions, connective=connective,
                                  unary_operator=opName, is_boolean=True)
    self.syntax_exception("'%s' operator expects a parenthesized argument" % opName.lower(), tokens)
def parse_bracket(self, tokens, expressions, is_boolean=False, connective=None, unary_operator=None, predicate=None):
    """
    'tokens' begins with a bracket. Find the end bracket, and convert the tokens
    in between into an expression. Recursively parse the remaining expressions.
    """
    beginBracket = tokens[0]
    beginVal = beginBracket.value
    endVal = None
    if beginVal == '(': endVal = ')'
    elif beginVal == '[': endVal = ']'
    nestingCounter = 0
    # Scan forward tracking nesting depth until the matching close bracket.
    for i, tok in enumerate(tokens):
        if tok.token_type == Token.BRACKET:
            if tok.value == beginVal: nestingCounter += 1
            elif tok.value == endVal: nestingCounter -= 1
        if nestingCounter == 0:
            # Found the matching close bracket at index i; parse the interior.
            exp = self.parse_bracketed_expression(tokens[1:i], beginBracket, is_boolean=is_boolean, predicate=predicate)
            if unary_operator:
                # Wrap, e.g., NOT(...) around the bracketed expression.
                exp = OpExpression(unary_operator, [exp])
            expressions.append(exp)
            # Continue parsing whatever follows the close bracket.
            if self.infix_parse:
                return self.parse_infix_expression(tokens[i + 1:], expressions, connective=connective, needs_another_argument=False, is_boolean=is_boolean)
            else:
                return self.parse_expressions(tokens[i + 1:], expressions, is_boolean=is_boolean)
    # NOTE(review): falls through (returning None) if the bracket is never
    # closed; callers rely on validate_parentheses having run first — confirm.
def parse_expressions(self, tokens, expressions, is_boolean=False):
    """
    Parse 'tokens' into an expression and append the result to 'expressions'.
    If not all tokens are used up, recursively parse expressions.
    """
    if not tokens: return expressions
    beginToken = tokens[0]
    tokenType = beginToken.token_type
    # Atomic value terms (a bare value is illegal where a boolean is required):
    if tokenType in set([Token.STRING, Token.QNAME, Token.URI, Token.NUMBER]):
        if is_boolean is True:
            self.syntax_exception("Term found where boolean expression expected '%s'" % beginToken.value, beginToken)
        expressions.append(self.token_to_term(beginToken))
        return self.parse_expressions(tokens[1:], expressions, is_boolean=is_boolean)
    # Variables are legal in either boolean or value position:
    if tokenType in set([Token.VARIABLE]):
        expressions.append(self.token_to_term(beginToken))
        return self.parse_expressions(tokens[1:], expressions, is_boolean=is_boolean)
    if tokenType == Token.BRACKET:
        exps = self.parse_bracket(tokens, expressions, is_boolean=is_boolean)
        ## hack: in infix mode, 'parse_bracket' returns a singleton, which all callers except this one prefer:
        return [exps] if self.infix_parse else exps
    elif tokenType == Token.RESERVED_WORD:
        if Token.reserved_type(beginToken, [OpExpression.TRUE, OpExpression.FALSE]):
            expressions.append(OpExpression(beginToken.value, []))
            return self.parse_expressions(tokens[1:], expressions, is_boolean=is_boolean)
        elif Token.reserved_type(beginToken, [OpExpression.TRIPLE, OpExpression.QUAD]):
            ## this is a hack that insures that 'parse_predication' gets applied to this expression
            ## for an infix QUAD, it should be an error if the parenthesis is missing
            if self.infix_parse and tokens[1].value == '(':
                return [self.parse_bracket(tokens[1:], [], predicate=beginToken, is_boolean=is_boolean)]
            else:
                ## this handles recursion triggered just above for infix; not sure if prefix hits this
                expressions.append(self.token_to_term(beginToken))
                return self.parse_expressions(tokens[1:], expressions, is_boolean=is_boolean)
    ## failure
    if is_boolean is True:
        self.syntax_exception("Found illegal term '%s' where boolean expression expected" % beginToken.value, beginToken)
    else:
        self.syntax_exception("Found illegal term '%s'" % str(beginToken), beginToken)
def parse_infix_expression(self, tokens, arguments, connective=None, needs_another_argument=True, is_boolean=False):
    """
    Parse 'tokens' into an expression and return the result.
    Alternates between expecting an argument term ('needs_another_argument')
    and expecting a connective operator, accumulating into 'arguments'.
    """
    if not tokens:
        # End of input: either finalize the accumulated expression or complain.
        if needs_another_argument:
            if not connective:
                self.syntax_exception("Found nothing where term expected (NOT A GOOD ERROR MESSAGE BECAUSE NO CONTEXT)")
            else:
                self.syntax_exception("%s connective expects another argument (NOT A GOOD ERROR MESSAGE BECAUSE NO CONTEXT)" % connective)
        elif connective:
            return OpExpression(connective, arguments)
        elif len(arguments) == 1:
            return arguments[0]
        else:
            ## WE DON'T UNDERSTAND THIS CASE YET:
            raise Exception("BUG -- parse_infix expression went bizarro")
    beginToken = tokens[0]
    tokenType = beginToken.token_type
    if needs_another_argument:
        ## the next argument must be a term (not a connective)
        if self.infix_with_prefix_triples:
            # Allow prefix-style predications 'pred(...)' / TRIPLE(...) inside infix text.
            if tokenType in [Token.URI, Token.QNAME] and len(tokens) > 1 and tokens[1].value == '(':
                predicate = self.token_to_term(beginToken)
                return self.parse_bracket(tokens[1:], arguments, connective=connective, predicate=predicate, is_boolean=is_boolean)
            elif Token.reserved_type(beginToken, [OpExpression.TRIPLE, OpExpression.QUAD]):
                if tokens[1].value == '(':
                    return self.parse_bracket(tokens[1:], arguments, connective=connective, predicate=beginToken, is_boolean=is_boolean)
                else:
                    self.syntax_exception("Expected '(' but found '{0}'".format(tokens[1]), tokens)
        if tokenType in ATOMIC_TERM_TOKEN_TYPES:
            # if is_boolean is True:
            #     self.syntax_exception("Value term found where boolean expression expected '%s'" % beginToken.value, beginToken)
            arguments.append(self.token_to_term(beginToken))
            return self.parse_infix_expression(tokens[1:], arguments, connective=connective, needs_another_argument=False, is_boolean=is_boolean)
        if tokenType in set([Token.VARIABLE]):
            # NOTE(review): unreachable — VARIABLE is already in
            # ATOMIC_TERM_TOKEN_TYPES above; retained from the original.
            arguments.append(self.token_to_term(beginToken))
            return self.parse_infix_expression(tokens[1:], arguments, connective=connective, needs_another_argument=False, is_boolean=is_boolean)
        if tokenType == Token.BRACKET:
            return self.parse_bracket(tokens, arguments, connective=connective, is_boolean=is_boolean)
        elif Token.reserved_type(beginToken, [OpExpression.NOT, OpExpression.NAF, OpExpression.OPTIONAL]) and is_boolean:
            return self.parse_unary_connective(tokens, arguments, connective=connective)
        elif Token.reserved_type(beginToken, [OpExpression.TRUE, OpExpression.FALSE]) and is_boolean:
            arguments.append(OpExpression(beginToken.value, []))
            ## BUG FIX: the original called self.parse_expressions here with
            ## 'connective' and 'needs_another_argument' keywords, which that
            ## method does not accept (TypeError); the sibling branches show
            ## the recursive call was meant to be parse_infix_expression:
            return self.parse_infix_expression(tokens[1:], arguments, connective=connective, needs_another_argument=False, is_boolean=is_boolean)
        else: # failure
            if is_boolean is True:
                self.syntax_exception("Found illegal term '%s' where boolean expression expected" % beginToken.value, beginToken)
            else:
                self.syntax_exception("Found illegal term '%s'" % str(beginToken), beginToken)
    ## we have an argument; the next token MUST be a connective:
    nextConnective = beginToken.value
    if is_boolean is True:
        if not Token.reserved_type(beginToken, BOOLEAN_OPERATORS):
            self.syntax_exception("Found '%s' where one of %s expected" % (beginToken.value, BOOLEAN_OPERATORS), beginToken)
    elif is_boolean is False:
        if not Token.reserved_type(beginToken, VALUE_OPERATORS):
            self.syntax_exception("Found '%s' where one of %s expected" % (beginToken.value, VALUE_OPERATORS), beginToken)
    else:
        if not Token.reserved_type(beginToken, CONNECTIVE_OPERATORS):
            self.syntax_exception("Found '%s' where one of %s expected" % (beginToken.value, CONNECTIVE_OPERATORS), beginToken)
    if connective and not connective == nextConnective:
        ## the next connective is different than the previous one; coalesce the
        ## previous arguments into a single argument:
        ## NOTE: THIS IS WHERE OPERATOR PRECEDENCE WOULD HAPPEN IF WE HAD IT. BUT WE DON'T HAVE IT:
        arguments = [OpExpression(connective, arguments)]
    isBoolean = nextConnective in BOOLEAN_OPERATORS and not nextConnective in COMPARISON_OPERATORS
    return self.parse_infix_expression(tokens[1:], arguments, connective=nextConnective, is_boolean=isBoolean)
def validate_parentheses(self, tokens):
balance = 0
unmatchedLeft = None
for t in tokens:
if t.value == '(':
balance += 1
if balance == 1:
unmatchedLeft = t
elif t.value == ')': balance -= 1
if balance < 0:
self.syntax_exception("Unmatched right parentheses", t)
if balance > 0:
self.syntax_exception("Unmatched left parentheses", unmatchedLeft)
def parse_where_clause(self, tokens):
    """
    Parse string into a single expression, or a list of expressions.
    If the latter, convert the list into an AND expression.
    """
    self.validate_parentheses(tokens)
    if self.infix_parse:
        expressions = [self.parse_infix_expression(tokens, [], connective=None, is_boolean=True)]
    else:
        expressions = self.parse_expressions(tokens, [], is_boolean=True)
    if not expressions:
        self.syntax_exception("Query has empty where clause")
    if len(expressions) == 1:
        return expressions[0]
    return OpExpression(OpExpression.AND, expressions)
def parse_contexts_clause(self, tokens):
    """
    Read in a list of context URIs.
    """
    if not tokens:
        self.syntax_exception("Contexts clause is empty", tokens)
    if not self.infix_parse or tokens[0].value == '(':
        # Prefix mode (or an explicit paren) requires full parenthesization.
        beginToken = tokens[0]
        endToken = tokens[len(tokens) - 1]
        if not (beginToken.value == '(' and endToken.value == ')'):
            self.syntax_exception("Begin and end parentheses needed to bracket contents of CONTEXTS clause", tokens)
        tokens = tokens[1:-1]
    contexts = []
    for token in tokens:
        ## BUG FIX: the original tested 'token.token_type == Token.URI or
        ## Token.QNAME', which is always truthy; a membership test was meant:
        if token.token_type in (Token.URI, Token.QNAME):
            contexts.append(self.token_to_term(token))
        else:
            self.syntax_exception("Found term '%s' where URI or qname expected" % token.value, token)
    return contexts
def parse_limit_clause(self, tokens):
    """Parse the single integer argument of a LIMIT clause and return it as int."""
    if len(tokens) != 1:
        self.syntax_exception("Expected one argument to 'limit' operator", tokens)
    limitToken = tokens[0]
    if limitToken.token_type != Token.NUMBER:
        self.syntax_exception("'limit' operator expects an integer argument", limitToken)
    return int(limitToken.value)
def parse_order_by_clause(self, tokens):
    """Parse the (optionally parenthesized) variables of an ORDER BY clause."""
    if not tokens:
        self.syntax_exception("Order by clause is empty", tokens)
    if tokens[0].value == '(':
        # An opening paren must be matched by a closing one at the end.
        if tokens[-1].value != ')':
            self.syntax_exception("Begin and end parentheses needed to bracket contents of ORDER BY clause", tokens)
        tokens = tokens[1:-1]
    variables = []
    for tok in tokens:
        if tok.token_type != Token.VARIABLE:
            self.syntax_exception("Found term '%s' where variable expected" % tok.value, tok)
        variables.append(self.token_to_term(tok))
    return variables
def parse(self):
"""
Parse 'source_query' into a parse tree.
FOR NOW, ASSUME ITS A 'select' QUERY
"""
self.clean_source()
query = self.source_query.lower()
if not query:
raise QuerySyntaxException("Empty CommonLogic query passed to translator")
tokens = Tokenizer(self, self.source_query).tokenize()
self.validate_parentheses(tokens)
if tokens[0].token_is('('):
if not tokens[len(tokens) - 1].token_is(')'):
raise QuerySyntaxException("Missing right parenthesis at the end of query:\n" + query)
tokens = tokens[1:-1]
selectToken = tokens[0]
if not selectToken.token_is('select'):
self.syntax_exception("Found {0} where 'select' expected".format(selectToken.value), tokens)
qb = QueryBlock()
self.parse_tree = qb
## find the reserved word tokens that subdivide the query:
def found_one(nextToken, word, existingToken):
if not nextToken.token_is(word): return
if existingToken:
self.syntax_exception("Multiple {0} tokens in the same query".format(nextToken.value), nextToken)
return True
distinctToken = None
whereToken = None
contextsToken = None
limitToken = None
orderByToken = None
for tok in tokens:
if found_one(tok, 'where', whereToken): whereToken = tok
if found_one(tok, 'distinct', distinctToken): distinctToken = tok
if found_one(tok, 'limit', limitToken): limitToken = tok
if found_one(tok, 'orderby', orderByToken): orderByToken = tok
if found_one(tok, 'contexts', contextsToken): contextsToken = tok
if distinctToken:
if not distinctToken == tokens[1]:
self.syntax_exception("Distinct is out of place; it should appear directly after {0}".format(selectToken.value), distinctToken)
qb.distinct = True
tokens.remove(distinctToken)
if not whereToken:
self.syntax_exception("Missing where clause in {0} query".format(selectToken.value), tokens)
if contextsToken and contextsToken.offset < whereToken.offset:
self.syntax_exception("Contexts clause must occur after the where clause", contextsToken)
if limitToken and limitToken.offset < whereToken.offset:
self.syntax_exception("Limit clause must occur after the where clause", limitToken)
for i, t in enumerate(tokens): t.index = i
qb.select_terms = self.parse_select_clause(tokens[1:whereToken.index])
lastWhereTokenIndex = len(tokens)
if contextsToken:
lastWhereTokenIndex = min(lastWhereTokenIndex, contextsToken.index)
if limitToken:
lastWhereTokenIndex = min(lastWhereTokenIndex, limitToken.index)
if orderByToken:
lastWhereTokenIndex = min(lastWhereTokenIndex, orderByToken.index)
qb.where_clause = self.parse_where_clause(tokens[whereToken.index + 1:lastWhereTokenIndex])
if contextsToken:
lastContextsTokenIndex = len(tokens)
if limitToken and limitToken.index > contextsToken.index:
lastContextsTokenIndex = min(lastContextsTokenIndex, limitToken.index)
if orderByToken and orderByToken.index > contextsToken.index:
lastContextsTokenIndex = min(lastContextsTokenIndex, orderByToken.index)
qb.contexts_clause = self.parse_contexts_clause(tokens[contextsToken.index + 1:lastContextsTokenIndex])
if limitToken:
lastLimitTokenIndex = len(tokens)
if orderByToken and orderByToken.index > limitToken.index:
lastLimitTokenIndex = min(lastLimitTokenIndex, orderByToken.index)
if contextsToken and contextsToken.index > limitToken.index:
lastLimitTokenIndex = min(lastLimitTokenIndex, contextsToken.index)
qb.limit = self.parse_limit_clause(tokens[limitToken.index + 1:lastLimitTokenIndex])
if orderByToken:
lastOrderByTokenIndex = len(tokens)
if limitToken and limitToken.index > orderByToken.index:
lastOrderByTokenIndex = min(lastOrderByTokenIndex, limitToken.index)
if contextsToken and contextsToken.index > limitToken.index:
lastOrderByTokenIndex = min(lastOrderByTokenIndex, contextsToken.index)
qb.order_by = self.parse_order_by_clause(tokens[orderByToken.index + 1:lastOrderByTokenIndex])
###########################################################################################################
## Normalization
###########################################################################################################
class Normalizer:
def __init__(self, parse_tree, language, contexts=None, spoify_output='False'):
self.parse_tree = parse_tree
self.language = language
self.spoify_output = spoify_output
self.variable_counter = -1
self.recompute_backlinks()
def deanglify(context):
return context[1:-1] if context[0] == '<' else context
self.contexts = [deanglify(cxt) for cxt in contexts] if contexts else None
def normalize(self):
if self.language == CommonLogicTranslator.PROLOG:
self.normalize_for_prolog()
elif self.language == CommonLogicTranslator.SPARQL:
self.normalize_for_sparql()
    def help_walk(self, node, parent, processor, types, bottom_up, external_value):
        """
        Recursive helper for 'walk'.  Applies 'processor(node, parent, external_value)'
        to 'node' (pre-order, or post-order when 'bottom_up') if its type matches
        'types', then recurses into an OpExpression's predicate and arguments.
        """
        ## plain strings embedded in the tree are not nodes; skip them
        if type(node) == str: return
        if not bottom_up and (not types or isinstance(node, types)):
            processor(node, parent, external_value)
        if isinstance(node, OpExpression):
            if node.predicate and (not types or isinstance(node.predicate, types)):
                self.help_walk(node.predicate, node, processor, types, bottom_up, external_value)
            for arg in node.arguments:
                self.help_walk(arg, node, processor, types, bottom_up, external_value)
        if bottom_up and (not types or isinstance(node, types)):
            processor(node, parent, external_value)
    def walk(self, processor, types=None, start_node=None, bottom_up=False, external_value=None):
        """
        Walk the parse tree; apply 'processor' to each node whose type is in 'types'.
        If 'start_node' is given (and is not the whole tree), walk only that subtree.
        'external_value' is passed through unchanged as the third processor argument.
        """
        if start_node and not start_node == self.parse_tree:
            self.help_walk(start_node, start_node.parent, processor, types, bottom_up, external_value)
            return
        ## walk select clause
        for arg in self.parse_tree.select_terms:
            self.help_walk(arg, self.parse_tree.select_terms, processor, types, bottom_up, external_value)
        ## walk where clause
        self.help_walk(self.parse_tree.where_clause, self.parse_tree, processor, types, bottom_up, external_value)
    def recompute_backlinks(self, where_clause_only=False):
        """
        Reset every node's 'parent' attribute by re-walking the tree (or just
        the where clause when 'where_clause_only').  Tree transforms that call
        'substitute_node' rely on this being run afterwards.
        """
        #print "RECOMPUTE BACKLINKS"
        def add_backlinks(node, parent, external_value):
            node.parent = parent
        self.walk(add_backlinks, start_node=(self.parse_tree.where_clause if where_clause_only else None))
# def variable_name_is_referenced(self, variable_name):
# """
# Return 'True' if a variable named 'variable_name' occurs somewhere in the current parse tree.
# """
# foundIt = [False]
# def doit(node, parent, external_value):
# if node.term_type == Term.VARIABLE and node.value == variable_name:
# foundIt[0] = True
# self.walk(doit, types=Term)
# return foundIt[0]
    def get_fresh_variable(self):
        """
        Return a new variable Term named 'vN' guaranteed not to collide with
        any 'vN'-style variable already in the tree.  The counter is primed
        lazily (first call scans the tree for the largest existing N).
        """
        def bump_variable_counter(node, parent, sseellff):
            ## NOTE: 'sseellff' is this Normalizer passed via external_value;
            ## it is the same object as the closed-over 'self'.
            if not node.term_type == Term.VARIABLE: return
            value = node.value
            if len(value) < 2: return
            ## only variables of the form 'v<digits>' participate in the counter
            if not value.startswith('v') or not value[1:].isdigit(): return
            intVal = int(value[1:])
            sseellff.variable_counter = max(self.variable_counter, intVal)
        if self.variable_counter == -1:
            self.variable_counter = 0
            self.walk(bump_variable_counter, types=Term, external_value=self)
        self.variable_counter += 1
        freshVbl = "v{0}".format(self.variable_counter)
        return Term(Term.VARIABLE, freshVbl)
# def add_backlinks(self, expression, parent):
# expression.parent = parent
# if isinstance(expression, Term): return # quick exit
# if isinstance(expression, OpExpression):
# for arg in expression.arguments:
# self.add_backlinks(arg, expression)
# elif isinstance(expression, QueryBlock):
# ## tricky: select terms list is the parent of select terms
# for term in expression.select_terms:
# self.add_backlinks(term, expression.select_terms)
# ## tricky: query block is the parent of where term
# self.add_backlinks(expression.where_clause, expression)
@staticmethod
def is_boolean(node):
if isinstance(node, OpExpression):
return (node.predicate or node.operator in BOOLEAN_OPERATORS or
node.operator in [OpExpression.TRUE, OpExpression.FALSE])
else:
return False
def substitute_node(self, out_node, in_node):
"""
Unlink the parent of 'out_node' from it, and link it instead to 'in_node'
Caution: Does NOT fix up backlinks, because some transforms could be messed
up if we do. Instead, assumes that 'recompute_backlinks' will be called
afterwards.
"""
parent = out_node.parent
if isinstance(parent, OpExpression):
for i, arg in enumerate(parent.arguments):
if arg == out_node:
parent.arguments[i] = in_node
return
if out_node == parent.predicate:
parent.predicate = in_node
return
elif isinstance(parent, QueryBlock):
if parent.where_clause == out_node:
parent.where_clause = in_node
return
elif isinstance(parent, list):
for i, arg in enumerate(list):
if arg == out_node:
list[i] = in_node
return
return
raise Exception("Failed to substitute out_node '%s'" % out_node)
def conjoin_to_where_clause(self, node):
"""
And-in 'node' to the top-level of the where clause.
"""
whereClause = self.parse_tree.where_clause
if isinstance(whereClause, OpExpression) and whereClause.operator == OpExpression.AND:
whereClause.arguments.append(node)
else:
andNode = OpExpression(OpExpression.AND, [whereClause, node])
self.parse_tree.where_clause = andNode
def flatten_nested_ands(self):
flattenedSomething = False
def flatten(node, parent, external_value):
if not node.operator == OpExpression.AND: return
conjuncts = []
for arg in node.arguments:
if isinstance(arg, OpExpression) and arg.operator == OpExpression.AND:
conjuncts.extend(arg.arguments)
flattenedSomething = True
else:
conjuncts.append(arg)
node.arguments = conjuncts
## flatten each nested AND we find:
self.walk(flatten, types=OpExpression, bottom_up=True)
if flattenedSomething:
self.recompute_backlinks(where_clause_only=True)
def copy_node(self, node):
"""
Deep copy of 'node'.
Caution: Does not call 'recompute_backlinks', but something needs to.
"""
if isinstance(node, Term):
return node.clone()
else:
op = node.clone()
op.arguments = [self.copy_node(arg) for arg in node.arguments]
return op
    def distribute_disjunction(self, or_node):
        """
        Apply deMorgan's transform to the OR node 'or_node' and its AND parent.
        (AND (OR a b) c) becomes (OR (AND c a) (AND c b)); the rewritten OR
        replaces the parent AND in the tree.
        """
        andParent = or_node.parent
        if (not or_node.operator == OpExpression.OR or
            not andParent.operator == OpExpression.AND):
            raise Exception("Illegal node structure in 'bubble_up_disjunction'")
        newAndNodes = []
        for disjunct in or_node.arguments:
            ## each new AND pairs a copy of the disjunct with copies of its siblings
            otherConjuncts = [self.copy_node(arg) for arg in andParent.arguments if not arg == or_node]
            otherConjuncts.append(self.copy_node(disjunct))
            newAndNodes.append(OpExpression(OpExpression.AND, otherConjuncts))
        newOrNode = OpExpression(OpExpression.OR, newAndNodes)
        self.substitute_node(andParent, newOrNode)
        self.recompute_backlinks(where_clause_only=True)
###########################################################################################################
## Constant folding
###########################################################################################################
## implicit variable scoping makes this difficult
## we assume rather narrow scoping at first pass:
    def propagate_constants_to_predications(self, skip_context_variables=None):
        """
        Constant folding: for each '(= ?v constant)' conjoined at an AND node,
        replace occurrences of ?v in sibling predications by the constant.
        Scoping is deliberately narrow (only within the same AND); see the
        comment above about implicit variable scoping.
        """
        constantEqualities = []
        def collect_constant_equalities(node, parent, external_value):
            if node.operator == OpExpression.EQUALITY and isinstance(parent, OpExpression) and parent.operator == OpExpression.AND:
                variable = None
                constant = None
                for arg in node.arguments:
                    if isinstance(arg, Term):
                        if arg.term_type == Term.VARIABLE: variable = arg
                        elif arg.term_type in [Term.RESOURCE, Term.LITERAL]: constant = arg
                if variable and constant:
                    ## remember (enclosing AND, variable, constant)
                    constantEqualities.append((parent, variable, constant))
        ## collect AND nodes containing variables set to constants:
        self.walk(collect_constant_equalities, types=OpExpression)
        def substitute_constant_for_variable(node, parent, external_value):
            if node.operator == OpExpression.PREDICATION:
                vbl = external_value[1]
                constant = external_value[2]
                for i, arg in enumerate(node.arguments):
                    ## SPARQL can't handle constants in context position:
                    if node.is_spo and i == 3 and skip_context_variables: continue
                    if not isinstance(arg, Term): continue
                    if arg.value == vbl.value:
                        node.arguments[i] = constant
        ## search for predications AND'ed to the constant equalities,
        ## and subtitute in the corresponding constants:
        for triple in constantEqualities:
            self.walk(substitute_constant_for_variable, types=OpExpression, start_node=triple[0], external_value=triple)
    def is_unique_variable_within_where_clause(self, variable):
        """
        Return 'True' if the variable 'variable' occurs at most once in the
        where clause of the current parse tree.
        """
        ## one-element list so the closure can mutate it (no 'nonlocal' in Py2)
        appearancesCounter = [0]
        def doit(node, parent, external_value):
            if node.term_type == Term.VARIABLE and node.value == variable.value:
                appearancesCounter[0] = appearancesCounter[0] + 1
        self.walk(doit, types=Term, start_node=self.parse_tree.where_clause)
        return appearancesCounter[0] <= 1
###########################################################################################################
## Specialized conversions
###########################################################################################################
    def find_value_computation_roots(self, node):
        """
        Return a list of nodes representing non-boolean computations
        within 'node' not nested within higher value computations.
        Recurses through boolean nodes only; a non-boolean, non-COMPUTE
        OpExpression is itself a root and is not descended into.
        """
        roots = []
        if isinstance(node, OpExpression):
            if Normalizer.is_boolean(node):
                for arg in node.arguments:
                    roots.extend(self.find_value_computation_roots(arg))
            elif not node.operator == OpExpression.COMPUTE:
                return [node]
        return roots
    def flatten_one_value_computation(self, root):
        """
        Replace the value computation 'root' by a fresh variable, and AND a
        COMPUTE node '(compute ?fresh op args...)' next to root's former parent
        so the value is bound before it is used.
        """
        freshVar = self.get_fresh_variable()
        computeArgs = [freshVar, root.operator]
        computeArgs.extend(root.arguments)
        computeNode = OpExpression(OpExpression.COMPUTE, computeArgs)
        ## the fresh variable stands in for the computed value:
        self.substitute_node(root, freshVar)
        parentNode = root.parent
        andNode = OpExpression(OpExpression.AND, [computeNode, parentNode])
        self.substitute_node(parentNode, andNode)
        self.recompute_backlinks(where_clause_only=True)
    def flatten_value_computations(self):
        """
        Replace value operators by COMPUTE nodes
        Call this AFTER calling 'flatten_select_terms'
        Repeats (recursively) until no more computation roots remain.
        """
        roots = self.find_value_computation_roots(self.parse_tree.where_clause)
        if not roots: return
        for root in roots:
            self.flatten_one_value_computation(root)
        ## nested ands are a possible by-product of value computation flattening
        self.flatten_nested_ands()
        ## recursively flatten until no more flattening occurs
        self.flatten_value_computations()
    def flatten_select_terms(self):
        """
        Replace each non-boolean computed select term by a fresh variable and
        conjoin '(= ?fresh term)' to the where clause so the value is computed
        there instead.
        """
        roots = [term for term in self.parse_tree.select_terms
                 if isinstance(term, OpExpression) and not Normalizer.is_boolean(term)]
        if not roots: return
        newEqualities = []
        for r in roots[:]:
            freshVbl = self.get_fresh_variable()
            self.substitute_node(r, freshVbl)
            equalityOp = OpExpression(OpExpression.EQUALITY, [freshVbl, r])
            newEqualities.append(equalityOp)
        for ne in newEqualities:
            self.conjoin_to_where_clause(ne)
        self.recompute_backlinks()
def get_null_term(self):
return Term(Term.LITERAL, "Null")
    def translate_optionals(self, p_or_true=False):
        """
        Rewrite each '(optional P)' as a disjunction: '(or P (naf P-copy))',
        or '(or P true)' when 'p_or_true' is set.
        """
        def doit(node, parent, sseellff):
            if not node.operator == OpExpression.OPTIONAL: return
            arg = node.arguments[0]
            argCopy = sseellff.copy_node(arg)
            if p_or_true:
                notP = OpExpression(OpExpression.TRUE, [])
            else:
                #notP = OpExpression(OpExpression.NOT, [argCopy])
                notP = OpExpression(OpExpression.NAF, [argCopy])
            pOrNotP = OpExpression(OpExpression.OR, [arg, notP])
            self.substitute_node(node, pOrNotP)
        self.walk(doit, types=OpExpression, external_value=self)
    def translate_in_enumerate_into_disjunction_of_equalities(self):
        """
        Rewrite each '(in ?v (enumeration ...))' test as an OR of equalities,
        one '(= ?v item)' per enumerated item.
        """
        didIt = [False]
        def doit(node, parent, external_value):
            if node.operator == OpExpression.IN:
                ## assumes that the second arg is an enumeration:
                vbl = node.arguments[0]
                enumeration = node.arguments[1]
                equalities = [OpExpression(OpExpression.EQUALITY, [vbl, item]) for item in enumeration.arguments]
                orOp = OpExpression(OpExpression.OR, equalities)
                self.substitute_node(node, orOp)
                didIt[0] = True
        self.walk(doit, types=OpExpression)
        if didIt[0]:
            self.recompute_backlinks()
    def translate_in_enumerate_into_temporary_join(self):
        """
        If the parse tree contains enumeration tests,
        record the needed temporary relations in 'parse_tree.temporary_enumerations'
        and replace each '(in ?v (enumeration ...))' by a join against a
        uniquely-named temporary relation.
        """
        didIt = [False]
        def doit( node, parent, sseellff):
            if not node.operator == OpExpression.IN: return
            ## assumes that the second arg is an enumeration:
            vbl = node.arguments[0]
            enumeration = node.arguments[1]
            ## time.time() makes the temporary relation name (effectively) unique
            tempRelation = "<http://enumerationhack#t{0:f}>".format(time.time())
            freshVbl = sseellff.get_fresh_variable()
            joinNode = OpExpression(OpExpression.PREDICATION, [freshVbl, Term(Term.RESOURCE, tempRelation[1:-1]), vbl])
            joinNode.predicate = OpExpression.TRIPLE
            joinNode.is_spo = True
            self.substitute_node(node, joinNode)
            self.parse_tree.temporary_enumerations[tempRelation] = [str(item) for item in enumeration.arguments]
            didIt[0] = True
        self.walk(doit, types=OpExpression, external_value=self)
        if didIt[0]:
            self.recompute_backlinks()
    def implies_to_or_nots(self):
        """
        Convert (implies P Q) to (or (not P) Q).
        (The NOT is later converted to NAF by 'nots_to_nafs' where needed.)
        """
        didIt = [False]
        def doIt(node, parent, sseellff):
            if node.operator == OpExpression.IMPLIES:
                notOp = OpExpression(OpExpression.NOT, [node.arguments[0]])
                orOp = OpExpression(OpExpression.OR, [notOp, node.arguments[1]])
                sseellff.substitute_node(node, orOp)
                didIt[0] = True
        self.walk(doIt, OpExpression, external_value=self)
        if didIt[0]:
            self.recompute_backlinks()
    def foralls_to_not_exist_nots(self):
        """
        Convert (forall ?x P) to (not (exists ?x (not P))), i.e., apply
        deMorgans to eliminate 'forall's
        """
        didIt = [False]
        def doIt(node, parent, sseellff):
            if node.operator == OpExpression.FORALL:
                ## arguments[0] is the bound variable(s); arguments[1] the body
                notOp = OpExpression(OpExpression.NOT, [node.arguments[1]])
                existsOp = OpExpression(OpExpression.EXISTS, [node.arguments[0], notOp])
                notOpToo = OpExpression(OpExpression.NOT, [existsOp])
                sseellff.substitute_node(node, notOpToo)
                didIt[0] = True
        self.walk(doIt, OpExpression, external_value=self)
        if didIt[0]:
            self.recompute_backlinks()
    def push_nots_inwards(self, start_node=None):
        """
        Convert '(not (or P Q))' to '(and (not P) (not Q))'
        Oops: We CANNOT convert '(not (and P Q))' to (or (not P) (not Q)) safely.
        Also eliminates double negation along the way.  Works for both NOT
        and NAF operators, and recurses into newly-negated subtrees.
        """
        didIt = [False]
        def negate(node, sseellff, op):
            """
            Wrap a 'not' around 'node', unless that creates double negation,
            in which case replace 'node' by its argument'
            """
            if node.operator == op:
                ## double negation
                arg = node.arguments[0]
                sseellff.substitute_node(node, arg)
                arg.parent = node.parent
                return arg
            else:
                notOp = OpExpression(op, [node.clone()])
                sseellff.substitute_node(node, notOp)
                notOp.arguments[0].parent = notOp
                notOp.parent = node.parent
                return notOp
        def doIt(node, parent, sseellff):
            if node.operator in [OpExpression.NOT, OpExpression.NAF]:
                notArg = node.arguments[0]
                if notArg.operator == node.operator:
                    ## eliminate double negation:
                    arg = notArg.arguments[0]
                    sseellff.substitute_node(node, arg)
                    arg.parent = node.parent
                    return
                if not notArg.operator in [OpExpression.OR]: return
                for arg in notArg.arguments[:]:
                    negate(arg, sseellff, node.operator)
                ## NOTE(review): the AND branch below is unreachable given the
                ## OR-only guard above (AND distribution is unsafe per the
                ## docstring); kept for reference — confirm before removing.
                if notArg.operator == OpExpression.AND:
                    notArg.operator = OpExpression.OR
                elif notArg.operator == OpExpression.OR:
                    notArg.operator = OpExpression.AND
                sseellff.substitute_node(node, notArg)
                notArg.parent = node.parent
                sseellff.push_nots_inwards(start_node=notArg)
                didIt[0] = True
        self.walk(doIt, OpExpression, external_value=self, start_node=start_node)
        if didIt[0]:
            self.recompute_backlinks()
    def convert_predications_to_spo_nodes(self, divert_context=False):
        """
        Convert all predications to SPO nodes.
        if 'divert_context', extract contexts from arguments and insert them into the 'context' attribute
        Unary predications '(Class ?x)' become '?x rdf:type Class' triples.
        """
        def doit(node, parent, external_value):
            if node.operator == OpExpression.PREDICATION and node.predicate and not node.is_spo:
                if len(node.arguments) == 1:
                    ## unary predication: treat the predicate as a class assertion
                    predicate = Term(Term.RESOURCE, None, qname="rdf:type")
                    subject = node.arguments[0]
                    object = node.predicate
                else:
                    predicate = node.predicate
                    subject = node.arguments[0]
                    object = node.arguments[1]
                node.context = node.arguments[2] if len(node.arguments) == 3 else None
                node.predicate = 'QUAD' if node.context else 'TRIPLE'
                if divert_context:
                    node.arguments = [subject, predicate, object]
                else:
                    ## NOTE(review): when no context is present this appends None as
                    ## the fourth argument — confirm downstream handles [s, p, o, None]
                    node.arguments = [subject, predicate, object, node.context]
                node.is_spo = True
            elif node.is_spo and len(node.arguments) == 4:
                node.context = node.arguments[3]
                if divert_context:
                    node.arguments.remove(node.context)
        self.walk(doit, types=OpExpression)
###########################################################################################################
## PROLOG-specific
###########################################################################################################
def nots_to_nafs(self):
"""
Convert all 'not' operators to 'naf', since Prolog has no analog of classical negation.
"""
def doIt(node, parent, sseellff):
if node.operator == OpExpression.NOT:
node.operator = OpExpression.NAF
self.walk(doIt, OpExpression)
def filter_quad_contexts(self):
"""
Visit each spo quad containing a variable context argument, and wrap a filter
clause around it that restricts the context argument to only contexts in the
explicitly-specified contexts.
"""
def doIt(node, parent, sseellff):
if node.is_spo and len(node.arguments) == 4:
cxt = node.arguments[3]
if not isinstance(cxt, Term) or not cxt.term_type == Term.VARIABLE: return
contexts = sseellff.contexts
print "CONTEXTS", [item for item in contexts]
equalities = [OpExpression(OpExpression.EQUALITY, [cxt, Term(Term.RESOURCE, item)]) for item in contexts]
orOp = OpExpression(OpExpression.OR, equalities) if len(contexts) > 1 else equalities[0]
andOp = OpExpression(OpExpression.AND, [node, orOp])
self.substitute_node(node, andOp)
self.walk(doIt, OpExpression, external_value=self)
    def quadify_triples(self):
        """
        Convert triples to quads everywhere, inserting either a context
        variable of a context URI.
        A fresh variable is used when several contexts are in force; the
        single context URI is inlined otherwise.
        """
        def doIt(node, parent, sseellff):
            if node.is_spo and len(node.arguments) == 3:
                contexts = sseellff.contexts
                if len(contexts) > 1:
                    node.arguments.append(self.get_fresh_variable())
                else:
                    node.arguments.append(Term(Term.RESOURCE, contexts[0]))
        self.walk(doIt, types=OpExpression, external_value=self)
###########################################################################################################
## SPARQL-specific
###########################################################################################################
    def color_filter_nodes(self):
        """
        Color operator node as a filter if it is a comparison, or if all of its
        children are filters.
        Hack: Or if its a 'bound' predicate.
        TODO: CONVERT bound HACK INTO GENERIC TEST
        Every node gets 'color' set (None or 'FILTER'); the bottom-up walk
        lets a parent inherit 'FILTER' from uniformly-filter children.
        """
        def colorIt(node, parent, external_value):
            node.color = None
            if isinstance(node, Term): return
            if node.operator in COMPARISON_OPERATORS:
                node.color = 'FILTER'
            elif node.predicate == 'bound' and len(node.arguments) == 1:
                node.color = 'FILTER'
            else:
                ## a composite is a filter only if ALL of its children are filters
                for arg in node.arguments:
                    if not arg.color == 'FILTER': return
                node.color = 'FILTER'
        ## color some filter nodes:
        self.walk(colorIt, bottom_up=True)
    def bubble_up_contexts(self):
        """
        Locate quad nodes, trim their length from 4 to 3, and propagate the contexts
        they reference to their ancestors.
        Three passes: (1) hoist a context shared by all children onto the parent;
        (2) push contexts back down off OPTIONAL nodes; (3) wrap a GRAPH node
        around any non-triple node that still carries a context.
        """
        ## migrate contexts out of triple nodes, and inherit them up where possible
        def bubble_up(node, parent, external_value):
            if not node.arguments: return
            context = None
            for arg in node.arguments:
                if isinstance(arg, Term) or not arg.context or not arg.context.term_type == Term.VARIABLE: return
                if not context: context = arg.context
                elif not arg.context.value == context.value: return
            ## if we reach here, there is a context common to all children of 'node":
            node.context = context
            for arg in node.arguments:
                arg.context = None
        self.walk(bubble_up, types=OpExpression, bottom_up=True)
        def bubble_back_down(node, parent, external_value):
            if node.context and node.operator == OpExpression.OPTIONAL:
                node.arguments[0].context = node.context
                node.context = None
        ## some nodes (optionals) shouldn't have context attached:
        self.walk(bubble_back_down, types=OpExpression)
        def create_graph_node(node, parent, sseellff):
            if node.context and not node.is_spo:
                graphNode = OpExpression('GRAPH', [node])
                graphNode.context = node.context
                sseellff.substitute_node(node, graphNode)
        ## create a graph node for each node that has a context but is not a triple node
        self.walk(create_graph_node, types=OpExpression, bottom_up=True, external_value=self)
    def contextify_sparql_triples(self):
        """
        Insure that a GRAPH declaration is wrapped around each triple,
        so that a CONTEXTS filter can apply everywhere.
        Nodes already at-or-below a contextualized ancestor are left alone;
        each remaining node gets a fresh context variable.
        """
        contextified = set([])
        uncontextified = set([])
        def mark_contextified_nodes(node, parent, external_value):
            ## membership is inherited: a child of a contextified node is contextified
            if node.context or parent in contextified: contextified.add(node)
        def collect_uncontextified(node, parent, external_value):
            if not node.context and not node in contextified:
                uncontextified.add(node)
        ## 'mark' all variables at or below a context
        self.walk(mark_contextified_nodes, types=OpExpression)
        ## collect list of triple nodes that are not 'marked'
        self.walk(collect_uncontextified, types=OpExpression)
        ## add a fresh context variable to each unmarked triple node:
        for node in uncontextified:
            node.context = self.get_fresh_variable()
def fix_heterogeneous_disjunctions(self):
"""
A query that disjoins a triple clause with a filter clause cannot produce SPARQL
output. Apply deMorgan's to distribute the disjunction, so that a more complex,
less performant query is created that CAN produce SPARQL code.
"""
self.color_filter_nodes()
heteroDisjuncts = []
def collect_hetero_disjuncts(node, parent, external_value):
if not node.operator == OpExpression.OR: return
if not isinstance(parent, OpExpression) or not parent.operator == OpExpression.AND: return
filter = False
triple = False
for arg in node.arguments:
if arg.color == 'FILTER': filter = True
else: triple = True
if filter and triple: heteroDisjuncts.append(node)
self.walk(collect_hetero_disjuncts, types=OpExpression)
#print "FOUND HETEROS", heteroDisjuncts
if heteroDisjuncts:
self.distribute_disjunction(heteroDisjuncts[0])
## to be safe, we fix only one hetero disjunct at a time, and then recurse:
self.fix_heterogeneous_disjunctions()
    def denormalize_leading_filter(self, leader):
        """
        Called by 'denormalize_filter_ands' to denormalize
        the filters beginning with 'leader'
        Splits the parent AND's arguments into (predecessors, the run of
        consecutive filters starting at 'leader', successors) and replaces
        the filter run by a single nested AND.
        """
        parent = leader.parent
        predecessors = []
        filters = []
        successors = []
        for arg in parent.arguments:
            if arg == leader: filters.append(arg)
            elif successors: successors.append(arg)
            elif filters:
                ## still inside the filter run until a non-filter appears
                if arg.color == 'FILTER': filters.append(arg)
                else: successors.append(arg)
            else: predecessors.append(arg)
        ## we now have consecutive filters:
        if len(filters) < 2: raise Exception("Bug in 'denormalize_filter_ands'")
        newAnd = OpExpression(OpExpression.AND, filters)
        predecessors.append(newAnd)
        predecessors.extend(successors)
        parent.arguments = predecessors
        self.recompute_backlinks(where_clause_only=True)
    def denormalize_filter_ands(self):
        """
        Filter and triple nodes get AND'ed together.  Separate out consecutive AND'ed filter nodes
        into their own AND node, so that the filter can be printed easily.
        Fixes one run per pass and recurses until no runs remain.
        """
        self.color_filter_nodes()
        leaders = []
        def collect_some_leading_filter_ands(node, parent, external_value):
            """
            If we find two consecutive AND'd filter nodes, collect the first.
            """
            if not node.operator == OpExpression.AND: return
            siblingFilters = []
            for arg in node.arguments:
                if arg.color == 'FILTER':
                    siblingFilters.append(arg)
                elif siblingFilters:
                    ## found non filter, time to exit
                    if len(siblingFilters) > 1: leaders.append(siblingFilters[0])
                    return
            ## guard against case when ALL of the children are filters:
            if len(siblingFilters) > 1 and len(siblingFilters) < len(node.arguments): leaders.append(siblingFilters[0])
        ## collect some of the leading filters
        self.walk(collect_some_leading_filter_ands, types=OpExpression)
        if leaders:
            self.denormalize_leading_filter(leaders[0])
            ## recurse
            self.denormalize_filter_ands()
def disappear_exists(self):
"""
Make 'exists' disappear. We do this for SPARQL because the NAF-to-optional-and-not-bound
translator doesn't know how to handle exists.
"""
def doIt(node, parent, sseellff):
if node.operator == OpExpression.EXISTS:
sseellff.substitute_node(node, node.arguments[1])
sseellff.recompute_backlinks()
self.walk(doIt, OpExpression, external_value=self)
    def translate_negations(self):
        """
        Translate negation into optional and unbound.
        Assumes that normalization guarantees that the argument
        to a negation is always a predication (not yet implemented).
        (not (triple S P O)) ::=
           (and (optional (and (triple S P ?v1) (= ?v1 O)))
                (not (bound ?v1)))
        Optimization:  If the 'O' argument is a variable that appears nowhere else
        in the WHERE clause, then we can omit the extra variable and the equality clause.
        """
        didIt = [False]
        def doit(node, parent, sseellff):
            if not node.operator in [OpExpression.NAF, OpExpression.NOT]: return
            tripleNode = node.arguments[0]
            if not (tripleNode.operator == OpExpression.PREDICATION and tripleNode.is_spo): return
            if node.operator == OpExpression.NOT:
                ## convert 'NOT' into 'NAF', since we don't support classical negation
                node.operator = OpExpression.NAF
            objArg = tripleNode.arguments[2]
            ## NOTE(review): the uniqueness check is short-circuited to True here
            isSimple = (objArg.term_type == Term.VARIABLE and
                        True) #sseellff.is_unique_variable_within_where_clause(objArg))
            if isSimple:
                ## simple case: reuse the object variable directly
                optionalNode = OpExpression(OpExpression.OPTIONAL, [tripleNode])
                boundNode = OpExpression(OpExpression.PREDICATION, [objArg])
                boundNode.predicate = 'bound'
            else:
                ## general case: introduce a fresh variable plus an equality clause
                freshVar = sseellff.get_fresh_variable()
                tripleNode.arguments[2] = freshVar
                equalityNode = OpExpression(OpExpression.EQUALITY, [freshVar, objArg])
                innerAndNode = OpExpression(OpExpression.AND, [tripleNode, equalityNode])
                optionalNode = OpExpression(OpExpression.OPTIONAL, [innerAndNode])
                boundNode = OpExpression(OpExpression.PREDICATION, [freshVar])
                boundNode.predicate = 'bound'
            #notNode = OpExpression(OpExpression.NOT, [boundNode])
            notNode = OpExpression(OpExpression.NAF, [boundNode])
            outerAndNode = OpExpression(OpExpression.AND, [optionalNode, notNode])
            sseellff.substitute_node(node, outerAndNode)
            sseellff.recompute_backlinks()
            didIt[0] = True
        self.walk(doit, types=OpExpression, external_value=self)
        if didIt[0]:
            self.flatten_nested_ands()
    def convert_select_constants_to_input_bindings(self, include_stupid_filter_clauses=True):
        """
        SPARQL select clauses can only list variables.  Replace each constant
        select term by a fresh variable, record the constant in
        'parse_tree.input_bindings', and (optionally) conjoin a trivially-true
        '(= ?v ?v)' filter so the variable is mentioned in the where clause.
        """
        selectTerms = self.parse_tree.select_terms
        auxiliaryInputBindings = {}
        stupidFilterClauses = []
        for i, arg in enumerate(selectTerms):
            if not arg.term_type == Term.VARIABLE:
                freshVbl = self.get_fresh_variable()
                selectTerms[i] = freshVbl
                auxiliaryInputBindings[freshVbl.value] = str(arg)
                if include_stupid_filter_clauses:
                    stupidFilterClauses.append(OpExpression(OpExpression.EQUALITY, [freshVbl, freshVbl]))
        self.parse_tree.input_bindings = auxiliaryInputBindings
        for clause in stupidFilterClauses:
            self.conjoin_to_where_clause(clause)
###########################################################################################################
## Language-specific Normalization Scripts
###########################################################################################################
    def normalize_for_prolog(self):
        """
        Normalization script for Prolog output: fold constants, eliminate
        implies/forall/not in favor of disjunction and NAF, optionally convert
        to SPO form with context handling, then flatten optionals, select
        terms, and value computations.
        """
        self.propagate_constants_to_predications()
        self.implies_to_or_nots()
        self.foralls_to_not_exist_nots()
        ## do this AFTER converting FORALLs, since that generates more NOTs
        self.push_nots_inwards()
        self.nots_to_nafs()
        if self.spoify_output:
            self.convert_predications_to_spo_nodes(divert_context=False)
            if self.contexts:
                self.quadify_triples()
                self.filter_quad_contexts()
        self.translate_optionals(p_or_true=False)
        self.flatten_select_terms()
        self.flatten_value_computations()
        ## TEMPORARY TO SEE WHAT IT LOOKS LIKE:
        #self.translate_in_enumerate_into_disjunction_of_equalities()
        ## END TEMPORARY
    def normalize_for_sparql(self):
        """
        Reorganize the parse tree to be compatible with SPARQL's bizarre syntax.
        """
        self.propagate_constants_to_predications(skip_context_variables=True)
        self.implies_to_or_nots()
        self.foralls_to_not_exist_nots()
        ## do this AFTER converting FORALLs, since that generates more NOTs
        self.push_nots_inwards()
        self.nots_to_nafs()
        self.convert_predications_to_spo_nodes(divert_context=True)
        self.fix_heterogeneous_disjunctions()
        if False: ## too slow:
            self.translate_in_enumerate_into_disjunction_of_equalities()
        else:
            self.translate_in_enumerate_into_temporary_join()
        #ps("CCC", self.parse_tree)
        self.bubble_up_contexts()
        #ps("DDD", self.parse_tree)
        if self.contexts:
            self.contextify_sparql_triples()
        if False:
            ## THIS GIVES FAULTY SEMANTICS; NOT SURE HOW TO FIX:
            self.disappear_exists()
            self.push_nots_inwards()
        self.translate_negations()
        ## finally, create non-normalized structure to assist filters output
        self.denormalize_filter_ands()
        self.flatten_nested_ands()
        self.color_filter_nodes()
        self.convert_select_constants_to_input_bindings()
def pc(msg, parse_tree):
    ## Debugging helper: print 'msg' followed by 'parse_tree' rendered in
    ## prefix Common Logic syntax.
    print msg, str(StringsBuffer(complain='SILENT').common_logify(parse_tree))
def ps(msg, parse_tree):
    ## Debugging helper: print 'msg' followed by 'parse_tree' rendered in
    ## SPARQL syntax.
    print msg, str(StringsBuffer(complain='SILENT').sparqlify(parse_tree))
###########################################################################################################
##
###########################################################################################################
class StringsBuffer:
    """
    Accumulates string fragments (plus integer indentation markers) while
    serializing a parsed Common Logic query.  The four render methods
    ('common_logify', 'infix_common_logify', 'prologify', 'sparqlify') each
    walk a tree of Term/OpExpression/QueryBlock nodes and append fragments;
    'stringify' (also '__str__') joins everything into the final query text.
    All append-style methods return self so calls can be chained.
    """
    NEWLINE = '\n'
    def __init__(self, include_newlines=False, complain=None, spoify_output=False, infix_with_prefix_triples=False):
        # Fragment list; may also contain ints, which are indentation markers
        # resolved later by 'process_embedded_indents'.
        self.buffer = []
        self.include_newlines = include_newlines
        self.running_indent = 0
        # 'SILENT' makes 'complain' append a visible "!!!" marker instead of
        # raising QueryMissingFeatureException.
        self.complain_flag = complain
        self.execution_language = None
        self.infix_with_prefix_triples = infix_with_prefix_triples # infix CommonLogic hack
        self.spoify_output = spoify_output # Prolog hack
    def append(self, item):
        """Append one fragment; returns self for chaining."""
        if item is None:
            # Debugging aid: appending None indicates a serializer bug.
            print "BREAK"
        self.buffer.append(item)
        return self
    def pop(self):
        """
        Remove the last item/string in the buffer.
        """
        self.buffer.pop()
        return self
    def newline(self):
        """Append a newline, or a space when newlines are suppressed."""
        if self.include_newlines:
            self.append('\n')
        else:
            self.append(' ')
        return self
    def delimiter(self, delimiter):
        """Append 'delimiter', flattening newlines when they are suppressed."""
        if not self.include_newlines:
            delimiter = delimiter.replace('\n', ' ')
        self.append(delimiter)
        return self
    def indent(self, indent):
        """Record an indentation level (applied by 'process_embedded_indents')."""
        self.append(indent)
        self.running_indent = indent
        return self
    def process_embedded_indents(self):
        """Drop integer markers, indenting embedded newlines in later fragments."""
        strings = []
        indent = 0
        for s in self.buffer:
            if isinstance(s, int):
                indent = s
            else:
                s = s.replace('\n', '\n' + ' ' * indent)
                strings.append(s)
        self.buffer = strings
    def stringify(self):
        """Resolve indent markers and join all fragments into one string."""
        self.process_embedded_indents()
        return ''.join(self.buffer)
    def __str__(self):
        return self.stringify()
    def complain(self, term, operator):
        """
        'term' represents an operator not implemented in the target language.
        Raise an exception, or append highly visible syntax indicating the problem.
        TODO: RAISE EXCEPTION
        """
        if self.complain_flag == 'SILENT':
            self.append(operator + "!!! ")
            return self
        else:
            type = term.term_type if isinstance(term, Term) else term.operator if isinstance(term, OpExpression) else operator
            raise QueryMissingFeatureException("%s is unable to evaluate an expression of type %s" % (self.execution_language, type))
    def common_logify(self, term, brackets=None, delimiter=' '):
        """Serialize 'term' into prefix (s-expression) Common Logic syntax."""
        if isinstance(term, Term):
            self.append(str(term))
        elif isinstance(term, str):
            self.append(term)
        elif isinstance(term, list):
            # Serialize each element followed by 'delimiter'; the trailing
            # delimiter is popped afterwards.
            if brackets: self.append(brackets[0])
            for arg in term:
                self.common_logify(arg)
                self.delimiter(delimiter)
            if term:
                self.pop() ## pop trailing delimiter
            if brackets: self.append(brackets[1])
        elif isinstance(term, QueryBlock):
            self.append('(select ')
            if term.distinct: self.append('distinct ')
            self.append('(').common_logify(term.select_terms, delimiter=' ').append(')\n')
            self.append(' where ')
            self.indent(7).common_logify(term.where_clause)
            if term.contexts_clause:
                self.indent(1).append('\ncontexts (').common_logify(term.contexts_clause, delimiter=' ').append(')')
            if term.limit >= 0:
                self.indent(1).append('\nlimit ' + str(term.limit))
            if term.order_by:
                self.indent(1).append('\norder by (').common_logify(term.order_by, delimiter=' ').append(')')
            self.append(')')
        elif isinstance(term, OpExpression):
            if term.operator == OpExpression.ENUMERATION:
                self.append('(')
                self.common_logify(term.predicate.lower()).append(' ')
                self.common_logify(term.arguments)
                self.append(')')
            elif term.operator in [OpExpression.AND, OpExpression.OR]:
                self.append('(')
                self.append(term.operator.lower()).append(' ')
                self.common_logify(term.arguments, delimiter='\n')
                self.append(')')
            else:
                self.append('(')
                if term.is_spo: term.predicate = term.predicate.lower()
                self.common_logify(term.predicate or term.operator.lower()).append(' ')
                self.common_logify(term.arguments)
                self.append(')')
        return self
    def infix_common_logify(self, term, brackets=None, delimiter=' ', suppress_parentheses=False):
        """Serialize 'term' into infix Common Logic syntax."""
        if isinstance(term, Term):
            self.append(str(term))
        elif isinstance(term, str):
            self.append(term)
        elif isinstance(term, list):
            if brackets: self.append(brackets[0])
            for arg in term:
                self.infix_common_logify(arg)
                self.delimiter(delimiter)
            if term:
                self.pop() ## pop trailing delimiter
            if brackets: self.append(brackets[1])
        elif isinstance(term, QueryBlock):
            self.append('select ')
            if term.distinct: self.append('distinct ')
            self.infix_common_logify(term.select_terms, delimiter=' ').newline()
            self.append('where ')
            self.indent(6).infix_common_logify(term.where_clause, suppress_parentheses=True)
            if term.contexts_clause:
                self.indent(0).append('\ncontexts ').common_logify(term.contexts_clause, delimiter=' ')
            if term.limit >= 0:
                self.indent(0).append('\nlimit ' + str(term.limit))
        elif isinstance(term, OpExpression):
            if term.operator == OpExpression.ENUMERATION:
                self.infix_common_logify(term.arguments, brackets=('[', ']'), delimiter=', ')
            elif term.operator in [OpExpression.AND, OpExpression.OR]:
                brackets = ('(', ')') if not suppress_parentheses else None
                self.infix_common_logify(term.arguments, delimiter='\n%s ' % term.operator.lower(), brackets=brackets)
            elif term.operator in [OpExpression.NOT, OpExpression.NAF, OpExpression.OPTIONAL]:
                # Unary operators; rendered function-style when triples are
                # printed prefix-style inside infix CL.
                if self.infix_with_prefix_triples:
                    self.append(term.operator.lower()).append('(').infix_common_logify(term.arguments[0]).append(')')
                else:
                    if not suppress_parentheses: self.append('(')
                    self.append(term.operator.lower() + ' ').infix_common_logify(term.arguments[0])
                    if not suppress_parentheses: self.append(')')
            elif term.operator in [OpExpression.EXISTS, OpExpression.FORALL]:
                self.append('(').append(term.operator.lower()).append(' ')
                for arg in term.arguments:
                    self.infix_common_logify(arg).append(' ')
                self.append(')')
            elif term.operator in CONNECTIVE_OPERATORS:
                self.append('(')
                self.infix_common_logify(term.arguments, delimiter=' %s ' % term.operator.lower())
                self.append(')')
            elif term.predicate and self.infix_with_prefix_triples:
                if term.is_spo: term.predicate = term.predicate.lower()
                self.infix_common_logify(term.predicate)
                self.infix_common_logify(term.arguments, brackets=('(', ')'))
            else:
                self.append('(')
                if term.is_spo: term.predicate = term.predicate.lower()
                self.infix_common_logify(term.predicate or term.operator.lower()).append(' ')
                self.infix_common_logify(term.arguments)
                self.append(')')
        return self
    # LISPP_HACK_COUNTER = [0]
    def prologify(self, term, brackets=None, delimiter=' ', suppress_parentheses=False, spoify_output=True):
        """Serialize 'term' into AllegroGraph Prolog 'select' syntax."""
        # def hack_variable():
        #     StringsBuffer.LISPP_HACK_COUNTER[0] = StringsBuffer.LISPP_HACK_COUNTER[0] + 1
        #     return '?hack' + str(StringsBuffer.LISPP_HACK_COUNTER[0])
        if isinstance(term, Term):
            # Resources and literals get Prolog's '!' reader-macro prefix.
            if term.term_type == Term.RESOURCE:
                self.append('!').append(str(term))
            elif term.term_type == Term.LITERAL:
                self.append('!').append(str(term))
            else: ## variable, I guess
                self.append(str(term))
        elif isinstance(term, str):
            self.append(term)
        elif isinstance(term, list):
            if brackets: self.append(brackets[0])
            for arg in term:
                self.prologify(arg)
                self.delimiter(delimiter)
            if term:
                self.pop() ## pop trailing delimiter
            if brackets: self.append(brackets[1])
        elif isinstance(term, QueryBlock):
            self.execution_language = CommonLogicTranslator.PROLOG
            self.spoify_output = spoify_output
            if term.distinct:
                self.append('(select-distinct ')
            else:
                self.append('(select ')
            self.indent(8).prologify(term.select_terms, brackets=('(', ')'), delimiter=' ').newline()
            self.prologify(term.where_clause, suppress_parentheses=True)
            if term.limit >= 0:
                self.append('\n(:limit ' + str(term.limit) + ')')
            self.append(')')
        elif isinstance(term, OpExpression):
            if term.operator == OpExpression.ENUMERATION:
                self.append('(?? (list ')
                self.prologify(term.arguments, delimiter=' ')
                self.append('))')
            elif term.operator == OpExpression.IN:
                self.append('(member ').prologify(term.arguments, delimiter=' ').append(')')
            elif term.operator == OpExpression.EQUALITY:
                ## TEMPORARY UNTIL PROLOG IS FIXED
                self.append('(lispp (upi= ').prologify(term.arguments, delimiter=' ').append('))')
            elif term.operator == OpExpression.AND:
                if suppress_parentheses:
                    self.prologify(term.arguments, delimiter='\n')
                else:
                    self.append('(and ').prologify(term.arguments, delimiter='\n').append(')')
            elif term.operator == OpExpression.EXISTS:
                self.prologify(term.arguments[1])
            elif term.operator == OpExpression.NAF:
                self.append('(not ').prologify(term.arguments[0]).append(')')
            elif term.operator in COMPARISON_OPERATORS and not term.operator == '=':
                op = term.operator.lower()
                ## EXPERIMENT
                op = 'cl:' + op
                ## END EXPERIMENT
                self.append('(lispp (').append(op).append(' ').prologify(term.arguments[0]).append(' ')
                self.prologify(term.arguments[1]).append('))')
            elif term.operator == OpExpression.TRUE:
                self.append('(lispp t)')
            elif term.is_spo:
                self.append('(q ')
                self.prologify(term.arguments, delimiter=' ')
                self.append(')')
            elif term.predicate and self.spoify_output:
                # Render a predication as a (q subject predicate object ...) clause.
                self.append('(q')
                self.append(' ').prologify(term.arguments[0]).append(' ').prologify(term.predicate)
                for i in range(1, len(term.arguments)):
                    self.append(' ').prologify(term.arguments[i])
                #if len(term.arguments) > 3:
                #    self.append(' ').prologify(term.arguments[2])
                self.append(')')
            elif term.operator in [OpExpression.OPTIONAL]:
                # OPTIONAL has no Prolog equivalent; raise or mark the output.
                self.complain(term, 'OPTIONAL')
            elif term.operator == OpExpression.COMPUTE:
                self.append('(lisp ').prologify(term.arguments[0]).append(' (')
                self.prologify(term.arguments[1:], delimiter=' ').append('))')
            else:
                self.append('(')
                self.prologify(term.predicate or term.operator.lower()).append(' ')
                self.prologify(term.arguments, delimiter=' ')
                self.append(')')
        return self
    def sparqlify(self, term, brackets=None, delimiter=' ', suppress_curlies=False, suppress_filter=False):
        """Serialize 'term' into SPARQL syntax."""
        if isinstance(term, Term):
            self.append(str(term))
        elif isinstance(term, str):
            self.append(term)
        elif isinstance(term, list):
            if brackets: self.append(brackets[0])
            for arg in term:
                self.sparqlify(arg, suppress_filter=suppress_filter)
                self.delimiter(delimiter)
            if term:
                self.pop() ## pop trailing delimiter
            if brackets: self.append(brackets[1])
        elif isinstance(term, QueryBlock):
            self.execution_language = CommonLogicTranslator.SPARQL
            self.append('select ')
            if term.distinct: self.append('distinct ')
            self.sparqlify(term.select_terms, delimiter=' ').newline()
            self.append('where ').append('{ ')
            self.indent(6).sparqlify(term.where_clause, suppress_curlies=True)
            self.append(' }')
            if term.order_by:
                self.indent(0).append('\norder by ').sparqlify(term.order_by, delimiter=' ')
            if term.limit >= 0:
                self.indent(0).append('\nlimit '+ str(term.limit))
        elif isinstance(term, OpExpression):
            if term.operator in [OpExpression.AND, OpExpression.OR]:
                ## are we joining triples or filters? look at first non-connective in descendants
                ## to find out:
                # sampleArg = term.arguments[0]
                # while sampleArg.operator in [OpExpression.AND, OpExpression.OR]:
                #     sampleArg = sampleArg.arguments[0]
                # if sampleArg.predicate:
                #     brackets = ('{ ', ' }') if not suppress_curlies else None
                #     delimiter = ' .\n' if term.operator == OpExpression.AND else '\nunion '
                #     self.sparqlify(term.arguments, brackets=brackets, delimiter=delimiter)
                # else:
                #     if not suppress_filter: self.append('filter ')
                #     brackets = ('(', ')')
                #     delimiter = ' && ' if term.operator == OpExpression.AND else ' || '
                #     self.sparqlify(term.arguments, brackets=brackets, delimiter=delimiter, suppress_filter=True)
                # 'color' is assigned by the normalizer ('color_filter_nodes')
                # and decides filter-expression vs. graph-pattern rendering.
                if term.color == 'FILTER':
                    if not suppress_filter: self.append('filter ')
                    brackets = ('(', ')')
                    delimiter = ' && ' if term.operator == OpExpression.AND else ' || '
                    self.sparqlify(term.arguments, brackets=brackets, delimiter=delimiter, suppress_filter=True)
                else:
                    brackets = ('{ ', ' }') if not suppress_curlies else None
                    delimiter = ' .\n' if term.operator == OpExpression.AND else '\nunion '
                    self.sparqlify(term.arguments, brackets=brackets, delimiter=delimiter)
            elif term.operator == OpExpression.OPTIONAL:
                #if not suppress_curlies: self.append('{')
                self.append('optional ').sparqlify(term.arguments[0])
                #if not suppress_curlies: self.append('}')
            elif term.operator in [OpExpression.NOT, OpExpression.NAF]:
                if not term.color == 'FILTER': self.complain(term, "NOT") ## eventually shouldn't occur
                if not suppress_filter: self.append('filter ')
                self.append('(')
                self.append('!').sparqlify(term.arguments[0])
                self.append(')')
            elif term.is_spo:
                if not suppress_curlies: self.append('{')
                if term.context: self.append('graph ').sparqlify(term.context).append(' { ')
                self.sparqlify(term.arguments, delimiter=' ')
                if term.context: self.append(' } ')
                if not suppress_curlies: self.append('}')
            elif term.predicate:
                ## TEMPORARY HACK. TODO: MAKE IT GENERIC:
                if term.predicate == 'bound':
                    self.append(term.predicate + '(').sparqlify(term.arguments[0]).append(')')
                elif True:
                    raise Exception("SPARQL normalization failed to eliminate non-spo predication")
                else:
                    # Unreachable: kept as a sketch of the generic rendering.
                    if not suppress_curlies: self.append('{')
                    self.sparqlify(term.arguments[0]).sparqlify(' ').sparqlify(term.predicate).sparqlify(' ')
                    self.sparqlify(term.arguments[1]).sparqlify(' ')
                    if not suppress_curlies: self.append('}')
            elif term.operator in COMPARISON_OPERATORS:
                if not suppress_filter: self.append('filter ')
                self.append('(')
                self.sparqlify(term.arguments[0]).sparqlify(' ').sparqlify(term.operator).sparqlify(' ')
                self.sparqlify(term.arguments[1]).sparqlify(' ')
                self.append(')')
            elif term.operator in PREFIX_OPERATORS:
                ## TODO: ADD TRANSLATION HERE FROM CL FUNCTORS TO SPARQL FUNCTORS
                ## RIGHT NOW 'REGEX' IS THE ONLY ONE:
                functor = term.operator
                self.append('filter ' + functor + '(').sparqlify(term.arguments, delimiter=', ').append(')')
            elif term.operator == 'GRAPH':
                if not suppress_curlies: self.append('{')
                self.append('graph ').sparqlify(term.context).append(' { ').sparqlify(term.arguments[0], suppress_curlies=True).append(' } ')
                if not suppress_curlies: self.append('}')
            else:
                # Unhandled node type; make the omission loudly visible.
                print "DROPPED THIS ON THE FLOOR", term
        return self
###########################################################################################################
##
###########################################################################################################
def translate_common_logic_query(query, preferred_language='PROLOG', contexts=None, complain='EXCEPTION',
subject_comes_first=False):
"""
Translate a Common Logic query into either SPARQL or PROLOG syntax. If 'preferred_language,
choose that one (first). Return three
values, the query, either 'SPARQL' or 'PROLOG', and an error message if the
translation fails. It may fail either because the syntax is illegal, or because
the combination of expressions in the query is not implementable in either SPARQL
or PROLOG
"""
def help_translate(language):
trans = CommonLogicTranslator(query, subject_comes_first=subject_comes_first)
trans.parse()
Normalizer(trans.parse_tree, language, contexts).normalize()
if language == CommonLogicTranslator.PROLOG:
translation = str(StringsBuffer(complain=complain, spoify_output=True).prologify(trans.parse_tree))
elif language == CommonLogicTranslator.SPARQL:
translation = str(StringsBuffer(complain=complain).sparqlify(trans.parse_tree))
else:
raise IllegalOptionException("No translation available for the execution language '{0}'".format(language))
return translation, trans.parse_tree.contexts_clause, trans.parse_tree.input_bindings, trans.parse_tree.temporary_enumerations
try:
preferred_language = preferred_language or 'PROLOG'
translation, contexts, input_bindings, temporary_enumerations = help_translate(preferred_language)
successfulLanguage = preferred_language
except QueryMissingFeatureException, e1:
try:
otherLanguage = 'SPARQL' if preferred_language == 'PROLOG' else 'PROLOG'
translation, contexts, input_bindings, temporary_enumerations = help_translate(otherLanguage)
successfulLanguage = otherLanguage
except QueryMissingFeatureException:
return None, None, None, e1
return translation, contexts, input_bindings, temporary_enumerations, successfulLanguage, None
def contexts_to_uris(context_terms, repository_connection):
    """
    Map each context term onto a URI object created by 'repository_connection'.
    A term carrying a qname is expanded against the connection's
    locally-declared namespace prefixes; any other term is used verbatim
    via str().  NOTE: prefixes registered only server-side cannot be
    resolved here (known limitation).
    """
    uri_strings = []
    for term in context_terms:
        if not term.qname:
            uri_strings.append(str(term))
            continue
        prefix, localName = term.qname.split(':')
        namespace = repository_connection.getNamespace(prefix)
        if not namespace:
            raise Exception("Can't find a namespace for the prefix '%s' in the contexts reference '%s'" % (prefix, term))
        uri_strings.append("<{0}>".format(namespace + localName))
    return [repository_connection.createURI(s) for s in uri_strings]
###########################################################################################################
## Testing
###########################################################################################################
def translate(cl_select_query, target_dialect=CommonLogicTranslator.PROLOG, contexts=None):
    ## Debugging/test helper: print 'cl_select_query' rendered in all four
    ## syntaxes (prefix CL, infix CL, SPARQL, Prolog).  NOTE: 'target_dialect'
    ## is accepted but never consulted; both SPARQL and Prolog are printed.
    trans = CommonLogicTranslator(cl_select_query)
    trans.parse()
    print "\nCOMMON LOGIC \n" + str(StringsBuffer(include_newlines=True, complain='SILENT').common_logify(trans.parse_tree))
    print "\nINFIX COMMON LOGIC \n" + str(StringsBuffer(include_newlines=True, complain='SILENT', infix_with_prefix_triples=trans.infix_with_prefix_triples).infix_common_logify(trans.parse_tree))
    Normalizer(trans.parse_tree, CommonLogicTranslator.SPARQL, contexts=contexts).normalize()
    print "\nSPARQL \n" + str(StringsBuffer(include_newlines=True, complain='SILENT').sparqlify(trans.parse_tree))
    # Re-parse from scratch: normalization mutates the tree in place.
    trans = CommonLogicTranslator(cl_select_query)
    trans.parse()
    spoify_output = True
    Normalizer(trans.parse_tree, CommonLogicTranslator.PROLOG, contexts=contexts, spoify_output=spoify_output).normalize()
    print "\nPROLOG \n" + str(StringsBuffer(include_newlines=True, complain='SILENT', spoify_output=spoify_output).prologify(trans.parse_tree))
## ----- Sample queries for the ad-hoc test driver below. -----
## Plain names are prefix (s-expression) syntax; the 'i' suffix marks the
## infix-syntax variant of the same query.
query1 = """(select (?s ?o) where (ex:name ?s ?o) (rdf:type ?s <http://www.franz.com/example#Person>))"""
query1i = """select ?s ?o where (ex:name ?s ?o) (rdf:type ?s <http://www.franz.com/example#Person>)"""
query2 = """(select (?s ?o) where (and (ex:name ?s ?o) (rdf:type ?s <http://www.franz.com/example#Person>)))"""
query2i = """select ?s ?o where ex:name(?s ?o) and rdf:type(?s <http://www.franz.com/example#Person>)"""
query3 = """(select (?s ?o) where (and (ex:name ?s ?o) (= ?o "Fred")))"""
query3i = """select ?s ?o where (ex:name ?s ?o) and (?o = "Fred")"""
query4 = """(select (?s ?o) where (and (or (ex:name ?s ?o) (ex:title ?s ?o)) (= ?o "Fred")))"""
query4i = """select ?s ?o where ((ex:name ?s ?o) or (ex:title ?s ?o))and (?o = "Fred")"""
query5 = """(select (?s) where (and (ex:name ?s ?o) (or (= ?o "Fred") (= ?o "Joe"))))"""
query5i = """select ?s where (ex:name ?s ?o) and ((?o = "Fred") or (?o = "Joe"))"""
query6 = """(select (?s ?o) where (and (ex:name ?s ?o) (not (rdf:type ?s <http://www.franz.com/example#Person>))))"""
query6i = """select ?s ?o where (ex:name ?s ?o) and not (rdf:type ?s <http://www.franz.com/example#Person>)"""
query7 = """(select (?s ?o) where (and (triple ?s ex:name ?o) (triple ?s rdf:type <http://www.franz.com/example#Person>)))"""
query7i = """select ?s ?o where triple(?s ex:name ?o) and triple(?s rdf:type <http://www.franz.com/example#Person>)"""
query8 = """(select (?s ?o) where (and (quad ?s ex:name ?o ?c) (= ?c ex:cxt)))"""
query8i = """select ?s ?o where quad(?s ex:name ?o ?c) and (?c = ex:cxt)"""
query9 = """(select (?s ?age) where (and (ex:age ?s ?age) (>= ?age 21)))"""
query9i = """select ?s ?age where ex:age(?s ?age) and (?age >= 21)"""
query10 = """(select (?name ?age) where (and (triple ?s rdf:type ex:Person) (optional (triple ?s ex:name ?name))
(optional (and (ex:age ?s ?age) (> ?age 21)))))"""
query10i = """select ?name ?age where triple(?s rdf:type ex:Person) and optional (triple ?s ex:name ?name)
and optional (and ex:age(?s ?age) (?age > 21))"""
query11 = """(select (?s ?o) where (and (((ex:name ?s ?o))) (((triple ?s rdfs:label ?o)))))"""
query11i = """select ?s ?o where ((ex:name(?s ?o))) and ((triple(?s rdfs:label ?o)))""" ## triple part screws up
query12 = """(select (?name) where (and (rdf:type ?c ex:Company) (ex:gross ?c ?gross) (ex:expenses ?c ?expenses)
(> (- ?gross ?expenses) 50000) (ex:name ?c ?name)))"""
query12i = """select ?name where rdf:type(?c ex:Company) and ex:gross(?c ?gross) and ex:expenses(?c ?expenses)
and ((?gross - ?expenses) > 5000) and ex:name(?c ?name)"""
query13 = """(select (?s ?p ?o) where (in ?s (list <http://foo> <http://bar>)) (triple ?s ?p ?o))"""
query13i = """select ?s ?p ?o where triple(?s ?p ?o) and ?s in [<http://foo> <http://bar>]"""
query14 = """(select distinct (?s ?p ?o) where (triple ?s ?p ?o) contexts (ex:context1 ex:context2) limit 5)"""
query14i = """select distinct ?s ?p ?o where triple(?s ?p ?o) contexts ex:context1, ex:context2 limit 5"""
query15 = """(select (?s) where (and (or (ex:p1 ?s1 ?o1 ?c1) (ex:p2 ?s2 ?o2 ?c1) (ex:p3 ?s3 ?o3 ?c2))
(or (ex:p4 ?s4 ?o4 ?c1) (ex:p5 ?s5 ?o5 ?c1))))"""
query16 = """(select (?s ?p ?o ?c ?p2 ?o2 ) where (and (quad ?s ?p ?o ?c) (optional (quad ?o ?p2 ?o2 )))"""
query16i = """select ?s ?o ?c ?o2 ?c2 where quad(?s ex:p ?o ?c) and optional(quad(?o ex:p2 ?o2 ?c2))"""
query17 = """(select (?s) where (triple ?s ?p ?o) (= ?s ex:Bill))"""
query18 = """(select (?s ?o) where (or (triple ?s foaf:name ?o)
(and (not (triple ?s foaf:name ?o1))
(or (triple ?s foaf:mbox ?o)
(not (triple ?s foaf:mbox ?o2))))))"""
query19 = """(select (?p)
where (and (ex:Person ?p)
(forall ?c (implies (ex:hasChild ?p ?c) (exists ?sp (ex:hasSpouse ?c ?sp))))))"""
query19i = """select ?o ?lac ?otype ?c2
where (?o in [ex:foo, ex:bar]) and
( triple(?o <%s> ?lac) or
quad(?o rdf:type ?otype ?c2) )"""
## NOTE: query19i is rebound immediately below; the value above is dead.
query19i = """(select (?s)
where (or (triple ?wall <http://www.wildsemantics.com/systemworld#gridWidgets> ?s)
(and (triple ?wall <http://www.wildsemantics.com/systemworld#gridWidgets> ?widget1)
(or (triple ?widget1 <http://www.wildsemantics.com/systemworld#backingTopic> ?s)
(triple ?widget1 <http://www.wildsemantics.com/systemworld#filterSet> ?s)))
(and (triple ?wall <http://www.wildsemantics.com/systemworld#freeWidgets> ?widget2)
(or (triple ?widget2 <http://www.wildsemantics.com/systemworld#backingTopic> ?s)
(triple ?widget2 <http://www.wildsemantics.com/systemworld#filterSet> ?s)))))
"""
## NOTE: query20i is rebound several times below; only the LAST binding
## survives.  The intermediate versions are kept as experiment history.
query20i = """select ?s ?p ?o ?c ?lac ?c2
where (?c ?s ?p ?o)
and optional (?c2 ?o <http://www.wildsemantics.com/systemworld#lookAheadCapsule> ?lac)
and ((?s = ?wall)
or ((?wall <http://www.wildsemantics.com/systemworld#gridWidgets> ?widget1)
and ((?s = ?widget1)
or (?widget1 <http://www.wildsemantics.com/systemworld#backingTopic> ?s)
or (?widget1 <http://www.wildsemantics.com/systemworld#filterSet> ?s)))
or ((?wall <http://www.wildsemantics.com/systemworld#freeWidgets> ?widget2)
and ((?s = ?widget2)
or (?widget2 <http://www.wildsemantics.com/systemworld#backingTopic> ?s)
or (?widget2 <http://www.wildsemantics.com/systemworld#filterSet> ?s))))
"""
query20i = """select ?s ?p ?o ?c ?lac ?otype ?c2 where quad(?s ?p ?o ?c) and
(optional quad(?o <http://fiz> ?lac ?c2)) """
query20i = """select ?s ?p ?o ?c ?lac ?otype ?c2 where (?c ?s ?p ?o) and
(?s in [<http://www.wildsemantics.com/worldworld#SystemWorld_World>, <http://www.wildsemantics.com/worldworld#WorldWorld_World>, <http://www.wildsemantics.com/worldworld#AuthWorld_World>, <http://www.wildsemantics.com/worldworld#GardenWorld_World>, <http://www.wildsemantics.com/worldworld#VocabWorld_World>, <http://www.wildsemantics.com/worldworld#PermissionsWorld_World>, <http://www.wildsemantics.com/worldworld#PublicWorld_World>]) and
(optional quad(?o <http://www.wildsemantics.com/systemworld#lookAheadCapsule> ?lac ?c2)) and
(optional quad(?o rdf:type ?otype ?c2))"""
query20i = """select ?s ?p ?o ?c ?lac ?otype ?c2
where ((?cls = ?s)
or (?cls <http://www.wildsemantics.com/systemworld#fields> ?s))
and quad(?s ?p ?o ?c)
and optional (quad(?o <http://www.wildsemantics.com/systemworld#lookAheadCapsule> ?lac ?c2))
and optional (quad(?o rdf:type ?otype ?c2))
"""
query20i = """select ?o ?lac ?otype ?c2
where (?o in [http://www.wildsemantics.com/systemworld#World]) and
( triple(?o <http://www.wildsemantics.com/systemworld#lookAheadCapsule> ?lac) or
quad(?o rdf:type ?otype ?c2) )
"""
query20 = """
(select (?s ?p ?o ?c ?lac ?otype ?c2)
where (and
(quad ?s ?p ?o ?c)
)) """
if __name__ == '__main__':
    ## Ad-hoc test driver: set 'switch' to the number of the query to
    ## translate; the '.1' variants run the infix-syntax version.
    switch = 20
    print "Running test", switch
    if switch == 1: translate(query1) # IMPLICIT AND
    elif switch == 1.1: translate(query1i)
    elif switch == 2: translate(query2) # EXPLICIT AND
    elif switch == 2.1: translate(query2i)
    elif switch == 3: translate(query3) # EQUALITY
    elif switch == 3.1: translate(query3i)
    elif switch == 4: translate(query4) # TRIPLE DISJUNCTION
    elif switch == 4.1: translate(query4i)
    elif switch == 5: translate(query5) # FILTER DISJUNCTION
    elif switch == 5.1: translate(query5i)
    elif switch == 6: translate(query6) # NEGATION
    elif switch == 6.1: translate(query6i)
    elif switch == 7: translate(query7) # TRIPLE PREDICATE
    elif switch == 7.1: translate(query7i)
    elif switch == 8: translate(query8) # CONTEXT. SPARQL BREAKS; PROLOG QUESTIONABLE
    elif switch == 8.1: translate(query8i)
    elif switch == 9: translate(query9) # COMPARISON
    elif switch == 9.1: translate(query9i)
    elif switch == 10: translate(query10) # OPTIONAL
    elif switch == 10.1: translate(query10i)
    elif switch == 11: translate(query11) # NESTED PARENS
    elif switch == 11.1: translate(query11i)
    elif switch == 12: translate(query12) # ARITHMETIC EXPRESSIONS
    elif switch == 12.1: translate(query12i)
    elif switch == 13: translate(query13, contexts=["http:ex#cxt1", "http:ex#cxt2"]) # IN ENUMERATION
    elif switch == 13.1: translate(query13i)
    elif switch == 14: translate(query14) # DISTINCT, CONTEXTS, AND LIMIT
    elif switch == 14.1: translate(query14i)
    elif switch == 15: translate(query15) # SPARQL GRAPH NODES
    #elif switch == 15.1: translate(query15i)
    elif switch == 16: translate(query16, contexts=["http:ex#cxt1", "http:ex#cxt2"]) # BRACKETED OPTIONAL WITH QUAD (HARD FOR SOME REASON)
    elif switch == 16.1: translate(query16i)
    elif switch == 17: translate(query17)
    elif switch == 18: translate(query18) # NEGATION
    elif switch == 19: translate(query19) # UNIVERSAL
    elif switch == 19.1: translate(query19i, contexts=['ex:c1', 'ex:c2'])
    elif switch == 20: translate(query20)
    elif switch == 20.1: translate(query20i)
    else:
        print "There is no test number %s" % switch
|
mpetyx/pychatbot
|
SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/openrdf/query/commonlogic.py
|
Python
|
apache-2.0
| 112,429
|
[
"VisIt"
] |
027955197838351ef091f7137818c9c0e59600ab882aa1e54071c82e132273d3
|
import imp
import os
import marshal
import struct
import sys
from cStringIO import StringIO
is_jython = sys.platform.startswith('java')
from compiler import ast, parse, walk, syntax
from compiler import misc, future, symbols
from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL
from compiler.consts import (CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,
CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION,
CO_FUTURE_ABSIMPORT, CO_FUTURE_WITH_STATEMENT)
if not is_jython:
from compiler.pyassem import TupleArg
else:
TupleArg = None
# XXX The version-specific code can go, since this code only works with 2.x.
# Do we have Python 1.x or Python 2.x?
try:
    VERSION = sys.version_info[0]
except AttributeError:
    # sys.version_info does not exist on Python 1.x.
    VERSION = 1
# Maps (has *args?, has **kwargs?) at a call site to the bytecode opcode.
callfunc_opcode_info = {
    # (Have *args, Have **args) : opcode
    (0,0) : "CALL_FUNCTION",
    (1,0) : "CALL_FUNCTION_VAR",
    (0,1) : "CALL_FUNCTION_KW",
    (1,1) : "CALL_FUNCTION_VAR_KW",
}
# Kinds of enclosing control-flow constructs; presumably the values pushed
# onto the code generator's 'setups' stack -- confirm against the full file.
LOOP = 1
EXCEPT = 2
TRY_FINALLY = 3
END_FINALLY = 4
def compileFile(filename, display=0):
    """Byte-compile the Python source in 'filename' and write the result
    next to it as 'filename' + "c" (e.g. foo.py -> foo.pyc).

    A SyntaxError from compilation propagates to the caller and no output
    file is written.  'display' is forwarded to Module.compile().
    """
    # 'U' = universal-newline read mode, matching CPython's compile behavior.
    f = open(filename, 'U')
    try:
        buf = f.read()
    finally:
        # BUG FIX: the input handle previously leaked if read() raised.
        f.close()
    mod = Module(buf, filename)
    # A SyntaxError raised here skips the write below, as before.
    mod.compile(display)
    f = open(filename + "c", "wb")
    try:
        mod.dump(f)
    finally:
        # BUG FIX: likewise close the output handle even if dump() raises.
        f.close()
if is_jython:
    # use __builtin__ compile
    compile = compile
else:
    def compile(source, filename, mode, flags=None, dont_inherit=None):
        """Replacement for builtin compile() function"""
        # 'flags' and 'dont_inherit' exist only for signature compatibility
        # with the builtin; passing either raises.
        if flags is not None or dont_inherit is not None:
            raise RuntimeError, "not implemented yet"
        # Dispatch on compile mode to the matching driver class.
        if mode == "single":
            gen = Interactive(source, filename)
        elif mode == "exec":
            gen = Module(source, filename)
        elif mode == "eval":
            gen = Expression(source, filename)
        else:
            raise ValueError("compile() 3rd arg must be 'exec' or "
                             "'eval' or 'single'")
        gen.compile()
        return gen.code
class AbstractCompileMode:
    """Shared driver for the three compile modes (exec / eval / single).

    Subclasses set 'mode' and implement compile(); the resulting code
    object is stored on self.code and retrieved via getCode().
    """

    mode = None  # "exec", "eval", or "single"; set by each subclass

    def __init__(self, source, filename):
        self.filename = filename
        self.source = source
        self.code = None

    def _get_tree(self):
        # Parse, tag every node with the filename, then run syntax checks.
        parse_tree = parse(self.source, self.mode)
        misc.set_filename(self.filename, parse_tree)
        syntax.check(parse_tree)
        return parse_tree

    def compile(self):
        pass  # implemented by subclass

    def getCode(self):
        return self.code
class Expression(AbstractCompileMode):
    """Compile mode for a single expression ("eval")."""

    mode = "eval"

    def compile(self):
        # Generate code straight from the checked parse tree.
        self.code = ExpressionCodeGenerator(self._get_tree()).getCode()
class Interactive(AbstractCompileMode):
    """Compile mode for one interactive statement ("single")."""

    mode = "single"

    def compile(self):
        # Generate code straight from the checked parse tree.
        self.code = InteractiveCodeGenerator(self._get_tree()).getCode()
class Module(AbstractCompileMode):
    """Compile mode for a whole module ("exec"); can also dump a .pyc."""
    mode = "exec"
    def compile(self, display=0):
        """Compile the module; if 'display' is true, pretty-print the AST."""
        tree = self._get_tree()
        gen = ModuleCodeGenerator(tree)
        if display:
            import pprint
            # NOTE: pprint.pprint prints and returns None, so this line
            # shows the tree followed by an extra "None".
            print pprint.pprint(tree)
        self.code = gen.getCode()
    def dump(self, f):
        """Write a .pyc image (magic+mtime header, then marshalled code) to 'f'."""
        f.write(self.getPycHeader())
        marshal.dump(self.code, f)
    # .pyc magic number; imp.get_magic() is unavailable under Jython.
    MAGIC = None if is_jython else imp.get_magic()
    def getPycHeader(self):
        # compile.c uses marshal to write a long directly, with
        # calling the interface that would also generate a 1-byte code
        # to indicate the type of the value. simplest way to get the
        # same effect is to call marshal and then skip the code.
        mtime = os.path.getmtime(self.filename)
        mtime = struct.pack('<i', mtime)
        return self.MAGIC + mtime
class LocalNameFinder:
    """Find local names in scope"""
    def __init__(self, names=()):
        # 'names' seeds the local set (e.g. with function parameters).
        self.names = misc.Set()
        self.globals = misc.Set()
        for name in names:
            self.names.add(name)
    # XXX list comprehensions and for loops
    def getLocals(self):
        """Return collected local names, minus any declared 'global'."""
        for elt in self.globals.elements():
            if self.names.has_elt(elt):
                self.names.remove(elt)
        return self.names
    def visitDict(self, node):
        pass
    def visitGlobal(self, node):
        # 'global x, y' removes those names from the locals (see getLocals).
        for name in node.names:
            self.globals.add(name)
    def visitFunction(self, node):
        # A nested def binds its own name in the enclosing scope.
        self.names.add(node.name)
    def visitLambda(self, node):
        pass
    def visitImport(self, node):
        # 'import a as b' binds b; plain 'import a' binds a.
        for name, alias in node.names:
            self.names.add(alias or name)
    def visitFrom(self, node):
        for name, alias in node.names:
            self.names.add(alias or name)
    def visitClass(self, node):
        self.names.add(node.name)
    def visitAssName(self, node):
        # Any assignment target becomes a local name.
        self.names.add(node.name)
def is_constant_false(node):
    """Return 1 when 'node' is an ast.Const holding a false value, else 0."""
    return 1 if (isinstance(node, ast.Const) and not node.value) else 0
class CodeGenerator:
"""Defines basic code generator for Python bytecode
This class is an abstract base class. Concrete subclasses must
define an __init__() that defines self.graph and then calls the
__init__() defined in this class.
The concrete class must also define the class attributes
NameFinder, FunctionGen, and ClassGen. These attributes can be
defined in the initClass() method, which is a hook for
initializing these methods after all the classes have been
defined.
"""
optimized = 0 # is namespace access optimized?
__initialized = None
class_name = None # provide default for instance variable
    def __init__(self):
        # Per the class docstring, the concrete subclass must have set
        # self.graph before calling this __init__.
        # One-time per-class setup of NameFinder/FunctionGen/ClassGen.
        if self.__initialized is None:
            self.initClass()
            self.__class__.__initialized = 1
        self.checkClass()
        self.locals = misc.Stack()
        self.setups = misc.Stack()
        self.last_lineno = None
        self._setupGraphDelegation()
        self._div_op = "BINARY_DIVIDE"
        # XXX set flags based on future features
        futures = self.get_module().futures
        for feature in futures:
            if feature == "division":
                self.graph.setFlag(CO_FUTURE_DIVISION)
                # True division changes which divide opcode is emitted.
                self._div_op = "BINARY_TRUE_DIVIDE"
            elif feature == "absolute_import":
                self.graph.setFlag(CO_FUTURE_ABSIMPORT)
            elif feature == "with_statement":
                self.graph.setFlag(CO_FUTURE_WITH_STATEMENT)
def initClass(self):
"""This method is called once for each class"""
    def checkClass(self):
        """Verify that class is constructed correctly"""
        # Sanity-check that initClass()/the subclass provided everything a
        # code generator needs before any bytecode is emitted.
        try:
            assert hasattr(self, 'graph')
            assert getattr(self, 'NameFinder')
            assert getattr(self, 'FunctionGen')
            assert getattr(self, 'ClassGen')
        except AssertionError, msg:
            # Re-raise with the concrete subclass name for easier debugging.
            # (The caught `msg` carries no text: the asserts above are bare.)
            intro = "Bad class construction for %s" % self.__class__.__name__
            raise AssertionError, intro
def _setupGraphDelegation(self):
self.emit = self.graph.emit
self.newBlock = self.graph.newBlock
self.startBlock = self.graph.startBlock
self.nextBlock = self.graph.nextBlock
self.setDocstring = self.graph.setDocstring
def getCode(self):
"""Return a code object"""
return self.graph.getCode()
def mangle(self, name):
if self.class_name is not None:
return misc.mangle(name, self.class_name)
else:
return name
def parseSymbols(self, tree):
s = symbols.SymbolVisitor()
walk(tree, s)
return s.scopes
def get_module(self):
raise RuntimeError, "should be implemented by subclasses"
# Next five methods handle name access
def isLocalName(self, name):
return self.locals.top().has_elt(name)
def storeName(self, name):
self._nameOp('STORE', name)
def loadName(self, name):
self._nameOp('LOAD', name)
def delName(self, name):
self._nameOp('DELETE', name)
    def _nameOp(self, prefix, name):
        # Emit the scope-appropriate variant of a name opcode.  `prefix` is
        # one of LOAD/STORE/DELETE; the suffix (_NAME, _FAST, _GLOBAL,
        # _DEREF) is chosen from the symbol table's scope for `name`.
        name = self.mangle(name)
        scope = self.scope.check_name(name)
        if scope == SC_LOCAL:
            if not self.optimized:
                # Unoptimized (module/class) namespaces are dict-backed.
                self.emit(prefix + '_NAME', name)
            else:
                # Optimized (function) namespaces use the fast-locals array.
                self.emit(prefix + '_FAST', name)
        elif scope == SC_GLOBAL:
            if not self.optimized:
                self.emit(prefix + '_NAME', name)
            else:
                self.emit(prefix + '_GLOBAL', name)
        elif scope == SC_FREE or scope == SC_CELL:
            # Free and cell variables both go through the cell indirection.
            self.emit(prefix + '_DEREF', name)
        else:
            raise RuntimeError, "unsupported scope for var %s: %d" % \
                  (name, scope)
def _implicitNameOp(self, prefix, name):
"""Emit name ops for names generated implicitly by for loops
The interpreter generates names that start with a period or
dollar sign. The symbol table ignores these names because
they aren't present in the program text.
"""
if self.optimized:
self.emit(prefix + '_FAST', name)
else:
self.emit(prefix + '_NAME', name)
# The set_lineno() function and the explicit emit() calls for
# SET_LINENO below are only used to generate the line number table.
# As of Python 2.3, the interpreter does not have a SET_LINENO
# instruction. pyassem treats SET_LINENO opcodes as a special case.
    def set_lineno(self, node, force=False):
        """Emit SET_LINENO if necessary.

        The instruction is considered necessary if the node has a
        lineno attribute and it is different than the last lineno
        emitted (or *force* is true).

        Returns True if SET_LINENO was emitted, False otherwise.

        There are no rules for when an AST node should have a lineno
        attribute.  The transformer and AST code need to be reviewed
        and a consistent policy implemented and documented.  Until
        then, this method works around missing line numbers.
        """
        lineno = getattr(node, 'lineno', None)
        if lineno is not None and (lineno != self.last_lineno
                                   or force):
            self.emit('SET_LINENO', lineno)
            # Remember the last line emitted so duplicates are suppressed.
            self.last_lineno = lineno
            return True
        return False
# The first few visitor methods handle nodes that generator new
# code objects. They use class attributes to determine what
# specialized code generators to use.
NameFinder = LocalNameFinder
FunctionGen = None
ClassGen = None
def visitModule(self, node):
self.scopes = self.parseSymbols(node)
self.scope = self.scopes[node]
self.emit('SET_LINENO', 0)
if node.doc:
self.emit('LOAD_CONST', node.doc)
self.storeName('__doc__')
lnf = walk(node.node, self.NameFinder(), verbose=0)
self.locals.push(lnf.getLocals())
self.visit(node.node)
self.emit('LOAD_CONST', None)
self.emit('RETURN_VALUE')
def visitExpression(self, node):
self.set_lineno(node)
self.scopes = self.parseSymbols(node)
self.scope = self.scopes[node]
self.visit(node.node)
self.emit('RETURN_VALUE')
def visitFunction(self, node):
self._visitFuncOrLambda(node, isLambda=0)
if node.doc:
self.setDocstring(node.doc)
self.storeName(node.name)
def visitLambda(self, node):
self._visitFuncOrLambda(node, isLambda=1)
def _visitFuncOrLambda(self, node, isLambda=0):
if not isLambda and node.decorators:
for decorator in node.decorators.nodes:
self.visit(decorator)
ndecorators = len(node.decorators.nodes)
else:
ndecorators = 0
gen = self.FunctionGen(node, self.scopes, isLambda,
self.class_name, self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
for default in node.defaults:
self.visit(default)
self._makeClosure(gen, len(node.defaults))
for i in range(ndecorators):
self.emit('CALL_FUNCTION', 1)
def visitClass(self, node):
gen = self.ClassGen(node, self.scopes,
self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
self.emit('LOAD_CONST', node.name)
for base in node.bases:
self.visit(base)
self.emit('BUILD_TUPLE', len(node.bases))
self._makeClosure(gen, 0)
self.emit('CALL_FUNCTION', 0)
self.emit('BUILD_CLASS')
self.storeName(node.name)
# The rest are standard visitor methods
# The next few implement control-flow statements
def visitIf(self, node):
end = self.newBlock()
numtests = len(node.tests)
for i in range(numtests):
test, suite = node.tests[i]
if is_constant_false(test):
# XXX will need to check generator stuff here
continue
self.set_lineno(test)
self.visit(test)
nextTest = self.newBlock()
self.emit('JUMP_IF_FALSE', nextTest)
self.nextBlock()
self.emit('POP_TOP')
self.visit(suite)
self.emit('JUMP_FORWARD', end)
self.startBlock(nextTest)
self.emit('POP_TOP')
if node.else_:
self.visit(node.else_)
self.nextBlock(end)
def visitWhile(self, node):
self.set_lineno(node)
loop = self.newBlock()
else_ = self.newBlock()
after = self.newBlock()
self.emit('SETUP_LOOP', after)
self.nextBlock(loop)
self.setups.push((LOOP, loop))
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('JUMP_IF_FALSE', else_ or after)
self.nextBlock()
self.emit('POP_TOP')
self.visit(node.body)
self.emit('JUMP_ABSOLUTE', loop)
self.startBlock(else_) # or just the POPs if not else clause
self.emit('POP_TOP')
self.emit('POP_BLOCK')
self.setups.pop()
if node.else_:
self.visit(node.else_)
self.nextBlock(after)
def visitFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
after = self.newBlock()
self.setups.push((LOOP, start))
self.set_lineno(node)
self.emit('SETUP_LOOP', after)
self.visit(node.list)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=1)
self.emit('FOR_ITER', anchor)
self.visit(node.assign)
self.visit(node.body)
self.emit('JUMP_ABSOLUTE', start)
self.nextBlock(anchor)
self.emit('POP_BLOCK')
self.setups.pop()
if node.else_:
self.visit(node.else_)
self.nextBlock(after)
def visitBreak(self, node):
if not self.setups:
raise SyntaxError, "'break' outside loop (%s, %d)" % \
(node.filename, node.lineno)
self.set_lineno(node)
self.emit('BREAK_LOOP')
def visitContinue(self, node):
if not self.setups:
raise SyntaxError, "'continue' outside loop (%s, %d)" % \
(node.filename, node.lineno)
kind, block = self.setups.top()
if kind == LOOP:
self.set_lineno(node)
self.emit('JUMP_ABSOLUTE', block)
self.nextBlock()
elif kind == EXCEPT or kind == TRY_FINALLY:
self.set_lineno(node)
# find the block that starts the loop
top = len(self.setups)
while top > 0:
top = top - 1
kind, loop_block = self.setups[top]
if kind == LOOP:
break
if kind != LOOP:
raise SyntaxError, "'continue' outside loop (%s, %d)" % \
(node.filename, node.lineno)
self.emit('CONTINUE_LOOP', loop_block)
self.nextBlock()
elif kind == END_FINALLY:
msg = "'continue' not allowed inside 'finally' clause (%s, %d)"
raise SyntaxError, msg % (node.filename, node.lineno)
def visitTest(self, node, jump):
end = self.newBlock()
for child in node.nodes[:-1]:
self.visit(child)
self.emit(jump, end)
self.nextBlock()
self.emit('POP_TOP')
self.visit(node.nodes[-1])
self.nextBlock(end)
def visitAnd(self, node):
self.visitTest(node, 'JUMP_IF_FALSE')
def visitOr(self, node):
self.visitTest(node, 'JUMP_IF_TRUE')
def visitIfExp(self, node):
endblock = self.newBlock()
elseblock = self.newBlock()
self.visit(node.test)
self.emit('JUMP_IF_FALSE', elseblock)
self.emit('POP_TOP')
self.visit(node.then)
self.emit('JUMP_FORWARD', endblock)
self.nextBlock(elseblock)
self.emit('POP_TOP')
self.visit(node.else_)
self.nextBlock(endblock)
def visitCompare(self, node):
self.visit(node.expr)
cleanup = self.newBlock()
for op, code in node.ops[:-1]:
self.visit(code)
self.emit('DUP_TOP')
self.emit('ROT_THREE')
self.emit('COMPARE_OP', op)
self.emit('JUMP_IF_FALSE', cleanup)
self.nextBlock()
self.emit('POP_TOP')
# now do the last comparison
if node.ops:
op, code = node.ops[-1]
self.visit(code)
self.emit('COMPARE_OP', op)
if len(node.ops) > 1:
end = self.newBlock()
self.emit('JUMP_FORWARD', end)
self.startBlock(cleanup)
self.emit('ROT_TWO')
self.emit('POP_TOP')
self.nextBlock(end)
# list comprehensions
__list_count = 0
def visitListComp(self, node):
self.set_lineno(node)
# setup list
append = "$append%d" % self.__list_count
self.__list_count = self.__list_count + 1
self.emit('BUILD_LIST', 0)
self.emit('DUP_TOP')
self.emit('LOAD_ATTR', 'append')
self._implicitNameOp('STORE', append)
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor))
self._implicitNameOp('LOAD', append)
self.visit(node.expr)
self.emit('CALL_FUNCTION', 1)
self.emit('POP_TOP')
for start, cont, anchor in stack:
if cont:
skip_one = self.newBlock()
self.emit('JUMP_FORWARD', skip_one)
self.startBlock(cont)
self.emit('POP_TOP')
self.nextBlock(skip_one)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
self._implicitNameOp('DELETE', append)
self.__list_count = self.__list_count - 1
def visitListCompFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
self.visit(node.list)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=True)
self.emit('FOR_ITER', anchor)
self.nextBlock()
self.visit(node.assign)
return start, anchor
def visitListCompIf(self, node, branch):
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('JUMP_IF_FALSE', branch)
self.newBlock()
self.emit('POP_TOP')
def _makeClosure(self, gen, args):
frees = gen.scope.get_free_vars()
if frees:
for name in frees:
self.emit('LOAD_CLOSURE', name)
self.emit('BUILD_TUPLE', len(frees))
self.emit('LOAD_CONST', gen)
self.emit('MAKE_CLOSURE', args)
else:
self.emit('LOAD_CONST', gen)
self.emit('MAKE_FUNCTION', args)
def visitGenExpr(self, node):
gen = GenExprCodeGenerator(node, self.scopes, self.class_name,
self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
self._makeClosure(gen, 0)
# precomputation of outmost iterable
self.visit(node.code.quals[0].iter)
self.emit('GET_ITER')
self.emit('CALL_FUNCTION', 1)
def visitGenExprInner(self, node):
self.set_lineno(node)
# setup list
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor, end = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor, end))
self.visit(node.expr)
self.emit('YIELD_VALUE')
self.emit('POP_TOP')
for start, cont, anchor, end in stack:
if cont:
skip_one = self.newBlock()
self.emit('JUMP_FORWARD', skip_one)
self.startBlock(cont)
self.emit('POP_TOP')
self.nextBlock(skip_one)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
self.emit('POP_BLOCK')
self.setups.pop()
self.startBlock(end)
self.emit('LOAD_CONST', None)
def visitGenExprFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
end = self.newBlock()
self.setups.push((LOOP, start))
self.emit('SETUP_LOOP', end)
if node.is_outmost:
self.loadName('.0')
else:
self.visit(node.iter)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=True)
self.emit('FOR_ITER', anchor)
self.nextBlock()
self.visit(node.assign)
return start, anchor, end
def visitGenExprIf(self, node, branch):
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('JUMP_IF_FALSE', branch)
self.newBlock()
self.emit('POP_TOP')
# exception related
def visitAssert(self, node):
# XXX would be interesting to implement this via a
# transformation of the AST before this stage
if __debug__:
end = self.newBlock()
self.set_lineno(node)
# XXX AssertionError appears to be special case -- it is always
# loaded as a global even if there is a local name. I guess this
# is a sort of renaming op.
self.nextBlock()
self.visit(node.test)
self.emit('JUMP_IF_TRUE', end)
self.nextBlock()
self.emit('POP_TOP')
self.emit('LOAD_GLOBAL', 'AssertionError')
if node.fail:
self.visit(node.fail)
self.emit('RAISE_VARARGS', 2)
else:
self.emit('RAISE_VARARGS', 1)
self.nextBlock(end)
self.emit('POP_TOP')
def visitRaise(self, node):
self.set_lineno(node)
n = 0
if node.expr1:
self.visit(node.expr1)
n = n + 1
if node.expr2:
self.visit(node.expr2)
n = n + 1
if node.expr3:
self.visit(node.expr3)
n = n + 1
self.emit('RAISE_VARARGS', n)
def visitTryExcept(self, node):
body = self.newBlock()
handlers = self.newBlock()
end = self.newBlock()
if node.else_:
lElse = self.newBlock()
else:
lElse = end
self.set_lineno(node)
self.emit('SETUP_EXCEPT', handlers)
self.nextBlock(body)
self.setups.push((EXCEPT, body))
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('JUMP_FORWARD', lElse)
self.startBlock(handlers)
last = len(node.handlers) - 1
for i in range(len(node.handlers)):
expr, target, body = node.handlers[i]
self.set_lineno(expr)
if expr:
self.emit('DUP_TOP')
self.visit(expr)
self.emit('COMPARE_OP', 'exception match')
next = self.newBlock()
self.emit('JUMP_IF_FALSE', next)
self.nextBlock()
self.emit('POP_TOP')
self.emit('POP_TOP')
if target:
self.visit(target)
else:
self.emit('POP_TOP')
self.emit('POP_TOP')
self.visit(body)
self.emit('JUMP_FORWARD', end)
if expr:
self.nextBlock(next)
else:
self.nextBlock()
if expr: # XXX
self.emit('POP_TOP')
self.emit('END_FINALLY')
if node.else_:
self.nextBlock(lElse)
self.visit(node.else_)
self.nextBlock(end)
def visitTryFinally(self, node):
body = self.newBlock()
final = self.newBlock()
self.set_lineno(node)
self.emit('SETUP_FINALLY', final)
self.nextBlock(body)
self.setups.push((TRY_FINALLY, body))
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('LOAD_CONST', None)
self.nextBlock(final)
self.setups.push((END_FINALLY, final))
self.visit(node.final)
self.emit('END_FINALLY')
self.setups.pop()
__with_count = 0
def visitWith(self, node):
body = self.newBlock()
final = self.newBlock()
exitvar = "$exit%d" % self.__with_count
valuevar = "$value%d" % self.__with_count
self.__with_count += 1
self.set_lineno(node)
self.visit(node.expr)
self.emit('DUP_TOP')
self.emit('LOAD_ATTR', '__exit__')
self._implicitNameOp('STORE', exitvar)
self.emit('LOAD_ATTR', '__enter__')
self.emit('CALL_FUNCTION', 0)
if node.vars is None:
self.emit('POP_TOP')
else:
self._implicitNameOp('STORE', valuevar)
self.emit('SETUP_FINALLY', final)
self.nextBlock(body)
self.setups.push((TRY_FINALLY, body))
if node.vars is not None:
self._implicitNameOp('LOAD', valuevar)
self._implicitNameOp('DELETE', valuevar)
self.visit(node.vars)
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('LOAD_CONST', None)
self.nextBlock(final)
self.setups.push((END_FINALLY, final))
self._implicitNameOp('LOAD', exitvar)
self._implicitNameOp('DELETE', exitvar)
self.emit('WITH_CLEANUP')
self.emit('END_FINALLY')
self.setups.pop()
self.__with_count -= 1
# misc
def visitDiscard(self, node):
self.set_lineno(node)
self.visit(node.expr)
self.emit('POP_TOP')
def visitConst(self, node):
self.emit('LOAD_CONST', node.value)
def visitKeyword(self, node):
self.emit('LOAD_CONST', node.name)
self.visit(node.expr)
def visitGlobal(self, node):
# no code to generate
pass
def visitName(self, node):
self.set_lineno(node)
self.loadName(node.name)
def visitPass(self, node):
self.set_lineno(node)
def visitImport(self, node):
self.set_lineno(node)
level = 0 if self.graph.checkFlag(CO_FUTURE_ABSIMPORT) else -1
for name, alias in node.names:
if VERSION > 1:
self.emit('LOAD_CONST', level)
self.emit('LOAD_CONST', None)
self.emit('IMPORT_NAME', name)
mod = name.split(".")[0]
if alias:
self._resolveDots(name)
self.storeName(alias)
else:
self.storeName(mod)
def visitFrom(self, node):
self.set_lineno(node)
level = node.level
if level == 0 and not self.graph.checkFlag(CO_FUTURE_ABSIMPORT):
level = -1
fromlist = map(lambda (name, alias): name, node.names)
if VERSION > 1:
self.emit('LOAD_CONST', level)
self.emit('LOAD_CONST', tuple(fromlist))
self.emit('IMPORT_NAME', node.modname)
for name, alias in node.names:
if VERSION > 1:
if name == '*':
self.namespace = 0
self.emit('IMPORT_STAR')
# There can only be one name w/ from ... import *
assert len(node.names) == 1
return
else:
self.emit('IMPORT_FROM', name)
self._resolveDots(name)
self.storeName(alias or name)
else:
self.emit('IMPORT_FROM', name)
self.emit('POP_TOP')
def _resolveDots(self, name):
elts = name.split(".")
if len(elts) == 1:
return
for elt in elts[1:]:
self.emit('LOAD_ATTR', elt)
def visitGetattr(self, node):
self.visit(node.expr)
self.emit('LOAD_ATTR', self.mangle(node.attrname))
# next five implement assignments
def visitAssign(self, node):
self.set_lineno(node)
self.visit(node.expr)
dups = len(node.nodes) - 1
for i in range(len(node.nodes)):
elt = node.nodes[i]
if i < dups:
self.emit('DUP_TOP')
if isinstance(elt, ast.Node):
self.visit(elt)
def visitAssName(self, node):
if node.flags == 'OP_ASSIGN':
self.storeName(node.name)
elif node.flags == 'OP_DELETE':
self.set_lineno(node)
self.delName(node.name)
else:
print "oops", node.flags
def visitAssAttr(self, node):
self.visit(node.expr)
if node.flags == 'OP_ASSIGN':
self.emit('STORE_ATTR', self.mangle(node.attrname))
elif node.flags == 'OP_DELETE':
self.emit('DELETE_ATTR', self.mangle(node.attrname))
else:
print "warning: unexpected flags:", node.flags
print node
def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'):
if findOp(node) != 'OP_DELETE':
self.emit(op, len(node.nodes))
for child in node.nodes:
self.visit(child)
if VERSION > 1:
visitAssTuple = _visitAssSequence
visitAssList = _visitAssSequence
else:
def visitAssTuple(self, node):
self._visitAssSequence(node, 'UNPACK_TUPLE')
def visitAssList(self, node):
self._visitAssSequence(node, 'UNPACK_LIST')
# augmented assignment
def visitAugAssign(self, node):
self.set_lineno(node)
aug_node = wrap_aug(node.node)
self.visit(aug_node, "load")
self.visit(node.expr)
self.emit(self._augmented_opcode[node.op])
self.visit(aug_node, "store")
_augmented_opcode = {
'+=' : 'INPLACE_ADD',
'-=' : 'INPLACE_SUBTRACT',
'*=' : 'INPLACE_MULTIPLY',
'/=' : 'INPLACE_DIVIDE',
'//=': 'INPLACE_FLOOR_DIVIDE',
'%=' : 'INPLACE_MODULO',
'**=': 'INPLACE_POWER',
'>>=': 'INPLACE_RSHIFT',
'<<=': 'INPLACE_LSHIFT',
'&=' : 'INPLACE_AND',
'^=' : 'INPLACE_XOR',
'|=' : 'INPLACE_OR',
}
def visitAugName(self, node, mode):
if mode == "load":
self.loadName(node.name)
elif mode == "store":
self.storeName(node.name)
def visitAugGetattr(self, node, mode):
if mode == "load":
self.visit(node.expr)
self.emit('DUP_TOP')
self.emit('LOAD_ATTR', self.mangle(node.attrname))
elif mode == "store":
self.emit('ROT_TWO')
self.emit('STORE_ATTR', self.mangle(node.attrname))
def visitAugSlice(self, node, mode):
if mode == "load":
self.visitSlice(node, 1)
elif mode == "store":
slice = 0
if node.lower:
slice = slice | 1
if node.upper:
slice = slice | 2
if slice == 0:
self.emit('ROT_TWO')
elif slice == 3:
self.emit('ROT_FOUR')
else:
self.emit('ROT_THREE')
self.emit('STORE_SLICE+%d' % slice)
def visitAugSubscript(self, node, mode):
if mode == "load":
self.visitSubscript(node, 1)
elif mode == "store":
self.emit('ROT_THREE')
self.emit('STORE_SUBSCR')
def visitExec(self, node):
self.visit(node.expr)
if node.locals is None:
self.emit('LOAD_CONST', None)
else:
self.visit(node.locals)
if node.globals is None:
self.emit('DUP_TOP')
else:
self.visit(node.globals)
self.emit('EXEC_STMT')
def visitCallFunc(self, node):
pos = 0
kw = 0
self.set_lineno(node)
self.visit(node.node)
for arg in node.args:
self.visit(arg)
if isinstance(arg, ast.Keyword):
kw = kw + 1
else:
pos = pos + 1
if node.star_args is not None:
self.visit(node.star_args)
if node.dstar_args is not None:
self.visit(node.dstar_args)
have_star = node.star_args is not None
have_dstar = node.dstar_args is not None
opcode = callfunc_opcode_info[have_star, have_dstar]
self.emit(opcode, kw << 8 | pos)
def visitPrint(self, node, newline=0):
self.set_lineno(node)
if node.dest:
self.visit(node.dest)
for child in node.nodes:
if node.dest:
self.emit('DUP_TOP')
self.visit(child)
if node.dest:
self.emit('ROT_TWO')
self.emit('PRINT_ITEM_TO')
else:
self.emit('PRINT_ITEM')
if node.dest and not newline:
self.emit('POP_TOP')
def visitPrintnl(self, node):
self.visitPrint(node, newline=1)
if node.dest:
self.emit('PRINT_NEWLINE_TO')
else:
self.emit('PRINT_NEWLINE')
def visitReturn(self, node):
self.set_lineno(node)
self.visit(node.value)
self.emit('RETURN_VALUE')
def visitYield(self, node):
self.set_lineno(node)
self.visit(node.value)
self.emit('YIELD_VALUE')
# slice and subscript stuff
def visitSlice(self, node, aug_flag=None):
# aug_flag is used by visitAugSlice
self.visit(node.expr)
slice = 0
if node.lower:
self.visit(node.lower)
slice = slice | 1
if node.upper:
self.visit(node.upper)
slice = slice | 2
if aug_flag:
if slice == 0:
self.emit('DUP_TOP')
elif slice == 3:
self.emit('DUP_TOPX', 3)
else:
self.emit('DUP_TOPX', 2)
if node.flags == 'OP_APPLY':
self.emit('SLICE+%d' % slice)
elif node.flags == 'OP_ASSIGN':
self.emit('STORE_SLICE+%d' % slice)
elif node.flags == 'OP_DELETE':
self.emit('DELETE_SLICE+%d' % slice)
else:
print "weird slice", node.flags
raise
def visitSubscript(self, node, aug_flag=None):
self.visit(node.expr)
for sub in node.subs:
self.visit(sub)
if len(node.subs) > 1:
self.emit('BUILD_TUPLE', len(node.subs))
if aug_flag:
self.emit('DUP_TOPX', 2)
if node.flags == 'OP_APPLY':
self.emit('BINARY_SUBSCR')
elif node.flags == 'OP_ASSIGN':
self.emit('STORE_SUBSCR')
elif node.flags == 'OP_DELETE':
self.emit('DELETE_SUBSCR')
# binary ops
def binaryOp(self, node, op):
self.visit(node.left)
self.visit(node.right)
self.emit(op)
def visitAdd(self, node):
return self.binaryOp(node, 'BINARY_ADD')
def visitSub(self, node):
return self.binaryOp(node, 'BINARY_SUBTRACT')
def visitMul(self, node):
return self.binaryOp(node, 'BINARY_MULTIPLY')
def visitDiv(self, node):
return self.binaryOp(node, self._div_op)
def visitFloorDiv(self, node):
return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE')
def visitMod(self, node):
return self.binaryOp(node, 'BINARY_MODULO')
def visitPower(self, node):
return self.binaryOp(node, 'BINARY_POWER')
def visitLeftShift(self, node):
return self.binaryOp(node, 'BINARY_LSHIFT')
def visitRightShift(self, node):
return self.binaryOp(node, 'BINARY_RSHIFT')
# unary ops
def unaryOp(self, node, op):
self.visit(node.expr)
self.emit(op)
def visitInvert(self, node):
return self.unaryOp(node, 'UNARY_INVERT')
def visitUnarySub(self, node):
return self.unaryOp(node, 'UNARY_NEGATIVE')
def visitUnaryAdd(self, node):
return self.unaryOp(node, 'UNARY_POSITIVE')
def visitUnaryInvert(self, node):
return self.unaryOp(node, 'UNARY_INVERT')
def visitNot(self, node):
return self.unaryOp(node, 'UNARY_NOT')
def visitBackquote(self, node):
return self.unaryOp(node, 'UNARY_CONVERT')
# bit ops
def bitOp(self, nodes, op):
self.visit(nodes[0])
for node in nodes[1:]:
self.visit(node)
self.emit(op)
def visitBitand(self, node):
return self.bitOp(node.nodes, 'BINARY_AND')
def visitBitor(self, node):
return self.bitOp(node.nodes, 'BINARY_OR')
def visitBitxor(self, node):
return self.bitOp(node.nodes, 'BINARY_XOR')
# object constructors
def visitEllipsis(self, node):
self.emit('LOAD_CONST', Ellipsis)
def visitTuple(self, node):
self.set_lineno(node)
for elt in node.nodes:
self.visit(elt)
self.emit('BUILD_TUPLE', len(node.nodes))
def visitList(self, node):
self.set_lineno(node)
for elt in node.nodes:
self.visit(elt)
self.emit('BUILD_LIST', len(node.nodes))
def visitSliceobj(self, node):
for child in node.nodes:
self.visit(child)
self.emit('BUILD_SLICE', len(node.nodes))
def visitDict(self, node):
self.set_lineno(node)
self.emit('BUILD_MAP', 0)
for k, v in node.items:
self.emit('DUP_TOP')
self.visit(k)
self.visit(v)
self.emit('ROT_THREE')
self.emit('STORE_SUBSCR')
class NestedScopeMixin:
    """Defines initClass() for nested scoping (Python 2.2-compatible)"""
    def initClass(self):
        # Bind the concrete helper classes that CodeGenerator.checkClass()
        # expects every generator subclass to provide.
        self.__class__.NameFinder = LocalNameFinder
        self.__class__.FunctionGen = FunctionCodeGenerator
        self.__class__.ClassGen = ClassCodeGenerator
class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph("<module>", tree.filename)
self.futures = future.find_futures(tree)
self.__super_init()
walk(tree, self)
def get_module(self):
return self
class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
futures = ()
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph("<expression>", tree.filename)
self.__super_init()
walk(tree, self)
def get_module(self):
return self
class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
futures = ()
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph("<interactive>", tree.filename)
self.__super_init()
self.set_lineno(tree)
walk(tree, self)
self.emit('RETURN_VALUE')
def get_module(self):
return self
def visitDiscard(self, node):
# XXX Discard means it's an expression. Perhaps this is a bad
# name.
self.visit(node.expr)
self.emit('PRINT_EXPR')
class AbstractFunctionCode:
optimized = 1
lambdaCount = 0
def __init__(self, func, scopes, isLambda, class_name, mod):
self.class_name = class_name
self.module = mod
if isLambda:
klass = FunctionCodeGenerator
name = "<lambda.%d>" % klass.lambdaCount
klass.lambdaCount = klass.lambdaCount + 1
else:
name = func.name
args, hasTupleArg = generateArgList(func.argnames)
self.graph = pyassem.PyFlowGraph(name, func.filename, args,
optimized=1)
self.isLambda = isLambda
self.super_init()
if not isLambda and func.doc:
self.setDocstring(func.doc)
lnf = walk(func.code, self.NameFinder(args), verbose=0)
self.locals.push(lnf.getLocals())
if func.varargs:
self.graph.setFlag(CO_VARARGS)
if func.kwargs:
self.graph.setFlag(CO_VARKEYWORDS)
self.set_lineno(func)
if hasTupleArg:
self.generateArgUnpack(func.argnames)
def get_module(self):
return self.module
def finish(self):
self.graph.startExitBlock()
if not self.isLambda:
self.emit('LOAD_CONST', None)
self.emit('RETURN_VALUE')
def generateArgUnpack(self, args):
for i in range(len(args)):
arg = args[i]
if isinstance(arg, tuple):
self.emit('LOAD_FAST', '.%d' % (i * 2))
self.unpackSequence(arg)
def unpackSequence(self, tup):
if VERSION > 1:
self.emit('UNPACK_SEQUENCE', len(tup))
else:
self.emit('UNPACK_TUPLE', len(tup))
for elt in tup:
if isinstance(elt, tuple):
self.unpackSequence(elt)
else:
self._nameOp('STORE', elt)
unpackTuple = unpackSequence
class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
CodeGenerator):
super_init = CodeGenerator.__init__ # call be other init
scopes = None
__super_init = AbstractFunctionCode.__init__
def __init__(self, func, scopes, isLambda, class_name, mod):
self.scopes = scopes
self.scope = scopes[func]
self.__super_init(func, scopes, isLambda, class_name, mod)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
if self.scope.generator is not None:
self.graph.setFlag(CO_GENERATOR)
class GenExprCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
CodeGenerator):
super_init = CodeGenerator.__init__ # call be other init
scopes = None
__super_init = AbstractFunctionCode.__init__
def __init__(self, gexp, scopes, class_name, mod):
self.scopes = scopes
self.scope = scopes[gexp]
self.__super_init(gexp, scopes, 1, class_name, mod)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
self.graph.setFlag(CO_GENERATOR)
class AbstractClassCode:
def __init__(self, klass, scopes, module):
self.class_name = klass.name
self.module = module
self.graph = pyassem.PyFlowGraph(klass.name, klass.filename,
optimized=0, klass=1)
self.super_init()
lnf = walk(klass.code, self.NameFinder(), verbose=0)
self.locals.push(lnf.getLocals())
self.graph.setFlag(CO_NEWLOCALS)
if klass.doc:
self.setDocstring(klass.doc)
def get_module(self):
return self.module
def finish(self):
self.graph.startExitBlock()
self.emit('LOAD_LOCALS')
self.emit('RETURN_VALUE')
class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator):
super_init = CodeGenerator.__init__
scopes = None
__super_init = AbstractClassCode.__init__
def __init__(self, klass, scopes, module):
self.scopes = scopes
self.scope = scopes[klass]
self.__super_init(klass, scopes, module)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
self.set_lineno(klass)
self.emit("LOAD_GLOBAL", "__name__")
self.storeName("__module__")
if klass.doc:
self.emit("LOAD_CONST", klass.doc)
self.storeName('__doc__')
def generateArgList(arglist):
    """Generate an arg list marking TupleArgs.

    Returns a pair (args, count): *args* is the flat argument list with
    each tuple argument replaced by a positional TupleArg placeholder and
    the flattened component names appended at the end; *count* is the
    number of tuple arguments encountered.
    """
    args = []
    extra = []
    count = 0
    for i in range(len(arglist)):
        elt = arglist[i]
        if isinstance(elt, str):
            args.append(elt)
        elif isinstance(elt, tuple):
            # Placeholder keyed by position; generateArgUnpack() emits the
            # code that unpacks it into the real names at function entry.
            args.append(TupleArg(i * 2, elt))
            extra.extend(misc.flatten(elt))
            count = count + 1
        else:
            # Bug fix: the old three-expression raise
            # (``raise ValueError, "...", elt``) passed `elt` as the
            # *traceback* argument.  Raise a properly formatted error.
            raise ValueError("unexpected argument type: %r" % (elt,))
    return args + extra, count
def findOp(node):
    """Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
    finder = OpFinder()
    walk(node, finder, verbose=0)
    return finder.op
class OpFinder:
    """Visitor that determines the single assignment op used in a tree.

    Records the ``flags`` value (e.g. OP_ASSIGN, OP_DELETE) of the first
    assignment node seen and verifies every later node agrees; a mix of
    ops in one statement is an error.
    """
    def __init__(self):
        # No assignment op seen yet.
        self.op = None
    def visitAssName(self, node):
        if self.op is None:
            self.op = node.flags
        elif self.op != node.flags:
            # Fix: replace the Python-2-only comma raise with the call
            # form (valid on both 2 and 3) and name the conflicting ops.
            raise ValueError("mixed ops in stmt: %r vs %r"
                             % (self.op, node.flags))
    # Attribute and subscript targets carry `flags` the same way.
    visitAssAttr = visitAssName
    visitSubscript = visitAssName
class Delegator:
    """Base class to support delegation for augmented assignment nodes.

    To generate code for augmented assignments, we use the following
    wrapper classes.  In visitAugAssign, the left-hand expression node
    is visited twice.  The first time the visit uses the normal method
    for that node.  The second time the visit uses a different method
    that generates the appropriate code to perform the assignment.
    These delegator classes wrap the original AST nodes in order to
    support the variant visit methods.
    """
    def __init__(self, obj):
        # The wrapped AST node; all attribute access is forwarded to it.
        self.obj = obj
    def __getattr__(self, attr):
        # Only invoked for attributes not found on the wrapper itself.
        return getattr(self.obj, attr)
class AugGetattr(Delegator):
    # Marker wrapper: dispatches Getattr nodes to visitAugGetattr.
    pass
class AugName(Delegator):
    # Marker wrapper: dispatches Name nodes to visitAugName.
    pass
class AugSlice(Delegator):
    # Marker wrapper: dispatches Slice nodes to visitAugSlice.
    pass
class AugSubscript(Delegator):
    # Marker wrapper: dispatches Subscript nodes to visitAugSubscript.
    pass
# Map each assignable AST node class to its augmented-assignment wrapper;
# used by wrap_aug() below.
wrapper = {
    ast.Getattr: AugGetattr,
    ast.Name: AugName,
    ast.Slice: AugSlice,
    ast.Subscript: AugSubscript,
    }
def wrap_aug(node):
    # Wrap an assignable node in its augmented-assignment delegator class.
    return wrapper[node.__class__](node)
if __name__ == "__main__":
    # Script entry point: compile each file named on the command line.
    for file in sys.argv[1:]:
        compileFile(file)
|
zephyrplugins/zephyr
|
zephyr.plugin.jython/jython2.5.2rc3/Lib/compiler/pycodegen.py
|
Python
|
epl-1.0
| 47,446
|
[
"VisIt"
] |
08bca6776c4826af946b640750f15cba125b9813757f0ad1bcbe66f492b33cd6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command based on condition on devices running Lenovo CNOS
description:
- This module allows you to modify the running configuration of a switch. It provides a way to
execute a single CNOS command on a network device by evaluating the current running configuration
and executing the command only if the specific settings have not been already configured.
The CNOS command is passed as an argument of the method.
This module functions the same as the cnos_command module.
The only exception is that the following inventory variable can be specified
[“condition = <flag string>”]
When this inventory variable is specified as the variable of a task, the command is executed for
the network element that matches the flag string. Usually, commands are executed across a group
of network devices. When there is a requirement to skip the execution of the command on one or
more devices, it is recommended to use this module.
This module uses SSH to manage network device configuration.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method. The command is passed using
double quotes. The variables can be placed directly on to the CLI commands or can be invoked
from the vars directory.
required: true
default: Null
condition:
description:
- If you specify condition=false in the inventory file against any device, the command execution
is skipped for that device.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same as it is specified in the
inventory for that device.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_command. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_conditional_command_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: leaf_switch2
command: "spanning-tree mode enable"
enablePassword: "anil"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """
    Ansible entry point: conditionally push one CLI command to a CNOS switch.

    The command is executed only when the inventory-supplied ``condition``
    matches ``flag``; otherwise the task exits without touching the device.
    All device output is appended to ``outputfile`` and scanned for known
    CNOS error signatures; any match fails the task.
    """
    module = AnsibleModule(
        argument_spec=dict(
            clicommand=dict(required=True),
            outputfile=dict(required=True),
            condition=dict(required=True),
            flag=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True), ), supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    condition = module.params['condition']
    flag = module.params['flag']
    cliCommand = module.params['clicommand']
    outputfile = module.params['outputfile']
    deviceType = module.params['deviceType']  # accepted for interface parity; not used below
    hostIP = module.params['host']
    output = ""
    if (condition != flag):
        # BUG FIX: nothing was pushed to the device, so report changed=False
        # (the original reported changed=True even though no change occurred).
        # exit_json raises SystemExit, so no statement after it is reachable.
        module.exit_json(changed=False, msg="Command Skipped for this value")
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enter enable mode, disable paging, then drop into config mode.
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send the CLI command
    output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)
    # Append the collected device output for auditing; 'with' guarantees the
    # file handle is closed even on error (the original leaked on exceptions
    # and shadowed the 'file' builtin).
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # Fail the task if the device output contains a known error signature.
    errorMsg = cnos.checkOutputForError(output)
    if(errorMsg is None):
        module.exit_json(changed=True, msg="CLI Command executed and results saved in file ")
    else:
        module.fail_json(msg=errorMsg)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
qrkourier/ansible
|
lib/ansible/modules/network/lenovo/cnos_conditional_command.py
|
Python
|
gpl-3.0
| 7,085
|
[
"VisIt"
] |
f5faccbc26f513f4f9ff300c539587aa0bb49ed766befb4db95538f6205116a0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
__all_errors__ = ['SubmitError', 'FullQueueError', 'DiskError', 'TimeCancelError', 'MemoryCancelError',
'NodeFailureError']
import re
import abc
import six
from abc import ABCMeta, abstractproperty, abstractmethod
@six.add_metaclass(ABCMeta)
class CorrectorProtocolScheduler(object):
    """
    Interface describing the corrective actions a scheduler adapter must offer.
    Queue adapters / submission-script generators implement these hooks so a
    crashed job can be resubmitted with adjusted scheduler settings.
    """

    @abstractproperty
    def name(self):
        return str()

    @abstractmethod
    def exclude_nodes(self, nodes):
        """
        Exclude the given nodes from the next submission, typically after the
        calculation seemed to crash due to a hardware failure on them.

        nodes: list of node numbers that were found to cause problems
        returns True if the exclusion could be applied, False otherwise
        """
        return bool

    @abstractmethod
    def increase_mem(self):
        """
        Request more memory for the next submission; called when a calculation
        appears to have crashed from insufficient memory.

        returns True if the memory could be increased, False otherwise
        """
        return bool

    @abstractmethod
    def increase_time(self):
        """
        Request more wall time for the next submission; called when the job was
        killed for exceeding its time limit.

        returns True if the time could be increased, False otherwise
        """
        return bool

    @abstractmethod
    def increase_cpus(self):
        """
        Request more CPUs for the next submission; called when time or memory
        limits were broken.

        returns True if the number of CPUs could be increased, False otherwise
        """
        return bool
@six.add_metaclass(ABCMeta)
class CorrectorProtocolApplication(object):
    """
    Interface describing the corrective actions an application adapter must
    offer. Client code (quadapters / submission-script generators) implements
    these hooks to reconfigure the application itself after a crash.
    """

    @abstractproperty
    def name(self):
        return str()

    @abstractmethod
    def decrease_mem(self):
        """
        Reduce the application's memory footprint; called when a calculation
        appears to have crashed from insufficient memory.

        returns True if the memory use could be reduced, False otherwise
        """
        return bool

    @abstractmethod
    def speed_up(self):
        """
        Make the calculation finish sooner; called when time limits were broken.

        returns True if the application could be sped up, False otherwise
        """
        return bool
# NOTE: __str__ interpolates self.__doc__, so the docstrings of this class and
# of every subclass are runtime data — do not edit them casually.
@six.add_metaclass(ABCMeta)
class AbstractError(object):
    """
    Error base class
    """
    def __init__(self, errmsg, meta_data):
        # errmsg: the raw line that triggered this error
        # meta_data: dict of extra values extracted by the parser (never None)
        self.errmsg = errmsg
        self.meta_data = meta_data if meta_data is not None else {}
    def __str__(self):
        _message = '%s  %s\n' \
                   '  error message : %s \n' \
                   '  meta data     : %s' % (self.name, self.__doc__, self.errmsg, str(self.meta_data))
        return _message
    @property
    def name(self):
        # Concrete class name, used in the printable representation.
        return self.__class__.__name__
    @property
    def scheduler_adapter_solutions(self):
        """
        to be implemented by concrete errors returning a list of tuples defining corrections. The First element of the
        tuple should be a string of one of the methods in CorrectorProtocolScheduler, the second element should
        contain the arguments.
        """
        return []
    @property
    def application_adapter_solutions(self):
        """
        to be implemented by concrete errors returning a list of tuples defining corrections. The First element of the
        tuple should be a string of one of the methods in CorrectorProtocolApplication, the second element should
        contain the arguments.
        """
        return []
    def last_resort_solution(self):
        """
        what to do if every thing else fails...
        """
        print('non of the defined solutions for %s returned success...' % self.name)
        return
# Matched in the batch-system stderr when sbatch/qsub itself rejects the job.
class SubmitError(AbstractError):
    """
    Errors occurring at submission. The limits on the cluster may have changed.
    """
# Submission refused because queue / accounting limits are exhausted.
class FullQueueError(AbstractError):
    """
    Errors occurring at submission. To many jobs in the queue / total cpus / .. .
    """
# Raised for failures writing to disk (quota, full filesystem, ...).
class DiskError(AbstractError):
    """
    Errors involving problems writing to disk.
    """
class TimeCancelError(AbstractError):
    """
    Error due to exceeding the time limit for the job.
    .limit will return a list of limits that were broken, None if it could not be determined.
    """
    @property
    def limit(self):
        # Populated by the parser's meta_filter regexes; may be None.
        return self.meta_data.get('broken_limit')
    @property
    def scheduler_adapter_solutions(self):
        # Ask the scheduler for more wall time.
        return [(CorrectorProtocolScheduler.increase_time,)]
    @property
    def application_adapter_solutions(self):
        # Alternatively, make the application itself finish faster.
        return [(CorrectorProtocolApplication.speed_up,)]
class MemoryCancelError(AbstractError):
    """
    Error due to exceeding the memory limit for the job.
    .limit will return a list of limits that were broken, None if it could not be determined.
    """
    @property
    def limit(self):
        # Populated by the parser's meta_filter regexes; may be None.
        return self.meta_data.get('broken_limit')
    @property
    def scheduler_adapter_solutions(self):
        # Ask the scheduler for more memory.
        return [(CorrectorProtocolScheduler.increase_mem,)]
    @property
    def application_adapter_solutions(self):
        # Alternatively, make the application itself use less memory.
        return [(CorrectorProtocolApplication.decrease_mem,)]
# Memory-limit variant specific to the master (rank 0) node.
class MasterProcessMemoryCancelError(AbstractError):
    """
    Error due to exceeding the memory limit for the job on the master node.
    """
# Memory-limit variant for any node other than the master.
class SlaveProcessMemoryCancelError(AbstractError):
    """
    Error due to exceeding the memory limit for the job on a node different from the master.
    """
class NodeFailureError(AbstractError):
    """
    Error due the hardware failure of a specific node.
    .node will return a list of problematic nodes, None if it could not be determined.
    """
    @property
    def nodes(self):
        # Node numbers extracted by the parser; may be None.
        return self.meta_data.get('nodes')
    @property
    def scheduler_adapter_solutions(self):
        # Resubmit while excluding the failing nodes.
        return [(CorrectorProtocolScheduler.exclude_nodes, [self.nodes])]
@six.add_metaclass(ABCMeta)
class AbstractErrorParser(object):
    """
    Abstract class for parsing errors originating from the scheduler system and error that are not reported by the
    program itself, i.e. segmentation faults.

    A concrete implementation of this class for a specific scheduler needs a class attribute ERRORS for containing a
    dictionary specifying error:

    ERRORS = {ErrorClass: {
                'file_specifier' : {
                    'string': "the string to be looked for",
                    'meta_filter': "string specifing the regular expression to obtain the meta data"
                }
            }
    """
    def __init__(self, err_file, out_file=None, run_err_file=None, batch_err_file=None):
        # Map each logical file role to its path; a role may be None if the
        # corresponding file is not available.
        self.files = {'err': err_file, 'out': out_file, 'run_err': run_err_file, 'batch_err': batch_err_file}
        self.errors = []
        return
    @abc.abstractproperty
    def error_definitions(self):
        return {}
    @staticmethod
    def extract_metadata(lines, meta_filter):
        """
        Apply the regular expressions in meta_filter to the lines and collect
        the captured groups, deduplicated and sorted, keyed per filter entry.
        """
        meta_dict = {}
        for key in meta_filter.keys():
            values = []
            for line in lines:
                # Match once and reuse the result (the original ran the same
                # regex twice per line).
                match = re.match(meta_filter[key][0], line)
                if match is not None:
                    values.append(match.group(meta_filter[key][1]))
            values = sorted(set(values))
            meta_dict.update({key: values})
        return meta_dict
    def parse_single(self, errmsg):
        """
        Parse the provided files for the corresponding strings.

        Returns a (found, message, metadata) triple: whether the marker string
        was seen, the last matching line, and the extracted metadata dict.
        """
        found = False
        message = None
        metadata = None
        for k in errmsg.keys():
            if self.files[k] is not None:
                try:
                    with open(self.files[k], mode='r') as f:
                        lines = f.read().split('\n')
                    for line in lines:
                        if errmsg[k]['string'] in line:
                            message = line
                            found = True
                    if found:
                        metadata = self.extract_metadata(lines, errmsg[k]['meta_filter'])
                except (IOError, OSError):
                    print(self.files[k], 'not found')
                    pass
                except TypeError:
                    # BUG FIX: the original called self.files[k].cls(), which
                    # does not exist and raised AttributeError while handling
                    # the TypeError; report the actual type instead.
                    print('type error', self.files[k], ' has type ', type(self.files[k]), ' should be string.')
                    pass
        return found, message, metadata
    def parse(self):
        """
        Parse for the occurens of all errors defined in ERRORS
        """
        for error in self.error_definitions:
            result = self.parse_single(self.error_definitions[error])
            if result[0]:
                self.errors.append(error(result[1], result[2]))
        if len(self.errors) > 0:
            print('QUEUE_ERROR FOUND')
            for error in self.errors:
                print(error)
class SlurmErrorParser(AbstractErrorParser):
    """
    Implementation of the error definitions for the Slurm scheduler
    """
    @property
    def error_definitions(self):
        # Maps error class -> file role -> marker string, plus the regexes
        # (meta_filter) used to pull metadata out of the matching file.
        return {
            SubmitError: {
                'batch_err': {
                    'string': "Batch job submission failed",
                    'meta_filter': {}
                }
            },
            FullQueueError: {
                'batch_err': {
                    'string': "sbatch: error: Batch job submission failed: Job violates accounting/QOS policy",
                    'meta_filter': {}
                }
            },
            MemoryCancelError: {
                'err': {
                    'string': "Exceeded job memory limit",
                    'meta_filter': {}
                }
            },
            TimeCancelError: {
                'err': {
                    'string': "DUE TO TIME LIMIT",
                    'meta_filter': {
                        'time_of_cancel': [r"JOB (\d+) CANCELLED AT (\S*) DUE TO TIME LIMIT", 1]
                    }
                }
            },
            NodeFailureError: {
                'run_err': {
                    'string': "can't open /dev/ipath, network down",
                    'meta_filter': {
                        'nodes': [r"node(\d+)\.(\d+)can't open (\S*), network down \(err=26\)", 1]
                    }
                }
            },
            # Placeholder entry kept from the original template.
            AbstractError: {
                'out': {
                    'string': "a string to be found",
                    'meta_filter': {}
                }
            }
        }
class PBSErrorParser(AbstractErrorParser):
    """
    Implementation for the PBS scheduler
    """
    # Example lines this parser recognizes:
    #=>> PBS: job killed: walltime 932 exceeded limit 900
    #=>> PBS: job killed: walltime 46 exceeded limit 30
    #=>> PBS: job killed: vmem 2085244kb exceeded limit 1945600kb
    @property
    def error_definitions(self):
        # Maps error class -> file role -> marker string plus the regexes
        # (meta_filter) that extract the broken limit values.
        return {
            TimeCancelError: {
                'out': {
                    'string': "job killed: walltime",
                    'meta_filter': {
                        'broken_limit': [r"job killed: walltime (\d+) exceeded limit (\d+)", 1]
                    }
                }
            },
            # Placeholder entry kept from the original template.
            AbstractError: {
                'out': {
                    'string': "a string to be found",
                    'meta_filter': {}
                }
            },
            MemoryCancelError: {
                'out': {
                    'string': "job killed: vmem",
                    'meta_filter': {
                        'broken_limit': [r"(.*)job killed: vmem (\d+)kb exceeded limit (\d+)kb", 3]
                    }
                }
            }
        }
# Registry of supported schedulers; PBS Pro and Torque share one parser.
ALL_PARSERS = {'slurm': SlurmErrorParser, 'pbspro': PBSErrorParser, 'torque': PBSErrorParser}


def get_parser(scheduler, err_file, out_file=None, run_err_file=None, batch_err_file=None):
    """
    Factory function to provide the parser for the specified scheduler. If the scheduler is not implemented None is
    returned. The files, string, correspond to file names of the out and err files:
    err_file        stderr of the scheduler
    out_file        stdout of the scheduler
    run_err_file    stderr of the application
    batch_err_file  stderr of the submission

    Returns:
        None if scheduler is not supported.
    """
    parser_cls = ALL_PARSERS.get(scheduler)
    if parser_cls is None:
        return None
    return parser_cls(err_file, out_file, run_err_file, batch_err_file)
if __name__ == "__main__":
my_parser = get_parser('pbs', err_file='queue.err', out_file='queue.out', run_err_file='run.err',
batch_err_file='sbatch.err')
my_parser.parse()
print('parser.errors', my_parser.errors)
for my_error in my_parser.errors:
print(my_error)
|
migueldiascosta/pymatgen
|
pymatgen/io/abinit/scheduler_error_parsers.py
|
Python
|
mit
| 13,858
|
[
"pymatgen"
] |
988bd3b11d330b08727b69dde1f5cfa2cdbd2d0bb06bce7d5eba00668f472752
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
try:
import simplejson as json
except ImportError:
import json
from nab.runner import Runner
from nab.util import (detectorNameToClass, checkInputs)
def getDetectorClassConstructors(detectors):
  """
  Map each detector name to its class object.

  Assumes the detector classes have already been imported into this module's
  global namespace; the class name is derived with detectorNameToClass.
  """
  return {name: globals()[detectorNameToClass(name)] for name in detectors}
def main(args):
  """
  Run the requested NAB phases (detect, optimize, score, normalize).

  args: argparse.Namespace carrying the flags and paths defined in __main__.
  Relative paths are resolved against this file's directory.
  """
  root = os.path.dirname(os.path.realpath(__file__))
  numCPUs = int(args.numCPUs) if args.numCPUs is not None else None

  dataDir = os.path.join(root, args.dataDir)
  windowsFile = os.path.join(root, args.windowsFile)
  resultsDir = os.path.join(root, args.resultsDir)
  profilesFile = os.path.join(root, args.profilesFile)
  thresholdsFile = os.path.join(root, args.thresholdsFile)

  runner = Runner(dataDir=dataDir,
                  labelPath=windowsFile,
                  resultsDir=resultsDir,
                  profilesPath=profilesFile,
                  thresholdPath=thresholdsFile,
                  numCPUs=numCPUs)
  runner.initialize()

  if args.detect:
    detectorConstructors = getDetectorClassConstructors(args.detectors)
    runner.detect(detectorConstructors)

  if args.optimize:
    runner.optimize(args.detectors)

  if args.score:
    with open(args.thresholdsFile) as thresholdConfigFile:
      detectorThresholds = json.load(thresholdConfigFile)
    runner.score(args.detectors, detectorThresholds)

  if args.normalize:
    try:
      runner.normalize()
    except AttributeError:
      # BUG FIX: the original wrote `except AttributeError("...")`, which puts
      # an exception *instance* in the except clause; in Python 3 that raises
      # TypeError at catch time instead of handling the error. Catch the class
      # and report the intended message.
      print("Error: you must run the scoring step with the normalization step.")
      return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--detect",
help="Generate detector results but do not analyze results "
"files.",
default=False,
action="store_true")
parser.add_argument("--optimize",
help="Optimize the thresholds for each detector and user "
"profile combination",
default=False,
action="store_true")
parser.add_argument("--score",
help="Analyze results in the results directory",
default=False,
action="store_true")
parser.add_argument("--normalize",
help="Normalize the final scores",
default=False,
action="store_true")
parser.add_argument("--skipConfirmation",
help="If specified will skip the user confirmation step",
default=False,
action="store_true")
parser.add_argument("--dataDir",
default="data",
help="This holds all the label windows for the corpus.")
parser.add_argument("--resultsDir",
default="results",
help="This will hold the results after running detectors "
"on the data")
parser.add_argument("--windowsFile",
default=os.path.join("labels", "combined_windows.json"),
help="JSON file containing ground truth labels for the "
"corpus.")
parser.add_argument("-d", "--detectors",
nargs="*",
type=str,
default=["null", "numenta", "random", "skyline",
"bayesChangePt", "windowedGaussian", "expose",
"relativeEntropy"],
help="Comma separated list of detector(s) to use, e.g. "
"null,numenta")
parser.add_argument("-p", "--profilesFile",
default=os.path.join("config", "profiles.json"),
help="The configuration file to use while running the "
"benchmark.")
parser.add_argument("-t", "--thresholdsFile",
default=os.path.join("config", "thresholds.json"),
help="The configuration file that stores thresholds for "
"each combination of detector and username")
parser.add_argument("-n", "--numCPUs",
default=None,
help="The number of CPUs to use to run the "
"benchmark. If not specified all CPUs will be used.")
args = parser.parse_args()
if (not args.detect
and not args.optimize
and not args.score
and not args.normalize):
args.detect = True
args.optimize = True
args.score = True
args.normalize = True
if len(args.detectors) == 1:
# Handle comma-seperated list argument.
args.detectors = args.detectors[0].split(",")
# The following imports are necessary for getDetectorClassConstructors to
# automatically figure out the detector classes.
# Only import detectors if used so as to avoid unnecessary dependency.
if "bayesChangePt" in args.detectors:
from nab.detectors.bayes_changept.bayes_changept_detector import (
BayesChangePtDetector)
if "numenta" in args.detectors:
from nab.detectors.numenta.numenta_detector import NumentaDetector
if "numentaTM" in args.detectors:
from nab.detectors.numenta.numentaTM_detector import NumentaTMDetector
if "null" in args.detectors:
from nab.detectors.null.null_detector import NullDetector
if "random" in args.detectors:
from nab.detectors.random.random_detector import RandomDetector
if "skyline" in args.detectors:
from nab.detectors.skyline.skyline_detector import SkylineDetector
if "windowedGaussian" in args.detectors:
from nab.detectors.gaussian.windowedGaussian_detector import (
WindowedGaussianDetector)
if "relativeEntropy" in args.detectors:
from nab.detectors.relative_entropy.relative_entropy_detector import (
RelativeEntropyDetector)
# To run expose detector, you must have sklearn version 0.16.1 installed.
# Higher versions of sklearn may not be compatible with numpy version 1.9.2
# required to run nupic.
if "expose" in args.detectors:
from nab.detectors.expose.expose_detector import ExposeDetector
if "contextOSE" in args.detectors:
from nab.detectors.context_ose.context_ose_detector import (
ContextOSEDetector )
if args.skipConfirmation or checkInputs(args):
main(args)
|
BoltzmannBrain/NAB
|
run.py
|
Python
|
agpl-3.0
| 7,503
|
[
"Gaussian"
] |
43a16ae95613bbb66dbb2e47a00246bbdba5a7465f7b952eae2fb032a24de27c
|
#pylint: disable=invalid-name
# File: ReduceSCD_Parallel.py
#
# Version 2.0, modified to work with Mantid's new python interface.
#
# This script will run multiple instances of the script ReduceSCD_OneRun.py
# in parallel, using either local processes or a slurm partition. After
# using the ReduceSCD_OneRun script to find, index and integrate peaks from
# multiple runs, this script merges the integrated peaks files and re-indexes
# them in a consistent way. If desired, the indexing can also be changed to a
# specified conventional cell.
# Many intermediate files are generated and saved, so all output is written
# to a specified output_directory. This output directory must be created
# before running this script, and must be specified in the configuration file.
# The user should first make sure that all parameters are set properly in
# the configuration file for the ReduceSCD_OneRun.py script, and that that
# script will properly reduce one scd run. Once a single run can be properly
# reduced, set the additional parameters in the configuration file that specify
# how the the list of runs will be processed in parallel.
#
#
# _v1: December 3rd 2013. Mads Joergensen
# This version now includes the posibility to use the 1D cylindrical integration method
# and the posibility to load a UB matrix which will be used for integration of the individual
# runs and to index the combined file (Code from Xiapoing).
#
#
# _v2: December 3rd 2013. Mads Joergensen
# Adds the posibility to optimize the loaded UB for each run for a better peak prediction
# It is also possible to find the common UB by using lattice parameters of the first
# run or the loaded matirix instead of the default FFT method
#
from __future__ import (absolute_import, division, print_function)
import os
import sys
import threading
import time
import ReduceDictionary
sys.path.append("/opt/mantidnightly/bin") # noqa
#sys.path.append("/opt/Mantid/bin")
from mantid.simpleapi import *
print("API Version")
print(apiVersion())
start_time = time.time()
# -------------------------------------------------------------------------
# ProcessThread is a simple local class. Each instance of ProcessThread is
# a thread that starts a command line process to reduce one run.
#
class ProcessThread(threading.Thread):
    """One worker thread that shells out to reduce a single SCD run."""

    # Default command; overwritten via setCommand() before start().
    command = ""

    def setCommand(self, command=""):
        """Store the command line this thread will execute."""
        self.command = command

    def run(self):
        """Announce and execute the stored command in a subshell."""
        print('STARTING PROCESS: ' + self.command)
        os.system(self.command)
# -------------------------------------------------------------------------
#
# Get the config file name from the command line
#
# Require at least one configuration file on the command line.
if len(sys.argv) < 2:
    print("You MUST give the config file name on the command line")
    exit(0)
config_files = sys.argv[1:]
#
# Load the parameter names and values from the specified configuration file
# into a dictionary and set all the required parameters from the dictionary.
#
params_dictionary = ReduceDictionary.LoadDictionary( *config_files )
exp_name = params_dictionary[ "exp_name" ]
output_directory = params_dictionary[ "output_directory" ]
output_nexus = params_dictionary.get( "output_nexus", False)
reduce_one_run_script = params_dictionary[ "reduce_one_run_script" ]
slurm_queue_name = params_dictionary[ "slurm_queue_name" ]
max_processes = int(params_dictionary[ "max_processes" ])
min_d = params_dictionary[ "min_d" ]
max_d = params_dictionary[ "max_d" ]
tolerance = params_dictionary[ "tolerance" ]
cell_type = params_dictionary[ "cell_type" ]
centering = params_dictionary[ "centering" ]
allow_perm = params_dictionary[ "allow_perm" ]
run_nums = params_dictionary[ "run_nums" ]
data_directory = params_dictionary[ "data_directory" ]
use_cylindrical_integration = params_dictionary[ "use_cylindrical_integration" ]
instrument_name = params_dictionary[ "instrument_name" ]
read_UB = params_dictionary[ "read_UB" ]
UB_filename = params_dictionary[ "UB_filename" ]
UseFirstLattice = params_dictionary[ "UseFirstLattice" ]
num_peaks_to_find = params_dictionary[ "num_peaks_to_find" ]
# determine what python executable to launch new jobs with
python = sys.executable
if python is None: # not all platforms define this variable
    python = 'python'
#
# Make the list of separate process commands. If a slurm queue name
# was specified, run the processes using slurm, otherwise just use
# multiple processes on the local machine.
#
procList=[]
index = 0
for r_num in run_nums:
    procList.append( ProcessThread() )
    # Each worker re-runs the one-run script with the same config plus its run number.
    cmd = '%s %s %s %s' % (python, reduce_one_run_script, " ".join(config_files), str(r_num))
    if slurm_queue_name is not None:
        # Wrap the command in srun and capture its console output per run.
        console_file = output_directory + "/" + str(r_num) + "_output.txt"
        cmd = 'srun -p ' + slurm_queue_name + \
              ' --cpus-per-task=3 -J ReduceSCD_Parallel.py -o ' + console_file + ' ' + cmd
    procList[index].setCommand( cmd )
    index = index + 1
#
# Now create and start a thread for each command to run the commands in parallel,
# starting up to max_processes simultaneously.
#
# Launch the worker threads, keeping at most max_processes alive at a time.
all_done = False
active_list = []
while not all_done:
    if len(procList) > 0 and len(active_list) < max_processes:
        thread = procList[0]
        procList.remove(thread)
        active_list.append(thread)
        thread.start()
    time.sleep(2)
    # Reap finished workers. BUG FIX: is_alive() replaces isAlive(), which was
    # removed in Python 3.9 (is_alive exists since Python 2.6, so this stays
    # backward compatible); rebuilding the list also avoids removing elements
    # from a list while iterating over it, which skips entries.
    active_list = [t for t in active_list if t.is_alive()]
    if len(procList) == 0 and len(active_list) == 0:
        all_done = True
print("\n**************************************************************************************")
print("************** Completed Individual Runs, Starting to Combine Results ****************")
print("**************************************************************************************\n")
#
# First combine all of the integrated files, by reading the separate files and
# appending them to a combined output file.
#
niggli_name = output_directory + "/" + exp_name + "_Niggli"
if output_nexus:
niggli_integrate_file = niggli_name + ".nxs"
else:
niggli_integrate_file = niggli_name + ".integrate"
niggli_matrix_file = niggli_name + ".mat"
first_time = True
if output_nexus:
#Only need this for instrument for peaks_total
short_filename = "%s_%s" % (instrument_name, str(run_nums[0]))
if data_directory is not None:
full_name = data_directory + "/" + short_filename + ".nxs.h5"
if not os.path.exists(full_name):
full_name = data_directory + "/" + short_filename + "_event.nxs"
else:
candidates = FileFinder.findRuns(short_filename)
full_name = ""
for item in candidates:
if os.path.exists(item):
full_name = str(item)
if not full_name.endswith('nxs') and not full_name.endswith('h5'):
print("Exiting since the data_directory was not specified and")
print("findnexus failed for event NeXus file: " + instrument_name + " " + str(run_nums[0]))
exit(0)
#
# Load the first data file to find instrument
#
wksp = LoadEventNexus( Filename=full_name, FilterByTofMin=0, FilterByTofMax=0 )
peaks_total = CreatePeaksWorkspace(NumberOfPeaks=0, InstrumentWorkspace=wksp)
if not use_cylindrical_integration:
for r_num in run_nums:
if output_nexus:
one_run_file = output_directory + '/' + str(r_num) + '_Niggli.nxs'
peaks_ws = Load( Filename=one_run_file )
else:
one_run_file = output_directory + '/' + str(r_num) + '_Niggli.integrate'
peaks_ws = LoadIsawPeaks( Filename=one_run_file )
if first_time:
if UseFirstLattice and not read_UB:
# Find a UB (using FFT) for the first run to use in the FindUBUsingLatticeParameters
FindUBUsingFFT( PeaksWorkspace=peaks_ws, MinD=min_d, MaxD=max_d, Tolerance=tolerance )
uc_a = peaks_ws.sample().getOrientedLattice().a()
uc_b = peaks_ws.sample().getOrientedLattice().b()
uc_c = peaks_ws.sample().getOrientedLattice().c()
uc_alpha = peaks_ws.sample().getOrientedLattice().alpha()
uc_beta = peaks_ws.sample().getOrientedLattice().beta()
uc_gamma = peaks_ws.sample().getOrientedLattice().gamma()
if output_nexus:
peaks_total = CombinePeaksWorkspaces(LHSWorkspace=peaks_total, RHSWorkspace=peaks_ws)
SaveNexus( InputWorkspace=peaks_ws, Filename=niggli_integrate_file )
else:
SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=niggli_integrate_file )
first_time = False
else:
if output_nexus:
peaks_total = CombinePeaksWorkspaces(LHSWorkspace=peaks_total, RHSWorkspace=peaks_ws)
SaveNexus( InputWorkspace=peaks_total, Filename=niggli_integrate_file )
else:
SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=True, Filename=niggli_integrate_file )
#
# Load the combined file and re-index all of the peaks together.
# Save them back to the combined Niggli file (Or selcted UB file if in use...)
#
if output_nexus:
peaks_ws = Load( Filename=niggli_integrate_file )
else:
peaks_ws = LoadIsawPeaks( Filename=niggli_integrate_file )
#
# Load the combined file and re-index all of the peaks together.
# Save them back to the combined Niggli file (or selected UB file if in use...)
#
if output_nexus:
    peaks_ws = Load( Filename=niggli_integrate_file )
else:
    peaks_ws = LoadIsawPeaks( Filename=niggli_integrate_file )
#
# Find a Niggli UB matrix that indexes the peaks in this run
# Load UB instead of Using FFT
#Index peaks using UB from UB of initial orientation run/or combined runs from first iteration of crystal orientation refinement
if read_UB:
    # Start from the user-supplied UB matrix file.
    LoadIsawUB(InputWorkspace=peaks_ws, Filename=UB_filename)
    if UseFirstLattice:
        # Find UB using lattice parameters from the specified file
        uc_a = peaks_ws.sample().getOrientedLattice().a()
        uc_b = peaks_ws.sample().getOrientedLattice().b()
        uc_c = peaks_ws.sample().getOrientedLattice().c()
        uc_alpha = peaks_ws.sample().getOrientedLattice().alpha()
        uc_beta = peaks_ws.sample().getOrientedLattice().beta()
        uc_gamma = peaks_ws.sample().getOrientedLattice().gamma()
        FindUBUsingLatticeParameters(PeaksWorkspace= peaks_ws,a=uc_a,b=uc_b,c=uc_c,alpha=uc_alpha,beta=uc_beta,
                                     gamma=uc_gamma,NumInitial=num_peaks_to_find,Tolerance=tolerance)
    #OptimizeCrystalPlacement(PeaksWorkspace=peaks_ws,ModifiedPeaksWorkspace=peaks_ws,
    #                         FitInfoTable='CrystalPlacement_info',MaxIndexingError=tolerance)
elif UseFirstLattice and not read_UB:
    # Find UB using lattice parameters using the FFT results from first run if no UB file is specified
    FindUBUsingLatticeParameters(PeaksWorkspace= peaks_ws,a=uc_a,b=uc_b,c=uc_c,alpha=uc_alpha,beta=uc_beta,
                                 gamma=uc_gamma,NumInitial=num_peaks_to_find,Tolerance=tolerance)
else:
    # No UB file and no saved lattice: determine the UB directly via FFT.
    FindUBUsingFFT( PeaksWorkspace=peaks_ws, MinD=min_d, MaxD=max_d, Tolerance=tolerance )
# Re-index every peak against the chosen UB and persist both peaks and UB.
IndexPeaks( PeaksWorkspace=peaks_ws, Tolerance=tolerance )
if output_nexus:
    SaveNexus( InputWorkspace=peaks_ws, Filename=niggli_integrate_file )
else:
    SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=niggli_integrate_file )
SaveIsawUB( InputWorkspace=peaks_ws, Filename=niggli_matrix_file )
#
# If requested, also switch to the specified conventional cell and save the
# corresponding matrix and integrate file
#
if not use_cylindrical_integration:
    if (cell_type is not None) and (centering is not None) :
        # File names carry the cell type and centering, e.g. <exp>_Monoclinic_C.
        conv_name = output_directory + "/" + exp_name + "_" + cell_type + "_" + centering
        if output_nexus:
            conventional_integrate_file = conv_name + ".nxs"
        else:
            conventional_integrate_file = conv_name + ".integrate"
        conventional_matrix_file = conv_name + ".mat"
        # Transform the indexed peaks from the Niggli cell to the requested
        # conventional cell in place (Apply=True).
        SelectCellOfType( PeaksWorkspace=peaks_ws, CellType=cell_type, Centering=centering,
                          AllowPermutations=allow_perm, Apply=True, Tolerance=tolerance )
        if output_nexus:
            SaveNexus( InputWorkspace=peaks_ws, Filename=conventional_integrate_file )
        else:
            SaveIsawPeaks( InputWorkspace=peaks_ws, AppendFile=False, Filename=conventional_integrate_file )
        SaveIsawUB( InputWorkspace=peaks_ws, Filename=conventional_matrix_file )
if use_cylindrical_integration:
    # Cylindrical integration writes per-run *.profiles files; they are simply
    # concatenated here and are never transformed to a conventional cell.
    if (cell_type is not None) or (centering is not None):
        print("WARNING: Cylindrical profiles are NOT transformed!!!")
    # Combine *.profiles files into a single <exp_name>.profiles file.
    combined_filename = output_directory + '/' + exp_name + '.profiles'
    # Context managers guarantee the combined file is flushed and closed even
    # on error (the original never closed outputFile at all).
    with open( combined_filename, 'w' ) as outputFile:
        # Read and write the first run profile file with header.
        r_num = run_nums[0]
        filename = output_directory + '/' + instrument_name + '_' + r_num + '.profiles'
        with open( filename, 'r' ) as inputFile:
            outputFile.write(inputFile.read())
        os.remove(filename)
        # Read and write the rest of the runs without the header: drop only the
        # first line beginning with '0'; copy everything before and after it.
        for r_num in run_nums[1:]:
            filename = output_directory + '/' + instrument_name + '_' + r_num + '.profiles'
            with open(filename, 'r') as inputFile:
                for line in inputFile:
                    if line[0] == '0':
                        break
                    outputFile.write(line)
                for line in inputFile:
                    outputFile.write(line)
            os.remove(filename)
    # Remove *.integrate file(s) ONLY USED FOR CYLINDRICAL INTEGRATION!
    for integrateFile in os.listdir(output_directory):
        if integrateFile.endswith('.integrate'):
            # os.listdir returns bare names; join with the directory so the
            # file is removed from output_directory, not the CWD (bug fix).
            os.remove(os.path.join(output_directory, integrateFile))
# Report total wall-clock time and the configuration used for this reduction.
end_time = time.time()
print("\n**************************************************************************************")
print("****************************** DONE PROCESSING ALL RUNS ******************************")
print("**************************************************************************************\n")
print('Total time: ' + str(end_time - start_time) + ' sec')
print('Config file: ' + ", ".join(config_files))
print('Script file: ' + reduce_one_run_script + '\n')
print()
|
dymkowsk/mantid
|
scripts/SCD_Reduction/ReduceSCD_Parallel.py
|
Python
|
gpl-3.0
| 14,307
|
[
"CRYSTAL"
] |
94ffcf307dc69af4bca0a2ca80ebec7447936bc5f7672b01d0d9e60a5bcbba75
|
""" Example showing how to pick data on a surface, going all the way back
to the index in the numpy arrays.
In this example, two views of the same data are shown. One with the data
on a sphere, the other with the data flat.
We use the 'on_mouse_pick' method of the scene to register a callback on
clicking on the sphere. The callback is called with a picker object as
an argument. We use the point_id of the point that has been picked,
and go back to the 2D index on the data matrix to find its position.
"""
################################################################################
# Build a test scalar field on a sphere: a smooth combination of sines and
# cosines of the spherical angles, sampled on a 180x180 (phi, theta) grid.
import numpy as np

pi = np.pi
cos = np.cos
sin = np.sin

# Spherical coordinate grid: phi in [0, pi], theta in [0, 2*pi].
phi, theta = np.mgrid[0:pi:180j, 0:2 * pi:180j]

# Frequencies / exponents of the test pattern.
m0, m1, m2, m3 = 4, 3, 2, 3
m4, m5, m6, m7 = 1, 2, 2, 4

# Scalar data that will be shown as colors on the surface.
s = (sin(m0 * phi) ** m1
     + cos(m2 * phi) ** m3
     + sin(m4 * theta) ** m5
     + cos(m6 * theta) ** m7)

# Cartesian coordinates of the unit sphere.
x = sin(phi) * cos(theta)
y = cos(phi)
z = sin(phi) * sin(theta)
################################################################################
# Plot the data
from mayavi import mlab
# A first plot in 3D: the data mapped onto the sphere.
fig = mlab.figure(1)
mlab.clf()
mesh = mlab.mesh(x, y, z, scalars=s)
# 3D axes glyph that will be moved to the picked point by the callback.
cursor3d = mlab.points3d(0., 0., 0., mode='axes',
                        color=(0, 0, 0),
                        scale_factor=0.5)
mlab.title('Click on the ball')
# A second plot, flat: the raw 2D data array.
fig2d = mlab.figure(2)
mlab.clf()
im = mlab.imshow(s)
# 2D cross glyph mirroring the pick location on the flat view.
cursor = mlab.points3d(0, 0, 0, mode='2dthick_cross',
                              color=(0, 0, 0),
                              scale_factor=10)
mlab.view(90, 0)
################################################################################
# Some logic to select 'mesh' and the data index when picking.
def picker_callback(picker_obj):
    """Handle a mouse pick in the 3D scene.

    If the picked actor is the sphere mesh, recover the flat point id of the
    picked vertex, convert it back to a 2D (row, col) index into the data
    array ``s``, and move both cursors (flat view and 3D view) there.
    """
    picked = picker_obj.actors
    if mesh.actor.actor._vtk_obj in [o._vtk_obj for o in picked]:
        # m.mlab_source.points is the points array underlying the vtk
        # dataset. GetPointId return the index in this array.
        # np.unravel_index is the public API; the original
        # np.lib.index_tricks.unravel_index path is a private alias.
        x_, y_ = np.unravel_index(picker_obj.point_id, s.shape)
        print("Data indices: %i, %i" % (x_, y_))
        n_x, n_y = s.shape
        # Re-center the 2D cursor: imshow places the array center at (0, 0).
        cursor.mlab_source.reset(x=x_ - n_x/2.,
                                 y=y_ - n_y/2.)
        cursor3d.mlab_source.reset(x=x[x_, y_],
                                   y=y[x_, y_],
                                   z=z[x_, y_])
# Register the callback on the 3D figure's picker and start the event loop.
fig.on_mouse_pick(picker_callback)
mlab.show()
|
dmsurti/mayavi
|
examples/mayavi/data_interaction/pick_on_surface.py
|
Python
|
bsd-3-clause
| 2,477
|
[
"Mayavi",
"VTK"
] |
2f818efa284ca289bd2f7f4e7d5eaaec6c287392eb75302112a624ee53ab1561
|
#! python
'''Hangman'''
import random, string
HANGMANPICS = ['''
+---+
|
|
|
|
|
==========''', '''
+---+
| |
|
|
|
|
==========''', '''
+---+
| |
O |
|
|
|
==========''', '''
+---+
| |
O |
| |
|
|
==========''', '''
+---+
| |
O |
/| |
|
|
==========''', '''
+---+
| |
O |
/|\ |
|
|
==========''', '''
+---+
| |
O |
/|\ |
/ |
|
==========''', '''
+---+
| |
O |
/|\ |
/ \ |
|
==========''']
words = {'Colors':'red orange yellow green blue indigo violet white black brown'.split(),
'Shapes':'square triangle rectangle circle ellipse rhombus trapezoid chevron pentagon hexagon heptagon octagon'.split(),
'Fruits':'apple orange lemon lime pear watermelon grape grapefruit cherry banana cantaloupe mango strawberry tomato'.split(),
'Animals':'bat bear beaver cat cougar crab deer dog donkey duck eagle fish frog goat leech lion lizard monkey moose otter owl panda python rabbit rat shark sheep skunk squid tiger turkey turtle weasel whale wolf wombat zebra'.split()}
def getRandomWord (wordDict):
    """Pick a random category from *wordDict*, then a random word from it.

    Returns a two-element list: [word, category key].
    """
    category = random.choice(list(wordDict.keys()))
    word = random.choice(wordDict[category])
    return [word, category]
def displayBoard (HANGMANPICS, missedLetters, correctLetters, secretWord):
    """Print the gallows picture, the missed letters, and the partly
    revealed secret word (one space after every letter)."""
    print (HANGMANPICS[len(missedLetters)])
    print ()
    print ('Missed letters:', end=' ')
    # Each missed letter is followed by one space, exactly as the original
    # letter-by-letter printing produced.
    print (''.join(letter + ' ' for letter in missedLetters))
    # Show each correctly guessed letter; keep '_' for the rest.
    revealed = ''.join(ch if ch in correctLetters else '_' for ch in secretWord)
    print (''.join(ch + ' ' for ch in revealed))
def getGuess (alreadyGuessed):
    """Prompt until the player enters a single, new, alphabetic letter.

    Re-prompts on multi-character input, repeated guesses, and non-letters;
    returns the accepted lowercase letter.
    """
    while True:
        print ('Guess a letter.')
        guess = input().lower()
        if len(guess) != 1:
            print ('Please enter a single letter.')
            continue
        if guess in alreadyGuessed:
            print ('You have already guessed that letter. Choose again.')
            continue
        if guess not in string.ascii_lowercase:
            print ('Please enter a LETTER.')
            continue
        return guess
def playAgain():
    """Ask whether to start another game; True if the answer starts with 'y'."""
    print ('Do you want to play again? (yes or no)')
    answer = input().lower()
    return answer.startswith('y')
# Main game loop: draw the board, take a guess, update win/lose state, and
# offer a replay when a round ends.
print('H A N G M A N')
missedLetters = ''
correctLetters = ''
secretWord, secretKey = getRandomWord(words)
gameIsDone = False
while True:
    print ('The secret word is in the set: ' + secretKey)
    displayBoard(HANGMANPICS, missedLetters, correctLetters, secretWord)
    # Let the player type in a letter.
    guess = getGuess(missedLetters + correctLetters)
    if guess in secretWord:
        correctLetters += guess
        # Check if the player won (every letter of the word guessed).
        foundAllLetters = True
        for i in range(len(secretWord)):
            if secretWord[i] not in correctLetters:
                foundAllLetters = False
                break
        if foundAllLetters:
            print ('Yes! The secret word is "' + secretWord + '"! You have won!')
            gameIsDone = True
    else:
        missedLetters += guess
        # Check if player has guessed too many times and lost
        if len(missedLetters) == len(HANGMANPICS) - 1:
            displayBoard(HANGMANPICS, missedLetters, correctLetters, secretWord)
            print ('You have run out of guesses, the word was "' + secretWord + '"')
            gameIsDone = True
    # Ask the player if they want to play again (but only if the game is done).
    if gameIsDone:
        if playAgain():
            missedLetters = ''
            correctLetters = ''
            gameIsDone = False
            secretWord, secretKey = getRandomWord(words)
        else:
            break
|
deshmukhmayur/python-projects
|
hangman.py
|
Python
|
gpl-3.0
| 4,501
|
[
"MOOSE"
] |
c0507c7f5ab4899422835d154b735b586c81ffb1f1e04d84e029283d341274c3
|
#!/usr/bin/env python
"""
Functions for data IO for neural network training.
"""
from __future__ import print_function
import argparse
import sys
import os
import time
from operator import add
import math
import numpy as np
from scipy.io import netcdf
import theano
import theano.tensor as T
import lasagne
theano.config.floatX='float32'
def netcdf2pep(filename):
    '''
    read peptide, MHC and target data from NetCDF file

    parameters:
        - filename : file in which data is stored
    returns:
        - peptides : list of np.ndarrays containing encoded peptide sequences
        - mhcs : list of np.ndarrays containing encoded MHC pseudo sequences
        - targets : list of np.ndarrays containing targets (log transformed IC 50 values),
          each reshaped to a 2-D (1, k) array
    '''
    # open file:
    f = netcdf.netcdf_file(filename, 'r')
    # per-example sequence lengths (used to slice the flat arrays below):
    tmp = f.variables['peplen']
    peplength = tmp[:].copy()
    tmp = f.variables['mhclen']
    mhclength = tmp[:].copy()
    p = 0  # running offset into the flat peptide array
    m = 0  # running offset into the flat MHC array
    peptides=[]
    mhcs=[]
    targets=[]
    for i in range(0, peplength.shape[0]):
        # get peptide seq as np.ndarray [AAs x encoding length]
        tmp = f.variables['peptide'].data[p:p + peplength[i]]
        peptides.append(tmp.astype(theano.config.floatX))
        p += peplength[i]
        # get MHC pseudo seq as np.ndarray [AAs x encoding length]
        tmp = f.variables['mhc'].data[m:m + mhclength[i]]
        mhcs.append(tmp.astype(theano.config.floatX))
        m += mhclength[i]
        # get target (one transformed IC 50 value per peptide)
        tmp = f.variables['target'].data[i]
        if len(tmp.shape) == 0:
            tmp = tmp.reshape(1, 1)
        if len(tmp.shape) == 1:
            # BUG FIX: reshape returns a new array; the original discarded
            # the result, leaving 1-D targets unreshaped.
            tmp = tmp.reshape(1, tmp.shape[0])
        targets.append(tmp.astype(theano.config.floatX))
    # close file:
    f.close()
    # return data:
    return peptides, mhcs, targets
# modified from nntools:--------------------------------------------------------
def pad_seqs(X, length):
    '''
    Zero-pad a list of per-sequence feature matrices into one 3-D array.

    parameters:
        - X : list of np.ndarray, one (seq_len, n_features) matrix per sequence
        - length : int
            Desired sequence length; shorter sequences are zero-padded.
    returns:
        - np.ndarray of shape (n_seqs, length, n_features)
    '''
    n_features = X[0].shape[1]
    X_pad = np.zeros((len(X), length, n_features),
                     dtype=theano.config.floatX)
    for idx, seq in enumerate(X):
        X_pad[idx, :seq.shape[0], :n_features] = seq
    return X_pad
def pad_seqs_mask(X, length):
    '''
    Zero-pad sequences and build a boolean mask of the valid positions.

    parameters:
        - X : list of np.ndarray
            List of matrices, one (seq_len, n_features) matrix per sequence
        - length : int
            Desired sequence length. Smaller sequences will be padded with 0s.
    returns:
        - X_pad : np.ndarray, shape (n_seqs, length, n_features)
        - X_mask : np.ndarray of bool, shape (n_seqs, length);
          True where real (unpadded) data is present
    '''
    n_seqs = len(X)
    n_features = X[0].shape[1]
    X_pad = np.zeros((n_seqs, length, n_features),
                     dtype=theano.config.floatX)
    # FIX: np.bool is a deprecated alias removed in NumPy >= 1.24; the
    # builtin bool produces the identical dtype('bool') array.
    X_mask = np.zeros((n_seqs, length), dtype=bool)
    for i in range(0,len(X)):
        X_pad[i, :X[i].shape[0], :n_features] = X[i]
        X_mask[i, :X[i].shape[0]] = 1
    return X_pad,X_mask
def pad_pep_mhc_mask(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len):
    '''
    Concatenate MHC and peptide encodings per example and build a mask.

    Row layout: [MHC pseudo sequence | all-ones spacer column | peptide],
    zero-padded to mhc_seq_len + max_pep_seq_len + 1 positions.

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
    returns:
        - X_pad : np.ndarray (n_seqs, mhc_seq_len + max_pep_seq_len + 1, n_features)
        - X_mask : np.ndarray of bool, same leading dims; True on real data
          and on the spacer
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    X_pad = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1, n_features),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1), dtype=bool)
    for i in range(0,n_seqs):
        # MHC
        X_pad[i, :X_mhc[i].shape[0], :n_features] = X_mhc[i]
        X_mask[i, :X_mhc[i].shape[0]] = 1
        # spacer: one all-ones position separating MHC from peptide
        X_pad[i, X_mhc[i].shape[0]: (X_mhc[i].shape[0] + 1), :n_features] = 1
        X_mask[i, X_mhc[i].shape[0]:(X_mhc[i].shape[0] + 1)] = 1
        # peptide
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), :n_features] = X_pep[i]
        X_mask[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0])] = 1
    return X_pad,X_mask
def pad_pep_mhc_mask_final(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len):
    '''
    Concatenate MHC and peptide with SEPARATE feature channels, plus a mask.

    The feature axis is doubled: channels [:n_features] carry the MHC
    encoding, channels [n_features:2*n_features] carry the peptide encoding,
    so the model can tell the two apart.  An all-ones spacer position sits
    between them.

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
    returns:
        - X_pad : np.ndarray (n_seqs, mhc_seq_len + max_pep_seq_len + 1, 2*n_features)
        - X_mask : np.ndarray of bool; True on real data and on the spacer
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    X_pad = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1, (2*n_features)),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1), dtype=bool)
    for i in range(0,n_seqs):
        # MHC + mask:
        X_pad[i, :X_mhc[i].shape[0], :n_features] = X_mhc[i]
        X_mask[i, :X_mhc[i].shape[0]] = 1
        # blank in place for peptide encoding:
        X_pad[i, :X_mhc[i].shape[0], n_features:(2*n_features)] = 0
        #spacer between MHC and peptide:
        X_pad[i, X_mhc[i].shape[0]: (X_mhc[i].shape[0] + 1), :(2*n_features)] = 1
        X_mask[i, X_mhc[i].shape[0]:(X_mhc[i].shape[0] + 1)] = 1
        # peptide + mask:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), n_features:(2*n_features)] = X_pep[i]
        X_mask[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0])] = 1
        # blank in place for MHC encoding:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), :n_features] = 0
    return X_pad,X_mask
def pad_mhc_mask_final(X_mhc, mhc_seq_len):
    '''
    MHC-only variant of pad_pep_mhc_mask_final: encode the MHC pseudo
    sequence in the first feature-channel half plus the trailing spacer,
    leaving the peptide channels empty.

    parameters:
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
    returns:
        - X_pad : np.ndarray (n_seqs, mhc_seq_len + 1, 2*n_features)
        - X_mask : np.ndarray of bool; True on real data and on the spacer
    '''
    #assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_mhc)
    n_features = X_mhc[0].shape[1]
    X_pad = np.zeros((n_seqs, mhc_seq_len + 1, (2*n_features)),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mhc_seq_len + 1), dtype=bool)
    for i in range(0,n_seqs):
        # MHC + mask:
        X_pad[i, :X_mhc[i].shape[0], :n_features] = X_mhc[i]
        X_mask[i, :X_mhc[i].shape[0]] = 1
        # blank in place for peptide encoding:
        X_pad[i, :X_mhc[i].shape[0], n_features:(2*n_features)] = 0
        #spacer between MHC and peptide:
        X_pad[i, X_mhc[i].shape[0]: (X_mhc[i].shape[0] + 1), :(2*n_features)] = 1
        X_mask[i, X_mhc[i].shape[0]:(X_mhc[i].shape[0] + 1)] = 1
    return X_pad,X_mask
def pad_pep_mhc_mask_multi(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len, n_aa):
    '''
    Like pad_pep_mhc_mask_final, but packs n_aa amino acids into each time
    step (flattened into the feature axis).

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
        - n_aa : int, number of amino acids presented per time step
    returns:
        - X_pad : np.ndarray (n_seqs, time_steps, 2*n_aa*n_features)
        - X_mask : np.ndarray of bool (n_seqs, time_steps)

    NOTE(review): ts_pep and ts_mhc look swapped -- ts_mhc is derived from
    max_pep_seq_len yet used as the number of MHC time steps (and vice
    versa); also the slice bound min(c+n_aa, time_steps) presumably should
    clamp to the sequence length, not time_steps.  Behavior is preserved
    here; confirm against callers before changing.
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    time_steps = int(math.ceil(mhc_seq_len / float(n_aa)) + math.ceil(max_pep_seq_len / float(n_aa)) + 1)
    ts_pep = int(math.ceil(mhc_seq_len / float(n_aa)))
    ts_mhc = int(math.ceil(max_pep_seq_len / float(n_aa)))
    X_pad = np.zeros((n_seqs, time_steps, (2*n_aa*n_features)),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, time_steps), dtype=bool)
    for i in range(0,n_seqs):
        # MHC pseudo sequence:
        c=0
        for j in range(0, ts_mhc ):
            if c < X_mhc[i].shape[0]:
                tmp = X_mhc[i][c:min(c+n_aa,time_steps)].flatten()
                X_pad[i, j, :len(tmp)] = tmp
                X_mask[i, j] = 1
                c+=n_aa
        #spacer between MHC and peptide:
        X_pad[i, ts_mhc, :(2*n_aa*n_features)] = 1
        X_mask[i, ts_mhc] = 1
        # peptide sequence:
        c=0
        for j in range((ts_mhc +1) , time_steps):
            if c < X_pep[i].shape[0]:
                tmp = X_pep[i][c:min(c+n_aa,time_steps)].flatten()
                X_pad[i, j, (n_aa*n_features) : (n_aa*n_features+len(tmp))] = tmp
                X_mask[i, j] = 1
                c+=n_aa
    return X_pad,X_mask
def pad_pep_mhc_mask_sepw(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len, motif_len):
    '''
    Like pad_pep_mhc_mask_final, with two extra feature channels encoding
    the peptide length: o = sigmoid(pep_len - motif_len) and 1-o, written
    on every peptide position.

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
        - motif_len : int, reference length for the sigmoid length encoding
    returns:
        - X_pad : np.ndarray (n_seqs, mhc_seq_len + max_pep_seq_len + 1, 2*n_features + 2)
        - X_mask : np.ndarray of bool; True on real data and on the spacer
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    X_pad = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1, (2*n_features+2)),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1), dtype=bool)
    for i in range(0,n_seqs):
        # MHC + mask:
        X_pad[i, :X_mhc[i].shape[0], :n_features] = X_mhc[i]
        X_mask[i, :X_mhc[i].shape[0]] = 1
        # blank in place for peptide encoding:
        X_pad[i, :X_mhc[i].shape[0], n_features:(2*n_features)] = 0
        #spacer between MHC and peptide:
        X_pad[i, X_mhc[i].shape[0]: (X_mhc[i].shape[0] + 1), :(2*n_features)] = 1
        X_mask[i, X_mhc[i].shape[0]:(X_mhc[i].shape[0] + 1)] = 1
        # peptide + mask:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), n_features:(2*n_features)] = X_pep[i]
        X_mask[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0])] = 1
        # blank in place for MHC encoding:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), :n_features] = 0
        # peptide length encoding: logistic of (pep_len - motif_len)
        o=(X_pep[i].shape[0]-motif_len)*1
        o=1/(1+math.exp(-o))
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), (2*n_features)] = o
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), (2*n_features+1)] = 1-o
    return X_pad,X_mask
def pad_pep_mhc_mask_sepw_pos(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len, motif_len):
    '''
    Like pad_pep_mhc_mask_sepw, with two more channels encoding the position
    within each sequence: distance from the start and from the end, scaled.

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
        - motif_len : int, reference length for the sigmoid length encoding
    returns:
        - X_pad : np.ndarray (n_seqs, mhc_seq_len + max_pep_seq_len + 1, 2*n_features + 4)
        - X_mask : np.ndarray of bool; True on real data and on the spacer

    NOTE(review): peptide positions are divided by the literal 15 (presumably
    an assumed maximum peptide length) while MHC positions are divided by the
    actual MHC length -- confirm the 15 against max_pep_seq_len.
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    X_pad = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1, (2*n_features+4)),
                     dtype=theano.config.floatX)
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mhc_seq_len + max_pep_seq_len +1), dtype=bool)
    for i in range(0,n_seqs):
        # MHC + mask:
        X_pad[i, :X_mhc[i].shape[0], :n_features] = X_mhc[i]
        X_mask[i, :X_mhc[i].shape[0]] = 1
        # blank in place for peptide encoding:
        X_pad[i, :X_mhc[i].shape[0], n_features:(2*n_features)] = 0
        #spacer between MHC and peptide:
        X_pad[i, X_mhc[i].shape[0]: (X_mhc[i].shape[0] + 1), :(2*n_features)] = 1
        X_mask[i, X_mhc[i].shape[0]:(X_mhc[i].shape[0] + 1)] = 1
        # peptide + mask:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), n_features:(2*n_features)] = X_pep[i]
        X_mask[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0])] = 1
        # blank in place for MHC encoding:
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), :n_features] = 0
        # peptide length encoding: logistic of (pep_len - motif_len)
        o=(X_pep[i].shape[0]-motif_len)*1
        o=1/(1+math.exp(-o))
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), (2*n_features)] = o
        X_pad[i, (X_mhc[i].shape[0] + 1):(X_mhc[i].shape[0] + 1 + X_pep[i].shape[0]), (2*n_features+1)] = 1-o
        # positional encoding for peptide positions (scaled by 15):
        for j in range(0, X_pep[i].shape[0]):
            X_pad[i, X_mhc[i].shape[0] + 1 + j, (2*n_features+2)] = float(j) / 15
            X_pad[i, X_mhc[i].shape[0] + 1 + j, (2*n_features+3)] = float(X_pep[i].shape[0]-j-1) / 15
        # positional encoding for MHC positions (scaled by the MHC length):
        for j in range(0, X_mhc[i].shape[0]):
            X_pad[i, j, (2*n_features+2)] = float(j) / X_mhc[i].shape[0]
            X_pad[i, j, (2*n_features+3)] = float(X_mhc[i].shape[0]-j-1) / X_mhc[i].shape[0]
    return X_pad,X_mask
def pad_seqs_cnn_mask(X_pep, X_mhc, max_pep_seq_len, mhc_seq_len):
    '''
    Pad peptide and MHC sequences separately (feature-major, for CNN input)
    and build the mask that applies to the concatenated CNN outputs of
    filter sizes 3, 8, 9 and 10 over each sequence, with 7 single-position
    separators between the 8 segments.

    parameters:
        - X_pep : list of np.ndarray, encoded peptide sequences
        - max_pep_seq_len : int, padded peptide length
        - X_mhc : list of np.ndarray, encoded MHC pseudo sequences
        - mhc_seq_len : int, padded MHC length
    returns:
        - X_pad_pep : np.ndarray (n_seqs, n_features, max_pep_seq_len)
        - X_pad_mhc : np.ndarray (n_seqs, n_features, mhc_seq_len)
        - X_mask : np.ndarray of bool (n_seqs, mask_len)

    NOTE(review): the second and later mask segments use absolute end
    indices (e.g. X_mhc[i].shape[0] - 8 + 1) instead of start-relative ones
    (start + ...); for sequences shorter than the padded length those slices
    come out empty or misplaced -- presumably a bug, verify before relying
    on the mask.  Behavior is preserved here.
    '''
    assert(len(X_pep) == len(X_mhc))
    n_seqs = len(X_pep)
    n_features = X_pep[0].shape[1]
    X_pad_mhc = np.zeros((n_seqs, n_features, mhc_seq_len),
                         dtype=theano.config.floatX)
    X_pad_pep = np.zeros((n_seqs, n_features, max_pep_seq_len),
                         dtype=theano.config.floatX)
    # total mask length: 4 conv output lengths per sequence + 7 separators
    mask_len = mhc_seq_len -3 +1 + \
               mhc_seq_len -8 +1 + \
               mhc_seq_len -9 +1 + \
               mhc_seq_len -10 +1 + \
               max_pep_seq_len -3 +1 + \
               max_pep_seq_len -8 +1 + \
               max_pep_seq_len -9 +1 + \
               max_pep_seq_len -10 +1 + \
               7*1
    # FIX: bool replaces the removed np.bool alias (identical dtype).
    X_mask = np.zeros((n_seqs, mask_len), dtype=bool)
    for i in range(0,n_seqs):
        # MHC + peptide:
        X_pad_mhc[i, :n_features, :X_mhc[i].shape[0]] = np.swapaxes(X_mhc[i],0,1)
        X_pad_pep[i, :n_features, :X_pep[i].shape[0]] = np.swapaxes(X_pep[i],0,1)
        # mask:
        start=0
        X_mask[i, start : X_mhc[i].shape[0] -3 +1 ] = 1
        start += (mhc_seq_len -3 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_mhc[i].shape[0] -8 +1 ] = 1
        start += (mhc_seq_len -8 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_mhc[i].shape[0] -9 +1 ] = 1
        start += (mhc_seq_len -9 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_mhc[i].shape[0] -10 +1 ] = 1
        start += (mhc_seq_len -10 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_pep[i].shape[0] -3 +1 ] = 1
        start += (max_pep_seq_len -3 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_pep[i].shape[0] -8 +1 ] = 1
        start += (max_pep_seq_len -8 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_pep[i].shape[0] -9 +1 ] = 1
        start += (max_pep_seq_len -9 +1)
        X_mask[i, start : start + 1 ] = 1
        start += 1
        X_mask[i, start : X_pep[i].shape[0] -10 +1 ] = 1
    return X_pad_pep,X_pad_mhc,X_mask
def pad_seqs_T(X, length):
    '''
    Zero-pad sequences into a transposed (feature-major) 3-D array.

    parameters:
        - X : list of np.ndarray, one (seq_len, n_features) matrix per sequence
        - length : int
            Desired sequence length; shorter sequences are zero-padded.
    returns:
        - np.ndarray of shape (n_seqs, n_features, length); each sequence
          matrix is transposed so features come before time steps
    '''
    n_features = X[0].shape[1]
    X_pad = np.zeros((len(X), n_features, length),
                     dtype=theano.config.floatX)
    for idx, seq in enumerate(X):
        # .T on a 2-D array is the same swap of axes 0 and 1
        X_pad[idx, :n_features, :seq.shape[0]] = seq.T
    return X_pad
def pad_seqs_T_pl(X, length, motif_len):
    '''
    Zero-pad sequences into a feature-major array plus a length encoding.

    parameters:
        - X : list of np.ndarray, one (seq_len, n_features) matrix per sequence
        - length : int
            Desired sequence length; shorter sequences are zero-padded.
        - motif_len : int, reference length for the sigmoid length encoding
    returns:
        - X_pad : np.ndarray (n_seqs, n_features, length), transposed sequences
        - X_pl : np.ndarray (n_seqs, 2); per sequence [o, 1-o] where
          o = sigmoid(seq_len - motif_len)
    '''
    n_seqs = len(X)
    n_features = X[0].shape[1]
    X_pad = np.zeros((n_seqs, n_features, length),
                     dtype=theano.config.floatX)
    X_pl = np.zeros((n_seqs, 2),
                    dtype=theano.config.floatX)
    for i in range(0,len(X)):
        slen=X[i].shape[0]
        X_pad[i, :n_features, :slen] = np.swapaxes(X[i],0,1)
        # peptide length encoding: logistic of (seq_len - motif_len)
        o=(slen-motif_len)*1
        o=1/(1+math.exp(-o))
        X_pl[i, 0] = o
        X_pl[i, 1] = 1-o
    return X_pad,X_pl
def pad_seqs_T_pl_pos(X, length, motif_len):
    '''
    Zero-pad sequences into a feature-major array with four extra feature
    rows: two for the sigmoid length encoding and two for the position
    within the sequence (distance from start and from end).

    parameters:
        - X : list of np.ndarray, one (seq_len, n_features) matrix per sequence
        - length : int
            Desired sequence length; shorter sequences are zero-padded.
        - motif_len : int, reference length for the sigmoid length encoding
    returns:
        - X_pad : np.ndarray (n_seqs, n_features + 4, length)

    NOTE(review): positions are divided by the literal 15 -- presumably an
    assumed maximum peptide length; confirm against callers.
    '''
    n_seqs = len(X)
    n_features = X[0].shape[1]
    X_pad = np.zeros((n_seqs, n_features+4, length),
                     dtype=theano.config.floatX)
    for i in range(0,len(X)):
        # get peptide sequence length:
        slen=X[i].shape[0]
        # copy encoded sequence (transposed to feature-major):
        X_pad[i, :n_features, :slen] = np.swapaxes(X[i],0,1)
        # peptide length encoding: logistic of (seq_len - motif_len)
        o=(slen-motif_len)*1
        o=1/(1+math.exp(-o))
        X_pad[i, n_features, :slen] = o
        X_pad[i, n_features+1, :slen] = 1-o
        # peptide position encoding:
        for j in range(0, slen):
            X_pad[i, n_features + 2, j] = float(j) / 15
            X_pad[i, n_features + 3, j] = float(slen-j-1) / 15
    return X_pad
def get_pep_aa_mhc(filename, MAX_PEP_SEQ_LEN):
    '''
    read AA seq of peptides and MHC molecule from text file

    Each line must have at least three whitespace-separated columns:
    peptide sequence, target value, MHC molecule name.  Peptides longer
    than MAX_PEP_SEQ_LEN are skipped; blank lines are ignored.

    parameters:
        - filename : file in which data is stored
        - MAX_PEP_SEQ_LEN : int, maximum allowed peptide length
    returns:
        - pep_aa : list of amino acid sequences of peptides (as string)
        - mhc_molecule : list of name of MHC molecules (string)
    '''
    pep_aa = []
    mhc_molecule = []
    # context manager guarantees the file is closed even on error
    with open(filename, "r") as infile:
        for l in infile:
            l = list(filter(None, l.strip().split()))
            if not l:
                continue  # skip blank lines (the original crashed on them)
            if len(l[0]) <= MAX_PEP_SEQ_LEN:
                pep_aa.append(l[0])
                mhc_molecule.append(l[2])
    return pep_aa, mhc_molecule
def read_mhc_list(filename, mhc_allowed):
    '''
    read AA seq of MHC molecules from text file

    Each line has two whitespace-separated columns: MHC name, pseudo
    sequence.  Only molecules listed in mhc_allowed are kept.

    parameters:
        - filename : file in which data is stored
        - mhc_allowed : list of allowed MHC molecules
    returns:
        - X_mhc : list of amino acid sequences of MHCs (as string)
        - mhc_molecule : list of name of MHC molecules (string)
    '''
    X_mhc = []
    mhc_molecule = []
    # context manager guarantees the file is closed even on error
    with open(filename, "r") as infile:
        for l in infile:
            l = list(filter(None, l.strip().split()))
            if not l:
                continue  # skip blank lines (the original crashed on them)
            if l[0] in mhc_allowed:
                X_mhc.append(l[1])
                mhc_molecule.append(l[0])
    return X_mhc, mhc_molecule
def encode_seq(Xin, max_pep_seq_len, blosum):
    '''
    encode AA sequences using a BLOSUM-style encoding table

    parameters:
        - Xin : list of peptide sequences in AA (strings)
        - max_pep_seq_len : int, padded output sequence length
        - blosum : dict mapping AA letter -> list of encoding values
    returns:
        - Xout : encoded peptide sequences (n_seqs, max_pep_seq_len, n_features)
    '''
    # number of encoding values per amino acid:
    n_features = len(blosum['A'])
    Xout = np.zeros((len(Xin), max_pep_seq_len, n_features),
                    dtype=theano.config.floatX)
    for seq_idx, seq in enumerate(Xin):
        for pos, aa in enumerate(seq):
            Xout[seq_idx, pos, :n_features] = blosum[aa]
    return Xout
# feed forward:-----------------------------------------------------------------
def get_pep_mhc_target(filename, MAX_PEP_SEQ_LEN):
    '''
    read AA seq of peptides, MHC molecule and binding affinity from text file

    Each line must have at least three whitespace-separated columns:
    peptide sequence, target value, MHC molecule name.  Peptides longer
    than MAX_PEP_SEQ_LEN are skipped; blank lines are ignored.

    parameters:
        - filename : file in which data is stored
        - MAX_PEP_SEQ_LEN : int, maximum allowed peptide length
    returns:
        - pep_aa : list of amino acid sequences of peptides (list of strings)
        - mhc_molecule : list of name of MHC molecules (list of strings)
        - targets : binding affinity (list of strings)
    '''
    pep_aa = []
    mhc_molecule = []
    targets = []
    # context manager guarantees the file is closed even on error
    with open(filename, "r") as infile:
        for l in infile:
            l = list(filter(None, l.strip().split()))
            if not l:
                continue  # skip blank lines (the original crashed on them)
            if len(l[0]) <= MAX_PEP_SEQ_LEN:
                pep_aa.append(l[0])
                mhc_molecule.append(l[2])
                targets.append(l[1])
    return pep_aa, mhc_molecule, targets
def read_blosum(filename):
    '''
    read in BLOSUM matrix

    The matrix file has a '#'-prefixed column-header line listing the amino
    acid codes (including the ambiguous codes B/J/Z and the stop '*'),
    followed by one row per amino acid.  The B/J/Z/* columns and rows are
    dropped and all scores are scaled by 0.1 (divided by 10).

    parameters:
        - filename : file containing BLOSUM matrix
    returns:
        - blosum : dictionnary AA -> blosum encoding (as list of floats)
    '''
    blosum = {}
    B_idx = 99
    J_idx = 99
    Z_idx = 99
    star_idx = 99
    # BUG FIX: without this flag, the 'A' data row re-entered the header
    # branch and crashed on l.index('*') (str has no '*'); read_blosum_MN
    # below already guards the same way.
    header_seen = False
    with open(filename, "r") as blosumfile:
        for l in blosumfile:
            l = l.strip()
            if not l:
                continue
            if l[0] == '#':
                # comment line: strip the marker and tokenize
                l = l.strip("#")
                l = list(filter(None, l.split(" ")))
                if not l or l[0] != "A":
                    continue  # ordinary comment, not the column header
            if (l[0] == "A") and (not header_seen):
                # column header: record positions of B/J/Z/* to drop them
                header_seen = True
                try:
                    B_idx = l.index('B')
                except ValueError:
                    B_idx = 99
                try:
                    J_idx = l.index('J')
                except ValueError:
                    J_idx = 99
                try:
                    Z_idx = l.index('Z')
                except ValueError:
                    Z_idx = 99
                star_idx = l.index('*')
            else:
                l = list(filter(None, l.split(" ")))
                aa = str(l[0])
                if (aa != 'B') & (aa != 'J') & (aa != 'Z') & (aa != '*'):
                    tmp = l[1:len(l)]
                    # drop the B/J/Z/* columns and scale by 0.1 (divide by 10)
                    tmp2 = []
                    for i in range(0, len(tmp)):
                        if (i != B_idx) & (i != J_idx) & (i != Z_idx) & (i != star_idx):
                            tmp2.append(0.1*float(tmp[i]))
                    blosum[aa] = tmp2
    return blosum
def read_blosum_np(filename):
    '''
    read in BLOSUM matrix as a numpy array

    parameters:
        - filename : file containing BLOSUM matrix

    returns:
        - blosum : (21, 21) numpy array holding the BLOSUM scores
          (rows in file order, columns B/J/Z/* removed; assumes exactly
          21 remaining symbols, i.e. the 20 standard AAs plus X)
    '''
    blosum = np.zeros((21, 21))
    B_idx = []
    J_idx = []
    Z_idx = []
    star_idx = []
    count = 0
    with open(filename, "r") as blosumfile:
        for l in blosumfile:
            l = l.strip()
            if l[0] == '#':
                # header line: locate the columns of B/J/Z and '*'
                l = l.strip("#")
                l = l.split(" ")
                l = list(filter(None, l))
                if l[0] == "A":
                    # 99 is the sentinel for "column not present";
                    # list.index raises ValueError when the symbol is absent
                    try:
                        B_idx = l.index('B')
                    except ValueError:
                        B_idx = 99
                    try:
                        J_idx = l.index('J')
                    except ValueError:
                        J_idx = 99
                    try:
                        Z_idx = l.index('Z')
                    except ValueError:
                        Z_idx = 99
                    star_idx = l.index('*')
            else:
                l = l.split(" ")
                l = list(filter(None, l))
                aa = str(l[0])
                if (aa != 'B') & (aa != 'J') & (aa != 'Z') & (aa != '*'):
                    tmp = l[1:len(l)]
                    # drop the B/J/Z/* columns (values are kept unscaled here)
                    tmp2 = []
                    for i in range(0, len(tmp)):
                        if (i != B_idx) & (i != J_idx) & (i != Z_idx) & (i != star_idx):
                            tmp2.append(float(tmp[i]))
                    # save row in BLOSUM matrix
                    blosum[count] = np.array(tmp2)
                    count += 1
    return blosum
def read_blosum_MN(filename):
    '''
    read in BLOSUM matrix and rescale scores by 0.2 (divide by 5)

    parameters:
        - filename : file containing BLOSUM matrix

    returns:
        - blosum : dictionary AA -> BLOSUM encoding (list of floats;
          columns B, Z and * are removed, values are multiplied by 0.2)
    '''
    blosum = {}
    B_idx = 99
    Z_idx = 99
    star_idx = 99
    with open(filename, "r") as blosumfile:
        for l in blosumfile:
            l = l.strip()
            if l[0] != '#':
                l = list(filter(None, l.strip().split(" ")))
                if (l[0] == 'A') and (B_idx == 99):
                    # header row: remember the columns of B, Z and '*'
                    B_idx = l.index('B')
                    Z_idx = l.index('Z')
                    star_idx = l.index('*')
                else:
                    aa = str(l[0])
                    if (aa != 'B') & (aa != 'Z') & (aa != '*'):
                        tmp = l[1:len(l)]
                        # drop the B/Z/* columns
                        tmp2 = []
                        for i in range(0, len(tmp)):
                            if (i != B_idx) & (i != Z_idx) & (i != star_idx):
                                tmp2.append(float(tmp[i]))
                        # BUGFIX: the scaled list used to be built and then
                        # discarded; the intended "divide by 5" scaling is
                        # now actually applied before storing the row.
                        tmp2 = [i * 0.2 for i in tmp2]
                        blosum[aa] = tmp2
    return blosum
def read_real_blosum(filename):
    '''
    read in a real-valued BLOSUM matrix

    parameters:
        - filename : file containing the real-valued BLOSUM matrix
          (a header row of AA letters starting with 'A', followed by
          one row of floats per AA; '#' lines are ignored)

    returns:
        - real_blosum : dictionary AA -> encoding (list of floats)
    '''
    matrix = {}
    aa_labels = []
    row_idx = 0
    infile = open(filename, "r")
    for raw in infile:
        raw = raw.strip()
        if raw[0] == '#':
            # comment line - skip
            continue
        tokens = list(filter(None, raw.split(" ")))
        if tokens[0] == 'A':
            # header row: the amino-acid labels, in column order
            aa_labels = tokens
        else:
            # data row: floats, assigned to the next AA label in order
            matrix[aa_labels[row_idx]] = [float(t) for t in tokens]
            row_idx += 1
    infile.close()
    return matrix
def read_MHC_pseudo_seq(filename):
    '''
    read in MHC pseudo sequences from a tab separated file
    (columns: MHC name, pseudo sequence)

    parameters:
        - filename : file containing MHC pseudo sequences

    returns:
        - mhc : dictionary MHC name -> AA sequence (string)
        - mhc_seq_len : number of AAs in the first pseudo sequence
          (None when the file is empty)
    '''
    mhc = {}
    mhc_seq_len = None
    # context manager guarantees the file is closed even on error
    with open(filename, "r") as mhcfile:
        for line in mhcfile:
            fields = list(filter(None, line.strip().split("\t")))
            mhc[fields[0]] = fields[1]
            if mhc_seq_len is None:
                # record the length of the first sequence only
                mhc_seq_len = len(fields[1])
    return mhc, mhc_seq_len
# def encode_PFR(pep,enc_mat,pfr_len,motif_len):
# '''
# encode PFR (peptide flanking regions)
#
# parameters:
# - peptide : list of strings (AA sequences of peptides)
# - enc_mat: encoding matrix (real BLOSUM)
# - length: length of PFR
# - motif_len: length of binding core
#
# returns:
# - pfr : list of lists of lists: pfr[peptide][offset][left/right]
# '''
#
# pfr=[]
# tmp_pfr=[]
# enc_len=len(enc_mat[pep[0][0]])
#
# for p in pep:
# for i in range(0,len(p)-motif_len+1):
# # initialize to 0:
# l_pfr=[0] * enc_len
# r_pfr=[0] * enc_len
#
# # calculate left PFR:
# for j in range(-pfr_len,0):
# if ((i+j) >=0) & ((i+j) < len(p)):
# l_pfr = map(add, l_pfr, enc_mat[p[i+j]])
#
# # calculate right PFR:
# for j in range(1,pfr_len+1):
# if ((i+motif_len+j) >=0) & ((i+motif_len+j) < len(p)):
# r_pfr = map(add, r_pfr, enc_mat[p[i+motif_len+j]])
#
# # save:
# tmp_pfr.append([l_pfr,r_pfr])
# #save:
# pfr.append(tmp_pfr)
# tmp_pfr=[]
# return pfr
def encode_PFR(pfr_seq, enc_mat):
    '''
    encode a PFR (peptide flanking region) as the average of its
    residue encodings

    parameters:
        - pfr_seq : string (AA sequence of the PFR; may be empty)
        - enc_mat : encoding matrix, dict AA -> list of floats

    returns:
        - pfr : encoded PFR as a list (all-zero vector when pfr_seq is empty)
    '''
    width = len(enc_mat['A'])
    acc = [0] * width
    # element-wise sum of the residue encodings
    for aa in pfr_seq:
        acc = [a + b for a, b in zip(acc, enc_mat[aa])]
    if len(pfr_seq) > 0:
        # normalise by the PFR length
        scale = 1 / float(len(pfr_seq))
        acc = [v * scale for v in acc]
    return acc
def encode_mhc(mhc, mhc_mat, enc_mat):
    '''
    encode an MHC molecule by concatenating the encodings of the
    residues of its pseudo sequence

    parameters:
        - mhc : string (name of the MHC molecule)
        - mhc_mat : dict MHC name -> pseudo sequence (string)
        - enc_mat : encoding matrix, dict AA -> list of floats

    returns:
        - x : list containing the concatenated encoded sequence
    '''
    encoded = []
    for residue in mhc_mat[mhc]:
        encoded.extend(enc_mat[residue])
    return encoded
def encode_pep(pep,enc_mat,enc_mat_pfr,enc_pfr,pfr_len,offset,motif_len,pep_len,max_gap_len,max_ins_len):
    '''
    encode a binding-core window of a peptide (ungapped case) as a flat
    feature vector: [left PFR] + per-residue encodings + [right PFR] +
    PFR-length features + peptide-length feature + gap/insertion flags

    parameters:
        - pep: string (AA sequence of the core window; as called from
          encode_input this is the motif slice, not the whole peptide)
        - enc_mat: encoding matrix (BLOSUM)
        - enc_mat_pfr: encoding matrix for PFR (real BLOSUM)
        - enc_pfr: True/False encode PFR?
        - pfr_len: max length of encoded PFR
        - offset: start position of motif within whole peptide
        - motif_len: length of binding core
        - pep_len: length of whole peptide
        - max_ins_len: maximal insert length
        - max_gap_len: maximal gap length
    returns:
        - x : list containing encoded sequence
    '''
    x=[]
    # left PFR:
    if (enc_pfr==True):
        pfr_seq = pep[max(0,offset-pfr_len):offset]
        x += encode_PFR(pfr_seq,enc_mat_pfr)
        #x += pfr[offset][0]
    # peptide: concatenate the per-residue encodings
    for i in pep:
        x += enc_mat[i]
    # right PFR:
    if (enc_pfr==True):
        if (offset + motif_len) < pep_len:
            # NOTE(review): `pep` is the motif window, so this slice starts
            # at/after its end and the upper bound uses max(...) -- the
            # result is usually '' ; confirm whether the full peptide and
            # min(...) were intended here.
            pfr_seq = pep[(offset + motif_len) : max(pep_len,offset + motif_len + pfr_len)]
        else:
            pfr_seq=''
        x += encode_PFR(pfr_seq,enc_mat_pfr)
        #x += pfr[offset][1]
    # PFR length encoding: two (o, 1-o) pairs for left/right flank lengths
    if (enc_pfr==True):
        ll=offset
        if ll > pfr_len:
            ll=pfr_len
        elif ll < 0:
            ll=0
        lr = pep_len - offset - motif_len
        if lr > pfr_len:
            lr=pfr_len
        elif lr <0:
            lr=0
        o = (pfr_len -ll)*1.0/pfr_len
        x += [o,(1-o)]
        o = (pfr_len -lr)*1.0/pfr_len
        x += [o,(1-o)]
    else:
        # without PFR encoding, use saturating l/(l+1) flank-length features
        ll=max(0,offset)
        lr=max(0,(pep_len - offset -motif_len))
        o = (ll*1.0)/(ll+1)
        x += [o,(1-o)]
        o = (lr*1.0)/(lr+1)
        x += [o,(1-o)]
    # peptide length encoding (make this optional!): logistic of overhang
    o = (pep_len - motif_len)*1.0
    o = 1/(1+math.exp(-o))
    x += [o,(1-o)]
    # gap length encoding: constant (0,1) -- no gap in this variant
    #if(max_gap_len >0):
    x += [0,1]
    # insertion length encoding: constant (0,1) -- no insertion here
    #if(max_ins_len >0):
    x += [0,1]
    # gap position encoding (make optional!!): constant (1,0) -- no gap
    #if(max_gap_len >0):
    x += [1,0]
    # return encoded peptide:
    return(x)
def encode_pep_new(pep,enc_mat,enc_mat_pfr,enc_pfr,pfr_len,offset,motif_len,pep_len,max_gap_len,max_ins_len):
    '''
    encode a binding-core window as per-residue encodings plus a
    peptide-length feature and a core-position feature (no PFR, no
    gap/insertion features); several parameters are kept only for
    signature compatibility with the other encoders

    parameters:
        - pep: string (AA sequence of the core window)
        - enc_mat: encoding matrix (BLOSUM)
        - enc_mat_pfr, enc_pfr, pfr_len, max_gap_len, max_ins_len: unused
        - offset: start position of the motif within the whole peptide
        - motif_len: length of the binding core
        - pep_len: length of the whole peptide

    returns:
        - x : list containing the encoded sequence
    '''
    encoded = []
    # concatenate the per-residue encodings of the core window
    for residue in pep:
        encoded.extend(enc_mat[residue])
    # peptide length feature: logistic of the overhang beyond the core
    overhang = (pep_len - motif_len) * 1.0
    sig = 1 / (1 + math.exp(-overhang))
    encoded.extend([sig, 1 - sig])
    # core position within the peptide, normalised by 15
    begin = float(offset) / 15
    end = float(pep_len - offset - 1) / 15
    encoded.extend([begin, end])
    return encoded
def encode_insertion(pep,enc_mat,enc_mat_pfr,enc_pfr,pfr_len,offset,motif_len,pep_len,max_gap_len,max_ins_len,ip,il):
    '''
    encode a binding-core window that contains an insertion: `il` dummy
    residues ('X') are inserted before position `ip`, and the gap/insert
    feature pairs are set accordingly

    parameters:
        - pep: string (AA sequence of the core window, shorter than
          motif_len by il)
        - enc_mat: encoding matrix (BLOSUM); assumes it has an 'X' entry
          for the insertion placeholder -- TODO confirm
        - enc_mat_pfr: encoding matrix for PFR (real BLOSUM)
        - enc_pfr: True/False encode PFR?
        - pfr_len: max length of encoded PFR
        - offset: start position of motif within whole peptide
        - motif_len: length of binding core
        - pep_len: length of whole peptide
        - max_ins_len: maximal insert length
        - max_gap_len: maximal gap length
        - ip: insertion position
        - il: insertion length
    returns:
        - x : list containing encoded sequence
    '''
    x=[]
    # left PFR:
    if (enc_pfr==True):
        pfr_seq = pep[max(0,offset-pfr_len):offset]
        x += encode_PFR(pfr_seq,enc_mat_pfr)
    # peptide: per-residue encodings, with il 'X' encodings spliced in
    # just before position ip
    for i in range(0,len(pep)):
        if i==ip:
            x += enc_mat['X'] * il # insertion
        x += enc_mat[pep[i]]
    # right PFR:
    if (enc_pfr==True):
        if (offset + motif_len) < pep_len:
            # NOTE(review): `pep` is the core window, so this slice is
            # usually empty; confirm intended source sequence and bounds
            pfr_seq = pep[(offset + motif_len-il) : max(pep_len,offset + motif_len -il + pfr_len)]
        else:
            pfr_seq=''
        x += encode_PFR(pfr_seq,enc_mat_pfr)
    # PFR length encoding: two (o, 1-o) pairs for left/right flank lengths
    if (enc_pfr==True):
        ll=offset
        if ll > pfr_len:
            ll=pfr_len
        elif ll < 0:
            ll=0
        # right flank is longer by il because the window consumed fewer AAs
        lr = pep_len - offset - motif_len + il
        if lr > pfr_len:
            lr=pfr_len
        elif lr <0:
            lr=0
        o = (pfr_len -ll)*1.0/pfr_len
        x += [o,(1-o)]
        o = (pfr_len -lr)*1.0/pfr_len
        x += [o,(1-o)]
    else:
        # saturating l/(l+1) flank-length features
        ll=max(0,offset)
        lr=max(0,(pep_len - offset -motif_len + il))
        o = (ll*1.0)/(ll+1)
        x += [o,(1-o)]
        o = (lr*1.0)/(lr+1)
        x += [o,(1-o)]
    # peptide length encoding (make this optional!): logistic of overhang
    o = (pep_len - motif_len)*1.0
    o = 1/(1+math.exp(-o))
    x += [o,(1-o)]
    # gap length encoding: constant (0,1) -- no gap in this variant
    #if(max_gap_len >0):
    x += [0,1]
    # insertion length encoding: il normalised by max_ins_len
    #if(max_ins_len >0):
    o = (il*1.0)/max_ins_len
    x += [o,1-o]
    # insert/gap position encoding (make optional!!): distance of ip from
    # the core's C-terminal end, normalised
    #if(max_gap_len >0):
    o = (motif_len-1-ip)*1.0/(motif_len-1)
    x += [o,1-o]
    # return encoded peptide:
    return(x)
def encode_gap(pep,enc_mat,enc_mat_pfr,enc_pfr,pfr_len,offset,motif_len,pep_len,max_gap_len,max_ins_len,gp,gl):
    '''
    encode a binding-core window that contains a deletion (gap): the gl
    residues starting at position gp are skipped, and the gap feature
    pairs are set accordingly

    parameters:
        - pep: string (AA sequence of the core window, longer than
          motif_len by gl)
        - enc_mat: encoding matrix (BLOSUM)
        - enc_mat_pfr: encoding matrix for PFR (real BLOSUM)
        - enc_pfr: True/False encode PFR?
        - pfr_len: max length of encoded PFR
        - offset: start position of motif within whole peptide
        - motif_len: length of binding core
        - pep_len: length of whole peptide
        - max_ins_len: maximal insert length
        - max_gap_len: maximal gap length
        - gp: gap position
        - gl: gap length
    returns:
        - x : list containing encoded sequence
    '''
    x=[]
    # left PFR:
    if (enc_pfr==True):
        pfr_seq = pep[max(0,offset-pfr_len):offset]
        x += encode_PFR(pfr_seq,enc_mat_pfr)
    # peptide: skip the gapped residues [gp, gp+gl), encode the rest.
    # BUGFIX: this branch used a bare `next` (a no-op expression that only
    # worked by accident); `continue` states the intent explicitly.
    for i in range(0,len(pep)):
        if (i >= gp) & (i < gp + gl):
            continue
        x += enc_mat[pep[i]]
    # right PFR:
    if (enc_pfr==True):
        if (offset + motif_len) < pep_len:
            # NOTE(review): `pep` is the core window, so this slice is
            # usually empty; confirm intended source sequence and bounds
            pfr_seq = pep[(offset + motif_len + gl) : max(pep_len,offset + motif_len + gl + pfr_len)]
        else:
            pfr_seq=''
        x += encode_PFR(pfr_seq,enc_mat_pfr)
    # PFR length encoding: two (o, 1-o) pairs for left/right flank lengths
    if (enc_pfr==True):
        ll=offset
        if ll > pfr_len:
            ll=pfr_len
        elif ll < 0:
            ll=0
        # right flank is shorter by gl because the window consumed more AAs
        lr = pep_len - offset - motif_len - gl
        if lr > pfr_len:
            lr=pfr_len
        elif lr <0:
            lr=0
        o = (pfr_len -ll)*1.0/pfr_len
        x += [o,(1-o)]
        o = (pfr_len -lr)*1.0/pfr_len
        x += [o,(1-o)]
    else:
        # saturating l/(l+1) flank-length features
        ll=max(0,offset)
        lr=max(0,(pep_len - offset -motif_len - gl))
        o = (ll*1.0)/(ll+1)
        x += [o,(1-o)]
        o = (lr*1.0)/(lr+1)
        x += [o,(1-o)]
    # peptide length encoding (make this optional!): logistic of overhang
    o = (pep_len - motif_len)*1.0
    o = 1/(1+math.exp(-o))
    x += [o,(1-o)]
    # gap length encoding: gl capped at 2, normalised (mortens way)
    #if(max_gap_len >0):
    o = (min( 2,gl)*1.0)/min(2,max_gap_len) # mortens way
    #o = (gl*1.0)/ float(max_gap_len) #
    x += [o,1-o]
    # insertion length encoding: constant (0,1) -- no insertion here
    #if(max_ins_len >0):
    x += [0,1]
    # gap position encoding (make optional!!): distance of gp from the
    # core's C-terminal end, normalised
    #if(max_gap_len >0):
    o = (motif_len-1-gp)*1.0/(motif_len-1)
    x += [o,1-o]
    # return encoded peptide:
    return(x)
def encode_input(pep,mhc,enc_mat,enc_mat_pfr,mhc_mat,enc_mhc,enc_pfr,pfr_len,max_gap_len, max_ins_len,motif_len):
    '''
    encode peptide + MHC input to the neural network: one feature row per
    candidate alignment of the binding core (all ungapped offsets, all
    gapped variants up to max_gap_len, all insertion variants up to
    max_ins_len); the encoded MHC pseudo sequence is appended to every row

    parameters:
        - pep: string (AA sequence of peptide)
        - mhc: string (name of mhc molecule, key into mhc_mat)
        - enc_mhc: True/False encode MHC?
        - enc_pfr: True/False encode PFR?
        - enc_mat: encoding matrix (BLOSUM)
        - enc_mat_pfr: encoding matrix (real BLOSUM)
        - mhc_mat: dict MHC name -> pseudo sequence
        - pfr_len: max length of PFR
        - max_gap_len / max_ins_len: maximal gap / insertion lengths
        - motif_len: length of binding core
    returns:
        - X : 2D np.array, one row per candidate core placement
    '''
    X=[]
    # pre-encode MHC sequence once; it is appended to every row below
    x_mhc=[]
    if enc_mhc==True:
        x_mhc=encode_mhc(mhc=mhc,mhc_mat=mhc_mat,enc_mat=enc_mat)
    if len(pep) < motif_len:
        # peptide shorter than the core: pad with an insertion of length
        # motif_len - len(pep), tried at every position
        # encode inserion at each position:
        for i in range(0,len(pep)):
            x = encode_insertion(pep=pep,enc_mat=enc_mat,enc_mat_pfr=enc_mat_pfr,enc_pfr=enc_pfr,
                                 pfr_len=pfr_len,offset=0,motif_len=motif_len,
                                 pep_len=len(pep),max_gap_len=max_gap_len,
                                 max_ins_len=max_ins_len,ip=i,il=motif_len-len(pep))
            if enc_mhc==True:
                x += x_mhc
            X.append(x)
    else:
        # ungapped: slide a motif_len window over the peptide
        for o in range(0,len(pep)-motif_len+1):
            p=pep[o:(o+motif_len)]
            x = encode_pep(pep=p,enc_mat=enc_mat,enc_mat_pfr=enc_mat_pfr,enc_pfr=enc_pfr,
                           pfr_len=pfr_len,offset=o,motif_len=motif_len,
                           pep_len=len(pep),max_gap_len=max_gap_len,
                           max_ins_len=max_ins_len)
            if enc_mhc==True:
                x += x_mhc
            X.append(x)
        # gapped - deletions: windows of motif_len+gl with gl residues
        # deleted at every interior position i in [1, motif_len)
        for gl in range(1,max_gap_len+1):
            for o in range(0,len(pep)-motif_len - gl +1):
                p=pep[o:(o+motif_len+gl)]
                for i in range(1,motif_len):
                    x = encode_gap(pep=p,enc_mat=enc_mat,enc_mat_pfr=enc_mat_pfr,enc_pfr=enc_pfr,
                                   pfr_len=pfr_len,offset=o,motif_len=motif_len,
                                   pep_len=len(pep),max_gap_len=max_gap_len,
                                   max_ins_len=max_ins_len,gp=i,gl=gl)
                    if enc_mhc==True:
                        x += x_mhc
                    X.append(x)
        # insertions: windows of motif_len-il with il 'X' residues inserted
        # at interior positions i in [1, motif_len-il)
        for il in range(1,max_ins_len+1):
            for o in range(0,len(pep)-motif_len+il+1):
                p=pep[o:(o+motif_len-il)]
                for i in range(1,motif_len-il):
                    x = encode_insertion(pep=p,enc_mat=enc_mat,enc_mat_pfr=enc_mat_pfr,enc_pfr=enc_pfr,
                                         pfr_len=pfr_len,offset=o,motif_len=motif_len,
                                         pep_len=len(pep),max_gap_len=max_gap_len,
                                         max_ins_len=max_ins_len,ip=i,il=il)
                    if enc_mhc==True:
                        x += x_mhc
                    X.append(x)
    # convert to numpy array:
    X=np.array(X)
    return X
def encode_input_mhc_pos(pep,mhc,enc_mat,enc_mat_pfr,mhc_mat,enc_mhc,enc_pfr,pfr_len,max_gap_len, max_ins_len,motif_len):
    '''
    encode peptide + MHC input to the neural network using the
    position-aware peptide encoder (encode_pep_new): one feature row per
    ungapped placement of the binding core, each with the encoded MHC
    pseudo sequence appended

    parameters:
        - pep: string (AA sequence of peptide)
        - mhc: string (name of mhc molecule, key into mhc_mat)
        - enc_mhc: True/False encode MHC?
        - enc_pfr: True/False encode PFR? (forwarded to encode_pep_new)
        - enc_mat: encoding matrix (BLOSUM)
        - enc_mat_pfr: encoding matrix (real BLOSUM)
        - mhc_mat: dict MHC name -> pseudo sequence
        - pfr_len: max length of PFR
        - max_gap_len / max_ins_len: forwarded, unused by encode_pep_new
        - motif_len: length of binding core
    returns:
        - X : 2D np.array, one row per core offset
    '''
    # pre-encode the MHC sequence once; reused for every offset
    mhc_enc = []
    if enc_mhc == True:
        mhc_enc = encode_mhc(mhc=mhc, mhc_mat=mhc_mat, enc_mat=enc_mat)
    rows = []
    # slide the binding-core window over the peptide (ungapped only)
    for start in range(0, len(pep) - motif_len + 1):
        window = pep[start:(start + motif_len)]
        row = encode_pep_new(pep=window, enc_mat=enc_mat, enc_mat_pfr=enc_mat_pfr, enc_pfr=enc_pfr,
                             pfr_len=pfr_len, offset=start, motif_len=motif_len,
                             pep_len=len(pep), max_gap_len=max_gap_len,
                             max_ins_len=max_ins_len)
        if enc_mhc == True:
            row += mhc_enc
        rows.append(row)
    # convert to numpy array:
    return np.array(rows)
|
vanessajurtz/lasagne4bio
|
peptide_MHCII/scripts/data_io_func.py
|
Python
|
gpl-3.0
| 47,255
|
[
"NetCDF"
] |
98f0b855d0d0f5e3b63f6038c268f440ec1da2c9a90c6a2c5fe484614659e6f1
|
# rall64_graphic.py ---
#
# Filename: rall64_graphic.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Thu May 29 13:18:12 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""Rall 1964 demo with fancy graphics"""
from matplotlib import pyplot as plt
from matplotlib import animation as anim
import numpy as np
from rall64 import *
# --- figure layout: model cartoon on top, voltage traces below ---
fig = plt.figure('Rall 1964, Figure 7')
fig.suptitle('Rall 1964, Figure 7')
ax_model = fig.add_subplot(2, 1, 1)
ax_data = fig.add_subplot(2, 1, 2)
# one colour triple per cable row (resting vs stimulated appearance)
colors = ['darkblue', 'darkgreen', 'darkred']
active_colors = ['lightblue', 'lightgreen', 'lightpink']
radius = 0.5
length = 0.8
cables = []   # cables[row] = list of 10 Rectangle patches (compartments)
lines = []    # filled later with the Line2D lists returned by ax_data.plot
# draw 3 rows of 10 numbered compartments
for yy in range(3):
    cables.append([])
    for xx in range(10):
        patch = ax_model.add_patch(plt.Rectangle((xx, 2*yy), length, 2 * radius, color=colors[yy]))
        cables[yy].append(patch)
        txt = ax_model.annotate('%d' % (xx), (xx, 2*yy), xytext=(xx + length/2.0, 2 * yy + radius), color='gray')
# NOTE: xx and yy deliberately keep their final loop values (9 and 2)
# to size the axes around the drawn grid
ax_model.set_xlim((-1, xx + 1))
ax_model.set_ylim((-1, 2 * yy + 3 * radius))
# ax_model.xaxis.set_visible(False)
# ax_model.yaxis.set_visible(False)
ax_model.set_axis_off()
# animation/stimulus state; tau, dt, setup_model_fig7 etc. come from
# the star import of rall64 -- TODO confirm exact provenance
stim_no = 0
stim_count = 4
clock = moose.element('/clock')
chan_list, vm_list = setup_model_fig7()
stim_t = 0.25 * tau        # duration of each stimulus phase
update_dt = dt             # simulation time advanced per animation frame
xdata = [[],[],[]]
ydata = [[],[],[]]
def update(xx):
    """Animation callback: advance the MOOSE simulation by one frame.

    While stimuli remain (stim_no < stim_count) it highlights the two
    stimulated compartments on each cable, drives their synaptic
    conductances, and steps the simulation; between stimuli it resets
    conductances and colours.  After all stimuli it just runs the clock
    out to simtime.  Finally it refreshes the three voltage traces.
    ``xx`` is the frame argument supplied by FuncAnimation (unused).
    """
    # stim_no is reassigned below, hence the global declaration;
    # the other names are globals mutated in place
    global stim_no, chan_list, vm_list, cables, clock
    if stim_no < stim_count:
        # highlight the currently stimulated compartment pairs:
        # row 0 stimulated left-to-right, row 1 right-to-left,
        # row 2 (control) stimulated uniformly
        cables[0][2 * stim_no+1].set_facecolor(active_colors[0])
        cables[0][2 * stim_no+1].set_edgecolor('y')
        cables[0][2 * stim_no + 2].set_facecolor(active_colors[0])
        cables[0][2 * stim_no + 2].set_edgecolor('y')
        cables[1][- 2 * stim_no - 2].set_facecolor(active_colors[1])
        cables[1][- 2 * stim_no - 2].set_edgecolor('y')
        cables[1][- 2 * stim_no - 3].set_facecolor(active_colors[1])
        cables[1][- 2 * stim_no - 3].set_edgecolor('y')
        for patch in cables[2][1:-1]:
            patch.set_facecolor(active_colors[2])
            patch.set_edgecolor('y')
        if clock.currentTime < stim_t * (stim_no + 1):
            # within the current stimulus window: open the channels
            # (Rm comes from the rall64 star import)
            chan_list[0][2 * stim_no].Gk = 1 / Rm
            chan_list[0][2 * stim_no + 1].Gk = 1 / Rm
            chan_list[1][- 2 * stim_no - 1].Gk = 1 / Rm
            chan_list[1][- 2 * stim_no - 2].Gk = 1 / Rm
            for chan in chan_list[2]:
                chan.Gk = 0.25 / Rm
            moose.start(update_dt)
        else:
            # stimulus window over: advance to the next stimulus and
            # reset all conductances and compartment colours
            stim_no += 1
            for chans in chan_list:
                for ch in chans:
                    ch.Gk = 0.0
            for ii in range(len(cables)):
                for patch in cables[ii]:
                    patch.set_color(colors[ii])
    elif clock.currentTime < simtime:
        # all stimuli done: keep integrating until simtime is reached
        moose.start(update_dt)
        for patch in cables[0]:
            patch.set_facecolor(colors[0])
        for patch in cables[1]:
            patch.set_facecolor(colors[1])
    # redraw the normalised voltage traces; each entry of `lines` is the
    # list returned by ax_data.plot, hence the inner loop
    for ii, (tmp_lines, vm, color) in enumerate(zip(lines, vm_list, colors)):
        for line in tmp_lines:
            line.set_data(np.linspace(0, clock.currentTime, len(vm.vector))/tau, (vm.vector - Em) / (Ek - Em))
# """A variation of rall64.run_model_fig7 with visualization of the compartments."""
lines.append(ax_data.plot([0], [0], label='(1,2)->(3,4)->(5,6)->(7,8)', color=colors[0]))
lines.append(ax_data.plot([0], [0], label='(7,8)->(5,6)->(3,4)->(1,2)', color=colors[1]))
lines.append(ax_data.plot([0], [0], label='control', color=colors[2]))
ax_data.set_xlim(0, simtime/tau)
ax_data.set_ylim(0, 0.15)
ax_data.set_xlabel('Time (t/tau)')
ax_data.set_ylabel('Membrane voltage (Vm - Em)/(Ek - Vm)')
print(lines)
plt.legend()
schedule()
fanim = anim.FuncAnimation(fig, update, fargs=None, interval=25, repeat=False)
plt.show()
#
# rall64_graphic.py ends here
|
BhallaLab/moose-examples
|
rall_1964/rall64_graphic.py
|
Python
|
gpl-2.0
| 4,690
|
[
"MOOSE"
] |
f63ab7fe880fbe67985e9ca48c5a417bfcbb5afb0cd6bdaad516ed980b2ced72
|
# -*- coding: utf-8 -*-
"""features for collecting abstract protocol costs"""
from ast import *
from tasty.tastyc import bases
from tasty.tastyc.bases import (CALL_TYPE_CTOR, CALL_TYPE_CONVERSION, CALL_TYPE_METHOD,
CALL_TYPE_TASTY_FUNC_CTOR, CALL_TYPE_TASTY_FUNC_CALL)
from tasty.types import conversions
from tasty import state
from tasty.exc import UnknownSymbolError, FqnnError
from tasty import cost_results
__all__ = ["analyze_costs"]
def find_assign_node(node):
    """Climb the ``.parent`` chain until an ``Assign`` ancestor is found.

    Assumes every node on the path carries a ``parent`` attribute and
    that an ``Assign`` ancestor exists; otherwise an AttributeError is
    raised when the chain runs out.
    """
    current = node.parent
    while not isinstance(current, Assign):
        current = current.parent
    return current
class CostEvaluator(bases.TastyVisitor):
    """AST visitor that accumulates abstract protocol costs.

    Walks a previously annotated protocol AST (nodes carry attributes
    such as ``role``, ``passive``, ``bit_lengths``, ``dims``,
    ``initial_info``/``return_info`` added by earlier tastyc passes)
    and feeds each costed operation into ``cost_obj``.
    Note: this module uses Python 2 syntax (``except X, e``, ``xrange``).
    """
    def __init__(self, cost_obj, precompute=True):
        # cost_obj: callable cost accumulator (e.g. the "accumulated"
        #   entry of CostSystem); precompute selects setup (True) vs
        #   online (False) phase costing
        super(CostEvaluator, self).__init__()
        self.cost_obj = cost_obj
        self.precompute = precompute
    def visit_ClassDef(self, node):
        # class bodies carry no protocol costs; do not descend
        pass
    def visit_If(self, node):
        """Cost the implicit __nonzero__ of the condition, then the body."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        if not bases.has_parent_node(node, bases.TastyCBase.protocol_name):
            return
        if isinstance(node.test, Attribute) or isinstance(node.test, Subscript):
            # condition is a tasty value: truth-testing it has a cost
            return_rec = node.test.return_info[0]
            self.cost_obj(
                **return_rec["type"].calc_costs("__nonzero__", tuple(), (return_rec["bitlen"],), (return_rec["dim"],),
                                                node.role,
                                                node.passive, self.precompute))
        else:
            self.visit(node.test)
        for i in node.body:
            self.visit(i)
    def visit_UnaryOp(self, node):
        """Cost the operand, then the unary operation itself."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        self.visit(node.operand)
        self.cost_obj(**node.initial_info["type"].calc_costs(node.methodname, tuple(),
                                                             node.bit_lengths, node.dims, node.role, node.passive,
                                                             self.precompute))
    def visit_For(self, node):
        """Unroll the loop: visit the body once per statically known iteration."""
        # state.log.debug("\ncosts %d %s", id(node), dump(node, True, True))
        if isinstance(node.iter, Call):
            # e.g. `for i in xrange(N)` with a literal N
            for i in xrange(node.iter.args[0].n):
                for j in node.body:
                    self.visit(j)
        elif isinstance(node.iter, Attribute):
            # iterating a tasty value: trip count is its first dimension
            count = node.iter.return_info[0]["dim"][0]
            for i in xrange(count):
                for j in node.body:
                    self.visit(j)
    def visit_BinOp(self, node):
        """Cost both operands, then the binary operation itself."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        if isinstance(node.left, Str):
            # string formatting etc. - no protocol cost
            return node
        self.visit(node.left)
        self.visit(node.right)
        self.cost_obj(**node.initial_info["type"].calc_costs(node.methodname, node.input_types,
                                                             node.bit_lengths, node.dims, self.active_role,
                                                             node.passive,
                                                             self.precompute))
    def visit_BoolOp(self, node):
        """Boolean ops carry no direct cost; just visit the operands."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        self.generic_visit(node)
    def visit_Compare(self, node):
        """Cost a comparison, dispatching on the owning party's role.

        Operand metadata is looked up in the symbol table when possible,
        otherwise taken from the annotated return_info.  For ``in`` the
        container (right operand) carries the cost.
        """
        # state.log.debug("\ncosts %s", dump(node))
        fqnn = bases.get_fqnn(node.left)
        party_name = fqnn[0]
        if party_name == self.passive_name:
            passive = True
            role = self.passive_role
        else:
            passive = False
            role = self.active_role
        try:
            left_symbol_record = self.symbol_table.identify(fqnn)
            left_kwargs = left_symbol_record["kwargs"]
        except UnknownSymbolError:
            left_kwargs = node.left.return_info[0]
        left_type = left_kwargs["type"]
        left_bitlen = left_kwargs["bitlen"]
        left_dim = left_kwargs["dim"]
        left_signed = left_kwargs["signed"]
        try:
            right_fqnn = bases.get_fqnn(node.comparators[0])
            right_symbol_record = self.symbol_table.identify(right_fqnn)
            right_kwargs = right_symbol_record["kwargs"]
        except FqnnError, e:
            right_kwargs = node.comparators[0].return_info[0]
        right_type = right_kwargs["type"]
        right_bitlen = right_kwargs["bitlen"]
        right_dim = right_kwargs["dim"]
        # NOTE(review): left_signed/right_signed are collected but unused
        right_signed = right_kwargs["signed"]
        if isinstance(node.ops[0], In):
            # membership test: the container (rhs) is the costed type
            bit_lengths = (right_bitlen, left_bitlen)
            dims = (right_dim, left_dim)
            self.cost_obj(**right_type.calc_costs(node.methodname, (left_type,), bit_lengths, dims, role,
                                                  passive, self.precompute))
        else:
            bit_lengths = (left_bitlen, right_bitlen)
            dims = (left_dim, right_dim)
            self.cost_obj(**left_type.calc_costs(node.methodname, (right_type,), bit_lengths, dims, role,
                                                 passive, self.precompute))
    def visit_Assign(self, node):
        """Only the right-hand side can incur costs."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        self.visit(node.value)
    def visit_AugAssign(self, node):
        """Only the right-hand side can incur costs."""
        # state.log.debug("\ncosts %s", dump(node, True, True))
        self.visit(node.value)
    def visit_constructor(self, node):
        """Cost a tasty type constructor call."""
        self.generic_visit(node)
        self.check_costs(node)
    def visit_method(self, node):
        """Cost a method call, skipping pure I/O and path helpers."""
        if hasattr(node.func, "attr") and (node.func.attr in ("output", "setup_output", "input")):
            return
        if isinstance(node.func, Name) and node.func.id in ("protocol_path",
                                                            "protocol_file", "tasty_path", "tasty_file"):
            return
        self.generic_visit(node)
        self.check_costs(node)
    def visit_tasty_function_call(self, node):
        """Cost a call to a user-defined tasty function."""
        self.generic_visit(node)
        node_type = node.initial_info["type"]
        self.cost_obj(**node.tasty_function.calc_costs(
            node.methodname, node.input_types, node.bit_lengths, node.dims,
            node.role, node.passive, self.precompute))
    def visit_tasty_function_ctor(self, node):
        # constructing a tasty function object itself is cost-free
        pass
    def visit_conversion(self, node):
        """Cost a type conversion (src type -> dest type)."""
        src_type = node.src_type
        dest_type = node.dest_type
        self.cost_obj(**conversions.calc_costs(node.func.attr,
                                               (src_type, dest_type), node.bit_lengths, node.dims, node.role,
                                               node.passive,
                                               self.precompute))
    def visit_Call(self, node):
        """Dispatch a call node to the handler for its annotated call_type.

        Only calls inside the protocol function are costed.
        """
        if not bases.has_parent_node(node, bases.TastyCBase.protocol_name):
            return
        if node.call_type == CALL_TYPE_CTOR:
            self.visit_constructor(node)
        elif node.call_type == CALL_TYPE_TASTY_FUNC_CALL:
            self.visit_tasty_function_call(node)
        elif node.call_type == CALL_TYPE_TASTY_FUNC_CTOR:
            self.visit_tasty_function_ctor(node)
        elif node.call_type == CALL_TYPE_METHOD:
            self.visit_method(node)
        elif node.call_type == CALL_TYPE_CONVERSION:
            self.visit_conversion(node)
        else:
            raise ValueError("found unsupported value %r for node.call_type" % node.call_type)
    def check_costs(self, node):
        """Feed the costs of a single annotated call node into cost_obj."""
        if __debug__:
            state.log.debug("\ncosts %s", dump(node, True, True))
        try:
            node_type = node.initial_info["type"]
        except AttributeError:
            # some nodes only carry return_info (no initial_info)
            node_type = node.return_info[0]["type"]
        passive = node.passive
        role = node.role
        self.cost_obj(**node_type.calc_costs(
            node.methodname, node.input_types, node.bit_lengths, node.dims,
            role, passive, self.precompute))
def analyze_costs(setup_ast, online_ast):
    """Accumulate abstract costs for both protocol phases.

    Runs a CostEvaluator over the setup AST (precompute=True) and the
    online AST (precompute=False); results are written into the
    "abstract" cost records of the global CostSystem.
    """
    costs = cost_results.CostSystem.costs["abstract"]
    if __debug__:
        state.log.debug("\nAnalyzing abstract costs for setup protocol version...")
    CostEvaluator(costs["setup"]["accumulated"], True).visit(setup_ast)
    if __debug__:
        state.log.debug("\nAnalyzing abstract costs for online protocol version...")
    CostEvaluator(costs["online"]["accumulated"], False).visit(online_ast)
|
tastyproject/tasty
|
tasty/tastyc/analyze_costs.py
|
Python
|
gpl-3.0
| 8,274
|
[
"VisIt"
] |
a72cfa5d51543b9ab5554420a623988eb0e6ccf54b617df71e4c522f51e289bf
|
from db_functions.compute_field_summaries import computeNeuronEphysSummariesAll, computeEphysPropSummaries, \
computeEphysPropValueSummaries, computeNeuronSummaries, computeArticleSummaries, assign_stat_object_to_data_tables
__author__ = 'stripathy'
def update_summary_fields():
    """Updates database summary fields like how many articles associated with a neuron type,
    mean ephys values associated with a neuron type, etc

    Runs the compute_field_summaries routines in a fixed order; each call
    writes its results to the database as a side effect.
    """
    print 'updating field summaries'
    computeNeuronEphysSummariesAll()
    computeEphysPropSummaries()
    computeEphysPropValueSummaries()
    computeNeuronSummaries()
    computeArticleSummaries()
    assign_stat_object_to_data_tables()
def run():
    """Entry point (e.g. for django-extensions runscript): refresh all summaries."""
    update_summary_fields()
|
neuroelectro/neuroelectro_org
|
scripts/update_db_summary_fields.py
|
Python
|
gpl-2.0
| 723
|
[
"NEURON"
] |
d2b3c0186adf9ba37917914bcbab89fbb5e953f6e3af12590e2ae0e2bbdfa56f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
import warnings
from pymatgen.electronic_structure.bandstructure import BandStructure
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer, \
BoltztrapRunner
from pymatgen.electronic_structure.core import Spin, OrbitalType
from monty.serialization import loadfn
from monty.os.path import which
# optional dependencies: probe at import time; tests that need them
# are skipped when the name is None / missing
try:
    from ase.io.cube import read_cube
except ImportError:
    read_cube = None
try:
    import fdint
except ImportError:
    fdint = None
# external BoltzTraP interpolation binary; None when not on PATH
x_trans = which("x_trans")
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapAnalyzerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load the shared BoltzTraP fixture outputs and reference band structure once."""
        cls.bz = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/transp/"))
        cls.bz_bands = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/bands/"))
        # spin-resolved DOS runs (majority / minority)
        cls.bz_up = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/dos_up/"), dos_spin=1)
        cls.bz_dw = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/dos_dw/"), dos_spin=-1)
        cls.bz_fermi = BoltztrapAnalyzer.from_files(
            os.path.join(test_dir, "boltztrap/fermi/"))
        with open(os.path.join(test_dir, "Cu2O_361_bandstructure.json"),
                  "rt") as f:
            d = json.load(f)
        cls.bs = BandStructure.from_dict(d)
        cls.btr = BoltztrapRunner(cls.bs, 1)
        # silence warnings emitted while parsing the fixture files
        warnings.simplefilter("ignore")
    @classmethod
    def tearDownClass(cls):
        """Restore the default warning filter changed in setUpClass."""
        warnings.simplefilter("default")
    def test_properties(self):
        """Spot-check values parsed from the reference BoltzTraP outputs."""
        self.assertAlmostEqual(self.bz.gap, 1.6644932121620404, 4)
        array = self.bz._cond[300][102]
        self.assertAlmostEqual(array[0][0] / 1e19, 7.5756518, 4)
        self.assertAlmostEqual(array[0][2], -11.14679)
        self.assertAlmostEqual(array[1][0], -88.203286)
        self.assertAlmostEqual(array[2][2], 1.7133249e+19)
        array = self.bz._seebeck[300][22]
        self.assertAlmostEqual(array[0][1], 6.4546074e-22)
        self.assertAlmostEqual(array[1][1], -0.00032073711)
        self.assertAlmostEqual(array[1][2], -2.9868424e-24)
        self.assertAlmostEqual(array[2][2], -0.0003126543)
        array = self.bz._kappa[500][300]
        self.assertAlmostEqual(array[0][1], 0.00014524309)
        self.assertAlmostEqual(array[1][1], 328834400000000.0)
        self.assertAlmostEqual(array[1][2], 3.7758069e-05)
        self.assertAlmostEqual(array[2][2], 193943750000000.0)
        self.assertAlmostEqual(self.bz._hall[400][800][1][0][0], 9.5623749e-28)
        self.assertAlmostEqual(self.bz._hall[400][68][1][2][2], 6.5106975e-10)
        self.assertAlmostEqual(self.bz.doping['p'][3], 1e18)
        self.assertAlmostEqual(self.bz.mu_doping['p'][300][2], 0.1553770018406)
        self.assertAlmostEqual(self.bz.mu_doping['n'][300][-1],
                               1.6486017632924719, 4)
        self.assertAlmostEqual(self.bz._cond_doping['n'][800][3][1][1] / 1e16,
                               1.5564085, 4)
        self.assertAlmostEqual(self.bz._seebeck_doping['p'][600][2][0][
                                   1] / 1e-23, 3.2860613, 4)
        self.assertAlmostEqual(self.bz._carrier_conc[500][67], 38.22832002)
        self.assertAlmostEqual(self.bz.vol, 612.97557323964838, 4)
        self.assertAlmostEqual(self.bz.intrans["scissor"], 0.0, 1)
        self.assertAlmostEqual(self.bz._hall_doping['n'][700][-1][2][2][2],
                               5.0136483e-26)
        # DOS energies/densities and Fermi-surface grid
        self.assertAlmostEqual(self.bz.dos.efermi, -0.0300005507057)
        self.assertAlmostEqual(self.bz.dos.energies[0], -2.4497049391830448, 4)
        self.assertAlmostEqual(self.bz.dos.energies[345],
                               -0.72708823447130944, 4)
        self.assertAlmostEqual(self.bz.dos.energies[-1], 3.7569398770153524, 4)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][400], 118.70171)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][200], 179.58562)
        self.assertAlmostEqual(self.bz.dos.densities[Spin.up][300], 289.43945)
        self.assertAlmostEqual(self.bz_bands._bz_bands.shape, (1316, 20))
        self.assertAlmostEqual(self.bz_bands._bz_kpoints.shape, (1316, 3))
        self.assertAlmostEqual(self.bz_up._dos_partial['0']['pz'][2562],
                               0.023862958)
        self.assertAlmostEqual(self.bz_dw._dos_partial['1']['px'][3120],
                               5.0192891)
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data.shape,
                               (121, 121, 65))
        self.assertAlmostEqual(self.bz_fermi.fermi_surface_data[21][79][19],
                               -1.8831911809439161, 5)
    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_seebeck_eff_mass(self):
        """Check Seebeck effective mass in tensor and average form, for mu and doping."""
        ref = [1.956090529381193, 2.0339311618566343, 1.1529383757896965]
        ref2 = [4258.4072823354145, 4597.0351887125289, 4238.1262696392705]
        sbk_mass_tens_mu = \
            self.bz.get_seebeck_eff_mass(output='tensor', doping_levels=False,
                                         temp=300)[3]
        sbk_mass_tens_dop = \
            self.bz.get_seebeck_eff_mass(output='tensor', doping_levels=True,
                                         temp=300)['n'][2]
        sbk_mass_avg_mu = \
            self.bz.get_seebeck_eff_mass(output='average', doping_levels=False,
                                         temp=300)[3]
        sbk_mass_avg_dop = \
            self.bz.get_seebeck_eff_mass(output='average', doping_levels=True,
                                         temp=300)['n'][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 1)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 4361.4744008038842, 1)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.661553842105382, 4)
    @unittest.skipIf(not fdint, "No FDINT")
    def test_get_complexity_factor(self):
        """Check complexity factor in tensor and average form, for mu and doping."""
        ref = [2.7658776815227828, 2.9826088215568403, 0.28881335881640308]
        ref2 = [0.0112022048620205, 0.0036001049607186602,
                0.0083028947173193028]
        sbk_mass_tens_mu = \
            self.bz.get_complexity_factor(output='tensor', doping_levels=False,
                                          temp=300)[3]
        sbk_mass_tens_dop = \
            self.bz.get_complexity_factor(output='tensor', doping_levels=True,
                                          temp=300)['n'][2]
        sbk_mass_avg_mu = \
            self.bz.get_complexity_factor(output='average', doping_levels=False,
                                          temp=300)[3]
        sbk_mass_avg_dop = \
            self.bz.get_complexity_factor(output='average', doping_levels=True,
                                          temp=300)['n'][2]
        for i in range(0, 3):
            self.assertAlmostEqual(sbk_mass_tens_mu[i], ref2[i], 4)
            self.assertAlmostEqual(sbk_mass_tens_dop[i], ref[i], 4)
        self.assertAlmostEqual(sbk_mass_avg_mu, 0.00628677029221, 4)
        self.assertAlmostEqual(sbk_mass_avg_dop, 1.12322832119, 4)
def test_get_seebeck(self):
ref = [-768.99078999999995, -724.43919999999991, -686.84682999999973]
for i in range(0, 3):
self.assertAlmostEqual(self.bz.get_seebeck()['n'][800][3][i],
ref[i])
self.assertAlmostEqual(
self.bz.get_seebeck(output='average')['p'][800][3], 697.608936667)
self.assertAlmostEqual(
self.bz.get_seebeck(output='average', doping_levels=False)[500][
520], 1266.7056)
self.assertAlmostEqual(
self.bz.get_seebeck(output='average', doping_levels=False)[300][65],
-36.2459389333) # TODO: this was originally "eigs"
def test_get_conductivity(self):
ref = [5.9043185000000022, 17.855599000000002, 26.462935000000002]
for i in range(0, 3):
self.assertAlmostEqual(self.bz.get_conductivity()['p'][600][2][i],
ref[i])
self.assertAlmostEqual(
self.bz.get_conductivity(output='average')['n'][700][1],
1.58736609667)
self.assertAlmostEqual(
self.bz.get_conductivity(output='average', doping_levels=False)[
300][457], 2.87163566667)
self.assertAlmostEqual(
self.bz.get_conductivity(output='average', doping_levels=False,
# TODO: this was originally "eigs"
relaxation_time=1e-15)[200][63],
16573.0536667)
def test_get_power_factor(self):
ref = [6.2736602345523362, 17.900184232304138, 26.158282220458144]
for i in range(0, 3):
self.assertAlmostEqual(self.bz.get_power_factor()['p'][200][2][i],
ref[i])
self.assertAlmostEqual(
self.bz.get_power_factor(output='average')['n'][600][4],
411.230962976)
self.assertAlmostEqual(
self.bz.get_power_factor(output='average', doping_levels=False,
relaxation_time=1e-15)[500][459],
6.59277148467)
self.assertAlmostEqual(
self.bz.get_power_factor(output='average', doping_levels=False)[
800][61], 2022.67064134) # TODO: this was originally "eigs"
    def test_get_thermal_conductivity(self):
        """Electronic thermal conductivity: p-type components at 300 K and
        averaged values, including the k_el=False (lattice-only) branch."""
        ref = [2.7719565628862623e-05, 0.00010048046886793946,
               0.00015874549392499391]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_thermal_conductivity()['p'][300][2][i], ref[i])
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             relaxation_time=1e-15)['n'][500][
                0],
            1.74466575612e-07)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             doping_levels=False)[800][874],
            8.08066254813)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(output='average',
                                             doping_levels=False)[200][32],
            # TODO: this was originally "eigs"
            0.0738961845832)
        self.assertAlmostEqual(
            self.bz.get_thermal_conductivity(k_el=False, output='average',
                                             doping_levels=False)[200][32],
            0.19429052)
def test_get_zt(self):
ref = [0.097408810215, 0.29335112354, 0.614673998089]
for i in range(0, 3):
self.assertAlmostEqual(self.bz.get_zt()['n'][800][4][i], ref[i])
self.assertAlmostEqual(
self.bz.get_zt(output='average', kl=0.5)['p'][700][2],
0.0170001879916)
self.assertAlmostEqual(
self.bz.get_zt(output='average', doping_levels=False,
relaxation_time=1e-15)[300][240],
0.0041923533238348342)
eigs = self.bz.get_zt(output='eigs', doping_levels=False)[700][65]
ref_eigs = [0.082420053399668847, 0.29408035502671648,
0.40822061215079392]
for idx, val in enumerate(ref_eigs):
self.assertAlmostEqual(eigs[idx], val, 5)
    def test_get_average_eff_mass(self):
        """Average effective mass: component-wise values for both carrier
        types, full 3x3 tensors, and the scalar-averaged output."""
        ref = [0.76045816788363574, 0.96181142990667101, 2.9428428773308628]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass()['p'][300][2][i], ref[i])
        ref = [1.1295783824744523, 1.3898454041924351, 5.2459984671977935]
        ref2 = [6.6648842712692078, 31.492540105738343, 37.986369302138954]
        for i in range(0, 3):
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass()['n'][600][1][i], ref[i])
            self.assertAlmostEqual(
                self.bz.get_average_eff_mass(doping_levels=False)[300][200][i],
                ref2[i])
        # Full tensors: off-diagonal terms are numerically ~0 (1e-17 scale),
        # checked to 4 decimal places.
        ref = [[9.61811430e-01, -8.25159596e-19, -4.70319444e-19],
               [-8.25159596e-19, 2.94284288e+00, 3.00368916e-18],
               [-4.70319444e-19, 3.00368916e-18, 7.60458168e-01]]
        ref2 = [[27.97604444269153, -2.39347589e-17, -1.36897140e-17],
                [-2.39347589e-17, 8.55969097e+01, 8.74169648e-17],
                [-1.36897140e-17, 8.74169648e-17, 2.21151980e+01]]
        for i in range(0, 3):
            for j in range(0, 3):
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output='tensor')['p'][300][2][
                        i][j], ref[i][j], 4)
                self.assertAlmostEqual(
                    self.bz.get_average_eff_mass(output='tensor',
                                                 doping_levels=False)[300][500][
                        i][j], ref2[i][j], 4)
        self.assertAlmostEqual(
            self.bz.get_average_eff_mass(output='average')['n'][300][2],
            1.53769093989, 4)
def test_get_carrier_concentration(self):
self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][39] /
1e22, 6.4805156617179151, 4)
self.assertAlmostEqual(self.bz.get_carrier_concentration()[300][
693] / 1e15, -6.590800965604750, 4)
def test_get_hall_carrier_concentration(self):
self.assertAlmostEqual(self.bz.get_hall_carrier_concentration()[600][
120] / 1e21, 6.773394626767555, 4)
self.assertAlmostEqual(self.bz.get_hall_carrier_concentration()[500][
892] / 1e21, -9.136803845741777, 4)
    def test_get_symm_bands(self):
        """get_symm_bands() yields the same band-structure shape whether the
        k-point line is implicit (None), given as Kpoint objects, or given
        as raw fractional coordinates."""
        structure = loadfn(
            os.path.join(test_dir, 'boltztrap/structure_mp-12103.json'))
        sbs = loadfn(os.path.join(test_dir, 'boltztrap/dft_bs_sym_line.json'))
        kpoints = [kp.frac_coords for kp in sbs.kpoints]
        # NOTE: the loop below rebinds labels_dict on each iteration.
        labels_dict = {k: sbs.labels_dict[k].frac_coords for k in
                       sbs.labels_dict}
        for kpt_line, labels_dict in zip([None, sbs.kpoints, kpoints],
                                         [None, sbs.labels_dict, labels_dict]):
            sbs_bzt = self.bz_bands.get_symm_bands(structure, -5.25204548,
                                                   kpt_line=kpt_line,
                                                   labels_dict=labels_dict)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up]), 20)
            self.assertAlmostEqual(len(sbs_bzt.bands[Spin.up][1]), 143)
# def test_check_acc_bzt_bands(self):
# structure = loadfn(os.path.join(test_dir,'boltztrap/structure_mp-12103.json'))
# sbs = loadfn(os.path.join(test_dir,'boltztrap/dft_bs_sym_line.json'))
# sbs_bzt = self.bz_bands.get_symm_bands(structure,-5.25204548)
# corr,werr_vbm,werr_cbm,warn = BoltztrapAnalyzer.check_acc_bzt_bands(sbs_bzt,sbs)
# self.assertAlmostEqual(corr[2],9.16851750e-05)
# self.assertAlmostEqual(werr_vbm['K-H'],0.18260273521047862)
# self.assertAlmostEqual(werr_cbm['M-K'],0.071552669981356981)
# self.assertFalse(warn)
    def test_get_complete_dos(self):
        """Merging spin-up and spin-down runs produces a CompleteDos with
        both spin channels and the expected projected (spd) densities."""
        structure = loadfn(
            os.path.join(test_dir, 'boltztrap/structure_mp-12103.json'))
        cdos = self.bz_up.get_complete_dos(structure, self.bz_dw)
        spins = list(cdos.densities.keys())
        self.assertIn(Spin.down, spins)
        self.assertIn(Spin.up, spins)
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.p].densities[Spin.up][3134],
            43.839230100999991)
        self.assertAlmostEqual(
            cdos.get_spd_dos()[OrbitalType.s].densities[Spin.down][716],
            6.5383268000000001)
def test_extreme(self):
x = self.bz.get_extreme("seebeck")
self.assertEqual(x["best"]["carrier_type"], "n")
self.assertAlmostEqual(x["p"]["value"], 1255.365, 2)
self.assertEqual(x["n"]["isotropic"], True)
self.assertEqual(x["n"]["temperature"], 600)
x = self.bz.get_extreme("kappa", maximize=False, min_temp=400,
min_doping=1E20)
self.assertAlmostEqual(x["best"]["value"], 0.105, 2)
self.assertAlmostEqual(x["n"]["value"], 0.139, 2)
self.assertEqual(x["p"]["temperature"], 400)
self.assertEqual(x["n"]["isotropic"], False)
def test_to_from_dict(self):
btr_dict = self.btr.as_dict()
s = json.dumps(btr_dict)
self.assertIsNotNone(s)
self.assertIsNotNone(btr_dict['bs'])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
mbkumar/pymatgen
|
pymatgen/electronic_structure/tests/test_boltztrap.py
|
Python
|
mit
| 16,990
|
[
"ASE",
"BoltzTrap",
"pymatgen"
] |
5fbc6de29d3e81feda6d98ee60f78a1d755f72751df1ebf8ab70cdc682a49704
|
import itertools
import logging
import re
import time
import urllib
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Type, Union
import pytz
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import connection
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from django.utils.timesince import timesince
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from psycopg2.sql import SQL, Composable, Literal
from analytics.lib.counts import COUNT_STATS, CountStat
from analytics.lib.time_utils import time_range
from analytics.models import (
BaseCount,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from confirmation.models import Confirmation, _properties, confirmation_url
from confirmation.settings import STATUS_ACTIVE
from zerver.decorator import (
require_non_guest_user,
require_server_admin,
require_server_admin_api,
to_utc_datetime,
zulip_login_required,
)
from zerver.lib.actions import (
do_change_plan_type,
do_deactivate_realm,
do_scrub_realm,
do_send_realm_reactivation_email,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.subdomains import get_subdomain_from_hostname
from zerver.lib.timestamp import convert_to_UTC, timestamp_to_datetime
from zerver.lib.validator import to_non_negative_int
from zerver.models import (
Client,
MultiuseInvite,
PreregistrationUser,
Realm,
UserActivity,
UserActivityInterval,
UserProfile,
get_realm,
)
from zerver.views.invite import get_invitee_emails_set
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
approve_sponsorship,
attach_discount_to_realm,
get_current_plan_by_realm,
get_customer_by_realm,
get_discount_for_realm,
get_latest_seat_count,
make_end_of_cycle_updates_if_needed,
update_sponsorship_status,
)
if settings.ZILENCER_ENABLED:
from zilencer.models import RemoteInstallationCount, RemoteRealmCount, RemoteZulipServer
else:
from unittest.mock import Mock
RemoteInstallationCount = Mock() # type: ignore[misc] # https://github.com/JukkaL/mypy/issues/1188
RemoteZulipServer = Mock() # type: ignore[misc] # https://github.com/JukkaL/mypy/issues/1188
RemoteRealmCount = Mock() # type: ignore[misc] # https://github.com/JukkaL/mypy/issues/1188
MAX_TIME_FOR_FULL_ANALYTICS_GENERATION = timedelta(days=1, minutes=30)
def is_analytics_ready(realm: Realm) -> bool:
    """Whether the realm has existed long enough for the analytics jobs to
    have produced a full data set for it."""
    realm_age = timezone_now() - realm.date_created
    return realm_age > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION
def render_stats(request: HttpRequest, data_url_suffix: str, target_name: str,
                 for_installation: bool=False, remote: bool=False,
                 analytics_ready: bool=True) -> HttpResponse:
    """Render the /stats page shell that the charts frontend fills in.

    Fix: the return type was annotated ``HttpRequest``; ``render()``
    returns an ``HttpResponse``.

    Args:
        request: the incoming HTTP request.
        data_url_suffix: appended by the frontend to its chart-data URLs
            to scope requests (e.g. '/realm/<id>' or '/installation').
        target_name: human-readable name shown on the page.
        for_installation: whether charts cover the whole installation.
        remote: whether chart data comes from a remote Zulip server.
        analytics_ready: if False, the template shows a "not ready"
            notice instead of charts (see is_analytics_ready).
    """
    page_params = dict(
        data_url_suffix=data_url_suffix,
        for_installation=for_installation,
        remote=remote,
        debug_mode=False,
    )
    return render(request,
                  'analytics/stats.html',
                  context=dict(target_name=target_name,
                               page_params=page_params,
                               analytics_ready=analytics_ready))
@zulip_login_required
def stats(request: HttpRequest) -> HttpResponse:
    """Serve the stats page for the requesting user's own realm."""
    realm = request.user.realm
    if request.user.is_guest:
        # TODO: Make @zulip_login_required pass the UserProfile so we
        # can use @require_member_or_admin
        raise JsonableError(_("Not allowed for guest users"))
    display_name = realm.name or realm.string_id
    return render_stats(request, '', display_name,
                        analytics_ready=is_analytics_ready(realm))
@require_server_admin
@has_request_variables
def stats_for_realm(request: HttpRequest, realm_str: str) -> HttpResponse:
    """Server-admin view: stats page for an arbitrary realm by subdomain."""
    try:
        realm = get_realm(realm_str)
    except Realm.DoesNotExist:
        return HttpResponseNotFound(f"Realm {realm_str} does not exist")
    display_name = realm.name or realm.string_id
    return render_stats(request, f'/realm/{realm_str}', display_name,
                        analytics_ready=is_analytics_ready(realm))
@require_server_admin
@has_request_variables
def stats_for_remote_realm(request: HttpRequest, remote_server_id: int,
                           remote_realm_id: int) -> HttpResponse:
    """Server-admin view: stats page for a realm on a remote Zulip server."""
    server = RemoteZulipServer.objects.get(id=remote_server_id)
    data_url_suffix = f'/remote/{server.id}/realm/{remote_realm_id}'
    title = f"Realm {remote_realm_id} on server {server.hostname}"
    return render_stats(request, data_url_suffix, title)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_realm(request: HttpRequest, user_profile: UserProfile,
                             realm_str: str, **kwargs: Any) -> HttpResponse:
    """Chart-data API endpoint scoped to a specific realm (admins only)."""
    try:
        target_realm = get_realm(realm_str)
    except Realm.DoesNotExist:
        raise JsonableError(_("Invalid organization"))
    return get_chart_data(request=request, user_profile=user_profile,
                          realm=target_realm, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_realm(
        request: HttpRequest, user_profile: UserProfile, remote_server_id: int,
        remote_realm_id: int, **kwargs: Any) -> HttpResponse:
    """Chart-data API endpoint scoped to a realm on a remote server."""
    server = RemoteZulipServer.objects.get(id=remote_server_id)
    return get_chart_data(request=request, user_profile=user_profile,
                          server=server, remote=True,
                          remote_realm_id=int(remote_realm_id), **kwargs)
@require_server_admin
def stats_for_installation(request: HttpRequest) -> HttpResponse:
    """Server-admin view: stats aggregated over the whole installation."""
    return render_stats(request, '/installation', 'Installation',
                        for_installation=True)
@require_server_admin
def stats_for_remote_installation(request: HttpRequest, remote_server_id: int) -> HttpResponse:
    """Server-admin view: stats aggregated over a remote installation."""
    server = RemoteZulipServer.objects.get(id=remote_server_id)
    return render_stats(request, f'/remote/{server.id}/installation',
                        f'remote Installation {server.hostname}',
                        for_installation=True, remote=True)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_installation(request: HttpRequest, user_profile: UserProfile,
                                    chart_name: str=REQ(), **kwargs: Any) -> HttpResponse:
    """Chart-data API endpoint aggregated over the whole installation."""
    # chart_name is re-extracted from the request by get_chart_data itself.
    return get_chart_data(request=request, user_profile=user_profile,
                          for_installation=True, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_installation(
        request: HttpRequest,
        user_profile: UserProfile,
        remote_server_id: int,
        chart_name: str=REQ(),
        **kwargs: Any) -> HttpResponse:
    """Chart-data API endpoint aggregated over a remote installation."""
    server = RemoteZulipServer.objects.get(id=remote_server_id)
    # chart_name is re-extracted from the request by get_chart_data itself.
    return get_chart_data(request=request, user_profile=user_profile,
                          for_installation=True, remote=True, server=server,
                          **kwargs)
@require_non_guest_user
@has_request_variables
def get_chart_data(request: HttpRequest, user_profile: UserProfile, chart_name: str=REQ(),
                   min_length: Optional[int]=REQ(converter=to_non_negative_int, default=None),
                   start: Optional[datetime]=REQ(converter=to_utc_datetime, default=None),
                   end: Optional[datetime]=REQ(converter=to_utc_datetime, default=None),
                   realm: Optional[Realm]=None, for_installation: bool=False,
                   remote: bool=False, remote_realm_id: Optional[int]=None,
                   server: Optional[RemoteZulipServer]=None) -> HttpResponse:
    """Return the JSON time-series data backing one chart on /stats.

    chart_name selects which CountStats and labels are used; the
    realm/for_installation/remote/server/remote_realm_id parameters (set
    by the wrapper views, not by the client) select which analytics table
    the aggregate series comes from.  Raises JsonableError for unknown
    chart names, invalid time ranges, or missing analytics data.
    """
    # Pick the aggregate ("everyone") table based on scope.
    if for_installation:
        if remote:
            aggregate_table = RemoteInstallationCount
            assert server is not None
        else:
            aggregate_table = InstallationCount
    else:
        if remote:
            aggregate_table = RemoteRealmCount
            assert server is not None
            assert remote_realm_id is not None
        else:
            aggregate_table = RealmCount
    # Per-chart configuration: which stats to fetch, which tables to read,
    # how to label each subgroup, and how to order the labels for display.
    if chart_name == 'number_of_humans':
        stats = [
            COUNT_STATS['1day_actives::day'],
            COUNT_STATS['realm_active_humans::day'],
            COUNT_STATS['active_users_audit:is_bot:day']]
        tables = [aggregate_table]
        subgroup_to_label: Dict[CountStat, Dict[Optional[str], str]] = {
            stats[0]: {None: '_1day'},
            stats[1]: {None: '_15day'},
            stats[2]: {'false': 'all_time'}}
        labels_sort_function = None
        include_empty_subgroups = True
    elif chart_name == 'messages_sent_over_time':
        stats = [COUNT_STATS['messages_sent:is_bot:hour']]
        tables = [aggregate_table, UserCount]
        subgroup_to_label = {stats[0]: {'false': 'human', 'true': 'bot'}}
        labels_sort_function = None
        include_empty_subgroups = True
    elif chart_name == 'messages_sent_by_message_type':
        stats = [COUNT_STATS['messages_sent:message_type:day']]
        tables = [aggregate_table, UserCount]
        subgroup_to_label = {stats[0]: {'public_stream': _('Public streams'),
                                        'private_stream': _('Private streams'),
                                        'private_message': _('Private messages'),
                                        'huddle_message': _('Group private messages')}}
        labels_sort_function = lambda data: sort_by_totals(data['everyone'])
        include_empty_subgroups = True
    elif chart_name == 'messages_sent_by_client':
        stats = [COUNT_STATS['messages_sent:client:day']]
        tables = [aggregate_table, UserCount]
        # Note that the labels are further re-written by client_label_map
        subgroup_to_label = {stats[0]:
                             {str(id): name for id, name in Client.objects.values_list('id', 'name')}}
        labels_sort_function = sort_client_labels
        include_empty_subgroups = False
    elif chart_name == 'messages_read_over_time':
        stats = [COUNT_STATS['messages_read::hour']]
        tables = [aggregate_table, UserCount]
        subgroup_to_label = {stats[0]: {None: 'read'}}
        labels_sort_function = None
        include_empty_subgroups = True
    else:
        raise JsonableError(_("Unknown chart name: {}").format(chart_name))
    # Most likely someone using our API endpoint. The /stats page does not
    # pass a start or end in its requests.
    if start is not None:
        start = convert_to_UTC(start)
    if end is not None:
        end = convert_to_UTC(end)
    if start is not None and end is not None and start > end:
        raise JsonableError(_("Start time is later than end time. Start: {start}, End: {end}").format(
            start=start, end=end,
        ))
    if realm is None:
        # Note that this value is invalid for Remote tables; be
        # careful not to access it in those code paths.
        realm = user_profile.realm
    if remote:
        # For remote servers, we don't have fillstate data, and thus
        # should simply use the first and last data points for the
        # table.
        assert server is not None
        if not aggregate_table.objects.filter(server=server).exists():
            raise JsonableError(_("No analytics data available. Please contact your server administrator."))
        if start is None:
            start = aggregate_table.objects.filter(server=server).first().end_time
        if end is None:
            end = aggregate_table.objects.filter(server=server).last().end_time
    else:
        # Otherwise, we can use tables on the current server to
        # determine a nice range, and some additional validation.
        if start is None:
            if for_installation:
                start = installation_epoch()
            else:
                start = realm.date_created
        if end is None:
            end = max(last_successful_fill(stat.property) or
                      datetime.min.replace(tzinfo=timezone.utc) for stat in stats)
        if start > end and (timezone_now() - start > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION):
            logging.warning("User from realm %s attempted to access /stats, but the computed "
                            "start time: %s (creation of realm or installation) is later than the computed "
                            "end time: %s (last successful analytics update). Is the "
                            "analytics cron job running?", realm.string_id, start, end)
            raise JsonableError(_("No analytics data available. Please contact your server administrator."))
    # All stats for one chart must share a frequency, since they share the
    # end_times axis computed below.
    assert len({stat.frequency for stat in stats}) == 1
    end_times = time_range(start, end, stats[0].frequency, min_length)
    data: Dict[str, Any] = {
        'end_times': [int(end_time.timestamp()) for end_time in end_times],
        'frequency': stats[0].frequency,
    }
    aggregation_level = {
        InstallationCount: 'everyone',
        RealmCount: 'everyone',
        RemoteInstallationCount: 'everyone',
        RemoteRealmCount: 'everyone',
        UserCount: 'user',
    }
    # -1 is a placeholder value, since there is no relevant filtering on InstallationCount
    id_value = {
        InstallationCount: -1,
        RealmCount: realm.id,
        RemoteInstallationCount: server.id if server is not None else None,
        # TODO: RemoteRealmCount logic doesn't correctly handle
        # filtering by server_id as well.
        RemoteRealmCount: remote_realm_id,
        UserCount: user_profile.id,
    }
    for table in tables:
        data[aggregation_level[table]] = {}
        for stat in stats:
            data[aggregation_level[table]].update(get_time_series_by_subgroup(
                stat, table, id_value[table], end_times, subgroup_to_label[stat], include_empty_subgroups))
    if labels_sort_function is not None:
        data['display_order'] = labels_sort_function(data)
    else:
        data['display_order'] = None
    return json_success(data=data)
def sort_by_totals(value_arrays: Dict[str, List[int]]) -> List[str]:
totals = [(sum(values), label) for label, values in value_arrays.items()]
totals.sort(reverse=True)
return [label for total, label in totals]
def sort_client_labels(data: Dict[str, Dict[str, List[int]]]) -> List[str]:
    """Rank client labels for the messages-by-client chart.

    For any given user we want to show a fixed set of clients in the
    chart, regardless of the time aggregation or whether we're looking at
    realm or user data; that set should include the clients most important
    to both the realm's and the user's traffic.  We rank by realm-wide
    totals, then let the user's own ranking win ties (the -.1 offset), so
    taking the first N labels of the result is a reasonable choice.
    """
    label_sort_values: Dict[str, float] = {
        label: rank
        for rank, label in enumerate(sort_by_totals(data['everyone']))
    }
    for rank, label in enumerate(sort_by_totals(data['user'])):
        label_sort_values[label] = min(rank - .1,
                                       label_sort_values.get(label, rank))
    ordered = sorted(label_sort_values.items(), key=lambda item: item[1])
    return [label for label, _ in ordered]
def table_filtered_to_id(table: Type[BaseCount], key_id: int) -> QuerySet:
    """Return *table*'s rows restricted to the object identified by
    *key_id*; which filter column applies depends on the table."""
    if table == InstallationCount:
        return InstallationCount.objects.all()
    if table == RealmCount:
        return RealmCount.objects.filter(realm_id=key_id)
    if table == UserCount:
        return UserCount.objects.filter(user_id=key_id)
    if table == StreamCount:
        return StreamCount.objects.filter(stream_id=key_id)
    if table == RemoteInstallationCount:
        return RemoteInstallationCount.objects.filter(server_id=key_id)
    if table == RemoteRealmCount:
        return RemoteRealmCount.objects.filter(realm_id=key_id)
    raise AssertionError(f"Unknown table: {table}")
def client_label_map(name: str) -> str:
    """Map a raw Client name to the label shown in the messages-by-client
    chart; unknown names pass through unchanged."""
    exact_labels = {
        "website": "Website",
        "ZulipElectron": "Desktop app",
        "ZulipAndroid": "Old Android app",
        "ZulipiOS": "Old iOS app",
        "ZulipMobile": "Mobile app",
        "ZulipPython": "Python API",
        "API: Python": "Python API",
    }
    if name in exact_labels:
        return exact_labels[name]
    if name.startswith("desktop app"):
        return "Old desktop app"
    if name.startswith("Zulip") and name.endswith("Webhook"):
        # "ZulipFooWebhook" -> "Foo webhook"
        return name[len("Zulip"):-len("Webhook")] + " webhook"
    return name
def rewrite_client_arrays(value_arrays: Dict[str, List[int]]) -> Dict[str, List[int]]:
    """Collapse per-client series whose raw names map to the same display
    label (see client_label_map) by summing them element-wise."""
    merged: Dict[str, List[int]] = {}
    for raw_label, series in value_arrays.items():
        display_label = client_label_map(raw_label)
        if display_label in merged:
            accumulated = merged[display_label]
            for idx, count in enumerate(series):
                accumulated[idx] += count
        else:
            merged[display_label] = list(series)
    return merged
def get_time_series_by_subgroup(stat: CountStat,
                                table: Type[BaseCount],
                                key_id: int,
                                end_times: List[datetime],
                                subgroup_to_label: Dict[Optional[str], str],
                                include_empty_subgroups: bool) -> Dict[str, List[int]]:
    """Fetch one stat's rows for one object and pivot them into
    {label: [value per end_time]} series; missing data points are 0.
    Subgroups absent from the table are omitted unless
    include_empty_subgroups is True."""
    queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
        .values_list('subgroup', 'end_time', 'value')
    # subgroup -> end_time -> value; the inner defaultdict(int) makes
    # missing end_times read as 0 when building the arrays below.
    value_dicts: Dict[Optional[str], Dict[datetime, int]] = defaultdict(lambda: defaultdict(int))
    for subgroup, end_time, value in queryset:
        value_dicts[subgroup][end_time] = value
    value_arrays = {}
    for subgroup, label in subgroup_to_label.items():
        if (subgroup in value_dicts) or include_empty_subgroups:
            value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
    if stat == COUNT_STATS['messages_sent:client:day']:
        # HACK: We rewrite these arrays to collapse the Client objects
        # with similar names into a single sum, and generally give
        # them better names
        return rewrite_client_arrays(value_arrays)
    return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title: str, cols: Sequence[str], rows: Sequence[Any], has_row_class: bool = False) -> str:
    """Render an ad-hoc HTML table via the analytics template.

    When has_row_class is False, each row is a plain sequence of cells and
    gets wrapped into the {'cells': ..., 'row_class': None} shape that the
    template expects.
    """
    if not has_row_class:
        rows = [dict(cells=row, row_class=None) for row in rows]
    data = dict(title=title, cols=cols, rows=rows)
    return loader.render_to_string(
        'analytics/ad_hoc_query.html',
        dict(data=data),
    )
def dictfetchall(cursor: connection.cursor) -> List[Dict[str, Any]]:
    """Return every remaining row from *cursor* as a dict keyed by the
    column names taken from cursor.description."""
    column_names = [col[0] for col in cursor.description]
    return [dict(zip(column_names, row)) for row in cursor.fetchall()]
def get_realm_day_counts() -> Dict[str, Dict[str, str]]:
    """Per-realm human-sent message counts for each of the last 8 days,
    pre-rendered as a row of color-coded <td> cells keyed under 'cnts'.
    Mirror/monitoring clients are excluded."""
    query = SQL('''
        select
            r.string_id,
            (now()::date - date_sent::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            date_sent > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.string_id,
            age
        order by
            r.string_id,
            age
    ''')
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()
    # string_id -> age (days ago) -> message count
    counts: Dict[str, Dict[int, int]] = defaultdict(dict)
    for row in rows:
        counts[row['string_id']][row['age']] = row['cnt']
    result = {}
    for string_id in counts:
        # raw_cnts[0] is today (always styled neutral, since the day is
        # incomplete); min/max over the 7 full days drive the coloring.
        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts[1:])
        max_cnt = max(raw_cnts[1:])
        def format_count(cnt: int, style: Optional[str]=None) -> str:
            if style is not None:
                good_bad = style
            elif cnt == min_cnt:
                good_bad = 'bad'
            elif cnt == max_cnt:
                good_bad = 'good'
            else:
                good_bad = 'neutral'
            return f'<td class="number {good_bad}">{cnt}</td>'
        cnts = (format_count(raw_cnts[0], 'neutral')
                + ''.join(map(format_count, raw_cnts[1:])))
        result[string_id] = dict(cnts=cnts)
    return result
def get_plan_name(plan_type: int) -> str:
    """Human-readable name for a Realm.plan_type integer code."""
    plan_names = ('', 'self hosted', 'limited', 'standard', 'open source')
    return plan_names[plan_type]
def realm_summary_table(realm_minutes: Dict[str, float]) -> str:
    """Render the per-realm activity summary table (HTML) for the
    installation activity page.

    Selects realms that are either on the standard plan (plan_type = 3)
    or have had human "send message"/"update pointer" activity in the
    last two weeks, together with daily/weekly active user counts, then
    augments each row with message history, estimated revenue (when
    billing is enabled), and per-realm online hours from *realm_minutes*
    (as produced by user_activity_intervals).
    """
    now = timezone_now()
    query = SQL('''
        SELECT
            realm.string_id,
            realm.date_created,
            realm.plan_type,
            coalesce(user_counts.dau_count, 0) dau_count,
            coalesce(wau_counts.wau_count, 0) wau_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND not is_bot
            ) user_profile_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND is_bot
            ) bot_count
        FROM zerver_realm realm
        LEFT OUTER JOIN
                (
                    SELECT
                        up.realm_id realm_id,
                        count(distinct(ua.user_profile_id)) dau_count
                    FROM zerver_useractivity ua
                    JOIN zerver_userprofile up
                        ON up.id = ua.user_profile_id
                    WHERE
                        up.is_active
                    AND (not up.is_bot)
                    AND
                        query in (
                            '/json/send_message',
                            'send_message_backend',
                            '/api/v1/send_message',
                            '/json/update_pointer',
                            '/json/users/me/pointer',
                            'update_pointer_backend'
                        )
                    AND
                        last_visit > now() - interval '1 day'
                    GROUP BY realm_id
                ) user_counts
                ON user_counts.realm_id = realm.id
        LEFT OUTER JOIN
                (
                    SELECT
                        realm_id,
                        count(*) wau_count
                    FROM (
                        SELECT
                            realm.id as realm_id,
                            up.delivery_email
                        FROM zerver_useractivity ua
                        JOIN zerver_userprofile up
                            ON up.id = ua.user_profile_id
                        JOIN zerver_realm realm
                            ON realm.id = up.realm_id
                        WHERE up.is_active
                        AND (not up.is_bot)
                        AND
                            ua.query in (
                                '/json/send_message',
                                'send_message_backend',
                                '/api/v1/send_message',
                                '/json/update_pointer',
                                '/json/users/me/pointer',
                                'update_pointer_backend'
                            )
                        GROUP by realm.id, up.delivery_email
                        HAVING max(last_visit) > now() - interval '7 day'
                    ) as wau_users
                    GROUP BY realm_id
                ) wau_counts
                ON wau_counts.realm_id = realm.id
        WHERE
            realm.plan_type = 3
            OR
            EXISTS (
                SELECT *
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    up.realm_id = realm.id
                AND up.is_active
                AND (not up.is_bot)
                AND
                    query in (
                        '/json/send_message',
                        '/api/v1/send_message',
                        'send_message_backend',
                        '/json/update_pointer',
                        '/json/users/me/pointer',
                        'update_pointer_backend'
                    )
                AND
                    last_visit > now() - interval '2 week'
            )
        ORDER BY dau_count DESC, string_id ASC
        ''')
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()
    # Fetch all the realm administrator users
    realm_admins: Dict[str, List[str]] = defaultdict(list)
    for up in UserProfile.objects.select_related("realm").filter(
        role=UserProfile.ROLE_REALM_ADMINISTRATOR,
        is_active=True,
    ):
        realm_admins[up.realm.string_id].append(up.delivery_email)
    for row in rows:
        row['date_created_day'] = row['date_created'].strftime('%Y-%m-%d')
        row['plan_type_string'] = get_plan_name(row['plan_type'])
        row['age_days'] = int((now - row['date_created']).total_seconds()
                              / 86400)
        row['is_new'] = row['age_days'] < 12 * 7
        row['realm_admin_email'] = ', '.join(realm_admins[row['string_id']])
    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row['history'] = counts[row['string_id']]['cnts']
        except Exception:
            row['history'] = ''
    # estimate annual subscription revenue
    total_amount = 0
    if settings.BILLING_ENABLED:
        from corporate.lib.stripe import estimate_annual_recurring_revenue_by_realm
        estimated_arrs = estimate_annual_recurring_revenue_by_realm()
        for row in rows:
            if row['string_id'] in estimated_arrs:
                row['amount'] = estimated_arrs[row['string_id']]
        total_amount += sum(estimated_arrs.values())
    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        string_id = row['string_id']
        minutes = realm_minutes.get(string_id, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row['hours'] = str(int(hours))
        try:
            row['hours_per_user'] = '{:.1f}'.format(hours / row['dau_count'])
        except Exception:
            # dau_count of 0 -> ZeroDivisionError; just omit the column.
            pass
    # formatting
    for row in rows:
        row['stats_link'] = realm_stats_link(row['string_id'])
        row['string_id'] = realm_activity_link(row['string_id'])
    # Count active sites
    def meets_goal(row: Dict[str, int]) -> bool:
        return row['dau_count'] >= 5
    num_active_sites = len(list(filter(meets_goal, rows)))
    # create totals
    total_dau_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_wau_count = 0
    for row in rows:
        total_dau_count += int(row['dau_count'])
        total_user_profile_count += int(row['user_profile_count'])
        total_bot_count += int(row['bot_count'])
        total_wau_count += int(row['wau_count'])
    total_row = dict(
        string_id='Total',
        plan_type_string="",
        amount=total_amount,
        stats_link = '',
        date_created_day='',
        realm_admin_email='',
        dau_count=total_dau_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        wau_count=total_wau_count,
    )
    rows.insert(0, total_row)
    content = loader.render_to_string(
        'analytics/realm_summary_table.html',
        dict(rows=rows, num_active_sites=num_active_sites,
             now=now.strftime('%Y-%m-%dT%H:%M:%SZ')),
    )
    return content
def user_activity_intervals() -> Tuple[mark_safe, Dict[str, float]]:
    """Summarize per-user online time over the last 24 hours.

    Returns a (preformatted HTML report, {realm string_id: minutes})
    tuple; the minutes dict feeds realm_summary_table.
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)
    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)
    all_intervals = UserActivityInterval.objects.filter(
        end__gte=day_start,
        start__lte=day_end,
    ).select_related(
        'user_profile',
        'user_profile__realm',
    ).only(
        'start',
        'end',
        'user_profile__delivery_email',
        'user_profile__realm__string_id',
    ).order_by(
        # groupby below requires the queryset to be ordered by the same
        # keys it groups on (realm, then email).
        'user_profile__realm__string_id',
        'user_profile__delivery_email',
    )
    by_string_id = lambda row: row.user_profile.realm.string_id
    by_email = lambda row: row.user_profile.delivery_email
    realm_minutes = {}
    for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
        realm_duration = timedelta(0)
        output += f'<hr>{string_id}\n'
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24-hour window before summing.
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start
            total_duration += duration
            realm_duration += duration
            output += f"  {email:<37}{duration}\n"
        realm_minutes[string_id] = realm_duration.total_seconds() / 60
    output += f"\nTotal Duration: {total_duration}\n"
    output += f"\nTotal Duration in minutes: {total_duration.total_seconds() / 60.}\n"
    output += f"Total Duration amortized to a month: {total_duration.total_seconds() * 30. / 60.}"
    content = mark_safe('<pre>' + output + '</pre>')
    return content, realm_minutes
def sent_messages_report(realm: str) -> str:
    """Render an HTML table of daily human vs. bot message counts for the
    given realm (by string_id) over the last two weeks; days with no
    messages appear with NULL counts thanks to generate_series."""
    title = 'Recently sent messages for ' + realm
    cols = [
        'Date',
        'Humans',
        'Bots',
    ]
    query = SQL('''
        select
            series.day::date,
            humans.cnt,
            bots.cnt
        from (
            select generate_series(
                (now()::date - interval '2 week'),
                now()::date,
                interval '1 day'
            ) as day
        ) as series
        left join (
            select
                date_sent::date date_sent,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                (not up.is_bot)
            and
                date_sent > now() - interval '2 week'
            group by
                date_sent::date
            order by
                date_sent::date
        ) humans on
            series.day = humans.date_sent
        left join (
            select
                date_sent::date date_sent,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.string_id = %s
            and
                up.is_bot
            and
                date_sent > now() - interval '2 week'
            group by
                date_sent::date
            order by
                date_sent::date
        ) bots on
            series.day = bots.date_sent
    ''')
    cursor = connection.cursor()
    # The realm string_id is interpolated twice (human and bot subqueries).
    cursor.execute(query, [realm, realm])
    rows = cursor.fetchall()
    cursor.close()
    return make_table(title, cols, rows)
def ad_hoc_queries() -> List[Dict[str, str]]:
    """Run the canned installation-wide SQL reports.

    Returns:
        A list of pages, each a dict with 'title' and 'content' (an HTML
        table rendered by make_table).
    """
    def get_page(query: Composable, cols: Sequence[str], title: str,
                 totals_columns: Sequence[int]=[]) -> Dict[str, str]:
        # Execute `query` and render it as a titled HTML table. Columns named
        # 'Realm' / 'Last time' / 'Last visit' / 'Hostname' get linkified or
        # date-formatted. `totals_columns` lists column indexes to sum into a
        # "Total" row prepended to the table. (The mutable default is only
        # read, never mutated, so the shared-default pitfall does not bite.)
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()
        def fix_rows(i: int,
                     fixup_func: Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None:
            # Rewrite column i of every row in place with fixup_func.
            for row in rows:
                row[i] = fixup_func(row[i])
        total_row = []
        for i, col in enumerate(cols):
            if col == 'Realm':
                fix_rows(i, realm_activity_link)
            elif col in ['Last time', 'Last visit']:
                fix_rows(i, format_date_for_activity_reports)
            elif col == 'Hostname':
                # row[0] is assumed to be the server id for the stats link.
                for row in rows:
                    row[i] = remote_installation_stats_link(row[0], row[i])
            if len(totals_columns) > 0:
                if i == 0:
                    total_row.append("Total")
                elif i in totals_columns:
                    total_row.append(str(sum(row[i] for row in rows if row[i] is not None)))
                else:
                    total_row.append('')
        if len(totals_columns) > 0:
            rows.insert(0, total_row)
        content = make_table(title, cols, rows)
        return dict(
            content=content,
            title=title,
        )
    pages = []
    ### Mobile usage, one page per mobile client type.
    for mobile_type in ['Android', 'ZulipiOS']:
        title = f'{mobile_type} usage'
        query = SQL('''
            select
                realm.string_id,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like {mobile_type}
            group by string_id, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by string_id, up.id, client.name
        ''').format(
            # Literal() safely embeds the client name into the SQL text.
            mobile_type=Literal(mobile_type),
        )
        cols = [
            'Realm',
            'User id',
            'Name',
            'Hits',
            'Last time',
        ]
        pages.append(get_page(query, cols, title))
    ### Desktop client usage per realm.
    title = 'Desktop users'
    query = SQL('''
        select
            realm.string_id,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by string_id, client.name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client.name
    ''')
    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time',
    ]
    pages.append(get_page(query, cols, title))
    ### Integration traffic, grouped realm-first.
    title = 'Integrations by realm'
    query = SQL('''
        select
            realm.string_id,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by string_id, client_name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client_name
    ''')
    cols = [
        'Realm',
        'Client',
        'Hits',
        'Last time',
    ]
    pages.append(get_page(query, cols, title))
    ### Same data as above, grouped client-first.
    title = 'Integrations by client'
    query = SQL('''
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.string_id,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, string_id
        having max(last_visit) > now() - interval '2 week'
        order by client_name, string_id
    ''')
    cols = [
        'Client',
        'Realm',
        'Hits',
        'Last time',
    ]
    pages.append(get_page(query, cols, title))
    # Remote servers that phone home analytics / push registrations.
    title = 'Remote Zulip servers'
    query = SQL('''
        with icount as (
            select
                server_id,
                max(value) as max_value,
                max(end_time) as max_end_time
            from zilencer_remoteinstallationcount
            where
                property='active_users:is_bot:day'
                and subgroup='false'
            group by server_id
            ),
        remote_push_devices as (
            select server_id, count(distinct(user_id)) as push_user_count from zilencer_remotepushdevicetoken
            group by server_id
        )
        select
            rserver.id,
            rserver.hostname,
            rserver.contact_email,
            max_value,
            push_user_count,
            max_end_time
        from zilencer_remotezulipserver rserver
        left join icount on icount.server_id = rserver.id
        left join remote_push_devices on remote_push_devices.server_id = rserver.id
        order by max_value DESC NULLS LAST, push_user_count DESC NULLS LAST
    ''')
    cols = [
        'ID',
        'Hostname',
        'Contact email',
        'Analytics users',
        'Mobile users',
        'Last update time',
    ]
    # Sum the user-count columns into a leading "Total" row.
    pages.append(get_page(query, cols, title,
                          totals_columns=[3, 4]))
    return pages
@require_server_admin
@has_request_variables
def get_activity(request: HttpRequest) -> HttpResponse:
    """Server-wide activity dashboard: counts, durations, and ad-hoc reports."""
    duration_content, realm_minutes = user_activity_intervals()
    counts_content: str = realm_summary_table(realm_minutes)
    data = [
        ('Counts', counts_content),
        ('Durations', duration_content),
    ]
    # Append every canned report as its own tab.
    data.extend((page['title'], page['content']) for page in ad_hoc_queries())
    return render(
        request,
        'analytics/activity.html',
        context=dict(data=data, title='Activity', is_home=True),
    )
def get_confirmations(types: List[int], object_ids: List[int],
                      hostname: Optional[str]=None) -> List[Dict[str, Any]]:
    """Describe recent (last 30 days) Confirmation links for given objects.

    Args:
        types: Confirmation.type values to include.
        object_ids: content-object ids (or objects coerced to ids) to match.
        hostname: NOTE(review): accepted but never read in this body --
            confirm whether it was meant to feed confirmation_url.
    Returns:
        One dict per confirmation with the target object, its URL, type,
        click status, and remaining validity.
    """
    lowest_datetime = timezone_now() - timedelta(days=30)
    confirmations = Confirmation.objects.filter(type__in=types, object_id__in=object_ids,
                                                date_sent__gte=lowest_datetime)
    confirmation_dicts = []
    for confirmation in confirmations:
        realm = confirmation.realm
        content_object = confirmation.content_object
        type = confirmation.type
        # Validity window is defined per confirmation type.
        days_to_activate = _properties[type].validity_in_days
        expiry_date = confirmation.date_sent + timedelta(days=days_to_activate)
        if hasattr(content_object, "status"):
            if content_object.status == STATUS_ACTIVE:
                link_status = "Link has been clicked"
            else:
                link_status = "Link has never been clicked"
        else:
            # Objects without a status field get no click information.
            link_status = ""
        if timezone_now() < expiry_date:
            expires_in = timesince(confirmation.date_sent, expiry_date)
        else:
            expires_in = "Expired"
        url = confirmation_url(confirmation.confirmation_key, realm, type)
        confirmation_dicts.append({"object": confirmation.content_object,
                                   "url": url, "type": type, "link_status": link_status,
                                   "expires_in": expires_in})
    return confirmation_dicts
@require_server_admin
def support(request: HttpRequest) -> HttpResponse:
    """Staff support page: mutate a realm's billing/status via POST and
    search users/realms/confirmations via the GET parameter ``q``.
    """
    context: Dict[str, Any] = {}
    if settings.BILLING_ENABLED and request.method == "POST":
        # We check that request.POST only has two keys in it: The
        # realm_id and a field to change.
        keys = set(request.POST.keys())
        if "csrfmiddlewaretoken" in keys:
            keys.remove("csrfmiddlewaretoken")
        if len(keys) != 2:
            return json_error(_("Invalid parameters"))
        realm_id = request.POST.get("realm_id")
        realm = Realm.objects.get(id=realm_id)
        # Exactly one of the following mutation fields is expected per POST.
        if request.POST.get("plan_type", None) is not None:
            new_plan_type = int(request.POST.get("plan_type"))
            current_plan_type = realm.plan_type
            do_change_plan_type(realm, new_plan_type)
            msg = f"Plan type of {realm.name} changed from {get_plan_name(current_plan_type)} to {get_plan_name(new_plan_type)} "
            context["message"] = msg
        elif request.POST.get("discount", None) is not None:
            new_discount = Decimal(request.POST.get("discount"))
            current_discount = get_discount_for_realm(realm)
            attach_discount_to_realm(realm, new_discount)
            msg = f"Discount of {realm.name} changed to {new_discount} from {current_discount} "
            context["message"] = msg
        elif request.POST.get("status", None) is not None:
            status = request.POST.get("status")
            if status == "active":
                # Reactivation requires admin confirmation via email.
                do_send_realm_reactivation_email(realm)
                context["message"] = f"Realm reactivation email sent to admins of {realm.name}."
            elif status == "deactivated":
                do_deactivate_realm(realm, request.user)
                context["message"] = f"{realm.name} deactivated."
        elif request.POST.get("sponsorship_pending", None) is not None:
            sponsorship_pending = request.POST.get("sponsorship_pending")
            if sponsorship_pending == "true":
                update_sponsorship_status(realm, True)
                context["message"] = f"{realm.name} marked as pending sponsorship."
            elif sponsorship_pending == "false":
                update_sponsorship_status(realm, False)
                context["message"] = f"{realm.name} is no longer pending sponsorship."
        elif request.POST.get('approve_sponsorship') is not None:
            if request.POST.get('approve_sponsorship') == "approve_sponsorship":
                approve_sponsorship(realm)
                context["message"] = f"Sponsorship approved for {realm.name}"
        elif request.POST.get("scrub_realm", None) is not None:
            if request.POST.get("scrub_realm") == "scrub_realm":
                do_scrub_realm(realm, acting_user=request.user)
                context["message"] = f"{realm.name} scrubbed."
    query = request.GET.get("q", None)
    if query:
        # The search box accepts emails, realm string_ids, and URLs mixed.
        key_words = get_invitee_emails_set(query)
        context["users"] = UserProfile.objects.filter(delivery_email__in=key_words)
        realms = set(Realm.objects.filter(string_id__in=key_words))
        for key_word in key_words:
            try:
                # If the term parses as a URL, resolve its subdomain to a realm.
                URLValidator()(key_word)
                parse_result = urllib.parse.urlparse(key_word)
                hostname = parse_result.hostname
                assert hostname is not None
                if parse_result.port:
                    hostname = f"{hostname}:{parse_result.port}"
                subdomain = get_subdomain_from_hostname(hostname)
                try:
                    realms.add(get_realm(subdomain))
                except Realm.DoesNotExist:
                    pass
            except ValidationError:
                # Not a URL; skip silently.
                pass
        for realm in realms:
            # Annotate each realm with billing state for the template.
            realm.customer = get_customer_by_realm(realm)
            current_plan = get_current_plan_by_realm(realm)
            if current_plan is not None:
                new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(current_plan, timezone_now())
                if last_ledger_entry is not None:
                    if new_plan is not None:
                        realm.current_plan = new_plan
                    else:
                        realm.current_plan = current_plan
                    realm.current_plan.licenses = last_ledger_entry.licenses
                    realm.current_plan.licenses_used = get_latest_seat_count(realm)
        context["realms"] = realms
        confirmations: List[Dict[str, Any]] = []
        preregistration_users = PreregistrationUser.objects.filter(email__in=key_words)
        confirmations += get_confirmations([Confirmation.USER_REGISTRATION, Confirmation.INVITATION,
                                            Confirmation.REALM_CREATION], preregistration_users,
                                           hostname=request.get_host())
        multiuse_invites = MultiuseInvite.objects.filter(realm__in=realms)
        confirmations += get_confirmations([Confirmation.MULTIUSE_INVITE], multiuse_invites)
        confirmations += get_confirmations([Confirmation.REALM_REACTIVATION], [realm.id for realm in realms])
        context["confirmations"] = confirmations
    def realm_admin_emails(realm: Realm) -> str:
        # Comma-separated human-admin emails for display in the template.
        return ", ".join(realm.get_human_admin_users().order_by('delivery_email').values_list(
            "delivery_email", flat=True))
    context["realm_admin_emails"] = realm_admin_emails
    context["get_discount_for_realm"] = get_discount_for_realm
    context["realm_icon_url"] = realm_icon_url
    context["Confirmation"] = Confirmation
    return render(request, 'analytics/support.html', context=context)
def get_user_activity_records_for_realm(realm: str, is_bot: bool) -> QuerySet:
    """Active UserActivity rows for one realm, sorted by email then
    most-recent visit first; only the fields the report tables need."""
    wanted_fields = (
        'user_profile__full_name',
        'user_profile__delivery_email',
        'query',
        'client__name',
        'count',
        'last_visit',
    )
    return UserActivity.objects.filter(
        user_profile__realm__string_id=realm,
        user_profile__is_active=True,
        user_profile__is_bot=is_bot,
    ).order_by(
        "user_profile__delivery_email", "-last_visit",
    ).select_related('user_profile', 'client').only(*wanted_fields)
def get_user_activity_records_for_email(email: str) -> List[QuerySet]:
    """UserActivity rows for one delivery email, most recent visit first."""
    wanted_fields = (
        'user_profile__full_name',
        'query',
        'client__name',
        'count',
        'last_visit',
    )
    return UserActivity.objects.filter(
        user_profile__delivery_email=email,
    ).order_by("-last_visit").select_related('user_profile', 'client').only(*wanted_fields)
def raw_user_activity_table(records: List[QuerySet]) -> str:
    """Render every raw UserActivity record as an HTML table."""
    cols = [
        'query',
        'client',
        'count',
        'last_visit',
    ]
    rows = [
        [
            record.query,
            record.client.name,
            record.count,
            format_date_for_activity_reports(record.last_visit),
        ]
        for record in records
    ]
    return make_table('Raw Data', cols, rows)
def get_user_activity_summary(records: List[QuerySet]) -> Dict[str, Dict[str, Any]]:
    """Aggregate one user's activity records into per-action totals.

    Each key (except 'name') maps to dict(count=..., last_visit=...).
    Synthetic keys: 'use' (everything), 'send', 'pointer', 'desktop',
    'website', plus one key per client name.

    #: `Any` in the return type stands for `Union[int, datetime]`; spelling
    #: that out everywhere made the inner helpers unreadable.
    """
    summary: Dict[str, Dict[str, Any]] = {}

    def update(action: str, record: QuerySet) -> None:
        entry = summary.get(action)
        if entry is None:
            summary[action] = dict(
                count=record.count,
                last_visit=record.last_visit,
            )
        else:
            entry['count'] += record.count
            entry['last_visit'] = max(entry['last_visit'], record.last_visit)

    if records:
        summary['name'] = records[0].user_profile.full_name
    for record in records:
        client = record.client.name
        query = record.query
        update('use', record)
        if client == 'API':
            m = re.match('/api/.*/external/(.*)', query)
            if m:
                # Integration traffic: rebucket under the integration name.
                # NOTE(review): combined with the unconditional update(client)
                # below, external API hits are counted twice under this key --
                # behavior preserved as-is.
                client = m.group(1)
                update(client, record)
        if client.startswith('desktop'):
            update('desktop', record)
        if client == 'website':
            update('website', record)
        if ('send_message' in query) or re.search('/api/.*/external/.*', query):
            update('send', record)
        if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer',
                     'update_pointer_backend']:
            update('pointer', record)
        update(client, record)
    return summary
def format_date_for_activity_reports(date: Optional[datetime]) -> str:
    """Format a datetime in the report timezone, or '' for missing dates."""
    if not date:
        return ''
    return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
def user_activity_link(email: str) -> mark_safe:
    """HTML anchor from an email to that user's activity page."""
    url = reverse('analytics.views.get_user_activity', kwargs=dict(email=email))
    return mark_safe(f'<a href="{url}">{email}</a>')
def realm_activity_link(realm_str: str) -> mark_safe:
    """HTML anchor from a realm string_id to that realm's activity page."""
    url = reverse('analytics.views.get_realm_activity', kwargs=dict(realm_str=realm_str))
    return mark_safe(f'<a href="{url}">{realm_str}</a>')
def realm_stats_link(realm_str: str) -> mark_safe:
    """HTML anchor (with chart icon) to a realm's stats page."""
    url = reverse('analytics.views.stats_for_realm', kwargs=dict(realm_str=realm_str))
    return mark_safe(f'<a href="{url}"><i class="fa fa-pie-chart"></i>{realm_str}</a>')
def remote_installation_stats_link(server_id: int, hostname: str) -> mark_safe:
    """HTML anchor (with chart icon) to a remote server's stats page."""
    url = reverse('analytics.views.stats_for_remote_installation',
                  kwargs=dict(remote_server_id=server_id))
    return mark_safe(f'<a href="{url}"><i class="fa fa-pie-chart"></i>{hostname}</a>')
def realm_client_table(user_summaries: Dict[str, Dict[str, Dict[str, Any]]]) -> str:
    """One HTML table of per-client usage across a realm's users, newest first.

    Keys listed in exclude_keys are synthetic aggregates (not real clients)
    and are skipped.
    """
    exclude_keys = [
        'internal',
        'name',
        'use',
        'send',
        'pointer',
        'website',
        'desktop',
    ]
    rows = []
    for email, user_summary in user_summaries.items():
        email_link = user_activity_link(email)
        name = user_summary['name']
        for client, stats in user_summary.items():
            if client in exclude_keys:
                continue
            rows.append([
                format_date_for_activity_reports(stats['last_visit']),
                client,
                name,
                email_link,
                stats['count'],
            ])
    rows.sort(key=lambda r: r[0], reverse=True)
    cols = [
        'Last visit',
        'Client',
        'Name',
        'Email',
        'Count',
    ]
    return make_table('Clients', cols, rows)
def user_activity_summary_table(user_summary: Dict[str, Dict[str, Any]]) -> str:
    """Render one user's per-action activity summary as an HTML table,
    most recent first ('name' is metadata, not an action)."""
    rows = [
        [
            format_date_for_activity_reports(stats['last_visit']),
            action,
            stats['count'],
        ]
        for action, stats in user_summary.items()
        if action != 'name'
    ]
    rows.sort(key=lambda r: r[0], reverse=True)
    cols = [
        'last_visit',
        'client',
        'count',
    ]
    return make_table('User Activity', cols, rows)
def realm_user_summary_table(all_records: List[QuerySet],
                             admin_emails: Set[str]) -> Tuple[Dict[str, Dict[str, Any]], str]:
    """Per-user summary table for a realm.

    Returns (summaries keyed by delivery email, rendered HTML table).
    Rows for admins and recently-active users get CSS classes.
    """
    user_records = {}

    def by_email(record: QuerySet) -> str:
        return record.user_profile.delivery_email

    # all_records arrives sorted by email, as itertools.groupby requires.
    for email, records in itertools.groupby(all_records, by_email):
        user_records[email] = get_user_activity_summary(list(records))

    def last_visit_of(user_summary: Dict[str, Dict[str, datetime]], k: str) -> Optional[datetime]:
        entry = user_summary.get(k)
        return entry['last_visit'] if entry else None

    def count_of(user_summary: Dict[str, Dict[str, str]], k: str) -> str:
        entry = user_summary.get(k)
        return entry['count'] if entry else ''

    def is_recent(val: Optional[datetime]) -> bool:
        # "Recent" means within the last five minutes.
        return (timezone_now() - val).total_seconds() < 5 * 60

    rows = []
    for email, user_summary in user_records.items():
        cells = [user_summary['name'], user_activity_link(email),
                 count_of(user_summary, 'send')]
        row_class = ''
        for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
            visit = last_visit_of(user_summary, field)
            if field == 'use':
                # Row styling keys off overall ('use') activity and admin status.
                if visit and is_recent(visit):
                    row_class += ' recently_active'
                if email in admin_emails:
                    row_class += ' admin'
            cells.append(format_date_for_activity_reports(visit))
        rows.append(dict(cells=cells, row_class=row_class))

    # Sort by the formatted 'Heard from' date string, newest first.
    rows = sorted(rows, key=lambda row: row['cells'][3], reverse=True)
    cols = [
        'Name',
        'Email',
        'Total sent',
        'Heard from',
        'Message sent',
        'Pointer motion',
        'Desktop',
        'ZulipiOS',
        'Android',
    ]
    content = make_table('Summary', cols, rows, has_row_class=True)
    return user_records, content
@require_server_admin
def get_realm_activity(request: HttpRequest, realm_str: str) -> HttpResponse:
    """Per-realm activity dashboard: humans, bots, clients, and history."""
    try:
        admins = Realm.objects.get(string_id=realm_str).get_human_admin_users()
    except Realm.DoesNotExist:
        return HttpResponseNotFound(f"Realm {realm_str} does not exist")
    admin_emails = {admin.delivery_email for admin in admins}
    data: List[Tuple[str, str]] = []
    all_user_records: Dict[str, Any] = {}
    for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
        all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
        user_records, content = realm_user_summary_table(all_records, admin_emails)
        all_user_records.update(user_records)
        data.append((page_title, content))
    data.append(('Clients', realm_client_table(all_user_records)))
    data.append(('History', sent_messages_report(realm_str)))
    return render(
        request,
        'analytics/activity.html',
        context=dict(data=data, realm_link=None, title=realm_str),
    )
@require_server_admin
def get_user_activity(request: HttpRequest, email: str) -> HttpResponse:
    """Per-user activity dashboard, keyed by delivery email."""
    records = get_user_activity_records_for_email(email)
    user_summary = get_user_activity_summary(records)
    data: List[Tuple[str, str]] = [
        ('Summary', user_activity_summary_table(user_summary)),
        ('Info', raw_user_activity_table(records)),
    ]
    return render(
        request,
        'analytics/activity.html',
        context=dict(data=data, title=email),
    )
|
brainwane/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 56,863
|
[
"VisIt"
] |
3af7b35da34ca1c1e15e86a2021914c227d025d8e3b9f56b4319c0a98f94e7fa
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes representing non-periodic and periodic sites.
"""
import collections
import numpy as np
from monty.json import MSONable
from monty.dev import deprecated
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, DummySpecie, \
get_el_sp
from pymatgen.util.coord import pbc_diff
from pymatgen.core.composition import Composition
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 17, 2012"
class Site(collections.abc.Hashable, MSONable):
    """
    A generalized *non-periodic* site. This is essentially a composition
    at a point in space, with some optional properties associated with it. A
    Composition is used to represent the atoms and occupancy, which allows for
    disordered site representation. Coords are given in standard cartesian
    coordinates.
    """

    # Absolute tolerance used when comparing cartesian coordinates in __eq__.
    position_atol = 1e-5

    def __init__(self, atoms_n_occu, coords, properties=None):
        """
        Create a *non-periodic* site.

        Args:
            atoms_n_occu: Species on the site. Can be:
                i.  A Composition-type object (preferred)
                ii. An element / specie specified either as a string
                    symbols, e.g. "Li", "Fe2+", "P" or atomic numbers,
                    e.g., 3, 56, or actual Element or Specie objects.
                iii.Dict of elements/species and occupancies, e.g.,
                    {"Fe" : 0.5, "Mn":0.5}. This allows the setup of
                    disordered structures.
            coords: Cartesian coordinates of site.
            properties: Properties associated with the site as a dict, e.g.
                {"magmom": 5}. Defaults to None.

        Raises:
            ValueError: if the species occupancies sum to more than 1.
        """
        # Delegate normalization and occupancy validation to the `species`
        # setter; the same logic used to be duplicated verbatim here.
        self.species = atoms_n_occu
        self.coords = np.array(coords)
        self.properties = properties or {}

    def __getattr__(self, a):
        # Expose entries of `properties` as attributes, e.g. site.magmom.
        # Overriding getattr doesn't play nice with pickle, so we
        # can't use self._properties.
        p = object.__getattribute__(self, 'properties')
        if a in p:
            return p[a]
        raise AttributeError(a)

    @property
    def species(self) -> Composition:
        """
        :return: The species on the site as a composition, e.g., Fe0.5Mn0.5.
        """
        return self._species

    @species.setter
    def species(self, species):
        """
        Set the species, accepting anything a Composition can be built from.

        Raises:
            ValueError: if the occupancies sum to more than 1 (within
                Composition.amount_tolerance).
        """
        if not isinstance(species, Composition):
            # Compositions are immutable, so an existing one is used as-is.
            try:
                species = Composition({get_el_sp(species): 1})
            except TypeError:
                species = Composition(species)
        totaloccu = species.num_atoms
        if totaloccu > 1 + Composition.amount_tolerance:
            raise ValueError("Species occupancies sum to more than 1!")
        self._species = species

    @property
    def x(self):
        """
        Cartesian x coordinate
        """
        return self.coords[0]

    @x.setter
    def x(self, x: float):
        self.coords[0] = x

    @property
    def y(self):
        """
        Cartesian y coordinate
        """
        return self.coords[1]

    @y.setter
    def y(self, y: float):
        self.coords[1] = y

    @property
    def z(self):
        """
        Cartesian z coordinate
        """
        return self.coords[2]

    @z.setter
    def z(self, z: float):
        self.coords[2] = z

    def distance(self, other):
        """
        Get distance between two sites.

        Args:
            other: Other site.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(other.coords - self.coords)

    def distance_from_point(self, pt):
        """
        Returns distance between the site and a point in space.

        Args:
            pt: Cartesian coordinates of point.

        Returns:
            Distance (float)
        """
        return np.linalg.norm(np.array(pt) - self.coords)

    @property
    def species_string(self):
        """
        String representation of species on the site.
        """
        if self.is_ordered:
            return list(self.species.keys())[0].__str__()
        sorted_species = sorted(self.species.keys())
        return ", ".join(["{}:{:.3f}".format(sp, self.species[sp])
                          for sp in sorted_species])

    @property  # type: ignore
    @deprecated(message="Use site.species instead. This will be deprecated with effect from pymatgen 2020.")
    def species_and_occu(self):
        """
        The species at the site, i.e., a Composition mapping type of
        element/species to occupancy.
        """
        return self.species

    @property
    def specie(self):
        """
        The Specie/Element at the site. Only works for ordered sites. Otherwise
        an AttributeError is raised. Use this property sparingly.  Robust
        design should make use of the property species_and_occu instead.

        Raises:
            AttributeError if Site is not ordered.
        """
        if not self.is_ordered:
            raise AttributeError("specie property only works for ordered "
                                 "sites!")
        return list(self.species.keys())[0]

    @property
    def is_ordered(self):
        """
        True if site is an ordered site, i.e., with a single species with
        occupancy 1.
        """
        totaloccu = self.species.num_atoms
        return totaloccu == 1 and len(self.species) == 1

    def __getitem__(self, el):
        """
        Get the occupancy for element
        """
        return self.species[el]

    def __eq__(self, other):
        """
        Site is equal to another site if the species and occupancies are the
        same, and the coordinates are the same to some tolerance.  numpy
        function `allclose` is used to determine if coordinates are close.
        """
        if other is None:
            return False
        return (self.species == other.species and
                np.allclose(self.coords, other.coords,
                            atol=Site.position_atol) and
                self.properties == other.properties)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        """
        Minimally effective hash function that just distinguishes between Sites
        with different elements.
        """
        return sum([el.Z for el in self.species.keys()])

    def __contains__(self, el):
        return el in self.species

    def __repr__(self):
        return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format(
            self.species_string, *self.coords)

    def __lt__(self, other):
        """
        Sets a default sort order for atomic species by electronegativity. Very
        useful for getting correct formulas.  For example, FeO4PLi is
        automatically sorted in LiFePO4.
        """
        if self.species.average_electroneg < other.species.average_electroneg:
            return True
        if self.species.average_electroneg > other.species.average_electroneg:
            return False
        if self.species_string < other.species_string:
            return True
        if self.species_string > other.species_string:
            return False
        return False

    def __str__(self):
        return "{} {}".format(self.coords, self.species_string)

    def as_dict(self):
        """
        Json-serializable dict representation for Site.
        """
        species_list = []
        for spec, occu in self.species.items():
            d = spec.as_dict()
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        # Fixed: "properties" used to be assigned twice (once in the dict
        # literal and again behind `if self.properties:`); the conditional
        # re-assignment was dead code with identical effect.
        return {"name": self.species_string, "species": species_list,
                "xyz": [float(c) for c in self.coords],
                "properties": self.properties,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d: dict):
        """
        Create Site from dict representation
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Reconstruct Specie / DummySpecie / Element depending on whether
            # an oxidation state is present and the symbol is a real element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        return cls(atoms_n_occu, d["xyz"], properties=props)
class PeriodicSite(Site, MSONable):
"""
Extension of generic Site object to periodic systems.
PeriodicSite includes a lattice system.
"""
def __init__(self, atoms_n_occu, coords, lattice, to_unit_cell=False,
coords_are_cartesian=False, properties=None):
"""
Create a periodic site.
Args:
atoms_n_occu: Species on the site. Can be:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (3x1 array or sequence): Coordinates of site as fractional
or cartesian coordinates.
lattice: Lattice associated with the site
to_unit_cell (bool): Translates fractional coordinate to the
basic unit cell, i.e. all fractional coordinates satisfy 0
<= a < 1. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
cartesian coordinates. Defaults to False.
properties (dict): Properties associated with the PeriodicSite,
e.g., {"magmom":5}. Defaults to None.
"""
if coords_are_cartesian:
frac_coords = lattice.get_fractional_coords(coords)
cart_coords = coords
else:
frac_coords = np.array(coords)
cart_coords = lattice.get_cartesian_coords(coords)
if to_unit_cell:
frac_coords = np.mod(frac_coords, 1)
cart_coords = lattice.get_cartesian_coords(frac_coords)
if isinstance(atoms_n_occu, Composition):
# Compositions are immutable, so don't need to copy (much faster)
species = atoms_n_occu
else:
try:
species = Composition({get_el_sp(atoms_n_occu): 1})
except TypeError:
species = Composition(atoms_n_occu)
totaloccu = species.num_atoms
if totaloccu > 1 + Composition.amount_tolerance:
raise ValueError("Species occupancies sum to more than 1!")
self._lattice = lattice
self._frac_coords = frac_coords
self._species = species
self._coords = np.array(cart_coords)
self.properties = properties or {}
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between Sites
with different elements.
"""
return sum([el.Z for el in self.species.keys()])
@property
def lattice(self):
"""
Lattice associated with PeriodicSite
"""
return self._lattice
@lattice.setter
def lattice(self, lattice: Lattice):
"""
Sets Lattice associated with PeriodicSite
"""
self._lattice = lattice
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def coords(self):
"""
Cartesian coordinates
"""
return self._coords
@coords.setter
def coords(self, coords):
"""
Set Cartesian coordinates
"""
self._coords = np.array(coords)
self._frac_coords = self._lattice.get_fractional_coords(self._coords)
@property
def frac_coords(self):
"""
Fractional coordinates
"""
return self._frac_coords
@frac_coords.setter
def frac_coords(self, frac_coords):
"""
Set fractional coordinates
"""
self._frac_coords = np.array(frac_coords)
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def a(self):
"""
Fractional a coordinate
"""
return self._frac_coords[0]
@a.setter
def a(self, a):
self._frac_coords[0] = a
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def b(self):
"""
Fractional b coordinate
"""
return self._frac_coords[1]
@b.setter
def b(self, b):
self._frac_coords[1] = b
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def c(self):
"""
Fractional c coordinate
"""
return self._frac_coords[2]
@c.setter
def c(self, c):
self._frac_coords[2] = c
self._coords = self._lattice.get_cartesian_coords(self._frac_coords)
@property
def x(self):
"""
Cartesian x coordinate
"""
return self._coords[0]
@x.setter
def x(self, x):
self._coords[0] = x
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
@property
def y(self):
"""
Cartesian y coordinate
"""
return self._coords[1]
@y.setter
def y(self, y):
self._coords[1] = y
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
@property
def z(self):
"""
Cartesian z coordinate
"""
return self._coords[2]
@z.setter
def z(self, z):
self._coords[2] = z
self._frac_coords = self._lattice.get_fractional_coords(self.coords)
def to_unit_cell(self, in_place=False):
"""
Move frac coords to within the unit cell cell.
"""
frac_coords = np.mod(self.frac_coords, 1)
if in_place:
self.frac_coords = frac_coords
return None
return PeriodicSite(self.species, frac_coords, self.lattice,
properties=self.properties)
def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
"""
Returns True if sites are periodic images of each other.
Args:
other (PeriodicSite): Other site
tolerance (float): Tolerance to compare fractional coordinates
check_lattice (bool): Whether to check if the two sites have the
same lattice.
Returns:
bool: True if sites are periodic images of each other.
"""
if check_lattice and self.lattice != other.lattice:
return False
if self.species != other.species:
return False
frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
def __eq__(self, other):
return self.species == other.species and \
self.lattice == other.lattice and \
np.allclose(self.coords, other.coords,
atol=Site.position_atol) and \
self.properties == other.properties
def __ne__(self, other):
return not self.__eq__(other)
    def distance_and_image_from_frac_coords(self, fcoords, jimage=None):
        """
        Gets distance between site and a fractional coordinate assuming
        periodic boundary conditions. If the index jimage of two sites atom j
        is not specified it selects the j image nearest to the i atom and
        returns the distance and jimage indices in terms of lattice vector
        translations. If the index jimage of atom j is specified it returns the
        distance between the i atom and the specified jimage atom, the given
        jimage is also returned.

        Args:
            fcoords (3x1 array): fcoords to get distance from.
            jimage (3x1 array): Specific periodic image in terms of
                lattice translations, e.g., [1,0,0] implies to take periodic
                image that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        # Pure delegation: the minimum-image computation lives on the lattice.
        return self.lattice.get_distance_and_image(self.frac_coords, fcoords,
                                                   jimage=jimage)
    def distance_and_image(self, other, jimage=None):
        """
        Gets distance and image between two sites assuming periodic boundary
        conditions. If the index jimage of two sites atom j is not specified it
        selects the j image nearest to the i atom and returns the distance and
        jimage indices in terms of lattice vector translations. If the index
        jimage of atom j is specified it returns the distance between the ith
        atom and the specified jimage atom, the given jimage is also returned.

        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            (distance, jimage): distance and periodic lattice translations
            of the other site for which the distance applies.
        """
        # Delegates to the fractional-coordinate variant using other's coords.
        return self.distance_and_image_from_frac_coords(other.frac_coords, jimage)
    def distance(self, other, jimage=None):
        """
        Get distance between two sites assuming periodic boundary conditions.

        Args:
            other (PeriodicSite): Other site to get distance from.
            jimage (3x1 array): Specific periodic image in terms of lattice
                translations, e.g., [1,0,0] implies to take periodic image
                that is one a-lattice vector away. If jimage is None,
                the image that is nearest to the site is found.

        Returns:
            distance (float): Distance between the two sites
        """
        # distance_and_image returns (distance, jimage); only the distance
        # is wanted here.
        return self.distance_and_image(other, jimage)[0]
def __repr__(self):
return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, " \
"{:.4f}]".format(self.species_string, self.coords[0],
self.coords[1], self.coords[2],
*self._frac_coords)
    def as_dict(self, verbosity=0):
        """
        Json-serializable dict representation of PeriodicSite.

        Args:
            verbosity (int): Verbosity level. Default of 0 only includes the
                matrix representation. Set to 1 for more details such as
                cartesian coordinates, etc.
        """
        species_list = []
        for spec, occu in self._species.items():
            # Serialize each species, dropping the serializer metadata keys
            # and recording the site occupancy alongside.
            d = spec.as_dict()
            del d["@module"]
            del d["@class"]
            d["occu"] = occu
            species_list.append(d)
        d = {"species": species_list,
             "abc": [float(c) for c in self._frac_coords],
             "lattice": self._lattice.as_dict(verbosity=verbosity),
             "@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        if verbosity > 0:
            # Extra detail: Cartesian coords, human-readable label, properties.
            d["xyz"] = [float(c) for c in self.coords]
            d["label"] = self.species_string
            d["properties"] = self.properties
        return d
    @classmethod
    def from_dict(cls, d, lattice=None):
        """
        Create PeriodicSite from dict representation.

        Args:
            d (dict): dict representation of PeriodicSite
            lattice: Optional lattice to override lattice specified in d.
                Useful for ensuring all sites in a structure share the same
                lattice.

        Returns:
            PeriodicSite
        """
        atoms_n_occu = {}
        for sp_occu in d["species"]:
            # Decide which species class to reconstruct: an oxidation state
            # plus a valid element symbol means a Specie; an oxidation state
            # without one means a DummySpecie; otherwise a plain Element.
            if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                    sp_occu["element"]):
                sp = Specie.from_dict(sp_occu)
            elif "oxidation_state" in sp_occu:
                sp = DummySpecie.from_dict(sp_occu)
            else:
                sp = Element(sp_occu["element"])
            atoms_n_occu[sp] = sp_occu["occu"]
        props = d.get("properties", None)
        # An explicitly supplied lattice takes precedence over the stored one.
        lattice = lattice if lattice else Lattice.from_dict(d["lattice"])
        return cls(atoms_n_occu, d["abc"], lattice, properties=props)
|
tschaume/pymatgen
|
pymatgen/core/sites.py
|
Python
|
mit
| 21,743
|
[
"pymatgen"
] |
08b3656f7b612b46e673d092f8f3b520a7704948bb6aadc6a82704e007cacfd4
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import mock
import types
import io
import mooseutils
import moosesqa
from MooseDocs.commands import generate
@unittest.skipIf(mooseutils.git_version() < (2,11,4), "Git version must at least 2.11.4")
class TestGenerate(unittest.TestCase):
    """Integration test for the MooseDocs ``generate`` command.

    Runs generate.main() against the MOOSE test/doc tree with file writing
    mocked out, then checks which stub pages would have been created.
    """
    def setUp(self):
        # Change to the test/doc directory
        self._working_dir = os.getcwd()
        moose_test_doc_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'test', 'doc'))
        os.chdir(moose_test_doc_dir)
    def tearDown(self):
        # Restore the working directory
        os.chdir(self._working_dir)
    # NOTE: mock.patch decorators apply bottom-up, so _writeFile maps to the
    # first mock argument and _shouldCreateStub to the second.
    @mock.patch('MooseDocs.commands.generate._shouldCreateStub')
    @mock.patch('MooseDocs.commands.generate._writeFile')
    def testGenerate(self, writeFile, shouldCreateStub):
        # Store the filenames to be created
        filenames = list()
        writeFile.side_effect = lambda fn, *args: filenames.append(fn)
        # Create custom function for determining if a stub should be created
        shouldCreateStub.side_effect = self._shouldCreateStub
        # Run the generate command
        opt = types.SimpleNamespace(app_types=['MooseApp'], config='sqa_reports.yml')
        status = generate.main(opt)
        self.assertEqual(status, 0)
        self.assertEqual(len(filenames), 3)
        self.assertTrue(filenames[0].endswith('moose/framework/doc/content/syntax/Kernels/index.md'))
        self.assertTrue(filenames[1].endswith('moose/framework/doc/content/source/actions/AddKernelAction.md'))
        self.assertTrue(filenames[2].endswith('moose/framework/doc/content/source/kernels/Diffusion.md'))
    @staticmethod
    def _shouldCreateStub(report, n):
        # Test for stub on Action, Object, and Syntax
        if n.fullpath() in ('/Kernels/AddKernelAction', '/Kernels/Diffusion', '/Kernels'):
            return True
        return False
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
harterj/moose
|
python/MooseDocs/test/commands/test_generate.py
|
Python
|
lgpl-2.1
| 2,292
|
[
"MOOSE"
] |
57807bc1c3aaf2eec32438c25f39d2065ac4b9b2124c859eddc5d4d3bb221728
|
# -*- coding: utf-8 -*-
from typing import Tuple, Dict, Set
from pyramids import trees
from pyramids.categorization import Property, Category
from pyramids.rules import branch
TraversableElement = 'Union[trees.Parse, trees.ParseTree, trees.TreeNodeInterface]'
class LanguageContentHandler:
    """A content handler for natural language, in the style of the
    ContentHandler class of the xml.sax module.

    All methods are no-op hooks; subclasses override the events they care
    about, exactly like xml.sax.ContentHandler.
    """
    def handle_tree_end(self) -> None:
        """Called to indicate the token_end_index of a tree."""
    def handle_token(self, spelling: str, category: Category, index: int = None,
                     span: Tuple[int, int] = None) -> None:
        """Called to indicate the occurrence of a token."""
    def handle_root(self) -> None:
        """Called to indicate that the next token is the root."""
    def handle_link(self, source_start_index: int, sink_start_index: int, label: str) -> None:
        """Called to indicate the occurrence of a link between two tokens.
        Note that this will not be called until handle_token() has been
        called for both the source and sink."""
    def handle_phrase_start(self, category: Category, head_start_index: int = None) -> None:
        """Called to indicate the token_start_index of a phrase."""
    def handle_phrase_end(self) -> None:
        """Called to indicate the token_end_index of a phrase."""
class DepthFirstTraverser:
    """Walks parse structures depth-first, firing LanguageContentHandler
    events (tokens, phrases, links) as it goes."""
    def traverse(self, element: TraversableElement, handler: LanguageContentHandler,
                 is_root: bool = False) -> None:
        """Visit this node with a LanguageContentHandler."""
        # TODO: Should we let the handler know that there's a dangling
        #       needs_* or takes_* property that hasn't been satisfied at
        #       the root level?
        if isinstance(element, trees.Parse):
            # Visit every alternative tree, ordered by start index, then by
            # longest span, then by descending weighted score.
            scores = {tree: tree.get_weighted_score() for tree in element.parse_trees}
            for tree in sorted(element.parse_trees,
                               key=lambda tree: (tree.token_start_index, -tree.token_end_index,
                                                 -scores[tree][0], -scores[tree][1])):
                self.traverse(tree, handler)
                handler.handle_tree_end()
        elif isinstance(element, trees.ParseTree):
            # TODO: Make sure the return value is empty. If not, it's a bad
            #       parse tree. This case should be detected when the Parse
            #       instance is created, and bad trees should automatically be
            #       filtered out then, so we should *never* get a need source
            #       here.
            self.traverse(element.root, handler, True)
        elif isinstance(element, trees.TreeNodeSet):
            # A node set delegates to its highest-scoring member.
            self.traverse(element.best_node, handler, is_root)
        else:
            assert isinstance(element, trees.TreeNode)
            # Hide the return value, since it's only for internal use.
            self._traverse(element, handler, is_root)
    # TODO: Break this method up into comprehensible chunks.
    def _traverse(self, element: TraversableElement, handler: LanguageContentHandler,
                  is_root: bool = False) -> Dict[Property, Set[int]]:
        """Recursive worker: emits events for `element` and returns a map of
        needed properties to the token indices that can supply them."""
        assert isinstance(element, trees.TreeNode)
        payload = element.payload
        assert isinstance(payload, trees.ParsingPayload)
        if element.is_leaf():
            # Leaf: emit the token, and report any needs_*/takes_* properties
            # so ancestors can wire links back to this token.
            if is_root:
                handler.handle_root()
            head_token_start = trees.ParseTreeUtils.get_head_token_start(element)
            handler.handle_token(payload.tokens[payload.token_start_index], payload.category,
                                 head_token_start, payload.tokens.spans[payload.token_start_index])
            need_sources = {}
            for prop in payload.category.positive_properties:
                if prop.startswith(('needs_', 'takes_')):
                    needed = Property.get(prop[6:])
                    need_sources[needed] = {head_token_start}
            return need_sources
        head_start = trees.ParseTreeUtils.get_head_token_start(element)
        handler.handle_phrase_start(payload.category, head_start)
        # Visit each subtree, remembering which indices are to receive
        # which potential links.
        nodes = []
        need_sources = {}
        head_need_sources = {}
        index = 0
        for component in element.components:
            assert isinstance(component, trees.TreeNodeSet)
            component = component.best_node
            assert isinstance(component, trees.TreeNode)
            component_need_sources = self._traverse(
                component,
                handler,
                is_root and index == payload.head_component_index
            )
            head_token_start = trees.ParseTreeUtils.get_head_token_start(component)
            nodes.append(head_token_start)
            # Merge this component's unmet needs into the running totals.
            for property_name in component_need_sources:
                # if (Property('needs_'+ property_name) not in
                #         self.category.positive_properties and
                #         Property('takes_'+ property_name) not in
                #         self.category.positive_properties):
                #     continue
                if property_name in need_sources:
                    need_sources[property_name] |= component_need_sources[property_name]
                else:
                    need_sources[property_name] = component_need_sources[property_name]
            if index == payload.head_component_index:
                head_need_sources = component_need_sources
            index += 1
        # Add the links as appropriate for the rule used to build this tree
        for index in range(len(element.components) - 1):
            rule = payload.rule
            assert isinstance(rule, branch.BranchRule)
            links = rule.get_link_types(element, index)
            # Skip the head node; there won't be any looping links.
            if index < payload.head_component_index:
                left_side = nodes[index]
                right_side = head_start
            else:
                left_side = head_start
                right_side = nodes[index + 1]
            for label, left, right in links:
                if left:
                    if str(label).lower() in head_need_sources:
                        # and not ((Property.get('needs_' + label.lower())
                        #           in self.category.positive_properties) or
                        #          (Property.get('takes_' + label.lower())
                        #           in self.category.positive_properties)):
                        for node in need_sources[label.lower()]:
                            handler.handle_link(node, left_side, label)
                    elif label[-3:].lower() == '_of' and label[:-3].lower() in head_need_sources:
                        # "*_of" labels reverse the link direction.
                        for node in need_sources[label[:-3].lower()]:
                            handler.handle_link(left_side, node, label)
                    else:
                        handler.handle_link(right_side, left_side, label)
                if right:
                    if str(label).lower() in head_need_sources:
                        # and not ((Property.get('needs_' + label.lower())
                        #           in self.category.positive_properties) or
                        #          (Property.get('takes_' + label.lower())
                        #           in self.category.positive_properties)):
                        for node in need_sources[label.lower()]:
                            handler.handle_link(node, right_side, label)
                    elif label[-3:].lower() == '_of' and label[:-3].lower() in head_need_sources:
                        for node in need_sources[label[:-3].lower()]:
                            handler.handle_link(right_side, node, label)
                    else:
                        handler.handle_link(left_side, right_side, label)
        handler.handle_phrase_end()
        # Figure out which nodes should get which links from outside this subtree
        parent_need_sources = {}
        for prop in payload.category.positive_properties:
            if prop.startswith(('needs_', 'takes_')):
                needed = Property.get(prop[6:])
                if needed in need_sources:
                    parent_need_sources[needed] = need_sources[needed]
                else:
                    # Nothing below satisfied it; offer this phrase's head.
                    parent_need_sources[needed] = {head_start}
        return parent_need_sources
|
hosford42/pyramids
|
pyramids/traversal.py
|
Python
|
mit
| 8,556
|
[
"VisIt"
] |
1075001793cfe1f57ca63e2344ee40b653bdc178c184b553404a4ca8c157e327
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Miscellaneous utility functions.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import abc
import collections
import copy
import functools
import inspect
import os
import os.path
import sys
import tempfile
import time
import cf_units
import numpy as np
import numpy.ma as ma
from iris._deprecation import warn_deprecated
import iris
import iris.exceptions
def broadcast_weights(weights, array, dims):
    """
    Broadcast a weights array to the shape of another array.

    Each dimension of the weights array must correspond to a dimension
    of the other array.

    .. deprecated:: 1.6
        Please use :func:`~iris.util.broadcast_to_shape()`.

    Args:

    * weights (:class:`numpy.ndarray`-like):
        An array of weights to broadcast.

    * array (:class:`numpy.ndarray`-like):
        An array whose shape is the target shape for *weights*.

    * dims (:class:`list` :class:`tuple` etc.):
        A sequence of dimension indices, specifying which dimensions of
        *array* are represented in *weights*. The order the dimensions
        are given in is not important, but the order of the dimensions
        in *weights* should be the same as the relative ordering of the
        corresponding dimensions in *array*.

    """
    warn_deprecated('broadcast_weights() is deprecated and will be removed '
                    'in a future release. Consider converting existing code '
                    'to use broadcast_to_shape() as a replacement.',
                    stacklevel=2)
    # Create a shape array, which *weights* can be re-shaped to, allowing
    # them to be broadcast with *array*.  The dtype must be integral:
    # np.ones() defaults to float64 and reshape() rejects a float-valued
    # shape on modern NumPy.
    weights_shape = np.ones(array.ndim, dtype=int)
    for dim in dims:
        if dim is not None:
            weights_shape[dim] = array.shape[dim]
    # Broadcast the arrays together.
    return np.broadcast_arrays(weights.reshape(weights_shape), array)[0]
def broadcast_to_shape(array, shape, dim_map):
    """
    Broadcast an array to a given shape.

    Each dimension of the array must correspond to a dimension in the
    given shape. Striding is used to repeat the array until it matches
    the desired shape, returning repeated views on the original array.
    If you need to write to the resulting array, make a copy first.

    Args:

    * array (:class:`numpy.ndarray`-like)
        An array to broadcast.

    * shape (:class:`list`, :class:`tuple` etc.):
        The shape the array should be broadcast to.

    * dim_map (:class:`list`, :class:`tuple` etc.):
        A mapping of the dimensions of *array* to their corresponding
        element in *shape*. *dim_map* must be the same length as the
        number of dimensions in *array*. Each element of *dim_map*
        corresponds to a dimension of *array* and its value provides
        the index in *shape* which the dimension of *array* corresponds
        to.

    For example, to broadcast an array of shape (2, 3) to (5, 2, 6, 3)
    where the array's dims map to positions 1 and 3::

        a = np.array([[1, 2, 3], [4, 5, 6]])
        b = broadcast_to_shape(a, (5, 2, 6, 3), (1, 3))

    """
    if len(dim_map) != array.ndim:
        # We must check for this condition here because we cannot rely on
        # getting an error from numpy if the dim_map argument is not the
        # correct length; we might just get a segfault.
        raise ValueError('dim_map must have an entry for every '
                         'dimension of the input array')

    def _strided_view(data):
        # A stride of zero repeats the data along broadcast dimensions.
        strides = [0] * len(shape)
        for source_dim, target_dim in enumerate(dim_map):
            if shape[target_dim] != data.shape[source_dim]:
                # We'd get garbage values if the dimensions of data were
                # not those indicated by shape.
                raise ValueError('shape and array are not compatible')
            strides[target_dim] = data.strides[source_dim]
        return np.lib.stride_tricks.as_strided(data, shape=shape,
                                               strides=strides)

    result = _strided_view(array)
    if ma.isMaskedArray(array):
        if array.mask is ma.nomask:
            # Degenerate masks can be applied as-is.
            broadcast_mask = array.mask
        else:
            # Mask arrays need to be handled the same way as the data array.
            broadcast_mask = _strided_view(array.mask)
        result = ma.array(result, mask=broadcast_mask)
    return result
def delta(ndarray, dimension, circular=False):
    """
    Calculates the difference between values along a given dimension.

    Args:

    * ndarray:
        The array over which to do the difference.

    * dimension:
        The dimension over which to do the difference on ndarray.

    * circular:
        If not False then return n results in the requested dimension
        with the delta between the last and first element included in
        the result otherwise the result will be of length n-1 (where n
        is the length of ndarray in the given dimension's direction)

        If circular is numeric then the value of circular will be added
        to the last element of the given dimension if the last element
        is negative, otherwise the value of circular will be subtracted
        from the last element.

        The example below illustrates the process::

            original array -180, -90, 0, 90
            delta (with circular=360):  90, 90, 90, -270+360

    .. note::

        The difference algorithm implemented is forward difference:

            >>> import numpy as np
            >>> import iris.util
            >>> original = np.array([-180, -90, 0, 90])
            >>> iris.util.delta(original, 0)
            array([90, 90, 90])
            >>> iris.util.delta(original, 0, circular=360)
            array([90, 90, 90, 90])

    """
    if circular is not False:
        # Forward-roll so element i holds the "next" value, with the first
        # element wrapping around into the last slot.
        _delta = np.roll(ndarray, -1, axis=dimension)
        # Index selecting just the last element along `dimension`.  NumPy
        # requires a tuple here: indexing with a list of slices is
        # deprecated/removed in modern releases (cf. reverse() below).
        last_element = [slice(None, None)] * ndarray.ndim
        last_element[dimension] = slice(-1, None)
        last_element = tuple(last_element)
        if not isinstance(circular, bool):
            # Apply the circular wrap value to the last element, with the
            # sign chosen by comparing the original and rolled values.
            result = np.where(ndarray[last_element] >= _delta[last_element])[0]
            _delta[last_element] -= circular
            _delta[last_element][result] += 2 * circular
        np.subtract(_delta, ndarray, _delta)
    else:
        _delta = np.diff(ndarray, axis=dimension)
    return _delta
def describe_diff(cube_a, cube_b, output_file=None):
    """
    Prints the differences that prevent compatibility between two cubes, as
    defined by :meth:`iris.cube.Cube.is_compatible()`.

    Args:

    * cube_a:
        An instance of :class:`iris.cube.Cube` or
        :class:`iris.cube.CubeMetadata`.

    * cube_b:
        An instance of :class:`iris.cube.Cube` or
        :class:`iris.cube.CubeMetadata`.

    * output_file:
        A :class:`file` or file-like object to receive output. Defaults to
        sys.stdout.

    .. seealso::

        :meth:`iris.cube.Cube.is_compatible()`

    .. note::

        Compatibility does not guarantee that two cubes can be merged.
        This function provides a verbose description of the metadata
        differences only; merge logic is out of scope.

    """
    out = sys.stdout if output_file is None else output_file
    if cube_a.is_compatible(cube_b):
        out.write('Cubes are compatible\n')
        return
    # Report each class of metadata difference in turn.
    shared_keys = set(cube_a.attributes).intersection(cube_b.attributes)
    for key in shared_keys:
        a_value = cube_a.attributes[key]
        b_value = cube_b.attributes[key]
        if np.any(a_value != b_value):
            out.write('"%s" cube_a attribute value "%s" is not '
                      'compatible with cube_b '
                      'attribute value "%s"\n'
                      % (key, a_value, b_value))
    if cube_a.name() != cube_b.name():
        out.write('cube_a name "%s" is not compatible '
                  'with cube_b name "%s"\n'
                  % (cube_a.name(), cube_b.name()))
    if cube_a.units != cube_b.units:
        out.write(
            'cube_a units "%s" are not compatible with cube_b units "%s"\n'
            % (cube_a.units, cube_b.units))
    if cube_a.cell_methods != cube_b.cell_methods:
        out.write('Cell methods\n%s\nand\n%s\nare not compatible\n'
                  % (cube_a.cell_methods, cube_b.cell_methods))
def guess_coord_axis(coord):
    """
    Returns a "best guess" axis name of the coordinate.

    Heuristic categorisation of the coordinate into either label
    'T', 'Z', 'Y', 'X' or None.

    Args:

    * coord:
        The :class:`iris.coords.Coord`.

    Returns:
        'T', 'Z', 'Y', 'X', or None.

    """
    x_names = ('longitude', 'grid_longitude', 'projection_x_coordinate')
    y_names = ('latitude', 'grid_latitude', 'projection_y_coordinate')
    if coord.standard_name in x_names:
        return 'X'
    if coord.standard_name in y_names:
        return 'Y'
    # Vertical: pressure-convertible units, or an explicit "positive"
    # direction attribute.
    if (coord.units.is_convertible('hPa') or
            coord.attributes.get('positive') in ('up', 'down')):
        return 'Z'
    if coord.units.is_time_reference():
        return 'T'
    return None
def rolling_window(a, window=1, step=1, axis=-1):
    """
    Make an ndarray with a rolling window of the last dimension

    Args:

    * a : array_like
        Array to add rolling window to

    Kwargs:

    * window : int
        Size of rolling window

    * step : int
        Size of step between rolling windows

    * axis : int
        Axis to take the rolling window over

    Returns:
        Array that is a view of the original array with an added dimension
        of the size of the given window at axis + 1.

    Examples::

        >>> x = np.arange(10).reshape((2, 5))
        >>> rolling_window(x, 3)
        array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],
               [[5, 6, 7], [6, 7, 8], [7, 8, 9]]])

    """
    # NOTE: The implementation of this function originates from
    # https://github.com/numpy/numpy/pull/31#issuecomment-1304851 04/08/2011
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > a.shape[axis]:
        raise ValueError("`window` is too long.")
    if step < 1:
        raise ValueError("`step` must be at least 1.")
    axis = axis % a.ndim
    n_windows = (a.shape[axis] - window + step) // step
    new_shape = a.shape[:axis] + (n_windows, window) + a.shape[axis + 1:]

    def _windowed_view(data):
        # Reuse the source stride for both the window-hop and the in-window
        # step, so the result is a zero-copy view.
        new_strides = (data.strides[:axis] +
                       (step * data.strides[axis], data.strides[axis]) +
                       data.strides[axis + 1:])
        return np.lib.stride_tricks.as_strided(data, shape=new_shape,
                                               strides=new_strides)

    windows = _windowed_view(a)
    if ma.isMaskedArray(a):
        # The mask must be strided identically to the data.
        windows = ma.array(windows, mask=_windowed_view(ma.getmaskarray(a)))
    return windows
def array_equal(array1, array2):
    """
    Returns whether two arrays have the same shape and elements.

    This provides the same functionality as :func:`numpy.array_equal` but
    with additional support for arrays of strings.

    """
    lhs = np.asarray(array1)
    rhs = np.asarray(array2)
    # Differently-shaped arrays can never be equal; comparing them
    # element-wise would broadcast (or fail), so short-circuit.
    if lhs.shape != rhs.shape:
        return False
    return bool(np.asarray(lhs == rhs).all())
def approx_equal(a, b, max_absolute_error=1e-10, max_relative_error=1e-10):
    """
    Returns whether two numbers are almost equal, allowing for the
    finite precision of floating point numbers.

    """
    difference = abs(a - b)
    # Absolute test first: handles numbers close to zero, where a relative
    # comparison would be meaningless (or divide by zero).
    if difference < max_absolute_error:
        return True
    # Normalise by the larger-magnitude operand so the result is
    # independent of argument order.
    denominator = max([a, b], key=abs)
    return (difference / denominator) < max_relative_error
def between(lh, rh, lh_inclusive=True, rh_inclusive=True):
    """
    Provides a convenient way of defining a 3 element inequality such as
    ``a < number < b``.

    Arguments:

    * lh
        The left hand element of the inequality

    * rh
        The right hand element of the inequality

    Keywords:

    * lh_inclusive - boolean
        Affects the left hand comparison operator to use in the inequality.
        True for ``<=`` false for ``<``. Defaults to True.

    * rh_inclusive - boolean
        Same as lh_inclusive but for right hand operator.

    For example::

        between_3_and_6 = between(3, 6)
        for i in range(10):
            print(i, between_3_and_6(i))

    """
    # Build each side of the inequality independently, then combine them
    # into a single predicate.
    if lh_inclusive:
        left_ok = lambda c: lh <= c
    else:
        left_ok = lambda c: lh < c
    if rh_inclusive:
        right_ok = lambda c: c <= rh
    else:
        right_ok = lambda c: c < rh
    return lambda c: left_ok(c) and right_ok(c)
def reverse(array, axes):
    """
    Reverse the array along the given axes.

    Args:

    * array
        The array to reverse

    * axes
        A single value or array of values of axes to reverse

    ::

        >>> import numpy as np
        >>> a = np.arange(24).reshape(2, 3, 4)
        >>> print(reverse(a, 1))
        [[[ 8  9 10 11]
          [ 4  5  6  7]
          [ 0  1  2  3]]
        <BLANKLINE>
         [[20 21 22 23]
          [16 17 18 19]
          [12 13 14 15]]]

    """
    # Normalise to a 1d array of axis numbers and validate.
    axes = np.array(axes, ndmin=1)
    if axes.ndim != 1:
        raise ValueError('Reverse was expecting a single axis or a 1d array '
                         'of axes, got %r' % axes)
    if np.min(axes) < 0 or np.max(axes) > array.ndim - 1:
        raise ValueError('An axis value out of range for the number of '
                         'dimensions from the given array (%s) was received. '
                         'Got: %r' % (array.ndim, axes))
    # Flip each requested axis with a negative-step slice; untouched axes
    # keep a full slice.
    index = [slice(None, None)] * array.ndim
    for axis in axes:
        index[axis] = slice(None, None, -1)
    return array[tuple(index)]
def monotonic(array, strict=False, return_direction=False):
    """
    Return whether the given 1d array is monotonic.

    Note that, the array must not contain missing data.

    Kwargs:

    * strict (boolean)
        Flag to enable strict monotonic checking

    * return_direction (boolean)
        Flag to change return behaviour to return
        (monotonic_status, direction). Direction will be 1 for positive
        or -1 for negative. The direction is meaningless if the array is
        not monotonic.

    Returns:

    * monotonic_status (boolean)
        Whether the array was monotonic.

        If the return_direction flag was given then the returned value
        will be: ``(monotonic_status, direction)``

    """
    if array.ndim != 1 or len(array) <= 1:
        raise ValueError('The array to check must be 1 dimensional and have '
                         'more than 1 element.')
    if ma.isMaskedArray(array) and ma.count_masked(array) != 0:
        raise ValueError('The array to check contains missing data.')
    # The signs of the most-positive and most-negative steps summarise
    # the whole array's behaviour.
    steps = np.diff(array)
    sign_max = np.sign(np.max(steps))
    sign_min = np.sign(np.min(steps))
    if strict:
        is_monotonic = sign_max == sign_min and sign_max != 0
    else:
        # Non-strict: all steps the same sign, allowing zero (flat) steps.
        is_monotonic = ((sign_min < 0 and sign_max <= 0) or
                        (sign_max > 0 and sign_min >= 0) or
                        (sign_min == sign_max == 0))
    if not return_direction:
        return is_monotonic
    direction = sign_min if sign_max == 0 else sign_max
    return is_monotonic, direction
def column_slices_generator(full_slice, ndims):
    """
    Given a full slice full of tuples, return a dictionary mapping old
    data dimensions to new and a generator which gives the successive
    slices needed to index correctly (across columns).

    This routine deals with the special functionality for tuple based
    indexing e.g. [0, (3, 5), :, (1, 6, 8)] by first providing a slice
    which takes the non tuple slices out first i.e. [0, :, :, :] then
    subsequently iterates through each of the tuples taking out the
    appropriate slices i.e. [(3, 5), :, :] followed by [:, :, (1, 6, 8)]

    This method was developed as numpy does not support the direct
    approach of [(3, 5), : , (1, 6, 8)] for column based indexing.

    """
    list_of_slices = []
    # Map current dimensions to new dimensions, or None.  Integer keys
    # collapse a dimension, so they map to None; everything else keeps a
    # (renumbered) dimension in the result.
    dimension_mapping = {None: None}
    _count_current_dim = 0
    for i, i_key in enumerate(full_slice):
        if isinstance(i_key, (int, np.integer)):
            dimension_mapping[i] = None
        else:
            dimension_mapping[i] = _count_current_dim
            _count_current_dim += 1
    # Get all of the dimensions for which a tuple of indices were provided
    # (numpy.ndarrays are treated in the same way tuples in this case)
    def is_tuple_style_index(key):
        return (isinstance(key, tuple) or
                (isinstance(key, np.ndarray) and key.ndim == 1))
    tuple_indices = [i for i, key in enumerate(full_slice)
                     if is_tuple_style_index(key)]
    # stg1: Take a copy of the full_slice specification, turning all tuples
    # into a full slice.  Skipped when *every* key is tuple-style, since the
    # first pass would then be a no-op.
    if tuple_indices != list(range(len(full_slice))):
        first_slice = list(full_slice)
        for tuple_index in tuple_indices:
            first_slice[tuple_index] = slice(None, None)
        # turn first_slice back into a tuple ready for indexing
        first_slice = tuple(first_slice)
        list_of_slices.append(first_slice)
    # stg2 iterate over each of the tuples
    for tuple_index in tuple_indices:
        # Create a list with the indices to span the whole data array that we
        # currently have
        spanning_slice_with_tuple = [slice(None, None)] * _count_current_dim
        # Replace the slice(None, None) with our current tuple
        spanning_slice_with_tuple[dimension_mapping[tuple_index]] = \
            full_slice[tuple_index]
        # if we just have [(0, 1)] turn it into [(0, 1), ...] as this is
        # Numpy's syntax.
        if len(spanning_slice_with_tuple) == 1:
            spanning_slice_with_tuple.append(Ellipsis)
        spanning_slice_with_tuple = tuple(spanning_slice_with_tuple)
        list_of_slices.append(spanning_slice_with_tuple)
    # return the dimension mapping and a generator of slices
    return dimension_mapping, iter(list_of_slices)
def _build_full_slice_given_keys(keys, ndim):
    """
    Given the keys passed to a __getitem__ call, build an equivalent
    tuple of keys which span ndims.

    """
    # Normalise a single key into a one-element tuple.
    if not isinstance(keys, tuple):
        keys = (keys,)

    # An extra Ellipsis may be discarded iff len(keys)-1 == ndim.
    # numpy arrays are excluded from the identity test because their
    # __eq__ broadcasts rather than returning a plain bool.
    has_ellipsis = any(key is Ellipsis for key in keys
                       if not isinstance(key, np.ndarray))
    if len(keys) - 1 == ndim and has_ellipsis:
        trimmed = list(keys)
        first_ellipsis = next(i for i, key in enumerate(trimmed)
                              if key is Ellipsis)
        trimmed.pop(first_ellipsis)
        keys = tuple(trimmed)

    # A redundant trailing ":" is allowable for ndim >= 1 - drop it.
    if len(keys) > ndim and ndim != 0 and keys[-1] == slice(None, None):
        keys = keys[:-1]

    if len(keys) > ndim:
        raise IndexError('More slices requested than dimensions. Requested '
                         '%r, but there were only %s dimensions.' %
                         (keys, ndim))

    # Default every dimension to a full slice, then overwrite from keys.
    full_slice = [slice(None, None)] * ndim
    for i, key in enumerate(keys):
        if key is not Ellipsis:
            full_slice[i] = key
            continue
        # Numpy semantics: any later Ellipsis entries behave as ":".
        tail = tuple(slice(None, None) if k is Ellipsis else k
                     for k in keys[i:])
        keys = keys[:i] + tail
        # Fill the remaining dimensions from the right-hand side, since
        # the Ellipsis absorbs however many dimensions are left over.
        for j, trailing_key in enumerate(keys[:i:-1]):
            full_slice[-j - 1] = trailing_key
        break

    # Convert any tuple keys into 1-d numpy arrays for consistent behaviour.
    return tuple(np.array(key, ndmin=1) if isinstance(key, tuple) else key
                 for key in full_slice)
def _wrap_function_for_method(function, docstring=None):
    """
    Returns a wrapper function modified to be suitable for use as a
    method.

    The wrapper function renames the first argument as "self" and allows
    an alternative docstring, thus allowing the built-in help(...)
    routine to display appropriate output.

    """
    # Generate the Python source for the wrapper function.
    # NB. The first argument is replaced with "self".
    # inspect.getargspec() was deprecated and finally removed in Python
    # 3.11, so prefer the getfullargspec() replacement when available.
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(function)
        args, varargs, varkw, defaults = (spec.args, spec.varargs,
                                          spec.varkw, spec.defaults)
    else:
        args, varargs, varkw, defaults = inspect.getargspec(function)
    if defaults is None:
        basic_args = ['self'] + args[1:]
        default_args = []
        simple_default_args = []
    else:
        cutoff = -len(defaults)
        basic_args = ['self'] + args[1:cutoff]
        # Defaulted arguments appear as "name=repr(value)" in the signature.
        default_args = ['%s=%r' % pair
                        for pair in zip(args[cutoff:], defaults)]
        simple_default_args = args[cutoff:]
    var_arg = [] if varargs is None else ['*' + varargs]
    var_kw = [] if varkw is None else ['**' + varkw]
    arg_source = ', '.join(basic_args + default_args + var_arg + var_kw)
    simple_arg_source = ', '.join(basic_args + simple_default_args +
                                  var_arg + var_kw)
    source = ('def %s(%s):\n return function(%s)' %
              (function.__name__, arg_source, simple_arg_source))
    # Compile the wrapper function
    # NB. There's an outstanding bug with "exec" where the locals and globals
    # dictionaries must be the same if we're to get closure behaviour.
    my_locals = {'function': function}
    exec(source, my_locals, my_locals)
    # Update the docstring if required, and return the modified function
    wrapper = my_locals[function.__name__]
    if docstring is None:
        wrapper.__doc__ = function.__doc__
    else:
        wrapper.__doc__ = docstring
    return wrapper
class _MetaOrderedHashable(abc.ABCMeta):
    """
    A metaclass that ensures that non-abstract subclasses of _OrderedHashable
    without an explicit __init__ method are given a default __init__ method
    with the appropriate method signature.

    Also, an _init method is provided to allow subclasses with their own
    __init__ constructors to initialise their values via an explicit method
    signature.

    NB. This metaclass is used to construct the _OrderedHashable class as well
    as all its subclasses.

    """
    def __new__(cls, name, bases, namespace):
        # We only want to modify concrete classes that have defined the
        # "_names" property.
        if '_names' in namespace and \
                not isinstance(namespace['_names'], abc.abstractproperty):
            # The comma-separated attribute names become the explicit
            # argument list of the generated methods.
            args = ', '.join(namespace['_names'])

            # Ensure the class has a constructor with explicit arguments.
            if '__init__' not in namespace:
                # Create a default __init__ method for the class
                # (exec-ing into `namespace` makes it part of the class
                # under construction).
                method_source = ('def __init__(self, %s):\n '
                                 'self._init_from_tuple((%s,))' % (args, args))
                exec(method_source, namespace)

            # Ensure the class has a "helper constructor" with explicit
            # arguments.
            if '_init' not in namespace:
                # Create a default _init method for the class
                method_source = ('def _init(self, %s):\n '
                                 'self._init_from_tuple((%s,))' % (args, args))
                exec(method_source, namespace)

        return super(_MetaOrderedHashable, cls).__new__(
            cls, name, bases, namespace)
# "Hashable" lives in collections.abc on Python 3; the old collections
# alias was deprecated in Python 3.3 and removed in Python 3.10.
try:
    from collections.abc import Hashable as _Hashable
except ImportError:  # Python 2
    from collections import Hashable as _Hashable


@functools.total_ordering
class _OrderedHashable(six.with_metaclass(_MetaOrderedHashable, _Hashable)):
    """
    Convenience class for creating "immutable", hashable, and ordered classes.

    Instance identity is defined by the specific list of attribute names
    declared in the abstract attribute "_names". Subclasses must declare the
    attribute "_names" as an iterable containing the names of all the
    attributes relevant to equality/hash-value/ordering.

    Initial values should be set by using ::
        self._init(self, value1, value2, ..)

    .. note::

        It's the responsibility of the subclass to ensure that the values of
        its attributes are themselves hashable.

    """

    @abc.abstractproperty
    def _names(self):
        """
        Override this attribute to declare the names of all the attributes
        relevant to the hash/comparison semantics.

        """
        pass

    def _init_from_tuple(self, values):
        # Bypass our own immutable __setattr__ by going via object.
        for name, value in zip(self._names, values):
            object.__setattr__(self, name, value)

    def __repr__(self):
        class_name = type(self).__name__
        attributes = ', '.join('%s=%r' % (name, value)
                               for (name, value)
                               in zip(self._names, self._as_tuple()))
        return '%s(%s)' % (class_name, attributes)

    def _as_tuple(self):
        # The identity attributes, in declaration order.
        return tuple(getattr(self, name) for name in self._names)

    # Prevent attribute updates

    def __setattr__(self, name, value):
        raise AttributeError('Instances of %s are immutable' %
                             type(self).__name__)

    def __delattr__(self, name):
        raise AttributeError('Instances of %s are immutable' %
                             type(self).__name__)

    # Provide hash semantics

    def _identity(self):
        return self._as_tuple()

    def __hash__(self):
        return hash(self._identity())

    def __eq__(self, other):
        return (isinstance(other, type(self)) and
                self._identity() == other._identity())

    def __ne__(self, other):
        # Since we've defined __eq__ we should also define __ne__.
        return not self == other

    # Provide default ordering semantics

    def __lt__(self, other):
        if isinstance(other, _OrderedHashable):
            return self._identity() < other._identity()
        else:
            return NotImplemented
def create_temp_filename(suffix=''):
    """
    Return the name of a newly-created temporary file.

    Args:

    * suffix - Optional filename extension.

    """
    # mkstemp() returns an open OS-level handle as well as the path;
    # close the handle so only the named file remains.
    handle, filename = tempfile.mkstemp(suffix)
    os.close(handle)
    return filename
def clip_string(the_str, clip_length=70, rider="..."):
    """
    Return a clipped version of the string based on the specified clip
    length and whether or not any graceful clip points can be found.

    If the string to be clipped is shorter than the specified clip
    length, the original string is returned.

    If the string is longer than the clip length, a graceful point (a
    space character) after the clip length is searched for. If a
    graceful point is found the string is clipped at this point and the
    rider is added. If no graceful point can be found, then the string
    is clipped exactly where the user requested and the rider is added.

    Args:

    * the_str
        The string to be clipped.

    * clip_length
        The length in characters that the input string should be clipped
        to. Defaults to a preconfigured value if not specified.

    * rider
        A series of characters appended at the end of the returned
        string to show it has been clipped. Defaults to a preconfigured
        value if not specified.

    Returns:
        The string clipped to the required length with a rider appended.
        If the clip length was greater than the original string, the
        original string is returned unaltered.

    """
    # Nothing to do: the string already fits, or clipping is disabled.
    if clip_length >= len(the_str) or clip_length <= 0:
        return the_str

    # The requested cut point is already graceful (a space).
    if the_str[clip_length].isspace():
        return the_str[:clip_length] + rider

    # Look for the next space beyond the cut point; if there is none,
    # clip exactly at the requested length.
    next_space = the_str.find(" ", clip_length)
    cut = clip_length if next_space == -1 else next_space
    return the_str[:cut] + rider
def ensure_array(a):
    """.. deprecated:: 1.7"""
    warn_deprecated('ensure_array() is deprecated and will be removed '
                    'in a future release.')
    # Anything that is not already a (masked) array is wrapped as a
    # one-element array.
    if isinstance(a, (np.ndarray, ma.core.MaskedArray)):
        return a
    return np.array([a])
class _Timers(object):
    """
    A utility class for timing things.

    .. deprecated:: 1.7

    """
    # See help for timers, below.
    def __init__(self):
        # Mapping: timer name -> {step name: time-value,
        #                         "active_timer_step": current step name}.
        self.timers = {}

    def start(self, name, step_name):
        """Begin timing `step_name`, stopping any currently active step."""
        warn_deprecated('Timers was deprecated in v1.7.0 and will be removed '
                        'in future Iris releases.')
        self.stop(name)
        timer = self.timers.setdefault(name, {})
        timer[step_name] = time.time()
        timer["active_timer_step"] = step_name

    def restart(self, name, step_name):
        """Resume timing `step_name`, accumulating any previous elapsed time."""
        warn_deprecated('Timers was deprecated in v1.7.0 and will be removed '
                        'in future Iris releases.')
        self.stop(name)
        timer = self.timers.setdefault(name, {})
        timer[step_name] = time.time() - timer.get(step_name, 0)
        timer["active_timer_step"] = step_name

    def stop(self, name):
        """Stop the active step (if any) and return the timer's summary."""
        timer = self.timers.get(name)
        if timer is not None and "active_timer_step" in timer:
            step = timer["active_timer_step"]
            timer[step] = time.time() - timer[step]
        return self.get(name)

    def get(self, name):
        """Return (name, formatted step timings) for the named timer."""
        timer = self.timers.get(name)
        if timer is None:
            return (name, [])
        steps = ["'%s':%8.5f" % (step, elapsed)
                 for step, elapsed in timer.items()
                 if step != "active_timer_step"]
        return (name, ", ".join(steps))

    def reset(self, name):
        """Discard all recorded steps for the named timer."""
        self.timers[name] = {}
timers = _Timers()
"""
Provides multiple named timers, each composed of multiple named steps.
.. deprecated:: 1.7
Only one step is active at a time, so calling start(timer_name, step_name)
will stop the current step and start the new one.
Example Usage::
from iris.util import timers
def little_func(param):
timers.restart("little func", "init")
init()
timers.restart("little func", "main")
main(param)
timers.restart("little func", "cleanup")
cleanup()
timers.stop("little func")
def my_big_func():
timers.start("big func", "input")
input()
timers.start("big func", "processing")
little_func(123)
little_func(456)
timers.start("big func", "output")
output()
print(timers.stop("big func"))
print(timers.get("little func"))
"""
def format_array(arr):
    """
    Returns the given array as a string, using the python builtin str
    function on a piecewise basis.

    Useful for xml representation of arrays.

    For customisations, use the :mod:`numpy.core.arrayprint` directly.

    """
    # Only summarise (insert "...") arrays with more than 85 elements.
    if arr.size > 85:
        summary_insert = "..., "
    else:
        summary_insert = ""
    ffunc = str
    # NOTE(review): numpy.core.arrayprint._formatArray is a *private* numpy
    # API whose signature has changed between numpy releases - confirm these
    # keyword arguments match the numpy version in use.  The [:-1] drops the
    # final character of numpy's formatted output.
    return np.core.arrayprint._formatArray(arr, ffunc, len(arr.shape),
                                           max_line_len=50,
                                           next_line_prefix='\t\t',
                                           separator=', ', edge_items=3,
                                           summary_insert=summary_insert)[:-1]
def new_axis(src_cube, scalar_coord=None):
    """
    Create a new axis as the leading dimension of the cube, promoting a scalar
    coordinate if specified.

    Args:

    * src_cube (:class:`iris.cube.Cube`)
        Source cube on which to generate a new axis.

    Kwargs:

    * scalar_coord (:class:`iris.coord.Coord` or 'string')
        Scalar coordinate to promote to a dimension coordinate.

    Returns:
        A new :class:`iris.cube.Cube` instance with one extra leading
        dimension (length 1).

    For example::

        >>> cube.shape
        (360, 360)
        >>> ncube = iris.util.new_axis(cube, 'time')
        >>> ncube.shape
        (1, 360, 360)

    """
    if scalar_coord is not None:
        scalar_coord = src_cube.coord(scalar_coord)

    # Indexing with None prepends a length-one dimension; for real data this
    # loads any deferred payload and returns a copy.  A MaskedConstant source
    # is replaced by a masked array so the mask gains the extra dimension
    # alongside the data.
    if src_cube.has_lazy_data():
        new_cube = iris.cube.Cube(src_cube.lazy_data()[None])
    else:
        data = src_cube.data
        if isinstance(data, ma.core.MaskedConstant):
            data = ma.array([np.nan], mask=[True])
        else:
            data = data[None]
        new_cube = iris.cube.Cube(data)

    new_cube.metadata = src_cube.metadata

    for coord in src_cube.aux_coords:
        if scalar_coord and scalar_coord == coord:
            # Promote the requested scalar coordinate onto the new axis.
            dim_coord = iris.coords.DimCoord.from_coord(coord)
            new_cube.add_dim_coord(dim_coord, 0)
        else:
            # Every other coordinate shifts up by one dimension.
            shifted = np.array(src_cube.coord_dims(coord)) + 1
            new_cube.add_aux_coord(coord.copy(), shifted)

    for coord in src_cube.dim_coords:
        shifted = np.array(src_cube.coord_dims(coord)) + 1
        new_cube.add_dim_coord(coord.copy(), shifted)

    for factory in src_cube.aux_factories:
        new_cube.add_aux_factory(copy.deepcopy(factory))

    return new_cube
def as_compatible_shape(src_cube, target_cube):
    """
    Return a cube with added length one dimensions to match the dimensionality
    and dimension ordering of `target_cube`.

    This function can be used to add the dimensions that have been collapsed,
    aggregated or sliced out, promoting scalar coordinates to length one
    dimension coordinates where necessary. It operates by matching coordinate
    metadata to infer the dimensions that need modifying, so the provided
    cubes must have coordinates with the same metadata
    (see :class:`iris.coords.CoordDefn`).

    .. note:: This function will load and copy the data payload of `src_cube`.

    Args:

    * src_cube:
        An instance of :class:`iris.cube.Cube` with missing dimensions.

    * target_cube:
        An instance of :class:`iris.cube.Cube` with the desired dimensionality.

    Returns:
        A instance of :class:`iris.cube.Cube` with the same dimensionality as
        `target_cube` but with the data and coordinates from `src_cube`
        suitably reshaped to fit.

    """
    # Build a mapping of target dimension -> source dimension (or None for
    # dimensions absent from the source) by matching coordinate metadata.
    dim_mapping = {}
    for coord in target_cube.aux_coords + target_cube.dim_coords:
        dims = target_cube.coord_dims(coord)
        try:
            collapsed_dims = src_cube.coord_dims(coord)
        except iris.exceptions.CoordinateNotFoundError:
            continue
        if collapsed_dims:
            if len(collapsed_dims) == len(dims):
                for dim_from, dim_to in zip(dims, collapsed_dims):
                    dim_mapping[dim_from] = dim_to
        elif dims:
            # The coordinate is scalar on the source cube, so the matching
            # target dimensions have no source counterpart.
            for dim_from in dims:
                dim_mapping[dim_from] = None

    # Every target dimension must have been accounted for exactly once.
    if len(dim_mapping) != target_cube.ndim:
        raise ValueError('Insufficient or conflicting coordinate '
                         'metadata. Cannot infer dimension mapping '
                         'to restore cube dimensions.')

    new_shape = [1] * target_cube.ndim
    for dim_from, dim_to in six.iteritems(dim_mapping):
        if dim_to is not None:
            new_shape[dim_from] = src_cube.shape[dim_to]

    new_data = src_cube.data.copy()

    # Transpose the data (if necessary) to prevent assignment of
    # new_shape doing anything except adding length one dims.
    order = [v for k, v in sorted(dim_mapping.items()) if v is not None]
    if order != sorted(order):
        new_order = [order.index(i) for i in range(len(order))]
        new_data = np.transpose(new_data, new_order).copy()

    new_cube = iris.cube.Cube(new_data.reshape(new_shape))
    new_cube.metadata = copy.deepcopy(src_cube.metadata)

    # Record a mapping from old coordinate IDs to new coordinates,
    # for subsequent use in creating updated aux_factories.
    coord_mapping = {}

    # Inverse of dim_mapping: source dimension -> target dimension.
    reverse_mapping = {v: k for k, v in dim_mapping.items() if v is not None}

    def add_coord(coord):
        """Closure used to add a suitably reshaped coord to new_cube."""
        all_dims = target_cube.coord_dims(coord)
        src_dims = [dim for dim in src_cube.coord_dims(coord) if
                    src_cube.shape[dim] > 1]
        mapped_dims = [reverse_mapping[dim] for dim in src_dims]
        length1_dims = [dim for dim in all_dims if new_cube.shape[dim] == 1]
        dims = length1_dims + mapped_dims
        shape = [new_cube.shape[dim] for dim in dims]
        if not shape:
            # Scalar coordinate: give it a single-element shape.
            shape = [1]
        points = coord.points.reshape(shape)
        bounds = None
        if coord.has_bounds():
            bounds = coord.bounds.reshape(shape + [coord.nbounds])
        new_coord = coord.copy(points=points, bounds=bounds)
        # If originally in dim_coords, add to dim_coords, otherwise add to
        # aux_coords.
        if target_cube.coords(coord, dim_coords=True):
            try:
                new_cube.add_dim_coord(new_coord, dims)
            except ValueError:
                # Catch cases where the coord is an AuxCoord and therefore
                # cannot be added to dim_coords.
                new_cube.add_aux_coord(new_coord, dims)
        else:
            new_cube.add_aux_coord(new_coord, dims)

        coord_mapping[id(coord)] = new_coord

    for coord in src_cube.aux_coords + src_cube.dim_coords:
        add_coord(coord)

    for factory in src_cube.aux_factories:
        new_cube.add_aux_factory(factory.updated(coord_mapping))

    return new_cube
def squeeze(cube):
    """
    Removes any dimension of length one. If it has an associated DimCoord or
    AuxCoord, this becomes a scalar coord.

    Args:

    * cube (:class:`iris.cube.Cube`)
        Source cube to remove length 1 dimension(s) from.

    Returns:
        A new :class:`iris.cube.Cube` instance without any dimensions of
        length 1.

    For example::

        >>> cube.shape
        (1, 360, 360)
        >>> ncube = iris.util.squeeze(cube)
        >>> ncube.shape
        (360, 360)

    """
    # Index each length-one dimension with a scalar 0 (which drops it) and
    # every other dimension with a full slice (which keeps it intact).
    index = tuple(0 if size == 1 else slice(None) for size in cube.shape)
    return cube[index]
def file_is_newer_than(result_path, source_paths):
    """
    Return whether the 'result' file has a later modification time than all of
    the 'source' files.

    If a stored result depends entirely on known 'sources', it need only be
    re-built when one of them changes.  This function can be used to test that
    by comparing file timestamps.

    Args:

    * result_path (string):
        The filepath of a file containing some derived result data.
    * source_paths (string or iterable of strings):
        The path(s) to the original datafiles used to make the result.  May
        include wildcards and '~' expansions (like Iris load paths), but not
        URIs.

    Returns:
        True if all the sources are older than the result, else False.

        If any of the file paths describes no existing files, an exception
        will be raised.

    .. note::
        There are obvious caveats to using file timestamps for this, as
        correct usage depends on how the sources might change.  For example,
        a file could be replaced by one of the same name, but an older
        timestamp.

        If wildcards and '~' expansions are used, this introduces even more
        uncertainty, as then you cannot even be sure that the resulting list
        of file names is the same as the originals.  For example, some files
        may have been deleted or others added.

    .. note::
        The result file may often be a :mod:`pickle` file.  In that case, it
        also depends on the relevant module sources, so extra caution is
        required.  Ideally, an additional check on iris.__version__ is
        advised.

    """
    # A single path string is treated as a one-element list.
    if isinstance(source_paths, six.string_types):
        source_paths = [source_paths]

    result_timestamp = os.path.getmtime(result_path)

    # Expand wildcards / '~' with the normal Iris.io load helper, then
    # compare each source's modification time against the result's.
    for source_path in iris.io.expand_filespecs(source_paths):
        if os.path.getmtime(source_path) >= result_timestamp:
            return False
    return True
def is_regular(coord):
    """Determine if the given coord is regular."""
    # Delegate to regular_step(); any of its failure modes means
    # "not regular".
    try:
        regular_step(coord)
    except (iris.exceptions.CoordinateNotRegularError, TypeError, ValueError):
        return False
    else:
        return True
def regular_step(coord):
    """Return the regular step from a coord or fail."""
    if coord.ndim != 1:
        raise iris.exceptions.CoordinateMultiDimError("Expected 1D coord")
    if coord.shape[0] < 2:
        raise ValueError("Expected a non-scalar coord")

    # A coord is "regular" when all successive point differences agree
    # (to a relative tolerance) with their mean.
    differences = np.diff(coord.points)
    mean_diff = np.mean(differences)
    if not np.allclose(differences, mean_diff, rtol=0.001):
        # TODO: This value is set for test_analysis to pass...
        msg = "Coord %s is not regular" % coord.name()
        raise iris.exceptions.CoordinateNotRegularError(msg)
    return mean_diff.astype(coord.points.dtype)
def unify_time_units(cubes):
    """
    Performs an in-place conversion of the time units of all time coords in
    the cubes in a given iterable.  One common epoch is defined for each
    calendar found in the cubes to prevent units being defined with
    inconsistencies between epoch and calendar.

    Each epoch is defined from the first suitable time coordinate found in
    the input cubes.

    Arg:

    * cubes:
        An iterable containing :class:`iris.cube.Cube` instances.

    """
    # One epoch per calendar, taken from the first suitable coord seen.
    epochs = {}

    for cube in cubes:
        for coord in cube.coords():
            if not coord.units.is_time_reference():
                continue
            epoch = epochs.setdefault(coord.units.calendar,
                                      coord.units.origin)
            coord.convert_units(cf_units.Unit(epoch, coord.units.calendar))
def _is_circular(points, modulus, bounds=None):
    """
    Determine whether the provided points or bounds are circular in nature
    relative to the modulus value.

    If the bounds are provided then these are checked for circularity rather
    than the points.

    Args:

    * points:
        :class:`numpy.ndarray` of point values.

    * modulus:
        Circularity modulus value.

    Kwargs:

    * bounds:
        :class:`numpy.ndarray` of bound values.

    Returns:
        Boolean.

    """
    circular = False
    if bounds is not None:
        # Set circular to True if the bounds ends are equivalent.
        first_bound = last_bound = None
        # Bounds may be 1d (a single cell, shape (2,)) or 2d (shape (n, 2)).
        if bounds.ndim == 1 and bounds.shape[-1] == 2:
            first_bound = bounds[0] % modulus
            last_bound = bounds[1] % modulus
        elif bounds.ndim == 2 and bounds.shape[-1] == 2:
            first_bound = bounds[0, 0] % modulus
            last_bound = bounds[-1, 1] % modulus

        if first_bound is not None and last_bound is not None:
            circular = np.allclose(first_bound, last_bound,
                                   rtol=1.0e-5)
    else:
        # set circular if points are regular and last+1 ~= first
        if len(points) > 1:
            diffs = list(set(np.diff(points)))
            diff = np.mean(diffs)
            abs_tol = np.abs(diff * 1.0e-4)
            # Only consider circularity when the point spacing is
            # (approximately) uniform.
            diff_approx_equal = np.max(np.abs(diffs - diff)) < abs_tol
            if diff_approx_equal:
                # The value one step beyond the final point, wrapped.
                circular_value = (points[-1] + diff) % modulus
                try:
                    np.testing.assert_approx_equal(points[0],
                                                   circular_value,
                                                   significant=4)
                    circular = True
                except AssertionError:
                    # When the sequence starts at zero the wrapped value
                    # may equal the modulus itself instead.
                    if points[0] == 0:
                        try:
                            np.testing.assert_approx_equal(modulus,
                                                           circular_value,
                                                           significant=4)
                            circular = True
                        except AssertionError:
                            pass
                    else:
                        # XXX - Inherited behaviour from NetCDF PyKE rules.
                        # We need to decide whether this is valid!
                        circular = points[0] >= modulus
    return circular
def promote_aux_coord_to_dim_coord(cube, name_or_coord):
    """
    Promote an AuxCoord on the cube to a DimCoord.

    The AuxCoord must be associated with exactly one cube dimension.  If the
    AuxCoord is associated with a dimension that already has a DimCoord, that
    DimCoord gets demoted to an AuxCoord.

    Args:

    * cube
        An instance of :class:`iris.cube.Cube`

    * name_or_coord:
        Either an instance of :class:`iris.coords.AuxCoord`, or the
        :attr:`standard_name`, :attr:`long_name`, or :attr:`var_name` of an
        instance of :class:`iris.coords.AuxCoord`.

    For example::

        >>> print(cube)
        air_temperature / (K)  (time: 12; latitude: 73; longitude: 96)
             Dimension coordinates:
                  time           x             -              -
                  latitude       -             x              -
                  longitude      -             -              x
             Auxiliary coordinates:
                  year           x             -              -
        >>> promote_aux_coord_to_dim_coord(cube, 'year')
        >>> print(cube)
        air_temperature / (K)  (year: 12; latitude: 73; longitude: 96)
             Dimension coordinates:
                  year           x             -              -
                  latitude       -             x              -
                  longitude      -             -              x
             Auxiliary coordinates:
                  time           x             -              -

    """
    if isinstance(name_or_coord, six.string_types):
        aux_coord = cube.coord(name_or_coord)
    elif isinstance(name_or_coord, iris.coords.Coord):
        aux_coord = name_or_coord
    else:
        # Don't know how to handle this type
        msg = ("Don't know how to handle coordinate of type {}. "
               "Ensure all coordinates are of type six.string_types or "
               "iris.coords.Coord.").format(type(name_or_coord))
        raise TypeError(msg)

    if aux_coord in cube.dim_coords:
        # Already a dimension coordinate - nothing to do.
        return

    if aux_coord not in cube.aux_coords:
        msg = ("Attempting to promote an AuxCoord ({}) "
               "which does not exist in the cube.").format(aux_coord.name())
        raise ValueError(msg)

    coord_dim = cube.coord_dims(aux_coord)
    if len(coord_dim) != 1:
        msg = ("Attempting to promote an AuxCoord ({}) "
               "which is associated with {} dimensions.").format(
                   aux_coord.name(), len(coord_dim))
        raise ValueError(msg)

    try:
        dim_coord = iris.coords.DimCoord.from_coord(aux_coord)
    except ValueError as valerr:
        msg = ("Attempt to promote an AuxCoord ({}) fails "
               "when attempting to create a DimCoord from the "
               "AuxCoord because: {}").format(aux_coord.name(), str(valerr))
        raise ValueError(msg)

    # Demote any DimCoord currently occupying the target dimension.
    existing = cube.coords(dim_coords=True, contains_dimension=coord_dim[0])
    if len(existing) == 1:
        demote_dim_coord_to_aux_coord(cube, existing[0])

    # Order matters here: don't remove the aux_coord until the DimCoord has
    # been successfully created, in case that creation fails.
    cube.remove_coord(aux_coord)
    cube.add_dim_coord(dim_coord, coord_dim)
def demote_dim_coord_to_aux_coord(cube, name_or_coord):
    """
    Demote a dimension coordinate on the cube to an auxiliary coordinate.

    The DimCoord is demoted to an auxiliary coordinate on the cube.  The cube
    dimension that was associated with the DimCoord becomes anonymous.  The
    class of the coordinate is left as DimCoord; it is not recast as an
    AuxCoord instance.

    Args:

    * cube
        An instance of :class:`iris.cube.Cube`

    * name_or_coord:
        Either an instance of :class:`iris.coords.DimCoord`, or the
        :attr:`standard_name`, :attr:`long_name`, or :attr:`var_name` of an
        instance of :class:`iris.coords.DimCoord`.

    For example::

        >>> print(cube)
        air_temperature / (K)  (time: 12; latitude: 73; longitude: 96)
             Dimension coordinates:
                  time           x             -              -
                  latitude       -             x              -
                  longitude      -             -              x
             Auxiliary coordinates:
                  year           x             -              -
        >>> demote_dim_coord_to_aux_coord(cube, 'time')
        >>> print(cube)
        air_temperature / (K)  (-- : 12; latitude: 73; longitude: 96)
             Dimension coordinates:
                  latitude       -             x              -
                  longitude      -             -              x
             Auxiliary coordinates:
                  time           x             -              -
                  year           x             -              -

    """
    if isinstance(name_or_coord, six.string_types):
        dim_coord = cube.coord(name_or_coord)
    elif isinstance(name_or_coord, iris.coords.Coord):
        dim_coord = name_or_coord
    else:
        # Don't know how to handle this type
        msg = ("Don't know how to handle coordinate of type {}. "
               "Ensure all coordinates are of type six.string_types or "
               "iris.coords.Coord.").format(type(name_or_coord))
        raise TypeError(msg)

    if dim_coord not in cube.dim_coords:
        # Not currently a dimension coordinate - nothing to do.
        return

    coord_dims = cube.coord_dims(dim_coord)
    cube.remove_coord(dim_coord)
    cube.add_aux_coord(dim_coord, coord_dims)
|
jswanljung/iris
|
lib/iris/util.py
|
Python
|
lgpl-3.0
| 54,362
|
[
"NetCDF"
] |
9ba19bac1495c09995e7a46ca16cbfc4d6f1e3ea9954fcd8a8787ee02097522c
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-0D-3A-31-2C-EC",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
----------------------
As mentioned above you can control execution using environment variables or an .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this
script and having matching .ini files. Go forth and customize your Azure inventory!
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm_inventory.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm_inventory.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm_inventory.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
from os.path import expanduser
# Optional-dependency guard: try to import the Azure SDK pieces this script
# needs.  On failure we record the exception and let main() print a friendly
# install hint instead of crashing with a raw ImportError at import time.
HAS_AZURE = True
HAS_AZURE_EXC = None

try:
    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.compute import __version__ as azure_compute_version
    from azure.common import AzureMissingResourceHttpError, AzureHttpError
    from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
    from azure.mgmt.network.network_management_client import NetworkManagementClient,\
        NetworkManagementClientConfiguration
    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
        ResourceManagementClientConfiguration
    from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
        ComputeManagementClientConfiguration
except ImportError as exc:
    # Keep the original exception so main() can show why the import failed.
    HAS_AZURE_EXC = exc
    HAS_AZURE = False
# Credential attribute -> environment variable consulted when the value is not
# supplied on the command line or in ~/.azure/credentials.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)

# Inventory behaviour switches: attribute name on AzureInventory -> the
# environment variable (or azure_rm.ini key) that can override it.
AZURE_CONFIG_SETTINGS = dict(
    resource_groups='AZURE_RESOURCE_GROUPS',
    tags='AZURE_TAGS',
    include_powerstate='AZURE_INCLUDE_POWERSTATE',
    group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
    group_by_location='AZURE_GROUP_BY_LOCATION',
    group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
    group_by_tag='AZURE_GROUP_BY_TAG'
)

# Minimum supported azure.mgmt.compute API version (compared as a string in
# main(); the date-style version format keeps lexical ordering correct).
AZURE_MIN_VERSION = "2016-03-30"
def azure_id_to_dict(id):
    """Map each segment of an Azure resource id path to the segment after it.

    The pairs overlap: '/a/b/c' yields {'a': 'b', 'b': 'c'}.  Callers rely on
    the 'resourceGroups' / 'networkInterfaces' style keys this produces.
    """
    segments = re.sub(r'^\/', '', id).split('/')
    # zip with a shifted copy reproduces the original overlapping-pair walk.
    return dict(zip(segments, segments[1:]))
class AzureRM(object):
    """Thin wrapper around the Azure SDK management clients.

    Resolves credentials with precedence: command-line parameters ->
    environment variables -> a profile in ~/.azure/credentials, then lazily
    constructs the compute / network / resource-manager clients.
    """

    def __init__(self, args):
        # args: the argparse.Namespace produced by AzureInventory._parse_cli_args().
        self._args = args
        self._compute_client = None
        self._resource_client = None
        self._network_client = None
        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # Service-principal auth takes precedence; fall back to AD user/password.
        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")

    def log(self, msg):
        # Debug-only chatter; enabled by the --debug CLI flag.
        if self.debug:
            print (msg + u'\n')

    def fail(self, msg):
        # Raised errors are caught in AzureInventory.__init__ and turned into sys.exit.
        raise Exception(msg)

    def _get_profile(self, profile="default"):
        """Read one [profile] section from ~/.azure/credentials, or None."""
        path = expanduser("~")
        path += "/.azure/credentials"
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except:
                # Missing keys are fine; partial profiles are validated below.
                pass

        if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
            return credentials

        return None

    def _get_env_credentials(self):
        """Collect credentials from environment variables, or None."""
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            env_credentials[attribute] = os.environ.get(env_variable, None)

        # AZURE_PROFILE in the environment redirects to the credentials file.
        if env_credentials['profile'] is not None:
            credentials = self._get_profile(env_credentials['profile'])
            return credentials

        if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
            return env_credentials

        return None

    def _get_credentials(self, params):
        # Get authentication credentials.
        # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
        self.log('Getting credentials')

        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            arg_credentials[attribute] = getattr(params, attribute)

        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials

        if arg_credentials['client_id'] is not None:
            self.log('Received credentials from parameters.')
            return arg_credentials

        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials

        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials

        return None

    def _register(self, key):
        try:
            # We have to perform the one-time registration here. Otherwise, we receive an error the first
            # time we attempt to use the requested client.
            resource_client = self.rm_client
            resource_client.providers.register(key)
        except Exception as exc:
            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))

    @property
    def network_client(self):
        # Lazily built; also ensures the Microsoft.Network provider is registered.
        self.log('Getting network client')
        if not self._network_client:
            self._network_client = NetworkManagementClient(
                NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id))
            self._register('Microsoft.Network')
        return self._network_client

    @property
    def rm_client(self):
        # Lazily built resource-manager client (also used by _register()).
        self.log('Getting resource manager client')
        if not self._resource_client:
            self._resource_client = ResourceManagementClient(
                ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id))
        return self._resource_client

    @property
    def compute_client(self):
        # Lazily built; also ensures the Microsoft.Compute provider is registered.
        self.log('Getting compute client')
        if not self._compute_client:
            self._compute_client = ComputeManagementClient(
                ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id))
            self._register('Microsoft.Compute')
        return self._compute_client
class AzureInventory(object):
    """Build and print an Ansible dynamic inventory of Azure RM virtual machines.

    Instantiating the class runs the whole pipeline: parse CLI arguments,
    authenticate through AzureRM, fetch the VMs, print the inventory JSON to
    stdout and sys.exit(0).
    """

    def __init__(self):
        self._args = self._parse_cli_args()

        try:
            rm = AzureRM(self._args)
        except Exception as e:
            sys.exit("{0}".format(str(e)))

        self._compute_client = rm.compute_client
        self._network_client = rm.network_client
        self._resource_client = rm.rm_client
        self._security_groups = None

        # Defaults; may be overridden by azure_rm.ini / environment (see _get_settings)
        # and then by command-line arguments below.
        self.resource_groups = []
        self.tags = None
        self.replace_dash_in_groups = False
        self.group_by_resource_group = True
        self.group_by_location = True
        self.group_by_security_group = True
        self.group_by_tag = True
        self.include_powerstate = True

        self._inventory = dict(
            _meta=dict(
                hostvars=dict()
            ),
            azure=[]
        )

        self._get_settings()

        if self._args.resource_groups:
            self.resource_groups = self._args.resource_groups.split(',')

        if self._args.tags:
            self.tags = self._args.tags.split(',')

        if self._args.no_powerstate:
            self.include_powerstate = False

        self.get_inventory()
        print (self._json_format_dict(pretty=self._args.pretty))
        sys.exit(0)

    def _parse_cli_args(self):
        # Parse command line arguments
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file for an Azure subscription')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--debug', action='store_true', default=False,
                            help='Send debug messages to STDOUT')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print JSON output(default: False)')
        parser.add_argument('--profile', action='store',
                            help='Azure profile contained in ~/.azure/credentials')
        parser.add_argument('--subscription_id', action='store',
                            help='Azure Subscription Id')
        parser.add_argument('--client_id', action='store',
                            help='Azure Client Id ')
        parser.add_argument('--secret', action='store',
                            help='Azure Client Secret')
        parser.add_argument('--tenant', action='store',
                            help='Azure Tenant Id')
        parser.add_argument('--ad-user', action='store',
                            help='Active Directory User')
        parser.add_argument('--password', action='store',
                            help='password')
        parser.add_argument('--resource-groups', action='store',
                            help='Return inventory for comma separated list of resource group names')
        parser.add_argument('--tags', action='store',
                            help='Return inventory for comma separated list of tag key:value pairs')
        parser.add_argument('--no-powerstate', action='store_true', default=False,
                            help='Do not include the power state of each virtual host')
        return parser.parse_args()

    def get_inventory(self):
        """Fetch VMs (per resource group or subscription-wide) and load them."""
        if len(self.resource_groups) > 0:
            # get VMs for requested resource groups
            for resource_group in self.resource_groups:
                try:
                    virtual_machines = self._compute_client.virtual_machines.list(resource_group)
                except Exception as exc:
                    sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
                                                                                                    str(exc)))
                if self._args.host or self.tags:
                    selected_machines = self._selected_machines(virtual_machines)
                    self._load_machines(selected_machines)
                else:
                    self._load_machines(virtual_machines)
        else:
            # get all VMs within the subscription
            try:
                virtual_machines = self._compute_client.virtual_machines.list_all()
            except Exception as exc:
                sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))

            # BUGFIX: was `self.tags > 0`, comparing a list/None with an int.
            # That only "worked" through Python 2's arbitrary cross-type
            # ordering (TypeError on Python 3) and was inconsistent with the
            # identical check in the branch above. A truthiness test is the
            # intended behaviour: self.tags is either None or a non-empty list.
            if self._args.host or self.tags:
                selected_machines = self._selected_machines(virtual_machines)
                self._load_machines(selected_machines)
            else:
                self._load_machines(virtual_machines)

    def _load_machines(self, machines):
        """Translate each VM object into hostvars and add it to the inventory."""
        for machine in machines:
            id_dict = azure_id_to_dict(machine.id)

            # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
            # fixed, we should remove the .lower(). Opened Issue
            # #574: https://github.com/Azure/azure-sdk-for-python/issues/574
            resource_group = id_dict['resourceGroups'].lower()

            if self.group_by_security_group:
                self._get_security_groups(resource_group)

            host_vars = dict(
                ansible_host=None,
                private_ip=None,
                private_ip_alloc_method=None,
                public_ip=None,
                public_ip_name=None,
                public_ip_id=None,
                public_ip_alloc_method=None,
                fqdn=None,
                location=machine.location,
                name=machine.name,
                type=machine.type,
                id=machine.id,
                tags=machine.tags,
                network_interface_id=None,
                network_interface=None,
                resource_group=resource_group,
                mac_address=None,
                plan=(machine.plan.name if machine.plan else None),
                virtual_machine_size=machine.hardware_profile.vm_size.value,
                computer_name=machine.os_profile.computer_name,
                provisioning_state=machine.provisioning_state,
            )

            host_vars['os_disk'] = dict(
                name=machine.storage_profile.os_disk.name,
                operating_system_type=machine.storage_profile.os_disk.os_type.value
            )

            if self.include_powerstate:
                host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)

            if machine.storage_profile.image_reference:
                host_vars['image'] = dict(
                    offer=machine.storage_profile.image_reference.offer,
                    publisher=machine.storage_profile.image_reference.publisher,
                    sku=machine.storage_profile.image_reference.sku,
                    version=machine.storage_profile.image_reference.version
                )

            # Add windows details
            if machine.os_profile.windows_configuration is not None:
                host_vars['windows_auto_updates_enabled'] = \
                    machine.os_profile.windows_configuration.enable_automatic_updates
                host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
                host_vars['windows_rm'] = None
                if machine.os_profile.windows_configuration.win_rm is not None:
                    host_vars['windows_rm'] = dict(listeners=None)
                    if machine.os_profile.windows_configuration.win_rm.listeners is not None:
                        host_vars['windows_rm']['listeners'] = []
                        for listener in machine.os_profile.windows_configuration.win_rm.listeners:
                            host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
                                                                             certificate_url=listener.certificate_url))

            # Only the primary NIC contributes addressing info to the hostvars.
            for interface in machine.network_profile.network_interfaces:
                interface_reference = self._parse_ref_id(interface.id)
                network_interface = self._network_client.network_interfaces.get(
                    interface_reference['resourceGroups'],
                    interface_reference['networkInterfaces'])
                if network_interface.primary:
                    if self.group_by_security_group and \
                       self._security_groups[resource_group].get(network_interface.id, None):
                        host_vars['security_group'] = \
                            self._security_groups[resource_group][network_interface.id]['name']
                        host_vars['security_group_id'] = \
                            self._security_groups[resource_group][network_interface.id]['id']
                    host_vars['network_interface'] = network_interface.name
                    host_vars['network_interface_id'] = network_interface.id
                    host_vars['mac_address'] = network_interface.mac_address
                    for ip_config in network_interface.ip_configurations:
                        host_vars['private_ip'] = ip_config.private_ip_address
                        host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method.value
                        if ip_config.public_ip_address:
                            public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                            public_ip_address = self._network_client.public_ip_addresses.get(
                                public_ip_reference['resourceGroups'],
                                public_ip_reference['publicIPAddresses'])
                            host_vars['ansible_host'] = public_ip_address.ip_address
                            host_vars['public_ip'] = public_ip_address.ip_address
                            host_vars['public_ip_name'] = public_ip_address.name
                            host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method.value
                            host_vars['public_ip_id'] = public_ip_address.id
                            if public_ip_address.dns_settings:
                                host_vars['fqdn'] = public_ip_address.dns_settings.fqdn

            self._add_host(host_vars)

    def _selected_machines(self, virtual_machines):
        """Filter VMs by --host name and/or requested tags."""
        selected_machines = []
        for machine in virtual_machines:
            if self._args.host and self._args.host == machine.name:
                selected_machines.append(machine)
            if self.tags and self._tags_match(machine.tags, self.tags):
                selected_machines.append(machine)
        return selected_machines

    def _get_security_groups(self, resource_group):
        ''' For a given resource_group build a mapping of network_interface.id to security_group name '''
        if not self._security_groups:
            self._security_groups = dict()
        if not self._security_groups.get(resource_group):
            self._security_groups[resource_group] = dict()
            for group in self._network_client.network_security_groups.list(resource_group):
                if group.network_interfaces:
                    for interface in group.network_interfaces:
                        self._security_groups[resource_group][interface.id] = dict(
                            name=group.name,
                            id=group.id
                        )

    def _get_powerstate(self, resource_group, name):
        """Return the VM's power state code (e.g. 'running'), or None."""
        try:
            vm = self._compute_client.virtual_machines.get(resource_group,
                                                           name,
                                                           expand='instanceview')
        except Exception as exc:
            sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))

        return next((s.code.replace('PowerState/', '')
                     for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)

    def _add_host(self, vars):
        """Register a host in _meta hostvars and every applicable group."""
        host_name = self._to_safe(vars['name'])
        resource_group = self._to_safe(vars['resource_group'])
        security_group = None
        if vars.get('security_group'):
            security_group = self._to_safe(vars['security_group'])

        if self.group_by_resource_group:
            if not self._inventory.get(resource_group):
                self._inventory[resource_group] = []
            self._inventory[resource_group].append(host_name)

        if self.group_by_location:
            if not self._inventory.get(vars['location']):
                self._inventory[vars['location']] = []
            self._inventory[vars['location']].append(host_name)

        if self.group_by_security_group and security_group:
            if not self._inventory.get(security_group):
                self._inventory[security_group] = []
            self._inventory[security_group].append(host_name)

        self._inventory['_meta']['hostvars'][host_name] = vars
        self._inventory['azure'].append(host_name)

        if self.group_by_tag and vars.get('tags'):
            # A tagged host joins both a 'key' group and a 'key_value' group.
            for key, value in vars['tags'].iteritems():
                safe_key = self._to_safe(key)
                safe_value = safe_key + '_' + self._to_safe(value)
                if not self._inventory.get(safe_key):
                    self._inventory[safe_key] = []
                if not self._inventory.get(safe_value):
                    self._inventory[safe_value] = []
                self._inventory[safe_key].append(host_name)
                self._inventory[safe_value].append(host_name)

    def _json_format_dict(self, pretty=False):
        # convert inventory to json
        if pretty:
            return json.dumps(self._inventory, sort_keys=True, indent=2)
        else:
            return json.dumps(self._inventory)

    def _get_settings(self):
        # Load settings from the .ini, if it exists. Otherwise,
        # look for environment values.
        file_settings = self._load_settings()
        if file_settings:
            for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags') and file_settings.get(key, None) is not None:
                    values = file_settings.get(key).split(',')
                    if len(values) > 0:
                        setattr(self, key, values)
                elif file_settings.get(key, None) is not None:
                    val = self._to_boolean(file_settings[key])
                    setattr(self, key, val)
        else:
            env_settings = self._get_env_settings()
            for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags') and env_settings.get(key, None) is not None:
                    values = env_settings.get(key).split(',')
                    if len(values) > 0:
                        setattr(self, key, values)
                elif env_settings.get(key, None) is not None:
                    val = self._to_boolean(env_settings[key])
                    setattr(self, key, val)

    def _parse_ref_id(self, reference):
        """Split an Azure reference id into non-overlapping key/value pairs."""
        response = {}
        keys = reference.strip('/').split('/')
        for index in range(len(keys)):
            if index < len(keys) - 1 and index % 2 == 0:
                response[keys[index]] = keys[index + 1]
        return response

    def _to_boolean(self, value):
        # NOTE(review): unrecognised values silently default to True here;
        # presumably intentional (permissive config), but worth confirming.
        if value in ['Yes', 'yes', 1, 'True', 'true', True]:
            result = True
        elif value in ['No', 'no', 0, 'False', 'false', False]:
            result = False
        else:
            result = True
        return result

    def _get_env_settings(self):
        env_settings = dict()
        for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
            env_settings[attribute] = os.environ.get(env_variable, None)
        return env_settings

    def _load_settings(self):
        """Read the sibling <script_basename>.ini [azure] section, if any."""
        basename = os.path.splitext(os.path.basename(__file__))[0]
        path = basename + '.ini'
        config = None
        settings = None
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except:
            pass

        if config is not None:
            settings = dict()
            for key in AZURE_CONFIG_SETTINGS:
                try:
                    settings[key] = config.get('azure', key, raw=True)
                except:
                    pass

        return settings

    def _tags_match(self, tag_obj, tag_args):
        '''
        Return True if the tags object from a VM contains the requested tag values.

        :param tag_obj:  Dictionary of string:string pairs
        :param tag_args: List of strings in the form key=value
        :return: boolean
        '''
        if not tag_obj:
            return False

        matches = 0
        for arg in tag_args:
            arg_key = arg
            arg_value = None
            if re.search(r':', arg):
                arg_key, arg_value = arg.split(':')
            if arg_value and tag_obj.get(arg_key, None) == arg_value:
                matches += 1
            elif not arg_value and tag_obj.get(arg_key, None) is not None:
                matches += 1
        if matches == len(tag_args):
            return True
        return False

    def _to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        regex = "[^A-Za-z0-9\_"
        if not self.replace_dash_in_groups:
            regex += "\-"
        return re.sub(regex + "]", "_", word)
def main():
    """Entry point: validate the Azure SDK is usable, then run the inventory."""
    if not HAS_AZURE:
        sys.exit("The Azure python sdk is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))

    # NOTE(review): this is a plain string comparison; it is only correct while
    # both sides keep the date-style 'YYYY-MM-DD' format — confirm before
    # bumping AZURE_MIN_VERSION to a different scheme.
    if azure_compute_version < AZURE_MIN_VERSION:
        sys.exit("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
                 "Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))

    # Constructing AzureInventory prints the inventory and exits the process.
    AzureInventory()

if __name__ == '__main__':
    main()
|
supertom/ansible
|
contrib/inventory/azure_rm.py
|
Python
|
gpl-3.0
| 32,096
|
[
"Galaxy"
] |
05a18f843b7eee8cdcbf1b3bcb84aaec507b93b6a9019d961913ca06263f4f99
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import textwrap
import marshal
from setuptools.extern import six
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.extension import Library
from setuptools import Command
# Prefer the modern sysconfig API; fall back to distutils on old interpreters.
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # Location of the pure-Python site-packages directory.
        return get_path("purelib")
except ImportError:
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        # get_python_lib(False) -> the non-platform-specific library directory.
        return get_python_lib(False)
def strip_module(filename):
    """Return *filename* minus any extension and a trailing 'module' suffix.

    Used to derive the stub-loader name for a compiled extension, e.g.
    'foomodule.so' -> 'foo'.
    """
    base = os.path.splitext(filename)[0] if '.' in filename else filename
    if base.endswith('module'):
        base = base[:-len('module')]
    return base
def write_stub(resource, pyfile):
    """Write a stub loader module to *pyfile*.

    The stub locates the native extension *resource* at import time via
    pkg_resources and loads it with imp.load_dynamic.
    """
    stub_source = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip()
    with open(pyfile, 'w') as stub_file:
        stub_file.write(stub_source % resource)
class bdist_egg(Command):
    """setuptools command that builds a ``.egg`` distribution archive."""

    description = "create an \"egg\" distribution"

    user_options = [
        ('bdist-dir=', 'b',
         "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
         "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
         "keep the pseudo-installation tree around after " +
         "creating the distribution archive"),
        ('dist-dir=', 'd',
         "directory to put final built distributions in"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
    ]

    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options(self):
        # distutils option protocol: reset all options to "unset".
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        # Resolve defaults and compute the output .egg path.
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info

        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')

        if self.plat_name is None:
            self.plat_name = get_build_platform()

        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))

        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()

            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')

    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir

        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files, []

        for item in old:
            if isinstance(item, tuple) and len(item) == 2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized == site_packages or normalized.startswith(
                        site_packages + os.sep
                    ):
                        # Rewrite an absolute site-packages target to a
                        # path relative to the egg root.
                        item = realpath[len(site_packages) + 1:], item[1]
                        # XXX else: raise ???
            self.distribution.data_files.append(item)

        try:
            log.info("installing package data to %s", self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            self.distribution.data_files = old

    def get_outputs(self):
        return [self.egg_output]

    def call_command(self, cmdname, **kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname, self.bdist_dir)
        kw.setdefault('skip_build', self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s", self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root

        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # Create a .py stub loader for every compiled extension.
        for (p, ext_name) in enumerate(ext_outputs):
            filename, ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
                                  '.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s", ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep, '/')

        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()

        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root, 'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s", script_dir)
            self.call_command('install_scripts', install_dir=script_dir,
                              no_ep=1)

        self.copy_metadata_to(egg_info)
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s", native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s", native_libs)
            if not self.dry_run:
                os.unlink(native_libs)

        write_safety_flag(
            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
        )

        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )

        if self.exclude_source_files:
            self.zap_pyfiles()

        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)

        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_egg', get_python_version(), self.egg_output))

    def zap_pyfiles(self):
        log.info("Removing .py files from temporary directory")
        for base, dirs, files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base, name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)

    def zip_safe(self):
        # Honour an explicit zip_safe flag; otherwise scan the built tree.
        safe = getattr(self.distribution, 'zip_safe', None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def gen_header(self):
        """Return the zipfile open mode; prepend an eggsecutable shell header if declared."""
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
        if ep is None:
            return 'w'  # not an eggsecutable, do it the usual way.

        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )

        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()

        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        # 'a' so the zip data is appended after the shell header just written.
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info, '')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []

        paths = {self.bdist_dir: ''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base] + filename)
            for filename in dirs:
                paths[os.path.join(base, filename)] = (paths[base] +
                                                       filename + '/')

        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext, Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
                        ext_outputs.append(filename)

        return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory.

    Yields the same (base, dirs, files) triples as os.walk, except that the
    top-level 'EGG-INFO' directory is pruned from the traversal.
    """
    first = True
    for base, dirs, files in os.walk(egg_dir):
        if first:
            first = False
            # Pruning dirs in-place stops os.walk from descending into it.
            if 'EGG-INFO' in dirs:
                dirs.remove('EGG-INFO')
        yield base, dirs, files
def analyze_egg(egg_dir, stubs):
    """Return the zip-safety verdict for the built egg tree.

    An existing EGG-INFO flag file wins; otherwise every bytecode file is
    scanned for zip-unsafe constructs (see scan_module).
    """
    # check for existing flag in EGG-INFO
    for flag, fn in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
            return flag
    if not can_scan():
        # Bytecode can't be analyzed on this interpreter: assume unsafe.
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith('.py') or name.endswith('.pyw'):
                continue  # source files are scanned via their bytecode
            elif name.endswith('.pyc') or name.endswith('.pyo'):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Record the zip-safety verdict as a marker file in *egg_dir*.

    Exactly one of 'zip-safe' / 'not-zip-safe' ends up present (or neither
    when *safe* is None); any stale or contradicting marker is removed.
    """
    # Write or remove zip safety flag file(s)
    for flag, fn in safety_flags.items():
        fn = os.path.join(egg_dir, fn)
        if os.path.exists(fn):
            # Remove a marker that no longer matches the verdict.
            if safe is None or bool(safe) != flag:
                os.unlink(fn)
        elif safe is not None and bool(safe) == flag:
            f = open(fn, 'wt')
            f.write('\n')
            f.close()
# Marker file names written into EGG-INFO to record the zip-safety verdict.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Reads the compiled bytecode file *name* under *base* and inspects the
    names/strings it references; returns False (unsafe) when it touches
    __file__/__path__ or filesystem-dependent inspect APIs.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    if sys.version_info < (3, 3):
        skip = 8  # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    f = open(filename, 'rb')
    f.read(skip)
    code = marshal.load(f)
    f.close()
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUGFIX: a missing comma after 'getfile' used to concatenate it with
        # 'getsourcelines' into the single string 'getfilegetsourcelines', so
        # neither inspect.getfile nor inspect.getsourcelines was ever flagged.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects."""
    # Referenced names come first, then interesting constants, depth-first.
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, CodeType):
            # Recurse into nested function/class bodies.
            for symbol in iter_symbols(const):
                yield symbol
        elif isinstance(const, six.string_types):
            yield const
def can_scan():
    """Return True when CPython-compatible bytecode can be analyzed here.

    Jython and IronPython do not produce marshal-compatible bytecode, so the
    scan is skipped there (warns and returns None, which is falsy).
    """
    platform = sys.platform
    if platform.startswith('java') or platform == 'cli':
        log.warn("Unable to analyze compiled code on this platform.")
        log.warn("Please ask the author to include a 'zip_safe'"
                 " setting (either True or False) in the package's setup.py")
        return None
    # CPython, PyPy, etc.
    return True
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory (see bdist_egg.call_command, which
# defaults each of these to bdist_dir).
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.

    The archive is written to `zip_filename` (not, as the old docstring
    claimed, to `base_dir` + ".zip") using the stdlib "zipfile" module; there
    is no InfoZIP fallback.  With `dry_run` set, the tree is still walked and
    each would-be entry logged, but nothing is written.  `verbose` is accepted
    for distutils signature compatibility but unused.  Returns `zip_filename`.
    """
    import zipfile
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    def visit(z, dirname, names):
        # Add every regular file under `dirname`, archived under its path
        # relative to base_dir.
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir) + 1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'", p)
    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            visit(z, dirname, files)
        z.close()
    else:
        # Dry run: visit() only logs in this mode, so z is never used.
        for dirname, dirs, files in os.walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/setuptools/command/bdist_egg.py
|
Python
|
gpl-3.0
| 17,178
|
[
"VisIt"
] |
5c36a6bbaf9c7f262ba9debb606439816a3ed1cf24b96aa43f2d6f6297ec031c
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for online Entrez access.
This file include tests for accessing the online Entrez service and parsing the
returned results. Note that we are merely testing the access and whether the
results are parseable. Detailed tests on each Entrez service are not within the
scope of this file as they are already covered in test_Entrez.py.
"""
import os
import unittest
import requires_internet
requires_internet.check()
from Bio import Entrez
from Bio import Medline
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
# Bio.Entrez's XML parser is known to fail under Jython's expat binding, so
# raise Biopython's standard missing-dependency error up front instead of
# letting every test fail obscurely.
if os.name == 'java':
    try:
        from xml.parsers.expat import XML_PARAM_ENTITY_PARSING_ALWAYS
        del XML_PARAM_ENTITY_PARSING_ALWAYS
    except ImportError:
        from Bio import MissingPythonDependencyError
        raise MissingPythonDependencyError("The Bio.Entrez XML parser fails on "
                                "Jython, see http://bugs.jython.org/issue1447")
# This lets us set the email address to be sent to NCBI Entrez:
Entrez.email = "biopython-dev@biopython.org"
# Expected prefix and query fragments of every EUtils request URL; the tests
# below assert these appear in each handle's reported URL.
URL_HEAD = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
URL_TOOL = "tool=biopython"
URL_EMAIL = "email=biopython-dev%40biopython.org"
class EntrezOnlineCase(unittest.TestCase):
    """Live tests against the NCBI Entrez EUtils services.

    These only verify that each service is reachable and that its output is
    parseable; detailed per-service parser coverage lives in test_Entrez.py.
    """

    def test_read_from_url(self):
        """Test Entrez.read from URL"""
        handle = Entrez.einfo()
        self.assertTrue(handle.url.startswith(URL_HEAD + "einfo.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        rec = Entrez.read(handle)
        handle.close()
        self.assertTrue(isinstance(rec, dict))
        self.assertTrue('DbList' in rec)
        # arbitrary number, just to make sure that DbList has contents
        self.assertTrue(len(rec['DbList']) > 5)

    def test_parse_from_url(self):
        """Test Entrez.parse from URL"""
        handle = Entrez.efetch(db='protein', id='15718680,157427902,119703751',
                               retmode='xml')
        self.assertTrue(handle.url.startswith(URL_HEAD + "efetch.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=15718680%2C157427902%2C119703751" in handle.url, handle.url)
        recs = list(Entrez.parse(handle))
        handle.close()
        self.assertEqual(3, len(recs))
        # arbitrary number, just to make sure the parser works.
        # BUGFIX: was `assertTrue(all(len(rec).keys > 5) for rec in recs)`,
        # which handed a (always-truthy) generator to assertTrue and could
        # never fail; `len(rec).keys` would also be an AttributeError.
        self.assertTrue(all(len(rec) > 5 for rec in recs))

    def test_webenv_search(self):
        """Test Entrez.search from link webenv history"""
        handle = Entrez.elink(db='nucleotide', dbfrom='protein',
                              id='22347800,48526535', webenv=None, query_key=None,
                              cmd='neighbor_history')
        self.assertTrue(handle.url.startswith(URL_HEAD + "elink.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=22347800%2C48526535" in handle.url, handle.url)
        recs = Entrez.read(handle)
        handle.close()
        record = recs.pop()
        # Reuse the server-side history (WebEnv/query_key) for the follow-up
        # esearch instead of resubmitting the ids.
        webenv = record['WebEnv']
        query_key = record['LinkSetDbHistory'][0]['QueryKey']
        handle = Entrez.esearch(db='nucleotide', term=None,
                                retstart=0, retmax=10,
                                webenv=webenv, query_key=query_key,
                                usehistory='y')
        self.assertTrue(handle.url.startswith(URL_HEAD + "esearch.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        search_record = Entrez.read(handle)
        handle.close()
        self.assertEqual(2, len(search_record['IdList']))

    def test_seqio_from_url(self):
        """Test Entrez into SeqIO.read from URL"""
        handle = Entrez.efetch(db='nucleotide', id='186972394', rettype='gb',
                               retmode='text')
        self.assertTrue(handle.url.startswith(URL_HEAD + "efetch.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=186972394" in handle.url)
        record = SeqIO.read(handle, 'genbank')
        handle.close()
        self.assertTrue(isinstance(record, SeqRecord))
        self.assertEqual('EU490707.1', record.id)
        self.assertEqual(1302, len(record))

    def test_medline_from_url(self):
        """Test Entrez into Medline.read from URL"""
        handle = Entrez.efetch(db="pubmed", id='19304878', rettype="medline",
                               retmode="text")
        self.assertTrue(handle.url.startswith(URL_HEAD + "efetch.fcgi?"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=19304878" in handle.url)
        record = Medline.read(handle)
        handle.close()
        self.assertTrue(isinstance(record, dict))
        self.assertEqual('19304878', record['PMID'])
        self.assertEqual('10.1093/bioinformatics/btp163 [doi]', record['LID'])

    def test_elink(self):
        """Test that elink encodes ids given as a string or as a list."""
        # Commas: Link from protein to gene
        handle = Entrez.elink(db="gene", dbfrom="protein",
                              id="15718680,157427902,119703751")
        self.assertTrue(handle.url.startswith(URL_HEAD + "elink.fcgi"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=15718680%2C157427902%2C119703751" in handle.url, handle.url)
        handle.close()
        # Multiple ID entries: Find one-to-one links from protein to gene
        handle = Entrez.elink(db="gene", dbfrom="protein",
                              id=["15718680", "157427902", "119703751"])
        self.assertTrue(handle.url.startswith(URL_HEAD + "elink.fcgi"), handle.url)
        self.assertTrue(URL_TOOL in handle.url)
        self.assertTrue(URL_EMAIL in handle.url)
        self.assertTrue("id=15718680" in handle.url, handle.url)
        self.assertTrue("id=157427902" in handle.url, handle.url)
        self.assertTrue("id=119703751" in handle.url, handle.url)
        handle.close()

    def test_epost(self):
        """Test that epost accepts ids as a string and as a list."""
        handle = Entrez.epost("nuccore", id="186972394,160418")
        self.assertEqual(URL_HEAD + "epost.fcgi", handle.url)
        handle.close()
        handle = Entrez.epost("nuccore", id=["160418", "160351"])
        self.assertEqual(URL_HEAD + "epost.fcgi", handle.url)
        handle.close()
if __name__ == "__main__":
    # verbosity=2 lists each (slow, online) test as it runs.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_Entrez_online.py
|
Python
|
gpl-2.0
| 6,875
|
[
"Biopython"
] |
71dd85b69b3716fdbaa78875071574a30caca57360db911e27b97f9d05616e80
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`customxmlhandler` module provides the XML functionality for custom
slides
The basic XML is of the format::
<?xml version="1.0" encoding="UTF-8"?>
<song version="1.0">
<lyrics language="en">
<verse type="chorus" label="1">
<![CDATA[ ... ]]>
</verse>
</lyrics>
</song>
"""
import logging
from xml.dom.minidom import Document
from xml.etree.ElementTree import dump
from lxml import etree, objectify
log = logging.getLogger(__name__)
#TODO: These classes need to be refactored into a single class.
class CustomXMLBuilder(object):
    """
    Builds the XML document used to store the content of a custom slide.
    """
    log.info(u'CustomXMLBuilder Loaded')

    def __init__(self):
        """
        Prepare an empty document: a <song> root plus an empty <lyrics>
        container ready to receive verses.
        """
        # Create the minidom document
        self.custom_xml = Document()
        self.new_document()
        self.add_lyrics_to_song()

    def new_document(self):
        """
        Create the <song> root element of a fresh custom XML document.
        """
        self.song = self.custom_xml.createElement(u'song')
        self.custom_xml.appendChild(self.song)
        self.song.setAttribute(u'version', u'1.0')

    def add_lyrics_to_song(self):
        """
        Attach the ``<lyrics>`` element which holds the custom item's text.
        """
        self.lyrics = self.custom_xml.createElement(u'lyrics')
        self.lyrics.setAttribute(u'language', u'en')
        self.song.appendChild(self.lyrics)

    def add_verse_to_lyrics(self, verse_type, number, content):
        """
        Append one ``<verse>`` to the ``<lyrics>`` element.

        ``verse_type``
            The kind of verse: "Chorus", "Verse", "Bridge" or "Custom".

        ``number``
            The label of the item, for example: verse 1.

        ``content``
            The verse text to store.
        """
        verse_element = self.custom_xml.createElement(u'verse')
        verse_element.setAttribute(u'type', verse_type)
        verse_element.setAttribute(u'label', number)
        self.lyrics.appendChild(verse_element)
        # CDATA protects the XML from markup-special characters in the text.
        verse_element.appendChild(self.custom_xml.createCDATASection(content))

    def _dump_xml(self):
        """
        Debugging aid: return a pretty-printed rendering of the document.
        """
        return self.custom_xml.toprettyxml(indent=u'  ')

    def extract_xml(self):
        """
        Return the finished document serialised as UTF-8 XML.
        """
        return self.custom_xml.toxml(u'utf-8')
class CustomXMLParser(object):
    """
    Reads in a custom slide's XML and exposes its verses.
    """
    log.info(u'CustomXMLParser Loaded')

    def __init__(self, xml):
        """
        Parse ``xml`` into an lxml objectify tree (``self.custom_xml`` stays
        ``None`` when the input is not valid XML).

        ``xml``
            The XML of the custom to be parsed.
        """
        self.custom_xml = None
        # Drop the XML declaration before handing the string to lxml.
        # NOTE(review): the fixed 38-character slice assumes the canonical
        # declaration emitted by CustomXMLBuilder — confirm for other inputs.
        if xml.startswith(u'<?xml'):
            xml = xml[38:]
        try:
            self.custom_xml = objectify.fromstring(xml)
        except etree.XMLSyntaxError:
            log.exception(u'Invalid xml %s', xml)

    def get_verses(self):
        """
        Return a list of ``[attributes, text]`` pairs, one per ``<verse>``
        element found in the document.
        """
        verses = []
        for node in self.custom_xml.getiterator():
            if node.tag != u'verse':
                continue
            if node.text is None:
                node.text = u''
            verses.append([node.attrib, unicode(node.text)])
        return verses

    def _dump_xml(self):
        """
        Debugging aid: dump the parsed tree so that we can see what we have.
        """
        return dump(self.custom_xml)
|
marmyshev/transitions
|
openlp/plugins/custom/lib/customxmlhandler.py
|
Python
|
gpl-2.0
| 6,009
|
[
"Brian"
] |
b490f90e75270b4a2ae71b4eb499f9b79d5981e20d4430bb461c60f8d51d9f3d
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to build pipeline fragment that produces given PCollections.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
import apache_beam as beam
from apache_beam.pipeline import PipelineVisitor
from apache_beam.testing.test_stream import TestStream
class PipelineFragment(object):
  """A fragment of a pipeline definition.
  A pipeline fragment is built from the original pipeline definition to include
  only PTransforms that are necessary to produce the given PCollections.
  """
  def __init__(self, pcolls, options=None):
    """Constructor of PipelineFragment.
    Args:
      pcolls: (List[PCollection]) a list of PCollections to build pipeline
        fragment for.
      options: (PipelineOptions) the pipeline options for the implicit
        pipeline run.
    """
    assert len(pcolls) > 0, (
        'Need at least 1 PCollection as the target data to build a pipeline '
        'fragment that produces it.')
    for pcoll in pcolls:
      assert isinstance(pcoll, beam.pvalue.PCollection), (
          '{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
    # No modification to self._user_pipeline is allowed.
    self._user_pipeline = pcolls[0].pipeline
    # These are user PCollections. Do not use them to deduce anything that
    # will be executed by any runner. Instead, use
    # `self._runner_pcolls_to_user_pcolls.keys()` to get copied PCollections.
    self._pcolls = set(pcolls)
    for pcoll in self._pcolls:
      assert pcoll.pipeline is self._user_pipeline, (
          '{} belongs to a different user pipeline than other PCollections '
          'given and cannot be used to build a pipeline fragment that produces '
          'the given PCollections.'.format(pcoll))
    self._options = options
    # A copied pipeline instance for modification without changing the user
    # pipeline instance held by the end user. This instance can be processed
    # into a pipeline fragment that later run by the underlying runner.
    self._runner_pipeline = self._build_runner_pipeline()
    _, self._context = self._runner_pipeline.to_runner_api(
        return_context=True, use_fake_coders=True)
    # Imported here (not at module level) — presumably to avoid a circular
    # import with pipeline_instrument; confirm before hoisting.
    from apache_beam.runners.interactive import pipeline_instrument as instr
    self._runner_pcoll_to_id = instr.pcolls_to_pcoll_id(
        self._runner_pipeline, self._context)
    # Correlate components in the runner pipeline to components in the user
    # pipeline. The target pcolls are the pcolls given and defined in the user
    # pipeline.
    self._id_to_target_pcoll = self._calculate_target_pcoll_ids()
    self._label_to_user_transform = self._calculate_user_transform_labels()
    # Below will give us the 1:1 correlation between
    # PCollections/AppliedPTransforms from the copied runner pipeline and
    # PCollections/AppliedPTransforms from the user pipeline.
    # (Dict[PCollection, PCollection])
    (
        self._runner_pcolls_to_user_pcolls,
        # (Dict[AppliedPTransform, AppliedPTransform])
        self._runner_transforms_to_user_transforms
    ) = self._build_correlation_between_pipelines(
        self._runner_pcoll_to_id,
        self._id_to_target_pcoll,
        self._label_to_user_transform)
    # Below are operated on the runner pipeline.
    (self._necessary_transforms,
     self._necessary_pcollections) = self._mark_necessary_transforms_and_pcolls(
         self._runner_pcolls_to_user_pcolls)
    self._runner_pipeline = self._prune_runner_pipeline_to_fragment(
        self._runner_pipeline, self._necessary_transforms)
  def deduce_fragment(self):
    """Deduce the pipeline fragment as an apache_beam.Pipeline instance."""
    return beam.pipeline.Pipeline.from_runner_api(
        self._runner_pipeline.to_runner_api(use_fake_coders=True),
        self._runner_pipeline.runner,
        self._options)
  def run(self, display_pipeline_graph=False, use_cache=True, blocking=False):
    """Shorthand to run the pipeline fragment."""
    # Temporarily override the runner's private display/cache/blocking flags
    # for this single run; the finally block restores them so a runner shared
    # with other pipelines is left untouched.
    try:
      preserved_skip_display = self._runner_pipeline.runner._skip_display
      preserved_force_compute = self._runner_pipeline.runner._force_compute
      preserved_blocking = self._runner_pipeline.runner._blocking
      self._runner_pipeline.runner._skip_display = not display_pipeline_graph
      self._runner_pipeline.runner._force_compute = not use_cache
      self._runner_pipeline.runner._blocking = blocking
      return self.deduce_fragment().run()
    finally:
      self._runner_pipeline.runner._skip_display = preserved_skip_display
      self._runner_pipeline.runner._force_compute = preserved_force_compute
      self._runner_pipeline.runner._blocking = preserved_blocking
  def _build_runner_pipeline(self):
    # Round-trip the user pipeline through its runner-API proto to obtain a
    # separate pipeline object that is safe to mutate.
    return beam.pipeline.Pipeline.from_runner_api(
        self._user_pipeline.to_runner_api(use_fake_coders=True),
        self._user_pipeline.runner,
        self._options)
  def _calculate_target_pcoll_ids(self):
    # Map each target PCollection's runner-API id ('' when the lookup fails)
    # back to the user-given PCollection object.
    pcoll_id_to_target_pcoll = {}
    for pcoll in self._pcolls:
      pcoll_id_to_target_pcoll[self._runner_pcoll_to_id.get(str(pcoll),
                                                            '')] = pcoll
    return pcoll_id_to_target_pcoll
  def _calculate_user_transform_labels(self):
    # Index every transform (composite and leaf) of the copied pipeline by
    # its full label.
    label_to_user_transform = {}
    class UserTransformVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        if transform_node is not None:
          label_to_user_transform[transform_node.full_label] = transform_node
    v = UserTransformVisitor()
    self._runner_pipeline.visit(v)
    return label_to_user_transform
  def _build_correlation_between_pipelines(
      self, runner_pcoll_to_id, id_to_target_pcoll, label_to_user_transform):
    # Walk the runner pipeline once, pairing each of its PCollections and
    # AppliedPTransforms with the corresponding user-pipeline object (if any).
    runner_pcolls_to_user_pcolls = {}
    runner_transforms_to_user_transforms = {}
    class CorrelationVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        self._process_transform(transform_node)
        for in_pcoll in transform_node.inputs:
          self._process_pcoll(in_pcoll)
        for out_pcoll in transform_node.outputs.values():
          self._process_pcoll(out_pcoll)
      def _process_pcoll(self, pcoll):
        pcoll_id = runner_pcoll_to_id.get(str(pcoll), '')
        if pcoll_id in id_to_target_pcoll:
          runner_pcolls_to_user_pcolls[pcoll] = (id_to_target_pcoll[pcoll_id])
      def _process_transform(self, transform_node):
        if transform_node.full_label in label_to_user_transform:
          runner_transforms_to_user_transforms[transform_node] = (
              label_to_user_transform[transform_node.full_label])
    v = CorrelationVisitor()
    self._runner_pipeline.visit(v)
    return runner_pcolls_to_user_pcolls, runner_transforms_to_user_transforms
  def _mark_necessary_transforms_and_pcolls(self, runner_pcolls_to_user_pcolls):
    # Backwards reachability from the target PCollections: repeatedly follow
    # each pcoll's producer chain, collecting producers plus their inputs and
    # side inputs, until no new PCollection is discovered (fixed point).
    necessary_transforms = set()
    all_inputs = set()
    updated_all_inputs = set(runner_pcolls_to_user_pcolls.keys())
    # Do this until no more new PCollection is recorded.
    while len(updated_all_inputs) != len(all_inputs):
      all_inputs = set(updated_all_inputs)
      for pcoll in all_inputs:
        producer = pcoll.producer
        while producer:
          if producer in necessary_transforms:
            break
          # Mark the AppliedPTransform as necessary.
          necessary_transforms.add(producer)
          # Record all necessary input and side input PCollections.
          updated_all_inputs.update(producer.inputs)
          # pylint: disable=map-builtin-not-iterating
          side_input_pvalues = set(
              map(lambda side_input: side_input.pvalue, producer.side_inputs))
          updated_all_inputs.update(side_input_pvalues)
          # Go to its parent AppliedPTransform.
          producer = producer.parent
    return necessary_transforms, all_inputs
  def _prune_runner_pipeline_to_fragment(
      self, runner_pipeline, necessary_transforms):
    # Detach every transform that is not in necessary_transforms; TestStream
    # composites are skipped entirely (returned early) and kept intact.
    class PruneVisitor(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        if isinstance(transform_node.transform, TestStream):
          return
        pruned_parts = list(transform_node.parts)
        for part in transform_node.parts:
          if part not in necessary_transforms:
            pruned_parts.remove(part)
        transform_node.parts = tuple(pruned_parts)
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        if transform_node not in necessary_transforms:
          transform_node.parent = None
    v = PruneVisitor()
    runner_pipeline.visit(v)
    return runner_pipeline
|
iemejia/incubator-beam
|
sdks/python/apache_beam/runners/interactive/pipeline_fragment.py
|
Python
|
apache-2.0
| 9,582
|
[
"VisIt"
] |
d3b3420877ca913d2f7d29afa55cc76d9ff8c8a5e4951d68179217359c067251
|
from data.Element import Element
from data import Msg
class Test(Element):
    """A test element that records per-file diffs and empty output files."""

    def __init__(self, path, name, depth=0, parent=''):
        super().__init__(path, name, depth, parent)
        # Mapping of file name -> diff recorded for that file.
        self.diffs = {}
        # Names of files that were found to be empty.
        self.empty_files = []

    def accept(self, visitor):
        """Visitor-pattern hook: dispatch this node back to *visitor*."""
        visitor.visit(self)

    def add_diff(self, name, diff):
        """Record *diff* for the file called *name*."""
        self.diffs[name] = diff

    def add_empty(self, name):
        """Mark the file called *name* as empty."""
        self.empty_files.append(name)
|
Sildra/PyTester
|
PyTester/data/Test.py
|
Python
|
apache-2.0
| 471
|
[
"VisIt"
] |
beac9b80bfccfcded2f997899ae05681c77921cd52bff7def3b315658cd2ae83
|
#===============================================================================
#
# OpenMPTemplates.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2016-2021 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
# Definition of a population as a c-like struct, divided
# into two groups: rate or spike
#
# Parameters:
#
# id: id of the population
# additional: neuron specific definitions
# accessors: set of functions to export population data to python
population_header = """/*
* ANNarchy-version: %(annarchy_version)s
*/
#pragma once
#include "ANNarchy.h"
#include <random>
%(include_additional)s
%(include_profile)s
extern %(float_prec)s dt;
extern long int t;
extern int global_num_threads;
extern std::vector<std::mt19937> rng;
%(extern_global_operations)s
%(struct_additional)s
///////////////////////////////////////////////////////////////
// Main Structure for the population of id %(id)s (%(name)s)
///////////////////////////////////////////////////////////////
struct PopStruct%(id)s{
int size; // Number of neurons
bool _active; // Allows to shut down the whole population
int max_delay; // Maximum number of steps to store for delayed synaptic transmission
// Access functions used by cython wrapper
int get_size() { return size; }
void set_size(int s) { size = s; }
int get_max_delay() { return max_delay; }
void set_max_delay(int d) { max_delay = d; }
bool is_active() { return _active; }
void set_active(bool val) { _active = val; }
%(declare_spike_arrays)s
// Neuron specific parameters and variables
%(declare_parameters_variables)s
%(declare_delay)s
%(declare_FR)s
%(declare_additional)s
%(declare_profile)s
// Access methods to the parameters and variables
%(access_parameters_variables)s
%(access_additional)s
// Method called to initialize the data structures
void init_population() {
_active = true;
%(init_parameters_variables)s
%(init_spike)s
%(init_delay)s
%(init_FR)s
%(init_additional)s
%(init_profile)s
}
// Method called to reset the population
void reset() {
%(reset_spike)s
%(reset_delay)s
%(reset_additional)s
}
// Init rng dist
void init_rng_dist() {
%(init_rng_dist)s
}
// Method to draw new random numbers
void update_rng(int tid) {
#ifdef _TRACE_SIMULATION_STEPS
std::cout << " PopStruct%(id)s::update_rng()" << std::endl;
#endif
%(update_rng)s
}
// Method to update global operations on the population (min/max/mean...)
void update_global_ops(int tid, int nt) {
%(update_global_ops)s
}
// Method to enqueue output variables in case outgoing projections have non-zero delay
void update_delay() {
%(update_delay)s
}
// Method to dynamically change the size of the queue for delayed variables
void update_max_delay(int value) {
%(update_max_delay)s
}
// Main method to update neural variables
void update(int tid) {
#ifdef _TRACE_SIMULATION_STEPS
std::cout << " PopStruct%(id)s::update()" << std::endl;
#endif
%(update_variables)s
}
void spike_gather(int tid, int num_threads) {
%(test_spike_cond)s
}
%(stop_condition)s
// Memory management: track the memory consumption
long int size_in_bytes() {
long int size_in_bytes = 0;
%(determine_size)s
return size_in_bytes;
}
// Memory management: track the memory consumption
void clear() {
%(clear_container)s
}
};
"""
# c like definition of neuron attributes, whereas 'local' is used if values can vary across
# neurons, consequently 'global' is used if values are common to all neurons.Currently two
# types of sets are defined: openmp and cuda. In cuda case additional 'dirty' flags are
# created.
#
# Parameters:
#
# type: data type of the variable (double, float, int ...)
# name: name of the variable
# attr_type: either 'variable' or 'parameter'
#
attribute_decl = {
'local':
"""
// Local %(attr_type)s %(name)s
std::vector< %(type)s > %(name)s;
""",
'global':
"""
// Global %(attr_type)s %(name)s
%(type)s %(name)s ;
"""
}
# c like definition of accessors for neuron attributes, whereas 'local' is used if values can vary
# across neurons, consequently 'global' is used if values are common to all neurons. Currently two
# types of sets are defined: openmp and cuda. In cuda case additional 'dirty' flags are created for
# each variable (set to true, in case of setters).
#
# Parameters:
#
# type: data type of the variable (double, float, int ...)
# name: name of the variable
# attr_type: either 'variable' or 'parameter'
#
attribute_acc = {
'local_get_all': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
return %(name)s;
}
""",
'local_get_single': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
return %(name)s[rk];
}
""",
'local_set_all': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s = value;
return;
}
""",
'local_set_single': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s[rk] = value;
return;
}
""",
'global_get': """
// Global %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
return %(name)s;
}
""",
'global_set': """
// Global %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s = value;
return;
}
"""
}
# This function offers a generic function per data-type to the Python frontend which
# should return the data based on the variable name.
#
# Parameters:
#
# ctype: data type of the variable (double, float, int ...)
# ctype_name: function names should not contain spaces like in unsigned int is therefore transformed to unsigned_int
# id: object ID
attribute_template = {
'local': """
std::vector<%(ctype)s> get_local_attribute_all_%(ctype_name)s(std::string name) {
%(local_get1)s
// should not happen
std::cerr << "PopStruct%(id)s::get_local_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
return std::vector<%(ctype)s>();
}
%(ctype)s get_local_attribute_%(ctype_name)s(std::string name, int rk) {
assert( (rk < size) );
%(local_get2)s
// should not happen
std::cerr << "PopStruct%(id)s::get_local_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
return static_cast<%(ctype)s>(0.0);
}
void set_local_attribute_all_%(ctype_name)s(std::string name, std::vector<%(ctype)s> value) {
assert( (value.size() == size) );
%(local_set1)s
// should not happen
std::cerr << "PopStruct%(id)s::set_local_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
}
void set_local_attribute_%(ctype_name)s(std::string name, int rk, %(ctype)s value) {
assert( (rk < size) );
%(local_set2)s
// should not happen
std::cerr << "PopStruct%(id)s::set_local_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
}
""",
'global': """
%(ctype)s get_global_attribute_%(ctype_name)s(std::string name) {
%(global_get)s
// should not happen
std::cerr << "PopStruct%(id)s::get_global_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
return static_cast<%(ctype)s>(0.0);
}
void set_global_attribute_%(ctype_name)s(std::string name, %(ctype)s value) {
%(global_set)s
std::cerr << "PopStruct%(id)s::set_global_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
}
"""
}
# Initialization of parameters due to the init_population method.
#
# Parameters:
#
# name: name of the variable
# init: initial value
attribute_cpp_init = {
'local':
"""
// Local %(attr_type)s %(name)s
%(name)s = std::vector<%(type)s>(size, %(init)s);
""",
'global':
"""
// Global %(attr_type)s %(name)s
%(name)s = %(init)s;
"""
}
attribute_delayed = {
'local': {
'init': """
_delayed_%(name)s = std::deque< std::vector< %(type)s > >(max_delay, std::vector< %(type)s >(size, 0.0));""",
'update': """
#pragma omp single
{
_delayed_%(name)s.push_front(%(name)s);
_delayed_%(name)s.pop_back();
}
""",
'reset' : """
for ( int i = 0; i < _delayed_%(name)s.size(); i++ ) {
_delayed_%(name)s[i] = %(name)s;
}
""",
'resize' : """
_delayed_%(name)s.resize(max_delay, std::vector< %(type)s >(size, 0.0));
"""
},
'global':{
'init': """
_delayed_%(name)s = std::deque< %(type)s >(max_delay, 0.0);""",
'update': """
#pragma omp single
{
_delayed_%(name)s.push_front(%(name)s);
_delayed_%(name)s.pop_back();
}
""",
'reset' : """
for ( int i = 0; i < _delayed_%(name)s.size(); i++ ) {
_delayed_%(name)s[i] = %(name)s;
}
""",
'resize' : """
_delayed_%(name)s.resize(max_delay, 0.0);
"""
}
}
# Definition for the usage of C++11 STL template random
# number generators
#
# Parameters:
#
# rd_name:
# rd_update:
cpp_11_rng = {
'local': {
'decl': """ std::vector<%(type)s> %(rd_name)s;
std::vector<%(template)s> dist_%(rd_name)s;
""",
'init': """
%(rd_name)s = std::vector<%(type)s>(size, 0.0);
""",
'init_dist': """
dist_%(rd_name)s = std::vector< %(template)s >(global_num_threads);
#pragma omp parallel num_threads(global_num_threads)
{
dist_%(rd_name)s[omp_get_thread_num()] = %(rd_init)s;
}
""",
'update': """
%(rd_name)s[i] = dist_%(rd_name)s[%(index)s](rng[%(index)s]);
"""
},
'global': {
'decl': """ %(type)s %(rd_name)s;
%(template)s dist_%(rd_name)s;
""",
'init': """
%(rd_name)s = 0.0;
""",
'init_dist': """
dist_%(rd_name)s = %(rd_init)s;
""",
'update': """
%(rd_name)s = dist_%(rd_name)s(rng[0]);
"""
},
'omp_code_seq': """
if (_active){
#pragma omp single
{
%(update_rng_global)s
for(int i = 0; i < size; i++) {
%(update_rng_local)s
}
}
}
""",
'omp_code_par': """
if (_active){
#pragma omp single nowait
{
%(update_rng_global)s
}
#pragma omp for
for (int i = 0; i < size; i++) {
%(update_rng_local)s
}
}
"""
}
rate_psp = {
'decl': """
std::vector<%(float_prec)s> _sum_%(target)s;""",
'init': """
// Post-synaptic potential
_sum_%(target)s = std::vector<%(float_prec)s>(size, 0.0);""",
'reset': """
// pop%(id)s: %(name)s
#pragma omp single nowait
{
if (pop%(id)s._active)
memset( pop%(id)s._sum_%(target)s.data(), 0.0, pop%(id)s._sum_%(target)s.size() * sizeof(%(float_prec)s));
}
"""
}
spike_specific = {
'spike': {
'declare': """
// Structures for managing spikes
std::vector<long int> last_spike;
std::vector<int> spiked;
std::vector<int> local_spiked_sizes;
""",
'init': """
// Spiking variables
spiked = std::vector<int>();
local_spiked_sizes = std::vector<int>(global_num_threads+1, 0);
last_spike = std::vector<long int>(size, -10000L);
""",
'reset': """
spiked.clear();
spiked.shrink_to_fit();
local_spiked_sizes = std::vector<int>(global_num_threads+1, 0);
last_spike.clear();
last_spike = std::vector<long int>(size, -10000L);
"""
},
'axon_spike': {
'declare': """
// Structures for managing axonal spikes
std::vector<int> axonal;
""",
'init': """
// Axonal spike containter
axonal = std::vector<int>();
""",
'reset': """
axonal.clear();
axonal.shrink_to_fit();
""",
'pyx_wrapper': """
# Axonal spike events
"""
},
'refractory': {
'declare': """
// Refractory period
std::vector<int> refractory;
std::vector<int> refractory_remaining;
std::vector<short int> in_ref;
""",
'init': """
// Refractory period
refractory = std::vector<int>(size, 0);
refractory_remaining = std::vector<int>(size, 0);
in_ref = std::vector<short int>(size, 0);
""",
# If the refractory variable is defined by the user
'init_extern': """
// Refractory period
refractory_remaining = std::vector<int>(size, 0);
in_ref = std::vector<short int>(size, 0);
""",
'reset': """
// Refractory period
refractory_remaining.clear();
refractory_remaining = std::vector<int>(size, 0);
""",
'pyx_wrapper': """
# Refractory period
cpdef np.ndarray get_refractory(self):
return np.array(pop%(id)s.refractory)
cpdef set_refractory(self, np.ndarray value):
pop%(id)s.refractory = value
"""
},
'init_event-driven': """
last_spike = std::vector<long int>(size, -10000L);
""",
}
#
# Final dictionary
#
# Public entry point of this module: maps template-category names to the
# template dictionaries defined above, consumed by the OpenMP population
# code generator.
openmp_templates = {
    'population_header': population_header,
    'attr_decl': attribute_decl,
    'attr_acc': attribute_acc,
    'accessor_template': attribute_template,
    'attribute_cpp_init': attribute_cpp_init,
    'attribute_delayed': attribute_delayed,
    'rng': cpp_11_rng,
    'rate_psp': rate_psp,
    'spike_specific': spike_specific
}
|
vitay/ANNarchy
|
ANNarchy/generator/Population/OpenMPTemplates.py
|
Python
|
gpl-2.0
| 14,627
|
[
"NEURON"
] |
7f66951b5ed5ddd2033b4891a74afb59ff5f1ff2d51f80c805445b3302b87d91
|
#!/usr/bin/env python
import fileinput
import sys
import os
import re
import math
import json
import numpy as np
import subprocess
from optparse import OptionParser
# These kernels have guaranteed correct Jacobians
whitelisted_kernels = ['Diffusion', 'TimeDerivative']

# regular expressions to parse the PETSc debug output:
# section headers for the three matrices PETSc prints under
# "-snes_type test -snes_test_display", plus the "row N: (col, val) ..."
# entry format.
MfdRE = re.compile("^Finite[ -]difference Jacobian \(user-defined state\)")
MhcRE = re.compile("^Hand-coded Jacobian \(user-defined state\)")
MdiffRE = re.compile("^Hand-coded minus finite[ -]difference Jacobian \(user-defined state\)")
rowRE = re.compile("row ([\d]+): ")
valRE = re.compile(" \(([\d]+), ([+-.e\d]+)\)")
# Get the real path of jacobian analyzer
# (resolve a symlinked invocation so resources are located next to the
# actual script, not the symlink)
if(os.path.islink(sys.argv[0])):
    pathname = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
    pathname = os.path.dirname(sys.argv[0])
pathname = os.path.abspath(pathname)
# Borrowed from Peacock
def recursiveFindFile(current_path, p, executable):
    """Walk upward from current_path looking for a file matching regex p.

    p is a compiled pattern applied with .match() to each entry name.
    When executable is true, only files with the executable bit qualify.
    Inside a 'problems' directory the search first hops over to the
    sibling application directory of the same name.  Returns the full
    path of the first match, or None when the filesystem root is reached
    without one.
    """
    if not os.path.exists(current_path):
        return None

    entries = os.listdir(current_path)

    parts = current_path.split('/')
    if len(parts) > 2 and parts[-2] == 'problems':
        # if we're in the "problems" directory... hop over to this
        # application's directory instead
        found = recursiveFindFile('/'.join(parts[:-2]) + '/' + parts[-1], p, executable)
        # Still didn't find it, we must keep looking up this path so fall through here
        if found is not None:
            return found

    for entry in entries:
        if p.match(entry):
            candidate = current_path + '/' + entry
            if (not executable) or os.access(candidate, os.X_OK):
                return candidate

    if current_path != '/':
        return recursiveFindFile(os.path.dirname(current_path), p, executable)
    return None
# Borrowed from Peacock
def findExecutable(executable_option, method_option):
    # Resolve the MOOSE application binary to run.
    # Priority: explicit -e path, otherwise walk up from the current
    # directory looking for an executable matching '*-<method>', where
    # <method> comes from -m, then $METHOD, then defaults to 'opt'.
    # Exits the process when nothing is found.
    if executable_option and os.path.exists(executable_option):
        return executable_option
    else:
        # search up directories until we find an executable, starting with the current directory
        method = 'opt' # Find the optimized binary by default
        if 'METHOD' in os.environ:
            method = os.environ['METHOD']
        if method_option:
            method = method_option
        p = re.compile('.+-'+method+'$')
        executable = recursiveFindFile(os.getcwd(), p, True)
        if not executable:
            print 'Executable not found! Try specifying it using -e'
            sys.exit(1)
        return executable
#
#     v
# sd1  kern1 kern2
# sd2  kern1
#
#     u
# sd1  kern3
def analyze(dofdata, Mfd, Mhc, Mdiff) :
    """Compare hand-coded and finite-difference Jacobians block by block.

    dofdata -- decoded DOFMap JSON ('ndof' plus per-variable DOF lists)
    Mfd     -- finite-difference Jacobian (ndof x ndof)
    Mhc     -- hand-coded Jacobian (ndof x ndof)
    Mdiff   -- hand-coded minus finite-difference, as reported by PETSc

    Prints a human-readable verdict for every (variable, variable) block
    whose difference norm exceeds a relative tolerance.
    """
    dofs = dofdata['ndof']
    nlvars = [var['name'] for var in dofdata['vars']]
    numvars = len(nlvars)

    # build analysis blocks (for now: one block per variable)
    blocks = []
    for var in dofdata['vars'] :
        blockdofs = []
        for subdomain in var['subdomains'] :
            blockdofs.extend(subdomain['dofs'])
        blocks.append(blockdofs)
    nblocks = len(blocks)

    # analysis results: Frobenius norms of each variable/variable block
    fd = np.zeros((nblocks, nblocks))
    hc = np.zeros((nblocks, nblocks))
    norm = np.zeros((nblocks, nblocks))

    # prepare block norms
    for i in range(nblocks) :
        for j in range(nblocks) :
            # iterate over all DOFs in the current block and compute the block norm
            for di in blocks[i] :
                for dj in blocks[j] :
                    # huge entries mean PETSc printed an effectively
                    # infinite value; poison the norms so the block is
                    # flagged without overflowing the squares below
                    if abs(Mfd[di][dj]) > 1e60 or abs(Mhc[di][dj]) > 1e60:
                        fd [i][j] += 1e10
                        norm[i][j] += 1e20
                        continue
                    else :
                        fd [i][j] += Mfd[di][dj]**2
                        hc [i][j] += Mhc[di][dj]**2
                        norm[i][j] += Mdiff[di][dj]**2
    fd = fd**0.5
    hc = hc**0.5
    norm = norm**0.5

    all_good = True
    e = 1e-4  # relative tolerance on the block difference norm
    for i in range(nblocks) :
        printed = False
        for j in range(nblocks) :
            if norm[i][j] > e*fd[i][j] :
                if not printed :
                    print "\nKernel for variable '%s':" % nlvars[i]
                    printed = True
                all_good = False

                # classify the failure by severity
                if hc[i][j] == 0.0 :
                    problem = "needs to be implemented"
                elif fd[i][j] == 0.0 :
                    problem = "should just return zero"
                else :
                    err = math.fabs((hc[i][j]-fd[i][j])/fd[i][j])*100.0
                    if err > 20.0 :
                        problem = "is wrong (off by %.1f %%)" % err
                    elif err > 5.0 :
                        problem = "is questionable (off by %.2f %%)" % err
                    elif err > 1.0 :
                        problem = "is inexact (off by %.3f %%)" % err
                    else :
                        problem = "is slightly off (by %f %%)" % err

                if i == j :
                    print "  (%d,%d) On-diagonal Jacobian %s" % (i, j, problem)
                else :
                    print "  (%d,%d) Off-diagonal Jacobian for variable '%s' %s" % (i, j, nlvars[j], problem)

    if all_good :
        print "No errors detected. :-)"
# output parsed (but not processed) jacobian matrix data in gnuplot's nonuniform matrix format
def saveMatrixToFile(M, dofs, filename) :
    """Write the dofs x dofs matrix M to filename, one "i j value" line
    per entry with a blank line after each row (gnuplot matrix format).

    Fix: the original opened the file without ever closing it, leaking
    the handle and risking unflushed data; use a context manager so the
    file is closed (and flushed) even if a write fails.
    """
    with open(filename, "w") as out :
        for i in range(dofs) :
            for j in range(dofs) :
                out.write("%d %d %f\n" % (i, j, M[i][j]))
            out.write("\n")
#
# Simple state machine parser for the MOOSE output
#
def parseOutput(output, dofdata, write_matrices) :
    """Extract PETSc's -snes_test_display matrices from the console output.

    States:
      0 -> allocate fresh matrices
      1 -> waiting for the finite-difference Jacobian header
      2 -> reading Mfd rows until the hand-coded header appears
      3 -> reading Mhc rows until the difference header appears
      4 -> reading Mdiff rows; after the last row, run analyze() and
           optionally dump the matrices, then restart at state 0
    """
    dofs = dofdata['ndof']
    state = 0
    for line in output.split('\n'):
        #print state, line

        #
        # Read in PetSc matrices
        #
        if state == 0 :
            Mfd = np.zeros((dofs, dofs))
            Mhc = np.zeros((dofs, dofs))
            Mdiff = np.zeros((dofs, dofs))
            state = 1

        if state == 1 :
            m = MfdRE.match(line)
            if m :
                state = 2
                continue
        if state == 2 :
            m = MhcRE.match(line)
            if m :
                state = 3
                continue
        if state == 3 :
            m = MdiffRE.match(line)
            if m :
                state = 4
                continue

        # read data; rows look like "row 3: (0, 1.0) (1, -2.0) ..."
        if state >= 2 and state <= 4 :
            m = rowRE.match(line)
            vals = valRE.findall(line)
            if m :
                row = int(m.group(1))
                for pair in vals :
                    if state == 2 :
                        Mfd[row, int(pair[0])] = float(pair[1])
                    if state == 3 :
                        Mhc[row, int(pair[0])] = float(pair[1])
                    if state == 4 :
                        Mdiff[row, int(pair[0])] = float(pair[1])

                # last row of the difference matrix completes one set
                if state == 4 and row+1 == dofs :
                    state = 0
                    analyze(dofdata, Mfd, Mhc, Mdiff)

                    # dump parsed matrices in gnuplottable format
                    if write_matrices :
                        saveMatrixToFile(Mfd, dofs, "jacobian_finite_differenced.dat")
                        saveMatrixToFile(Mhc, dofs, "jacobian_hand_coded.dat")
                        saveMatrixToFile(Mdiff, dofs, "jacobians_diffed.dat")

            # theoretically we could have multiple steps to analyze in the output
            continue
if __name__ == '__main__':
    # Command line handling.
    usage = "Usage: %prog [options] [input_file]"
    description = "Note: You can directly supply an input file without specifying any options. The correct thing will automatically happen."
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("-e", "--executable", dest="executable",
                      help="The executable you would like to build an input file for.  If not supplied an executable will be searched for.  The searched for executable will default to the optimized version of the executable (if available).")
    parser.add_option("-i", "--input-file", dest="input_file",
                      help="Input file you would like to open debug the jacobians on.")
    parser.add_option("-m", "--method", dest="method",
                      help="Pass either opt, dbg or devel.  Works the same as setting the $METHOD environment variable.")
    parser.add_option("-r", "--resize-mesh", dest="resize_mesh", action="store_true", help="Perform resizing of generated meshs (to speed up the testing).")
    parser.add_option("-s", "--mesh-size", dest="mesh_size", default=1, type="int", help="Set the mesh dimensions to this number of elements along each dimension (defaults to 1, requires -r option).")
    parser.add_option("-d", "--debug", dest="debug", action="store_true", help="Output the command line used to run the application.")
    parser.add_option("-w", "--write-matrices", dest="write_matrices", action="store_true", help="Output the Jacobian matrices in gnuplot format.")
    parser.add_option("-n", "--no-auto-options", dest="noauto", action="store_true", help="Do not add automatic options to the invocation of the moose based application. Requres a specially prepared input file for debugging.")
    (options, args) = parser.parse_args()

    # a bare *.i positional argument is treated as the input file
    for arg in args:
        if arg[-2:] == '.i':
            options.input_file = arg
    if options.input_file is None :
        print 'Please specify an input file.'
        sys.exit(1)

    executable = findExecutable(options.executable, options.method)
    basename = options.input_file[0:-2]
    dofoutname = 'analyzerdofmap'

    # common arguments for both debugging and dofmapping
    moosebaseparams = [executable, '-i', options.input_file ]
    if options.resize_mesh :
        moosebaseparams.extend(['Mesh/nx=%d' % options.mesh_size, 'Mesh/ny=%d' % options.mesh_size, 'Mesh/nz=%d' % options.mesh_size])

    # run to dump DOFs (this does not happen during the debug step)
    dofmapfilename = basename + '_' + dofoutname + '.json'
    if not options.noauto :
        mooseparams = moosebaseparams[:]
        # solve disabled: we only want the DOFMap output, not a solution
        mooseparams.extend(['Problem/solve=false', 'BCs/active=', 'Outputs/' + dofoutname+ '/type=DOFMap', 'Outputs/active=' + dofoutname, 'Outputs/file_base=' + basename + '_' + dofoutname])
        if options.debug :
            print "Running\n%s\n" % " ".join(mooseparams)
        try:
            child = subprocess.Popen(mooseparams, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            data = child.communicate()[0]
            child.wait()
        except:
            print 'Error executing moose based application to gather DOF map\n'
            sys.exit(1)
    else :
        print "Runing without automatic options DOF map '%s' will not be generated automatically!" % dofmapfilename

    # analyze return code
    if child.returncode == 1 :
        # MOOSE failed with an unexpected error
        print data
        sys.exit(1)
    elif child.returncode == -11 :
        print "The moose application crashed with a segmentation fault (try recompiling)"
        sys.exit(1)

    # load and decode the DOF map data (for now we only care about one frame)
    with open (dofmapfilename, "rt") as myfile :
        dofjson = myfile.readlines()
    dofdata = json.loads(dofjson[0].rstrip('\n'))
    if options.debug :
        print "DOF map output:\n%s\n" % dofdata

    # for every DOF get the list of kernels contributing to it
    dofkernels = [[] for i in range(dofdata['ndof'])]
    kerneltypes = {}
    for var in dofdata['vars'] :
        for subdomain in var['subdomains'] :
            kernels = [kernel for kernel in subdomain['kernels']]

            # create lookup table from kernel name to kernel type
            for kernel in kernels :
                kerneltypes[kernel['name']] = kernel['type']

            # list of active kernels contributing to a DOF
            for dof in subdomain['dofs'] :
                dofkernels[dof].extend([kernel['name'] for kernel in kernels if not kernel['name'] in dofkernels[dof]])

    # get all unique kernel combinations occurring on the DOFs
    combination_dofs = {}
    for dof, kernels in enumerate(dofkernels) :
        kernels.sort()
        idx = tuple(kernels)
        if idx in combination_dofs :
            combination_dofs[idx].append(dof)
        else :
            combination_dofs[idx] = [dof]

    #combinations = []
    #for kernels in combination_dofs :
    #    print kernels

    # build the parameter list for the jacobian debug run
    mooseparams = moosebaseparams[:]
    if not options.noauto :
        # PETSc options that make SNES compare the hand-coded Jacobian
        # against a finite-difference one and print all three matrices
        mooseparams.extend([ '-snes_type', 'test', '-snes_test_display', '-mat_fd_type', 'ds', 'Executioner/solve_type=NEWTON', 'BCs/active='])
    if options.debug :
        print "Running\n%s\n" % " ".join(mooseparams)
    else :
        print 'Running input with executable %s ...\n' % executable

    # run debug process to gather jacobian data
    try:
        child = subprocess.Popen(mooseparams, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        data = child.communicate()[0]
        child.wait()
    except:
        print 'Error executing moose based application\n'
        sys.exit(1)

    # parse the raw output, which contains the PETSc debug information
    parseOutput(data, dofdata, options.write_matrices)
|
mellis13/moose
|
python/jacobiandebug/analyzejacobian.py
|
Python
|
lgpl-2.1
| 12,108
|
[
"MOOSE"
] |
4932c04e08beb89c3ba08478369ad6ecd5b80c526f5bffde1b86b999e6f10c99
|
from __future__ import absolute_import
from __future__ import print_function
from six import iteritems
from builtins import range
import os
import re
from glob import glob
from os import path
import numpy as np
from netCDF4 import Dataset
from pyproj import Proj
from pyproj import transform
from shyft import api
from .. import interfaces
from .time_conversion import convert_netcdf_time
class AromeDataRepositoryError(Exception):
    """Raised for any error while locating, reading or interpreting
    Arome netCDF data in this repository."""
    pass
class AromeDataRepository(interfaces.GeoTsRepository):
"""
Repository for geo located timeseries given as Arome(*) data in
netCDF files.
NetCDF dataset assumptions:
* Root group has variables:
* time: timestamp (int) array with seconds since epoc
(1970.01.01 00:00, UTC) for each data point
* x: float array of latitudes
* y: float array of longitudes
* Root group has subset of variables:
* relative_humidity_2m: float array of dims (time, 1, y, x)
* air_temperature_2m: float array of dims (time, 1, y, x)
* altitude: float array of dims (y, x)
* precipitation_amount: float array of dims (time, y, x)
* x_wind_10m: float array of dims (time, y, x)
* y_wind_10m: float array of dims (time, y, x)
* integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time:
float array of dims (time, 1, y, x)
* All variables are assumed to have the attribute grid_mapping
which should be a reference to a variable in the root group
that has an attribute named proj4. Example code:
ds = netCDF4.Dataset(arome_file)
var = "precipitation_amount"
mapping = ds.variables[var].grid_mapping
proj = ds.variables[mapping].proj4
(*) Arome NWP model output is from:
http://thredds.met.no/thredds/catalog/arome25/catalog.html
Contact:
Name: met.no
Organization: met.no
Email: thredds@met.no
Phone: +47 22 96 30 00
"""
_G = 9.80665 # WMO-defined gravity constant to calculate the height in metres from geopotential
    def __init__(self, epsg, directory, filename=None, bounding_box=None,
                 x_padding=5000.0, y_padding=5000.0, elevation_file=None, allow_subset=False):
        """
        Construct the netCDF4 dataset reader for data from Arome NWP model,
        and initialize data retrieval.

        Parameters
        ----------
        epsg: string
            Unique coordinate system id for result coordinates.
            Currently "32632" and "32633" are supperted.
        directory: string
            Path to directory holding one or possibly more arome data files.
            os.path.isdir(directory) should be true, or exception is raised.
        filename: string, optional
            Name of netcdf file in directory that contains spatially
            distributed input data. Can be a glob pattern as well, in case
            it is used for forecasts or ensambles.
        bounding_box: list, optional
            A list on the form:
            [[x_ll, x_lr, x_ur, x_ul],
             [y_ll, y_lr, y_ur, y_ul]],
            describing the outer boundaries of the domain that shoud be
            extracted. Coordinates are given in epsg coordinate system.
        x_padding: float, optional
            Longidutinal padding in meters, added both east and west
        y_padding: float, optional
            Latitudinal padding in meters, added both north and south
        elevation_file: string, optional
            Name of netcdf file of same dimensions in x and y, subject to
            constraints given by bounding box and padding, that contains
            elevation that should be used in stead of elevations in file.
        allow_subset: bool
            Allow extraction of a subset of the given source fields
            instead of raising exception.
        """
        #directory = directory.replace('${SHYFTDATA}', os.getenv('SHYFTDATA', '.'))
        directory = path.expandvars(directory)
        # NOTE(review): filename defaults to None, but path.join requires a
        # string -- callers must always supply it; confirm before relying
        # on the default.
        self._filename = path.join(directory, filename)
        self.allow_subset = allow_subset
        if not path.isdir(directory):
            raise AromeDataRepositoryError("No such directory '{}'".format(directory))

        if elevation_file is not None:
            self.elevation_file = path.join(directory, elevation_file)
            if not path.isfile(self.elevation_file):
                raise AromeDataRepositoryError(
                    "Elevation file '{}' not found".format(self.elevation_file))
        else:
            self.elevation_file = None

        self.shyft_cs = "+init=EPSG:{}".format(epsg)
        self._x_padding = x_padding
        self._y_padding = y_padding
        self._bounding_box = bounding_box

        # Field names and mappings: netCDF variable name -> shyft source name
        self._arome_shyft_map = {"relative_humidity_2m": "relative_humidity",
                                 "air_temperature_2m": "temperature",
                                 "altitude": "z",
                                 "precipitation_amount": "precipitation",
                                 "precipitation_amount_acc": "precipitation",
                                 "x_wind_10m": "x_wind",
                                 "y_wind_10m": "y_wind",
                                 "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time":
                                 "radiation"}

        # accepted units per netCDF variable; checked on extraction
        self.var_units = {"air_temperature_2m": ['K'],
                          "relative_humidity_2m": ['1'],
                          "precipitation_amount_acc": ['kg/m^2'],
                          "precipitation_amount": ['kg/m^2'],
                          "x_wind_10m": ['m/s'],
                          "y_wind_10m": ['m/s'],
                          "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time": ['W s/m^2']}

        # accumulated fields need one extra leading sample for deaccumulation
        self._shift_fields = ("precipitation_amount", "precipitation_amount_acc",
                              "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time")

        self.source_type_map = {"relative_humidity": api.RelHumSource,
                                "temperature": api.TemperatureSource,
                                "precipitation": api.PrecipitationSource,
                                "radiation": api.RadiationSource,
                                "wind_speed": api.WindSpeedSource}

        self.series_type = {"relative_humidity": api.POINT_INSTANT_VALUE,
                            "temperature": api.POINT_INSTANT_VALUE,
                            "precipitation": api.POINT_AVERAGE_VALUE,
                            "radiation": api.POINT_AVERAGE_VALUE,
                            "wind_speed": api.POINT_INSTANT_VALUE}
    def get_timeseries(self, input_source_types, utc_period, geo_location_criteria=None):
        """Get shyft source vectors of time series for input_source_types

        Parameters
        ----------
        input_source_types: list
            List of source types to retrieve (precipitation, temperature..)
        geo_location_criteria: object, optional
            Some type (to be decided), extent (bbox + coord.ref)
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.

        Returns
        -------
        geo_loc_ts: dictionary
            dictionary keyed by time series name, where values are api vectors of geo
            located timeseries.
        """
        filename = self._filename
        if not path.isfile(filename):
            # a glob pattern: resolve to the newest file at/before the
            # period start, matching e.g. "..._YYYYMMDDTHHZ.nc"
            if '*' in filename:
                filename = self._get_files(utc_period.start, "_(\d{8})([T_])(\d{2})(Z)?.nc$")
            else:
                raise AromeDataRepositoryError("File '{}' not found".format(filename))
        with Dataset(filename) as dataset:
            return self._get_data_from_dataset(dataset, input_source_types,
                                               utc_period, geo_location_criteria)
    def get_forecast(self, input_source_types, utc_period, t_c, geo_location_criteria=None):
        """
        Parameters
        ----------
        input_source_types: list
            List of source types to retrieve. Valid types are:
                * relative_humidity
                * temperature
                * precipitation
                * radiation
                * wind_speed
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        t_c: long
            Forecast specification; return newest forecast older than t_c.
        geo_location_criteria: object
            Some type (to be decided), extent (bbox + coord.ref).

        Returns
        -------
        geo_loc_ts: dictionary
            dictionary keyed by ts type, where values are api vectors of geo
            located timeseries.
        """
        # resolve the newest forecast file at/before t_c from the glob pattern
        filename = self._get_files(t_c, "_(\d{8})([T_])(\d{2})(Z)?.nc$")
        with Dataset(filename) as dataset:
            return self._get_data_from_dataset(dataset, input_source_types, utc_period,
                                               geo_location_criteria)
    def get_forecast_ensemble(self, input_source_types, utc_period,
                              t_c, geo_location_criteria=None):
        """
        Parameters
        ----------
        input_source_types: list
            List of source types to retrieve (precipitation, temperature, ...)
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        t_c: long
            Forecast specification; return newest forecast older than t_c.
        geo_location_criteria: object
            Some type (to be decided), extent (bbox + coord.ref).

        Returns
        -------
        ensemble: list of geo_loc_ts dictionaries
            Dictionaries are keyed by time series type, with values
            being api vectors of geo located timeseries.
        """
        # ensemble files use a different date pattern: "...YYYYMMDDHH.nc"
        filename = self._get_files(t_c, "\D(\d{8})(\d{2}).nc$")
        with Dataset(filename) as dataset:
            res = []
            # one extraction per ensemble member in the file
            for idx in dataset.variables["ensemble_member"][:]:
                res.append(self._get_data_from_dataset(dataset, input_source_types, utc_period,
                                                       geo_location_criteria,
                                                       ensemble_member=idx))
            return res
@property
def bounding_box(self):
# Add a padding to the bounding box to make sure the computational
# domain is fully enclosed in arome dataset
if self._bounding_box is None:
raise AromeDataRepositoryError("A bounding box must be provided.")
bounding_box = np.array(self._bounding_box)
bounding_box[0][0] -= self._x_padding
bounding_box[0][1] += self._x_padding
bounding_box[0][2] += self._x_padding
bounding_box[0][3] -= self._x_padding
bounding_box[1][0] -= self._y_padding
bounding_box[1][1] -= self._y_padding
bounding_box[1][2] += self._y_padding
bounding_box[1][3] += self._y_padding
return bounding_box
    def _convert_to_timeseries(self, data):
        """Convert timeseries from numpy structures to shyft.api timeseries.

        We assume the time axis is regular, and that we can use a point time
        series with a parametrized time axis definition and corresponding
        vector of values. If the time series is missing on the data, we insert
        it into non_time_series.

        Returns
        -------
        timeseries: dict
            Time series arrays keyed by type
        """
        tsc = api.TsFactory().create_point_ts
        time_series = {}
        # NOTE(review): the loop rebinds 'data' to the per-key array,
        # shadowing the dict argument; safe only because the dict itself is
        # not referenced again after iteration starts.
        for key, (data, ta) in data.items():
            fslice = (len(data.shape) - 2)*[slice(None)]
            I, J = data.shape[-2:]

            def construct(d):
                if ta.size() != d.size:
                    raise AromeDataRepositoryError("Time axis size {} not equal to the number of "
                                                   "data points ({}) for {}"
                                                   "".format(ta.size(), d.size, key))
                return tsc(ta.size(), ta.start, ta.delta_t,
                           api.DoubleVector_FromNdArray(d.flatten()), self.series_type[key])

            # one time series per (i, j) grid cell
            time_series[key] = np.array([[construct(data[fslice + [i, j]])
                                          for j in range(J)] for i in range(I)])
        return time_series
    def _limit(self, x, y, data_cs, target_cs):
        """
        Parameters
        ----------
        x: np.ndarray
            X coordinates in meters in cartesian coordinate system
            specified by data_cs
        y: np.ndarray
            Y coordinates in meters in cartesian coordinate system
            specified by data_cs
        data_cs: string
            Proj4 string specifying the cartesian coordinate system
            of x and y
        target_cs: string
            Proj4 string specifying the target coordinate system

        Returns
        -------
        x: np.ndarray
            Coordinates in target coordinate system
        y: np.ndarray
            Coordinates in target coordinate system
        x_mask: np.ndarray
            Boolean index array
        y_mask: np.ndarray
            Boolean index array
        """
        # Get coordinate system for arome data
        data_proj = Proj(data_cs)
        target_proj = Proj(target_cs)

        # Find bounding box in arome projection
        bbox = self.bounding_box
        bb_proj = transform(target_proj, data_proj, bbox[0], bbox[1])
        x_min, x_max = min(bb_proj[0]), max(bb_proj[0])
        y_min, y_max = min(bb_proj[1]), max(bb_proj[1])

        # Limit data: a coordinate is inside when both bounds hold,
        # i.e. where the upper and lower flags agree (both True)
        x_upper = x >= x_min
        x_lower = x <= x_max
        y_upper = y >= y_min
        y_lower = y <= y_max
        # fewer than two selected columns/rows: widen by one cell on each
        # side so downstream interpolation still has a 2D patch
        if sum(x_upper == x_lower) < 2:
            if sum(x_lower) == 0 and sum(x_upper) == len(x_upper):
                raise AromeDataRepositoryError("Bounding box longitudes don't intersect with dataset.")
            x_upper[np.argmax(x_upper) - 1] = True
            x_lower[np.argmin(x_lower)] = True
        if sum(y_upper == y_lower) < 2:
            if sum(y_lower) == 0 and sum(y_upper) == len(y_upper):
                raise AromeDataRepositoryError("Bounding box latitudes don't intersect with dataset.")
            y_upper[np.argmax(y_upper) - 1] = True
            y_lower[np.argmin(y_lower)] = True

        x_inds = np.nonzero(x_upper == x_lower)[0]
        y_inds = np.nonzero(y_upper == y_lower)[0]

        # Masks
        x_mask = x_upper == x_lower
        y_mask = y_upper == y_lower

        # Transform from source coordinates to target coordinates
        xx, yy = transform(data_proj, target_proj, *np.meshgrid(x[x_mask], y[y_mask]))

        return xx, yy, (x_mask, y_mask), (x_inds, y_inds)
    def _get_data_from_dataset(self, dataset, input_source_types, utc_period,
                               geo_location_criteria, ensemble_member=None):
        """Extract, crop, unit-check and convert the requested fields from an
        open netCDF dataset; returns a dict of geo-located source vectors.
        """
        if geo_location_criteria is not None:
            self._bounding_box = geo_location_criteria

        # wind speed is derived from the two wind components
        if "wind_speed" in input_source_types:
            input_source_types = list(input_source_types)  # We change input list, so take a copy
            input_source_types.remove("wind_speed")
            input_source_types.append("x_wind")
            input_source_types.append("y_wind")

        unit_ok = {k: dataset.variables[k].units in self.var_units[k]
                   for k in dataset.variables.keys() if self._arome_shyft_map.get(k, None) in input_source_types}
        if not all(unit_ok.values()):
            raise AromeDataRepositoryError("The following variables have wrong unit: {}.".format(
                ', '.join([k for k, v in unit_ok.items() if not v])))

        raw_data = {}
        x = dataset.variables.get("x", None)
        y = dataset.variables.get("y", None)
        time = dataset.variables.get("time", None)
        if not all([x, y, time]):
            raise AromeDataRepositoryError("Something is wrong with the dataset."
                                           " x/y coords or time not found.")
        # NOTE(review): operator precedence makes this
        # (not all(units ok)) and (x.units == y.units) -- a unit *mismatch*
        # between x and y slips through unflagged; likely the intent was
        # not (all(...) and x.units == y.units). Left as-is, flagged only.
        if not all([var.units in ['km', 'm'] for var in [x, y]]) and x.units == y.units:
            raise AromeDataRepositoryError("The unit for x and y coordinates should be either m or km.")
        coord_conv = 1.
        if x.units == 'km':
            coord_conv = 1000.
        time = convert_netcdf_time(time.units,time)

        data_cs = dataset.variables.get("projection_lambert", None)
        if data_cs is None:
            raise AromeDataRepositoryError("No coordinate system information in dataset.")

        # clamp the time axis to the requested period; 'issubset' records
        # whether one extra trailing sample is available for deaccumulation
        idx_min = np.searchsorted(time, utc_period.start, side='left')
        idx_max = np.searchsorted(time, utc_period.end, side='right')
        issubset = True if idx_max < len(time) - 1 else False
        time_slice = slice(idx_min, idx_max)
        x, y, (m_x, m_y), _ = self._limit(x[:]*coord_conv, y[:]*coord_conv, data_cs.proj4, self.shyft_cs)
        for k in dataset.variables.keys():
            if self._arome_shyft_map.get(k, None) in input_source_types:
                if k in self._shift_fields and issubset:  # Add one to time slice
                    data_time_slice = slice(time_slice.start, time_slice.stop + 1)
                else:
                    data_time_slice = time_slice
                data = dataset.variables[k]
                dims = data.dimensions
                data_slice = len(data.dimensions)*[slice(None)]
                if ensemble_member is not None:
                    data_slice[dims.index("ensemble_member")] = ensemble_member
                data_slice[dims.index("x")] = m_x
                data_slice[dims.index("y")] = m_y
                data_slice[dims.index("time")] = data_time_slice
                pure_arr = data[data_slice]
                if isinstance(pure_arr, np.ma.core.MaskedArray):
                    #print(pure_arr.fill_value)
                    pure_arr = pure_arr.filled(np.nan)
                raw_data[self._arome_shyft_map[k]] = pure_arr, k
                #raw_data[self._arome_shyft_map[k]] = np.array(data[data_slice], dtype='d'), k

        # elevation: explicit file wins, otherwise take it from the dataset
        if self.elevation_file is not None:
            _x, _y, z = self._read_elevation_file(self.elevation_file)
            assert np.linalg.norm(x - _x) < 1.0e-10  # x/y coordinates should match
            assert np.linalg.norm(y - _y) < 1.0e-10
        elif any([nm in dataset.variables.keys() for nm in ['altitude', 'surface_geopotential']]):
            var_nm = ['altitude', 'surface_geopotential'][[nm in dataset.variables.keys() for nm in ['altitude', 'surface_geopotential']].index(True)]
            data = dataset.variables[var_nm]
            dims = data.dimensions
            data_slice = len(data.dimensions)*[slice(None)]
            data_slice[dims.index("x")] = m_x
            data_slice[dims.index("y")] = m_y
            z = data[data_slice]
            shp = z.shape
            z = z.reshape(shp[-2], shp[-1])
            if var_nm == 'surface_geopotential':
                # geopotential [m^2/s^2] -> height [m]
                z /= self._G
        else:
            raise AromeDataRepositoryError("No elevations found in dataset"
                                           ", and no elevation file given.")

        pts = np.dstack((x, y, z)).reshape(*(x.shape + (3,)))

        # Make sure requested fields are valid, and that dataset contains the requested data.
        if not self.allow_subset and not (set(raw_data.keys()).issuperset(input_source_types)):
            raise AromeDataRepositoryError("Could not find all data fields")

        if set(("x_wind", "y_wind")).issubset(raw_data):
            x_wind, _ = raw_data.pop("x_wind")
            y_wind, _ = raw_data.pop("y_wind")
            raw_data["wind_speed"] = np.sqrt(np.square(x_wind) + np.square(y_wind)), "wind_speed"
        extracted_data = self._transform_raw(raw_data, time[time_slice], issubset=issubset)
        return self._geo_ts_to_vec(self._convert_to_timeseries(extracted_data), pts)
def _read_elevation_file(self, filename):
with Dataset(self.elevation_file) as dataset:
elev = dataset.variables["altitude"]
if "altitude" not in dataset.variables.keys():
raise interfaces.InterfaceError(
"File '{}' does not contain altitudes".format(self.elevation_file))
x, y, (x_mask, y_mask), _ = \
self._limit(dataset.variables.pop("x"),
dataset.variables.pop("y"),
dataset.variables.pop(elev.grid_mapping).proj4,
self.shyft_cs)
data_slice = len(elev.dimensions)*[slice(None)]
data_slice[elev.dimensions.index("x")] = x_mask
data_slice[elev.dimensions.index("y")] = y_mask
return x, y, elev[data_slice]
    def _transform_raw(self, data, time, issubset=False):
        """
        We need full time if deaccumulating

        Maps each raw netCDF field to (converted values, time axis) using a
        per-field conversion: unit conversion for temperature, deaccumulation
        for accumulated precipitation/radiation, identity for the rest.
        """
        def noop_time(t):
            t0 = int(t[0])
            t1 = int(t[1])
            return api.TimeAxisFixedDeltaT(t0, t1 - t0, len(t))

        def dacc_time(t):
            # deaccumulated series lose one sample unless an extra trailing
            # sample was fetched (issubset)
            t0 = int(t[0])
            t1 = int(t[1])
            return noop_time(t) if issubset else api.TimeAxisFixedDeltaT(t0, t1 - t0, len(t) - 1)

        def noop_space(x):
            return x

        def air_temp_conv(T):
            # Kelvin -> Celsius
            return T - 273.15

        def prec_conv(p):
            return p[1:]

        def prec_acc_conv(p):
            # accumulated -> per-interval amounts, clipped to sane range
            return np.clip(p[1:] - p[:-1], 0.0, 1000.0)

        def rad_conv(r):
            # accumulated energy -> mean flux over the (regular) interval
            dr = r[1:] - r[:-1]
            return np.clip(dr/(time[1] - time[0]), 0.0, 5000.0)

        convert_map = {"wind_speed": lambda x, t: (noop_space(x), noop_time(t)),
                       "relative_humidity_2m": lambda x, t: (noop_space(x), noop_time(t)),
                       "air_temperature_2m": lambda x, t: (air_temp_conv(x), noop_time(t)),
                       "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time":
                       lambda x, t: (rad_conv(x), dacc_time(t)),
                       "precipitation_amount": lambda x, t: (prec_conv(x), dacc_time(t)),
                       "precipitation_amount_acc": lambda x, t: (prec_acc_conv(x), dacc_time(t))}
        res = {}
        for k, (v, ak) in data.items():
            res[k] = convert_map[ak](v, time)
        return res
    def _geo_ts_to_vec(self, data, pts):
        """Wrap each (grid cell, time series) pair in the matching shyft
        geo-located source type and collect them into api vectors,
        keyed by source name."""
        res = {}
        for name, ts in iteritems(data):
            tpe = self.source_type_map[name]
            # SiH: Unfortunately, I have not got the boost.python to eat list of non-basic object
            # into the constructor of vectors like this:
            #res[name] = tpe.vector_t([tpe(api.GeoPoint(*pts[idx]), ts[idx]) for idx in np.ndindex(pts.shape[:-1])])
            # so until then, we have to do the loop
            tpe_v=tpe.vector_t()
            for idx in np.ndindex(pts.shape[:-1]):
                tpe_v.append(tpe(api.GeoPoint(*pts[idx]), ts[idx]))

            res[name] = tpe_v

        return res
    def _get_files(self, t_c, date_pattern):
        """Resolve the newest file matching the glob self._filename whose
        embedded timestamp is at or before t_c.

        date_pattern is a regex applied to each file name; its groups are
        assumed to be (YYYYMMDD, separator, HH, optional suffix), matching
        the patterns used by the callers.  Raises AromeDataRepositoryError
        when nothing qualifies.
        """
        utc = api.Calendar()
        file_names = glob(self._filename)
        match_files = []
        match_times = []
        for fn in file_names:
            match = re.search(date_pattern, fn)
            if match:
                datestr, _ , hourstr, _ = match.groups()
                year, month, day = int(datestr[:4]), int(datestr[4:6]), int(datestr[6:8])
                hour = int(hourstr)
                t = utc.time(api.YMDhms(year, month, day, hour))
                if t <= t_c:
                    match_files.append(fn)
                    match_times.append(t)
        if match_files:
            # newest qualifying file
            return match_files[np.argsort(match_times)[-1]]
        ymds = utc.calendar_units(t_c)
        date = "{:4d}.{:02d}.{:02d}:{:02d}:{:02d}:{:02d}".format(ymds.year, ymds.month, ymds.day,
                                                                 ymds.hour, ymds.minute, ymds.second)
        raise AromeDataRepositoryError("No matches found for file_pattern = {} and t_c = {} "
                                       "".format(self._filename, date))
|
felixmatt/shyft
|
shyft/repository/netcdf/arome_data_repository.py
|
Python
|
lgpl-3.0
| 24,389
|
[
"NetCDF"
] |
aa7383f5aa715ba8a0b29084b1c855715b7c80ae422e255410ad0884deb6f7cc
|
#!/usr/bin/env python2
"""
rdio-history-csv.py
~~~~~~~~~~~~~~~~~~~
Simple script for parsing the output of
https://www.rdio.com/api/1/getHistoryForUser
The way I made this happen was:
#. login to rdio.com in chrome,
#. visit https://www.rdio.com/people/tylercipriani/history/ with devtools
open
#. right click the 'getHistoryForUser' item in the network tab,
#. "copy as cURL"
#. Remove the "Accept-Encoding" header (don't want gzip)
#. Change the "count=10" parameter to "count=9999999"
#. redirect the output to "rdio-history.json"
#. Run this script, redirect output to "rdio-history.csv"
#. ???
#. Profit.
"""
import os
import json
def process(string):
    """Drop non-ASCII characters plus commas and quotes so the value is CSV-safe."""
    cleaned = string.encode('ascii', 'ignore')
    for unwanted in (',', '"', "'"):
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
# CSV header row; columns mirror history_fmt below.
print "Date Played,Artist,Title,Album"
history_fmt = '{time},{artist},{title},{album}'
# Placeholder for fields missing from the raw JSON.
NA = 'N/A'
# Raw dump lives in ../raw relative to this script (see module docstring).
history_path = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__), '..', 'raw', 'rdio-history-raw.json'))
with open(history_path, 'r') as f:
    json_data = json.load(f)
# The interesting payload is nested under result -> sources -> tracks -> items.
sources = json_data['result']['sources']
for source in sources:
    tracks = source['tracks']['items']
    for track in tracks:
        info = track['track']
        # One CSV row per play; process() strips commas/quotes so the
        # comma-separated output stays well formed.
        print history_fmt.format(**{
            'time': process(track.get('time', NA)),
            'artist': process(info.get('artist', NA)),
            'title': process(info.get('name', NA)),
            'album': process(info.get('album', NA)),
        })
|
thcipriani/rdio-shit
|
bin/rdio-history-csv.py
|
Python
|
gpl-2.0
| 1,498
|
[
"VisIt"
] |
c0840e61091ae016da0b8f8640dcf27ae79fe58314bd388473c68beb6c773f3a
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the RequestedService model and
    # two new fields on Visit.  Normally not hand-edited.
    dependencies = [
        ('visit', '0061_visit_insights'),
    ]
    operations = [
        # New lookup table of services a visitor can request; `index`
        # drives display order (see Meta.ordering below).
        migrations.CreateModel(
            name='RequestedService',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.UUIDField(default=uuid.uuid4, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('index', models.PositiveSmallIntegerField(default=10)),
                ('is_active', models.BooleanField(default=True)),
                ('is_custom', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ('index',),
                'abstract': False,
            },
        ),
        # Flag on Visit indicating the visitor asked for services at all.
        migrations.AddField(
            model_name='visit',
            name='request_services',
            field=models.BooleanField(default=False),
        ),
        # The concrete services requested on a visit.
        # NOTE(review): related_name 'vists' looks like a typo for 'visits';
        # renaming would require a follow-up migration, so it is left as-is.
        migrations.AddField(
            model_name='visit',
            name='requested_services',
            field=models.ManyToManyField(related_name='vists', to='visit.RequestedService', blank=True),
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0062_auto_20150816_0035.py
|
Python
|
mit
| 1,319
|
[
"VisIt"
] |
38fafce789a353eb5fe17adc3da2c3db9848746eacc2776c2435407a676f5d63
|
# ======================================================================
#
# Cosmograil: cosmograil.tools.sextractor
#
# sextractor module.
#
# Author: Laurent Le Guillou <laurentl@ster.kuleuven.ac.be>
#
# $Id: sextractor.py,v 1.2 2005/07/06 21:40:43 hack Exp $
#
# ======================================================================
#
# "sextractor": wrapper around SExtractor.
#
# ======================================================================
#
# $Log: sextractor.py,v $
# Revision 1.2 2005/07/06 21:40:43 hack
# Tweakshifts version 0.5.0 (WJH):
# - added support for SExtractor PSET and user-supplied SExtractor config file
# - added 'nbright' parameter for selecting only 'nbright' objects for matching
# - redefined 'ascend' to 'fluxunits' of 'counts/cps/mag'
# - fixed bug in countSExtractorObjects()reported by Andy
# - turned off overwriting of output WCS file
#
# Revision 1.15 2005/06/29 13:07:41 hack
# Added Python interface to SExtractor to STSDAS$Python for use with 'tweakshifts'. WJH
# Added 3 more parameters to config
#
# Revision 1.14 2005/02/14 19:27:31 laurentl
# Added write facilities to rdb module.
#
# Revision 1.13 2005/02/14 17:47:02 laurentl
# Added iterator interface
#
# Revision 1.12 2005/02/14 17:16:30 laurentl
# clean now removes the NNW config file too.
#
# Revision 1.2 2005/02/14 17:13:49 laurentl
# *** empty log message ***
#
# Revision 1.1 2005/02/14 11:34:10 laurentl
# quality monitor now uses SExtractor wrapper.
#
# Revision 1.10 2005/02/11 14:40:35 laurentl
# minor changes
#
# Revision 1.9 2005/02/11 14:32:44 laurentl
# Fixed bugs in setup()
#
# Revision 1.8 2005/02/11 13:50:08 laurentl
# Fixed bugs in setup()
#
# Revision 1.7 2005/02/10 20:15:14 laurentl
# Improved SExtractor wrapper.
#
# Revision 1.6 2005/02/10 17:46:35 laurentl
# Greatly improved the SExtractor wrapper.
#
# Revision 1.5 2005/02/09 23:32:50 laurentl
# Implemented SExtractor wrapper
#
# Revision 1.4 2005/02/04 05:00:09 laurentl
# *** empty log message ***
#
# Revision 1.3 2005/01/06 13:37:11 laurentl
# *** empty log message ***
#
#
# ======================================================================
"""
A wrapper for SExtractor
A wrapper for SExtractor, the Source Extractor.
by Laurent Le Guillou
version: 1.15 - last modified: 2005-07-06
This wrapper allows you to configure SExtractor, run it and get
back its outputs without the need of editing SExtractor
configuration files. by default, configuration files are created
on-the-fly, and SExtractor is run silently via python.
Tested on SExtractor versions 2.2.1 and 2.3.2.
Example of use:
-----------------------------------------------------------------
import sextractor
# Create a SExtractor instance
sex = sextractor.SExtractor()
# Modify the SExtractor configuration
sex.config['GAIN'] = 0.938
sex.config['PIXEL_SCALE'] = .19
sex.config['VERBOSE_TYPE'] = "FULL"
sex.config['CHECKIMAGE_TYPE'] = "BACKGROUND"
# Add a parameter to the parameter list
sex.config['PARAMETERS_LIST'].append('FLUX_BEST')
# Lauch SExtractor on a FITS file
sex.run("nf260002.fits")
# Read the resulting catalog [first method, whole catalog at once]
catalog = sex.catalog()
for star in catalog:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
# Read the resulting catalog [second method, whole catalog at once]
catalog_name = sex.config['CATALOG_NAME']
catalog_f = sextractor.open(catalog_name)
catalog = catalog_f.readlines()
for star in catalog:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
catalog_f.close()
# Read the resulting catalog [third method, star by star]
catalog_name = sex.config['CATALOG_NAME']
catalog_f = sextractor.open(catalog_name)
star = catalog_f.readline()
while star:
print star['FLUX_BEST'], star['FLAGS']
if (star['FLAGS'] & sextractor.BLENDED):
print "This star is BLENDED"
star = catalog_f.readline()
catalog_f.close()
# Removing the configuration files, the catalog and
# the check image
sex.clean(config=True, catalog=True, check=True)
-----------------------------------------------------------------
"""
# ======================================================================
import __builtin__
import os
import subprocess
import re
import copy
from sexcatalog import *
# ======================================================================
__version__ = "1.15.0 (2005-07-06)"
# ======================================================================
class SExtractorException(Exception):
    """Raised when the SExtractor program cannot be found, identified or run."""
    pass
# ======================================================================
# Neural-network weights written verbatim to the STARNNW_NAME file by
# SExtractor.update_config(); presumably a copy of SExtractor's stock
# default.nnw star/galaxy classifier weights -- do not edit by hand.
nnw_config = \
"""NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5'' (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00
"""
# ======================================================================
class SExtractor:
    """
    A wrapper class to transparently use SExtractor.
    """
    # Default configuration: maps each SExtractor keyword to a dict holding
    # its help text ("comment") and default ("value").  __init__ deep-copies
    # the values into self.config; update_config() writes them to disk.
    _SE_config = {
        "CATALOG_NAME":
        {"comment": "name of the output catalog",
         "value": "py-sextractor.cat"},
        "CATALOG_TYPE":
        {"comment":
         '"NONE","ASCII_HEAD","ASCII","FITS_1.0" or "FITS_LDAC"',
         "value": "ASCII_HEAD"},
        "PARAMETERS_NAME":
        {"comment": "name of the file containing catalog contents",
         "value": "py-sextractor.param"},
        "DETECT_TYPE":
        {"comment": '"CCD" or "PHOTO"',
         "value": "CCD"},
        "FLAG_IMAGE":
        {"comment": "filename for an input FLAG-image",
         "value": "flag.fits"},
        "DETECT_MINAREA":
        {"comment": "minimum number of pixels above threshold",
         "value": 5},
        "DETECT_THRESH":
        {"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
         "value": 1.5},
        "ANALYSIS_THRESH":
        {"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
         "value": 1.5},
        "FILTER":
        {"comment": 'apply filter for detection ("Y" or "N")',
         "value": 'Y'},
        "FILTER_NAME":
        {"comment": "name of the file containing the filter",
         "value": "py-sextractor.conv"},
        "DEBLEND_NTHRESH":
        {"comment": "Number of deblending sub-thresholds",
         "value": 32},
        "DEBLEND_MINCONT":
        {"comment": "Minimum contrast parameter for deblending",
         "value": 0.005},
        "CLEAN":
        {"comment": "Clean spurious detections (Y or N)",
         "value": 'Y'},
        "CLEAN_PARAM":
        {"comment": "Cleaning efficiency",
         "value": 1.0},
        "MASK_TYPE":
        {"comment": 'type of detection MASKing: can be one of "NONE", "BLANK" or "CORRECT"',
         "value": "CORRECT"},
        "PHOT_APERTURES":
        {"comment": "MAG_APER aperture diameter(s) in pixels",
         "value": 5},
        "PHOT_AUTOPARAMS":
        {"comment": 'MAG_AUTO parameters: <Kron_fact>,<min_radius>',
         "value": [2.5, 3.5]},
        "SATUR_LEVEL":
        {"comment": "level (in ADUs) at which arises saturation",
         "value": 50000.0},
        "MAG_ZEROPOINT":
        {"comment": "magnitude zero-point",
         "value": 0.0},
        "MAG_GAMMA":
        {"comment": "gamma of emulsion (for photographic scans)",
         "value": 4.0},
        "GAIN":
        {"comment": "detector gain in e-/ADU",
         "value": 0.0},
        "PIXEL_SCALE":
        {"comment": "size of pixel in arcsec (0=use FITS WCS info)",
         "value": 1.0},
        "SEEING_FWHM":
        {"comment": "stellar FWHM in arcsec",
         "value": 1.2},
        "STARNNW_NAME":
        {"comment": "Neural-Network_Weight table filename",
         "value": "py-sextractor.nnw"},
        "BACK_SIZE":
        {"comment": "Background mesh: <size> or <width>,<height>",
         "value": 64},
        "BACK_TYPE":
        {"comment": "Type of background to subtract: MANUAL or AUTO generated",
         "value": 'MANUAL'},
        "BACK_VALUE":
        {"comment": "User-supplied constant value to be subtracted as sky",
         "value": "0.0,0.0"},
        "BACK_FILTERSIZE":
        {"comment": "Background filter: <size> or <width>,<height>",
         "value": 3},
        "BACKPHOTO_TYPE":
        {"comment": 'can be "GLOBAL" or "LOCAL"',
         "value": "GLOBAL"},
        "BACKPHOTO_THICK":
        {"comment": "Thickness in pixels of the background local annulus",
         "value": 24},
        "CHECKIMAGE_TYPE":
        {"comment": 'can be one of "NONE", "BACKGROUND", "MINIBACKGROUND", "-BACKGROUND", "OBJECTS", "-OBJECTS", "SEGMENTATION", "APERTURES", or "FILTERED"',
         "value": "NONE"},
        "CHECKIMAGE_NAME":
        {"comment": "Filename for the check-image",
         "value": "check.fits"},
        "MEMORY_OBJSTACK":
        {"comment": "number of objects in stack",
         "value": 3000},
        "MEMORY_PIXSTACK":
        {"comment": "number of pixels in stack",
         "value": 300000},
        "MEMORY_BUFSIZE":
        {"comment": "number of lines in buffer",
         "value": 1024},
        "VERBOSE_TYPE":
        {"comment": 'can be "QUIET", "NORMAL" or "FULL"',
         "value": "QUIET"},
        # -- Extra-keys (will not be saved in the main configuration file
        "PARAMETERS_LIST":
        {"comment": '[Extra key] catalog contents (to put in PARAMETERS_NAME)',
         "value": ["NUMBER", "FLUX_BEST", "FLUXERR_BEST",
                   "X_IMAGE", "Y_IMAGE", "FLAGS", "FWHM_IMAGE"]},
        "CONFIG_FILE":
        {"comment": '[Extra key] name of the main configuration file',
         "value": "py-sextractor.sex"},
        "FILTER_MASK":
        {"comment": 'Array to put in the FILTER_MASK file',
         "value": [[1, 2, 1],
                   [2, 4, 2],
                   [1, 2, 1]]}
        }
    # -- Special config. keys that should not go into the config. file.
    # (They are consumed by update_config() itself instead.)
    _SE_config_special_keys = ["PARAMETERS_LIST", "CONFIG_FILE", "FILTER_MASK"]
    # -- Dictionary of all possible parameters (from sexcatalog.py module)
    _SE_parameters = SExtractorfile._SE_keys
    def __init__(self):
        """
        SExtractor class constructor.
        """
        # Per-instance copy of the default values; deepcopy so list-valued
        # defaults (e.g. PARAMETERS_LIST) are not shared between instances.
        self.config = (
            dict([(k, copy.deepcopy(SExtractor._SE_config[k]["value"]))\
                  for k in SExtractor._SE_config.keys()]))
        # print self.config
        # Filled in by setup() on the first run().
        self.program = None
        self.version = None
    def setup(self, path=None):
        """
        Look for SExtractor program ('sextractor', or 'sex').
        If a full path is provided, only this path is checked.
        Raise a SExtractorException if it failed.
        Return program and version if it succeed.
        """
        # -- Finding sextractor program and its version
        # first look for 'sextractor', then 'sex'
        candidates = ['sextractor', 'sex']
        if (path):
            candidates = [path]
        selected=None
        for candidate in candidates:
            try:
                # Launch the candidate with no arguments: SExtractor then
                # prints a usage banner containing its name and version.
                p = subprocess.Popen(candidate, shell=True,
                                     stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT, close_fds=True)
                (_out_err, _in) = (p.stdout, p.stdin)
                versionline = _out_err.read()
                if (versionline.find("SExtractor") != -1):
                    selected=candidate
                    break
            except IOError:
                continue
        if not(selected):
            raise SExtractorException, \
                  """
                  Cannot find SExtractor program. Check your PATH,
                  or provide the SExtractor program path in the constructor.
                  """
        _program = selected
        # print versionline
        # Extract the version number that follows the word "version".
        _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline)
        if not _version_match:
            raise SExtractorException, \
                  "Cannot determine SExtractor version."
        # group() is e.g. "version 2.3.2"; drop the 8-char "version " prefix.
        _version = _version_match.group()[8:]
        if not _version:
            raise SExtractorException, \
                  "Cannot determine SExtractor version."
        # print "Use " + self.program + " [" + self.version + "]"
        return _program, _version
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.
        """
        # -- Write filter configuration file
        # First check the filter itself
        # NOTE: `filter` shadows the builtin; kept for byte-compatibility.
        filter = self.config['FILTER_MASK']
        rows = len(filter)
        cols = len(filter[0])   # May raise ValueError, OK
        filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
        filter_f.write("CONV NORM\n")
        filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                       (rows, cols))
        for row in filter:
            filter_f.write(" ".join(map(repr, row)))
            filter_f.write("\n")
        filter_f.close()
        # -- Write parameter list file (one output column name per line)
        parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
        for parameter in self.config['PARAMETERS_LIST']:
            print >>parameters_f, parameter
        parameters_f.close()
        # -- Write NNW configuration file (stock weights, see nnw_config)
        nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
        nnw_f.write(nnw_config)
        nnw_f.close()
        # -- Write main configuration file (special keys are skipped; they
        # only drive the auxiliary files written above)
        main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')
        for key in self.config.keys():
            if (key in SExtractor._SE_config_special_keys):
                continue
            if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value
                value = " ".join(map(str, self.config[key]))
            else:
                value = str(self.config[key])
            print >>main_f, ("%-16s %-16s # %s" %
                             (key, value, SExtractor._SE_config[key]['comment']))
        main_f.close()
    def run(self, file, updateconfig=True, clean=False, path=None):
        """
        Run SExtractor.

        If updateconfig is True (default), the configuration
        files will be updated before running SExtractor.

        If clean is True (default: False), configuration files
        (if any) will be deleted after SExtractor terminates.
        """
        # NOTE: `file` shadows the builtin; kept for interface compatibility.
        if updateconfig:
            self.update_config()
        # Try to find SExtractor program
        # This will raise an exception if it failed
        self.program, self.version = self.setup(path)
        commandline = (self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
        # print commandline
        rcode = os.system(commandline)
        if (rcode):
            raise SExtractorException, \
                  "SExtractor command [%s] failed." % commandline
        if clean:
            self.clean()
    def catalog(self):
        """
        Read the output catalog produced by the last SExtractor run.
        Output is a list of dictionaries, with a dictionary for
        each star: {'param1': value, 'param2': value, ...}.
        """
        output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r')
        c = output_f.read()
        output_f.close()
        return c
    def clean(self, config=True, catalog=False, check=False):
        """
        Remove the generated SExtractor files (if any).
        If config is True, remove generated configuration files.
        If catalog is True, remove the output catalog.
        If check is True, remove output check image.
        """
        # Best-effort cleanup: a missing file is not an error.
        try:
            if (config):
                os.unlink(self.config['FILTER_NAME'])
                os.unlink(self.config['PARAMETERS_NAME'])
                os.unlink(self.config['STARNNW_NAME'])
                os.unlink(self.config['CONFIG_FILE'])
            if (catalog):
                os.unlink(self.config['CATALOG_NAME'])
            if (check):
                os.unlink(self.config['CHECKIMAGE_NAME'])
        except OSError:
            pass
# ======================================================================
|
wschoenell/chimera_imported_googlecode
|
src/chimera/util/sextractor.py
|
Python
|
gpl-2.0
| 18,823
|
[
"Galaxy"
] |
da19b911f71e4f4bde6c01ba36ed66e90e46beea4dbcd131341f7b036758f16f
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
from browser_interface.queue.QueueFactory import QueueFactory
from browser_interface.field.FieldFactory import FieldFactory
from browser_interface.browser.BrowserFactory import BrowserFactory
from browser_interface.log.Logging import Logging
from common import config
import private_config
from lxml import etree
import re
class Producer(object):
    """Collects article URLs from ifeng.com listing pages and enqueues them."""
    def __init__(self):
        # Instantiate the factory objects
        self.queue_redis = QueueFactory()
        self.field_factory = FieldFactory(u'凤凰网-生产者')
        self.browser_factory = BrowserFactory()
        self.db_factory = QueueFactory()
        # Instantiate the concrete objects
        self.log = Logging('./log/fenghuang_producer').get_logging()
        self.browser = self.browser_factory.create(config.browser_type)
        self.queue = self.queue_redis.create(config.queue_type, private_config.queue_table,
                                             config.queue_host, config.queue_port)
    def main(self):
        # Real-time news listing pages.
        # NOTE(review): both list entries are identical, so every matched URL
        # is enqueued twice -- looks like a copy-paste slip; confirm intent.
        js = ['http://news.ifeng.com/listpage/11502/20161014/1/rtlist.shtml',
              'http://news.ifeng.com/listpage/11502/20161014/1/rtlist.shtml', ]
        for jspage in js:
            try:
                html = self.browser.visit(jspage, timeout=10, retry=5, encoding='utf-8')
                tree = etree.HTML(html)
                # Article links inside the news list container.
                newslist = tree.xpath('//div[@class="newsList"]/ul/li/a/@href')
                for url in newslist:
                    self.queue.put(url)
                    print url
            except Exception as e:
                self.log.info(e)
        # News front page
        url_zixun = 'http://news.ifeng.com/'
        try:
            html = self.browser.visit(url_zixun, timeout=10, retry=3, encoding='utf-8')
        except Exception as e:
            self.log.info(e)
            return
        try:
            # Pull the second embedded "var dataList=[...]" JS literal out of
            # the page source and strip the leading "=[".
            somethings = re.findall(r'var dataList(.*?])', html)[1].replace('=[', '')
        except Exception as e:
            self.log.info(e)
            return
        # NOTE(review): eval() of page-derived text is unsafe on untrusted
        # input; consider parsing the literal with json instead.
        news_list = (eval(somethings))
        for item in news_list:
            news_url = item['url']
            # self.queue.put(news_url)
            print news_url
if __name__ == '__main__':
    # Script entry point: crawl the listing pages and enqueue article URLs.
    Producer().main()
|
xtuyaowu/jtyd_python_spider
|
feng_huang_net/fh_producer.py
|
Python
|
mit
| 2,301
|
[
"VisIt"
] |
313784242de981eb65a9e9e1d1859558b4b5f92b65cb51624c87b31bc60af7fd
|
#!/usr/bin/env python
"""
Runs checkTransformationIntegrity from ValidateOutputDataAgent on selected Tranformation
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import sys
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Run checkTransformationIntegrity on each transformation ID given on the command line."""
    # Registered arguments are automatically described in the --help output.
    Script.registerArgument(["transID: transformation ID"])
    _, positional = Script.parseCommandLine()
    requested_ids = [int(value) for value in positional]

    # Imported after parseCommandLine so DIRAC is fully initialised first.
    from DIRAC.TransformationSystem.Agent.ValidateOutputDataAgent import ValidateOutputDataAgent
    from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient

    agent = ValidateOutputDataAgent(
        "Transformation/ValidateOutputDataAgent",
        "Transformation/ValidateOutputDataAgent",
        "dirac-transformation-verify-outputdata",
    )
    agent.initialize()

    # NOTE(review): this client is never used below; instantiation kept to
    # preserve the original behaviour.
    client = TransformationClient()

    for trans_id in requested_ids:
        agent.checkTransformationIntegrity(trans_id)
if __name__ == "__main__":
    # Standard DIRAC script entry point.
    main()
|
ic-hep/DIRAC
|
src/DIRAC/TransformationSystem/scripts/dirac_transformation_verify_outputdata.py
|
Python
|
gpl-3.0
| 1,147
|
[
"DIRAC"
] |
5ec6e290054c052f11a92203a118d42b40d19615041792113d1832341e5d7a05
|
from math import sqrt
from ase import Atoms, Atom
from ase.calculators.emt import EMT
from ase.constraints import FixAtoms
from ase.optimize import BFGS, QuasiNewton
from ase.neb import NEB
# Distance between Cu atoms on a (111) surface:
a = 3.6
d = a / sqrt(2)
# One-atom fcc(111) cell; the third cell vector points into the slab (-z).
fcc111 = Atoms(symbols='Cu',
               cell=[(d, 0, 0),
                     (d / 2, d * sqrt(3) / 2, 0),
                     (d / 2, d * sqrt(3) / 6, -a / sqrt(3))],
               pbc=True)
slab = fcc111 * (2, 2, 4)
slab.set_cell([2 * d, d * sqrt(3), 1])
slab.set_pbc((1, 1, 0))
slab.calc = EMT()
# Freeze the lower half of the slab: every atom below the mean z height.
Z = slab.get_positions()[:, 2]
indices = [i for i, z in enumerate(Z) if z < Z.mean()]
constraint = FixAtoms(indices=indices)
slab.set_constraint(constraint)
dyn = QuasiNewton(slab)
dyn.run(fmax=0.05)
# Print the relaxed interlayer spacings.
Z = slab.get_positions()[:, 2]
print Z[0] - Z[1]
print Z[1] - Z[2]
print Z[2] - Z[3]
# Adsorb a CO molecule (C-O separation b) at height h above the surface.
b = 1.2
h = 1.5
slab += Atom('C', (d / 2, -b / 2, h))
slab += Atom('O', (d / 2, +b / 2, h))
s = slab.copy()  # NOTE(review): this copy is never used afterwards
dyn = QuasiNewton(slab)
dyn.run(fmax=0.05)
#view(slab)
# Make band:
images = [slab]
for i in range(6):
    image = slab.copy()
    image.set_constraint(constraint)
    image.calc = EMT()
    images.append(image)
# Displace the final image (`image` is the last copy from the loop,
# i.e. images[-1]): C takes O's old spot, O moves to a neighbouring site.
image[-2].position = image[-1].position
image[-1].x = d
image[-1].y = d / sqrt(3)
dyn = QuasiNewton(images[-1])
dyn.run(fmax=0.05)
# climb=not True is just climb=False -- presumably a quick toggle left off.
neb = NEB(images, climb=not True)
# NOTE(review): the banner comments below look displaced from the statements
# they describe (and "Ag atom" should read "CO"); kept for reference.
# Set constraints and calculator:
# Displace last image:
# Relax height of Ag atom for initial and final states:
# Interpolate positions between initial and final states:
neb.interpolate()
for image in images:
    print image.positions[-1], image.get_potential_energy()
#dyn = MDMin(neb, dt=0.4)
#dyn = FIRE(neb, dt=0.01)
dyn = BFGS(neb, maxstep=0.04, trajectory='mep.traj')
#from ase.optimize.oldqn import GoodOldQuasiNewton
#dyn = GoodOldQuasiNewton(neb)
dyn.run(fmax=0.05)
for image in images:
    print image.positions[-1], image.get_potential_energy()
if locals().get('display'):
    import os
    error = os.system('ag mep.traj@-7:')
    assert error == 0
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/test/COCu111.py
|
Python
|
gpl-2.0
| 2,010
|
[
"ASE"
] |
5053dc55d13d046b38b5f1d559d226a41e22b3266220d688b2e8f62715309f52
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
import functools
import numpy as np
import matplotlib.pylab as plt
from scipy.optimize import curve_fit
import lsst.daf.persistence as dp
import lsst.afw.image as afw_image
from lsst.afw.fits.fitsLib import FitsError
from desc.twinkles import make_invsnr_arr, fit_invsnr, get_visits
_filter_color = dict(u='blue',
g='green',
r='red',
i='cyan',
z='magenta',
y='black')
_filter_symbol = dict([(band, 'o') for band in 'ugrizy'])
def make_dataId(options):
    """Build a butler dataId dict from 'key=value' strings.

    Each entry of *options* must contain exactly one '='.  Values that
    parse as integers are stored as ints; everything else stays a string.
    Returns None when *options* is None.
    """
    if options is None:
        return None
    parsed = {}
    for option in options:
        key, raw = option.split('=')
        try:
            parsed[key] = int(raw)
        except ValueError:
            # Not a plain integer: keep the raw string.
            parsed[key] = raw
    return parsed
class MagStats(object):
    """Photometric-repeatability statistics for one band.

    Computes a systematic error floor from the bright sources and,
    optionally, fits a 1/SNR model to the faint ones.
    """
    def __init__(self, filter_, med_mags, med_err, minMag=17, mid_cut=20,
                 maxMag=26, fit_curves=False):
        self.filter = filter_
        self.med_mags = np.array(med_mags)
        self.med_err = np.array(med_err)
        # Systematic floor: median fractional scatter of the bright sample
        # (minMag < m < mid_cut).
        bright = np.where((minMag < self.med_mags) & (self.med_mags < mid_cut))
        self.sys_floor = np.median(self.med_err[bright])
        # Initial guess for (floor, m5); pcov stays None unless a fit runs.
        self.popt = (0.01, 24.5)
        self.pcov = None
        # Faint sample (mid_cut < m < maxMag) used for the optional fit.
        faint = np.where((mid_cut < self.med_mags) & (self.med_mags < maxMag))
        fit_func = functools.partial(fit_invsnr, bandpass_name=filter_)
        if fit_curves:
            self.popt, self.pcov = curve_fit(fit_func, self.med_mags[faint],
                                             self.med_err[faint], p0=self.popt)

    def plot_fit(self, linewidth=3, alpha=0.75):
        """Overlay the fitted 1/SNR curve for this band on the current axes."""
        mags, invsnrs = make_invsnr_arr(floor=self.sys_floor, m5=self.popt[1])
        plt.plot(mags, invsnrs, color=_filter_color[self.filter],
                 linewidth=linewidth, alpha=alpha)
def plot_point_mags(output_data, visit_list, dataId, minMag=17, mid_cut=20,
                    maxMag=26, fit_curves=True):
    """Scatter-plot stdev(flux)/median(flux) vs. median magnitude.

    Builds a lightcurve for every point-like coadd object (extendedness
    <= 0.5) from forced-source measurements over ``visit_list``, keeps
    objects with a positive flux in every visit, and plots their
    fractional scatter on the current matplotlib figure.

    Parameters: ``output_data`` is the Level 2 output repository path;
    ``dataId`` identifies the sensor/tract and is MUTATED in place (a
    'visit' key is set for each visit processed); ``minMag``/``mid_cut``/
    ``maxMag`` are the magnitude cuts forwarded to MagStats;
    ``fit_curves`` toggles the 1/SNR curve fit.

    Returns the matplotlib scatter handle and the MagStats instance.
    """
    # get a butler
    butler = dp.Butler(output_data)
    # The following value for refcatId is "mandatory, but meaningless",
    # so we won't try to generalize it.
    refcatId = {'tract':0, 'patch':'0,0'}
    ref = butler.get('deepCoadd_ref', dataId=refcatId)
    # get the sources and calib objects for each single epoch visit;
    # visits whose data cannot be read (FitsError) are skipped.
    forced_srcs = {}
    calibs = {}
    for visit in visit_list:
        dataId['visit'] = visit
        try:
            my_forced_srcs = butler.get('forced_src', dataId=dataId)
            calexp = butler.get('calexp', dataId=dataId)
            my_calibs = calexp.getCalib()
            del calexp      # free the exposure; only the calib is needed
            forced_srcs[visit] = my_forced_srcs
            calibs[visit] = my_calibs
        except FitsError as eobj:
            print(eobj)
    # initialize dictionaries to hold lightcurve arrays. Get
    # extendedness from the coadd catalog.
    lightcurve_fluxes = {}
    extendedness = {}
    for idx, ext in zip(ref.get('id'),
                        ref.get('base_ClassificationExtendedness_value')):
        lightcurve_fluxes[idx] = []
        extendedness[idx] = ext
    # pivot the source tables to assemble lightcurves; keep only
    # point-like objects (extendedness <= 0.5) with positive fluxes
    for visit, forced_src in forced_srcs.items():
        calib = calibs[visit]
        for idx, flux in zip(forced_src.get('objectId'),
                             forced_src.get('base_PsfFlux_flux')):
            if extendedness[idx] > 0.5:
                continue
            if flux <= 0.:
                continue
            lightcurve_fluxes[idx].append(afw_image.fluxFromABMag(calib.getMagnitude(flux)))
    # compute aggregate quantities for each object and plot; objects
    # missing any visit (e.g. a skipped FitsError visit) are dropped
    band = dataId['filter']
    med_mags = []
    med_err = []
    for lightcurve in lightcurve_fluxes.values():
        if len(lightcurve) == len(visit_list):
            median_flux = np.median(lightcurve)
            med_mags.append(afw_image.abMagFromFlux(median_flux))
            med_err.append(np.std(lightcurve)/median_flux)
    print("number of objects: ", len(med_mags))
    # BUGFIX: forward the magnitude cuts -- previously minMag/mid_cut/maxMag
    # were accepted by this function but silently ignored (MagStats always
    # used its own defaults, which happen to equal ours).
    mag_stats = MagStats(band, med_mags, med_err, minMag=minMag,
                         mid_cut=mid_cut, maxMag=maxMag,
                         fit_curves=fit_curves)
    if mag_stats.pcov is not None:
        label ='filter=%s, Floor=%.1f%%, m_5=%0.2f' \
            % (band, mag_stats.sys_floor*100, mag_stats.popt[1])
    else:
        label ='filter=%s, Floor=%.1f%%' \
            % (band, mag_stats.sys_floor*100)
    scatter = plt.scatter(med_mags, med_err,
                          alpha=0.3, color=_filter_color[band],
                          marker=_filter_symbol[band], label=label)
    plt.xlabel("Calibrated magnitude of median flux")
    plt.ylabel("stdev(flux)/median(flux)")
    plt.xlim(15.5, 25)
    plt.ylim(0., 0.5)
    return scatter, mag_stats
if __name__ == '__main__':
    import argparse
    description = \
        """
    For an output repository of Level 2 data, make a plot of stdev vs
    median flux of forced sources.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('data_repo', help='Output repository for Level 2 analysis')
    parser.add_argument('outfile', help='Filename of the output png file')
    parser.add_argument('--skip_fit', help='Skip the fitting of the snr curve',
                        action='store_true', default=False)
    args = parser.parse_args()
    # Discover which visits exist per filter in the repository.
    visits = get_visits(args.data_repo)
    print('# visits per filter:')
    for filter_ in visits:
        print("  ", filter_, " ", len(visits[filter_]))
    # Fixed sensor/tract selection; plot_point_mags adds 'visit' per visit.
    dataId = make_dataId('raft=2,2 sensor=1,1 tract=0'.split())
    fit_curves = not args.skip_fit
    plots = []
    mag_stats = {}
    for filter_ in visits:
        # Scatter statistics need at least two epochs per band.
        if len(visits[filter_]) < 2:
            print("skipping %s band: too few visits" % filter_)
            continue
        print("plotting filter ", filter_)
        dataId['filter'] = filter_
        plot, stats = plot_point_mags(args.data_repo, visits[filter_],
                                      dataId=dataId, fit_curves=fit_curves)
        mag_stats[filter_] = stats
        plots.append(plot)
    # Overlay the fitted 1/SNR curves after all scatters are drawn.
    if fit_curves:
        for filter_ in mag_stats:
            mag_stats[filter_].plot_fit()
    plt.legend(handles=plots, scatterpoints=1, loc=2)
    plt.savefig(args.outfile)
|
LSSTDESC/Twinkles
|
bin/plot_point_mags.py
|
Python
|
mit
| 6,238
|
[
"VisIt"
] |
54b7cf61bce4596f6ae1eb502dbcd15b8697dc84676704c323c056a0ca0084ad
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.orca_state as orca_state
import orca.scripts.toolkits.Gecko as Gecko
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(Gecko.Utilities):
    """Thunderbird-specific accessibility utilities.

    Derives from the Gecko utilities (Thunderbird embeds Gecko for
    message display) but adapts lookups where Thunderbird arranges
    content differently from Firefox."""

    def __init__(self, script):
        """Creates an instance of the Utilities class.

        Arguments:
        - script: the script with which this instance is associated.
        """
        Gecko.Utilities.__init__(self, script)

    #########################################################################
    #                                                                       #
    # Utilities for finding, identifying, and comparing accessibles         #
    #                                                                       #
    #########################################################################

    def documentFrame(self):
        """Returns the document frame that holds the content being shown.
        Overridden here because multiple open messages are not arranged
        in tabs like they are in Firefox."""

        candidate = orca_state.locusOfFocus
        if not candidate:
            return None

        if self.inFindToolbar():
            return Gecko.Utilities.documentFrame(self)

        # Walk up the accessible hierarchy until a document container is hit.
        contentRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_EMBEDDED]
        while candidate:
            if candidate.getRole() in contentRoles:
                return candidate
            candidate = candidate.parent

        return None

    def isEntry(self, obj):
        """Returns True if we should treat this object as an entry."""

        return obj and obj.getRole() == pyatspi.ROLE_ENTRY

    def isPasswordText(self, obj):
        """Returns True if we should treat this object as password text."""

        return obj and obj.getRole() == pyatspi.ROLE_PASSWORD_TEXT
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/Thunderbird/script_utilities.py
|
Python
|
gpl-3.0
| 4,077
|
[
"ORCA"
] |
8f06f6d6f6016af4b5fa73e8f77d8cf8fe1f455a5ed743aff917d71cd99d6840
|
# Created on 09/18/11 14:14:57
from math import *
from random import *
from UnitCommands import *
def CreateScenario(SM):
SM.SetScenarioDescription("""Backfires and Condors\nVersion 2.0 of 2010/06/24\n\nA scenario for Global Conflicts 2.\n\nWritten by Ralf Koelbach.\nrkoelbach@yahoo.com\n\nBackfires and Condors is a classical Cold War scenario putting the Soviet \nNaval Aviation against an US CVBG near Iceland.\n\nYou are to defend your carrier group against those huge, supersonic \nRussian killer missiles.\n\nComments and critiques welcome!\n\nHave fun!\nRalf\n\n\n\n\n\n\n""")
SM.SetScenarioName("""Backfires""")
SM.CreateAlliance(1, 'USA')
SM.SetAllianceDefaultCountry(1, 'USA')
SM.SetAlliancePlayable(1, 1)
SM.CreateAlliance(2, 'USSR')
SM.SetAllianceDefaultCountry(2, 'USSR')
SM.SetAlliancePlayable(2, 1)
SM.CreateAlliance(3, 'Sweden')
SM.SetAllianceDefaultCountry(3, 'Sweden')
SM.SetAlliancePlayable(3, 1)
SM.SetAllianceRelationship(1, 3, 'Neutral')
SM.SetAllianceRelationship(2, 3, 'Neutral')
SM.SetUserAlliance(1)
SM.SetDateTime(1989,8,14,12,0,8)
SM.SetStartTheater(-20.970835, 59.737499) # (lon, lat) in degrees, negative is West or South
SM.SetScenarioLoaded(1)
SM.SetSeaState(3)
SM.SetSVP('0.000000,1515.000000,200.000000,1500.000000,300.000000,1510.000000,500.000000,1520.000000,5000.000000,1600.000000')
####################
SM.SetSimpleBriefing(1, """ORDERS FOR CMDR US OPERATIONS\n\nINTELLIGENCE\n\nThe control of the GIUK Gap is vital for our efforts to win the war. At \nfirst we must neutralize Iceland, then retake it.\n\nMISSION\n\nMake best speed towards Iceland. Destroy all hostile units on your way \nand the Soviet facilities at Iceland.\n\nEXECUTION\n\nProceed with care. Expect heavy air resistance. Sub threat unknown.\n\nGOALS:\n\nProtect your carrier and destroy both the Russians HQ at Kevlavik and \nReykjavik.\n\nCOMMAND AND SIGNAL\n\nCVN Nimitz \n\n\n\n\n\n\n\n\n""")
####################
SM.SetSimpleBriefing(2, """This scenario was designed for playing US side.\n\n""")
####################
SM.SetSimpleBriefing(3, """No briefing found""")
##############################
### Alliance 1 units
##############################
unit = SM.GetDefaultUnit()
unit.className = 'Los Angeles (1988)'
unit.unitName = "USS Toledo "
unit.SetPosition(-19.123006, 62.378531, -80.0)
unit.heading = 72.69
unit.speed = 7.1
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'UGM-84C Harpoon', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, 'UGM-109A', 6)
SM.SetUnitLauncherItem(unit.unitName, 5, 'UGM-109A', 6)
SM.SetUnitLauncherItem(unit.unitName, 6, 'Decoy-1', 10)
SM.SetUnitLauncherItem(unit.unitName, 7, 'Decoy-1', 10)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Toledo ", 'Mk-48 Mod5', 12)
SM.AddToUnitMagazine("USS Toledo ", 'UGM-84C Harpoon', 4)
UI.SetSensorState(5, 0)
UI.SetSensorState(6, 0)
UI.SetSensorState(7, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.369496, 1.094147, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Los Angeles (1988)'
unit.unitName = "USS Columbus "
unit.SetPosition(-21.899943, 60.849539, -80.0)
unit.heading = 72.69
unit.speed = 7.6
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Mk-48 Mod5', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'UGM-84C Harpoon', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, 'UGM-109A', 6)
SM.SetUnitLauncherItem(unit.unitName, 5, 'UGM-109A', 6)
SM.SetUnitLauncherItem(unit.unitName, 6, 'Decoy-1', 10)
SM.SetUnitLauncherItem(unit.unitName, 7, 'Decoy-1', 10)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Columbus ", 'Mk-48 Mod5', 12)
SM.AddToUnitMagazine("USS Columbus ", 'UGM-84C Harpoon', 4)
UI.SetSensorState(5, 0)
UI.SetSensorState(6, 0)
UI.SetSensorState(7, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.380798, 1.076128, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-3A'
unit.unitName = "Dragon Fires 9"
unit.SetPosition(-21.408123, 59.239447, 4000.0)
unit.heading = 354.82
unit.speed = 100.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'LOFAR (85) Sonobuoy', 12)
SM.SetUnitLauncherItem(unit.unitName, 1, 'DICASS (85) Sonobuoy', 12)
SM.SetUnitLauncherItem(unit.unitName, 2, 'DIFAR (85) Sonobuoy', 36)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 2)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 2)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.374773, 1.036805, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.362703, 1.034594, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'USS Carl Vinson')
unit = SM.GetDefaultUnit()
unit.className = 'S-3A'
unit.unitName = "Dragon Fires 10"
unit.SetPosition(-21.092840, 60.266264, 4000.0)
unit.heading = 304.76
unit.speed = 214.5
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'LOFAR (85) Sonobuoy', 12)
SM.SetUnitLauncherItem(unit.unitName, 1, 'DICASS (85) Sonobuoy', 12)
SM.SetUnitLauncherItem(unit.unitName, 2, 'DIFAR (85) Sonobuoy', 36)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 2)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 2)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.373832, 1.051218, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.375400, 1.046598, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.359333, 1.046400, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.360900, 1.051021, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'USS Carl Vinson')
unit = SM.GetDefaultUnit()
unit.className = 'Nimitz CVN-70 USS Carl Vinson'
unit.unitName = "USS Carl Vinson"
unit.SetPosition(-20.873797, 59.772865, 0.0)
unit.heading = -11.19
unit.speed = 16.5
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-7P(v1)', 8)
SM.SetUnitLauncherItem(unit.unitName, 1, 'RIM-7P(v1)', 8)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-7P(v1)', 8)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 5, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 6, '20mm mark 244-0 ELC', 97)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Carl Vinson", 'Fuel', 10600000)
SM.AddToUnitMagazine("USS Carl Vinson", 'AIM-9M', 239)
SM.AddToUnitMagazine("USS Carl Vinson", 'AIM-7M', 272)
SM.AddToUnitMagazine("USS Carl Vinson", 'AIM-54C', 14)
SM.AddToUnitMagazine("USS Carl Vinson", 'AGM-45B', 88)
SM.AddToUnitMagazine("USS Carl Vinson", 'AGM-88B', 96)
SM.AddToUnitMagazine("USS Carl Vinson", 'AGM-84C Harpoon', 44)
SM.AddToUnitMagazine("USS Carl Vinson", 'Mk-46 Mod5', 127)
SM.AddToUnitMagazine("USS Carl Vinson", 'M117', 1168)
SM.AddToUnitMagazine("USS Carl Vinson", 'M118', 287)
SM.AddToUnitMagazine("USS Carl Vinson", 'Mk-82', 1833)
SM.AddToUnitMagazine("USS Carl Vinson", 'Mk-83', 1114)
SM.AddToUnitMagazine("USS Carl Vinson", 'Mk-84', 377)
SM.AddToUnitMagazine("USS Carl Vinson", 'GBU-24/B', 47)
SM.AddToUnitMagazine("USS Carl Vinson", 'GBU-24B/B', 73)
SM.AddToUnitMagazine("USS Carl Vinson", 'B-57 Mod1 10kT', 4)
SM.AddToUnitMagazine("USS Carl Vinson", 'B-57 Mod5 20kT', 2)
SM.AddToUnitMagazine("USS Carl Vinson", 'FPU-6', 3)
SM.AddToUnitMagazine("USS Carl Vinson", '370 gallon wing tank', 28)
SM.AddToUnitMagazine("USS Carl Vinson", '20mm PGU', 1438)
SM.AddToUnitMagazine("USS Carl Vinson", 'Chaff-1', 3154)
SM.AddToUnitMagazine("USS Carl Vinson", 'Flare-1', 3154)
SM.AddToUnitMagazine("USS Carl Vinson", 'LOFAR (85) Sonobuoy', 491)
SM.AddToUnitMagazine("USS Carl Vinson", 'DICASS (85) Sonobuoy', 491)
SM.AddToUnitMagazine("USS Carl Vinson", 'DIFAR (85) Sonobuoy', 1473)
SM.AddToUnitMagazine("USS Carl Vinson", 'RIM-7P(v1)', 94)
SM.AddToUnitMagazine("USS Carl Vinson", '20mm mark 244-0 ELC', 2092)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.388189, 1.100971, 0.000000, 0.000000)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 1', 3)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 1', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 2', 3)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 2', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 1', 3)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 1', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 1', 3)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 1', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 3', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 3', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 4', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 4', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 5', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 5', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 6', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 6', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 1', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 1', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 2', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 2', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 3', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 3', '2 AGM-84D Harpoon;2 AGM-84D Harpoon;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 4', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 4', '2 AGM-84D Harpoon;2 AGM-84D Harpoon;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 5', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 5', '2 AGM-88B;2 AGM-88B;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 6', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 6', '2 AGM-88B;2 AGM-88B;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'KA-6D', 'Viceroy 103', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 103', '2 300 gallon tank;2 300 gallon tank;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 2', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 2', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 2', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 2', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 3', 2)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 3', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 7', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 7', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 8', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 8', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 9', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 9', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 10', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 10','32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 11', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 11', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Eagle 12', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Eagle 12', '32 20mm PGU;4 AIM-54C;2 AIM-9M;2 AIM-7P;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 1', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 1', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 2', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 2', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 3', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 4', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 5', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 6', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 7', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 7', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 8', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 8', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 9', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 9', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 10', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 10', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 11', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 11', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'F-14A', 'Sundown 12', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Sundown 12', '32 20mm PGU;4 AIM-7P;2 AIM-9M;2 AIM-54C;30 Flare-1;30 Chaff-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 1', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 2', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 2', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 3', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 4', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 5', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 6', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 7', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 7', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 8', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 8', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 9', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 9', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 10', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 10', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 11', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 11', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Warhawk 12', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Warhawk 12', '0 Empty;2 GBU-24B/B;2 GBU-24B/B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 3', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 4', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 5', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 6', '6 AGM-65B;6 AGM-65B;6 AGM-65B;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 7', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 7', '6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 8', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 8', '6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 9', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 9', '6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 10', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 10','6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 11', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 11','6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-7E(79)', 'Mace 12', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Mace 12','6 Mk-82;6 Mk-82;2 300 gallon tank;0 Empty;48 20mm PGU-28/B;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 1', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 1', '2 AGM-84D Harpoon;2 AGM-84D Harpoon;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'A-6E(78)', 'Viceroy 2', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 2', '2 AGM-84D Harpoon;2 AGM-84D Harpoon;1 300 gallon tank;30 Chaff-1;30 Flare-1;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'KA-6D', 'Viceroy 101', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 101', '1 300 gallon wing tank;1 300 gallon wing tank;1 300 gallon wing tank;1 300 gallon wing tank;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'KA-6D', 'Viceroy 102', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 102', '1 300 gallon wing tank;1 300 gallon wing tank;1 300 gallon wing tank;1 300 gallon wing tank;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'KA-6D', 'Viceroy 104', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Viceroy 104', '0 Empty;0 Empty;0 Empty;0 Empty;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 1', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 1', '2 AGM-88B;2 AGM-88B;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 2', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 2', '2 AGM-88B;2 AGM-88B;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 3', '2 AGM-88B;2 AGM-88B;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 4', '2 AGM-88B;2 AGM-88B;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 5', '2 AGM-88B;0 Empty;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'EA-6B', 'Garudas 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Garudas 6', '2 AGM-88B;0 Empty;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 3', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 4', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 5', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'E-2C Group 0', 'Hormel Hogs 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Hormel Hogs 6', '')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 4', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 5', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 6', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 7', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 7', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'S-3A', 'Dragon Fires 8', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Dragon Fires 8', '12 DICASS (85) Sonobuoy;12 LOFAR (85) Sonobuoy;36 DIFAR (85) Sonobuoy;2 Mk-46 Mod5;2 Mk-46 Mod5;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 1', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 1', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 2', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 2', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 3', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 3', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 4', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 4', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 5', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 5', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Carl Vinson', 'SH-3H', 'Knight 6', 1)
SM.SetFlightDeckUnitLoadout('USS Carl Vinson', 'Knight 6', '2 Mk-46 Mod5;2 Mk-46 Mod5;10 DICASS (85) Sonobuoy;10 LOFAR (85) Sonobuoy;30 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS Rentz"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.3405
lat_deg = 57.296*leader_track.Lat + -0.0534
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Rentz", 'Fuel', 28080)
SM.AddToUnitMagazine("USS Rentz", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS Rentz", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS Rentz", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS Rentz", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS Rentz", 'Flare-1', 50)
SM.AddToUnitMagazine("USS Rentz", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Rentz", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Rentz", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS Rentz", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS Rentz", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS Rentz", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS Rentz", '76mm HE-MOM', 240)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(20.002, 4.934, 2.100, 0.322)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Rentz', 'SH-60B', 'Perry FFG Seahawk 101', 1)
SM.SetFlightDeckUnitLoadout('USS Rentz', 'Perry FFG Seahawk 101', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
unit = SM.GetDefaultUnit()
unit.className = 'Ticonderoga CG Baseline 3'
unit.unitName = "USS Princeton"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
lon_deg = 57.296*leader_track.Lon + 0.1650
lat_deg = 57.296*leader_track.Lat + 0.0177
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 6, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 7, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 8, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 9, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 10, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Princeton", 'Fuel', 56161)
SM.AddToUnitMagazine("USS Princeton", 'Mk-46 Mod5', 46)
SM.AddToUnitMagazine("USS Princeton", 'AGM-114 Hellfire', 16)
SM.AddToUnitMagazine("USS Princeton", '120 gallon tank', 4)
SM.AddToUnitMagazine("USS Princeton", 'Chaff-1', 75)
SM.AddToUnitMagazine("USS Princeton", 'Flare-1', 75)
SM.AddToUnitMagazine("USS Princeton", 'LOFAR (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Princeton", 'DICASS (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Princeton", 'DIFAR (85) Sonobuoy', 608)
SM.AddToUnitMagazine("USS Princeton", 'RIM-66M', 119)
SM.AddToUnitMagazine("USS Princeton", '20mm mark 244-0 ELC', 1046)
SM.AddToUnitMagazine("USS Princeton", '127mm mk 80 HE-PD mk 67', 1200)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(9.511, 1.481, 1.563, 0.290)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
SM.AddUnitToFlightDeck('USS Princeton', 'SH-60B', 'Tico Seahawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Princeton', 'Tico Seahawk 1', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Princeton', 'SH-60B', 'Tico Seahawk 2', 1)
SM.SetFlightDeckUnitLoadout('USS Princeton', 'Tico Seahawk 2', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
# --- USS McInerney: Oliver Hazard Perry-class frigate (alliance 1) ---
# Spawned at a fixed offset from USS Carl Vinson and stationed in her screen.
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS McInerney"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
# 57.296 = degrees per radian; track Lat/Lon are in radians, offsets in degrees.
lon_deg = 57.296*leader_track.Lon + -0.0756
lat_deg = 57.296*leader_track.Lat + 0.1754
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
# Ready-service rounds per launcher: (unit, launcher index, item, count).
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
# Magazine stores (reloads plus helicopter fuel/ordnance/sonobuoys).
SM.AddToUnitMagazine("USS McInerney", 'Fuel', 28080)
SM.AddToUnitMagazine("USS McInerney", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS McInerney", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS McInerney", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS McInerney", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS McInerney", 'Flare-1', 50)
SM.AddToUnitMagazine("USS McInerney", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS McInerney", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS McInerney", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS McInerney", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS McInerney", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS McInerney", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS McInerney", '76mm HE-MOM', 240)
# Standing AI tasks: (task name, priority, update interval) —
# presumably priority/period semantics; verify against engine docs.
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Keep station on the carrier; SetFormationPosition args look like
# range/bearing sector bounds — TODO confirm against engine docs.
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(19.986, 4.455, -0.002, 0.356)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
# Embarked helicopter with ASW loadout.
SM.AddUnitToFlightDeck('USS McInerney', 'SH-60B', 'Perry FFG Seahawk 201', 1)
SM.SetFlightDeckUnitLoadout('USS McInerney', 'Perry FFG Seahawk 201', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
# --- USS Antietam: Ticonderoga-class cruiser (alliance 1), carrier escort ---
unit = SM.GetDefaultUnit()
unit.className = 'Ticonderoga CG Baseline 2'
unit.unitName = "USS Antietam"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
# Position relative to the carrier (57.296 = degrees per radian).
lon_deg = 57.296*leader_track.Lon + -0.1639
lat_deg = 57.296*leader_track.Lat + -0.0172
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
# Launcher load-out: SAM rails, CIWS, Harpoon canisters, guns, torpedo tubes.
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm Mark 149-4', 90)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm Mark 149-4', 90)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 6, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 7, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 8, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 9, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 10, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
# Magazine stores (reloads plus helicopter fuel/ordnance/sonobuoys).
SM.AddToUnitMagazine("USS Antietam", 'Fuel', 56161)
SM.AddToUnitMagazine("USS Antietam", 'Mk-46 Mod5', 46)
SM.AddToUnitMagazine("USS Antietam", 'AGM-114 Hellfire', 16)
SM.AddToUnitMagazine("USS Antietam", '120 gallon tank', 4)
SM.AddToUnitMagazine("USS Antietam", 'Chaff-1', 75)
SM.AddToUnitMagazine("USS Antietam", 'Flare-1', 75)
SM.AddToUnitMagazine("USS Antietam", 'LOFAR (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Antietam", 'DICASS (85) Sonobuoy', 203)
SM.AddToUnitMagazine("USS Antietam", 'DIFAR (85) Sonobuoy', 608)
SM.AddToUnitMagazine("USS Antietam", 'RIM-66M', 119)
SM.AddToUnitMagazine("USS Antietam", '20mm Mark 149-4', 1046)
SM.AddToUnitMagazine("USS Antietam", '127mm mk 80 HE-PD mk 67', 1200)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Hold formation station on the carrier.
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(9.460, 1.713, -1.564, 0.285)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
# Two embarked Seahawks with ASW loadouts.
SM.AddUnitToFlightDeck('USS Antietam', 'SH-60B', 'Tico Seahawk 101', 1)
SM.SetFlightDeckUnitLoadout('USS Antietam', 'Tico Seahawk 101', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Antietam', 'SH-60B', 'Tico Seahawk 102', 1)
SM.SetFlightDeckUnitLoadout('USS Antietam', 'Tico Seahawk 102', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
# --- USS Sacramento: fast combat support ship (alliance 1), replenishment ---
unit = SM.GetDefaultUnit()
unit.className = 'Sacramento AOEHM'
unit.unitName = "USS Sacramento"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
# Position relative to the carrier (57.296 = degrees per radian).
lon_deg = 57.296*leader_track.Lon + 0.0432
lat_deg = 57.296*leader_track.Lat + 0.0072
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = 348.81
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
# Self-defence fit only: CIWS mounts plus a Sea Sparrow launcher.
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-7P(v1)', 8)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("USS Sacramento", '20mm mark 244-0 ELC', 1046)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Hold formation station on the carrier.
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(2.900, 3.200, 1.433, 0.385)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
# --- USS Duncan: Oliver Hazard Perry-class frigate (alliance 1) ---
# Same class/fit as USS McInerney; different screen station.
unit = SM.GetDefaultUnit()
unit.className = 'Oliver Hazard Perry FFGHM'
unit.unitName = "USS Duncan"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
# Position relative to the carrier (57.296 = degrees per radian).
lon_deg = 57.296*leader_track.Lon + -0.2653
lat_deg = 57.296*leader_track.Lat + -0.1191
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
# Ready-service rounds per launcher.
SM.SetUnitLauncherItem(unit.unitName, 0, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 1, '76mm HE-MOM', 80)
SM.SetUnitLauncherItem(unit.unitName, 2, 'RIM-66L', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Mk-46 Mod5', 3)
SM.SetUnitLauncherItem(unit.unitName, 4, 'Mk-46 Mod5', 3)
UI = SM.GetUnitInterface(unit.unitName)
# Magazine stores (reloads plus helicopter fuel/ordnance/sonobuoys).
SM.AddToUnitMagazine("USS Duncan", 'Fuel', 28080)
SM.AddToUnitMagazine("USS Duncan", 'Mk-46 Mod5', 25)
SM.AddToUnitMagazine("USS Duncan", 'AGM-114 Hellfire', 8)
SM.AddToUnitMagazine("USS Duncan", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS Duncan", 'Chaff-1', 50)
SM.AddToUnitMagazine("USS Duncan", 'Flare-1', 50)
SM.AddToUnitMagazine("USS Duncan", 'DICASS (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Duncan", 'LOFAR (85) Sonobuoy', 105)
SM.AddToUnitMagazine("USS Duncan", 'DIFAR (85) Sonobuoy', 315)
SM.AddToUnitMagazine("USS Duncan", 'RIM-66L', 35)
SM.AddToUnitMagazine("USS Duncan", 'RGM-84D Harpoon', 4)
SM.AddToUnitMagazine("USS Duncan", '20mm mark 244-0 ELC', 523)
SM.AddToUnitMagazine("USS Duncan", '76mm HE-MOM', 240)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Hold formation station on the carrier.
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(20.026, 3.861, -2.094, 0.341)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
# Embarked helicopter with ASW loadout.
SM.AddUnitToFlightDeck('USS Duncan', 'SH-60B', 'Perry FFG Seahawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Duncan', 'Perry FFG Seahawk 1', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
# --- USS Merrill: Spruance-class destroyer with armored box launchers ---
# Alliance 1 carrier escort; carries Tomahawk and VL-ASROC in addition to
# the standard gun/CIWS/Harpoon fit.
unit = SM.GetDefaultUnit()
unit.className = 'Spruance DDG ABL'
unit.unitName = "USS Merrill"
UI = SM.GetUnitInterface('USS Carl Vinson')
leader_track = UI.GetTrackById(UI.GetPlatformId())
# Position relative to the carrier (57.296 = degrees per radian).
lon_deg = 57.296*leader_track.Lon + 0.0244
lat_deg = 57.296*leader_track.Lat + -0.0488
unit.SetPosition(lon_deg, lat_deg, 0.0)
unit.heading = -11.19
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 1)
SM.SetUnitLauncherItem(unit.unitName, 0, 'RIM-7P(v1)', 8)
SM.SetUnitLauncherItem(unit.unitName, 1, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 2, '127mm mk 80 HE-PD mk 67', 20)
SM.SetUnitLauncherItem(unit.unitName, 3, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 4, '20mm mark 244-0 ELC', 97)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 6, 'RGM-84D Harpoon', 4)
SM.SetUnitLauncherItem(unit.unitName, 7, 'RUM-139 Mod4 ASROC', 8)
SM.SetUnitLauncherItem(unit.unitName, 8, 'BGM-109 TLAM', 4)
SM.SetUnitLauncherItem(unit.unitName, 9, 'BGM-109 TLAM', 4)
UI = SM.GetUnitInterface(unit.unitName)
# Magazine stores (reloads plus helicopter fuel/ordnance/sonobuoys).
SM.AddToUnitMagazine("USS Merrill", 'Fuel', 65139)
SM.AddToUnitMagazine("USS Merrill", 'Mk-46 Mod5', 31)
SM.AddToUnitMagazine("USS Merrill", '120 gallon tank', 2)
SM.AddToUnitMagazine("USS Merrill", 'AGM-114 Hellfire', 16)
SM.AddToUnitMagazine("USS Merrill", 'Chaff-1', 100)
SM.AddToUnitMagazine("USS Merrill", 'Flare-1', 100)
SM.AddToUnitMagazine("USS Merrill", 'LOFAR (85) Sonobuoy', 253)
SM.AddToUnitMagazine("USS Merrill", 'DICASS (85) Sonobuoy', 253)
SM.AddToUnitMagazine("USS Merrill", 'DIFAR (85) Sonobuoy', 758)
SM.AddToUnitMagazine("USS Merrill", '20mm mark 244-0 ELC', 1046)
SM.AddToUnitMagazine("USS Merrill", '127mm mk 80 HE-PD mk 67', 1200)
UI.AddTask('MissileWarning', 3.000000, 3)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Hold formation station on the carrier.
leader_id = UI.LookupFriendlyId('USS Carl Vinson')
UI.SetFormationLeader(leader_id)
UI.SetFormationMode(2)
UI.SetFormationPosition(5.639, 3.194, 3.146, 0.571)
UI.SetFormationAltitudeOffset(0.0)
UI.SetFormationUseNorthBearing(0)
# Two embarked Seahawks with ASW loadouts.
SM.AddUnitToFlightDeck('USS Merrill', 'SH-60B', 'Spruance ABL DDG Seahawk 1', 1)
SM.SetFlightDeckUnitLoadout('USS Merrill', 'Spruance ABL DDG Seahawk 1', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
SM.AddUnitToFlightDeck('USS Merrill', 'SH-60B', 'Spruance ABL DDG Seahawk 2', 1)
SM.SetFlightDeckUnitLoadout('USS Merrill', 'Spruance ABL DDG Seahawk 2', '1 Mk-46 Mod5;1 120 gallon tank;1 Mk-46 Mod5;25 Flare-1;25 Chaff-1;5 DICASS (85) Sonobuoy;5 LOFAR (85) Sonobuoy;15 DIFAR (85) Sonobuoy;')
##############################
### Alliance 2 units
##############################
# --- K-148 Krasnodar: Oscar II-class SSGN (alliance 2) ---
# Submerged (depth -71.7 m) on a looping patrol with Granit anti-ship
# missiles and torpedo tubes loaded.
unit = SM.GetDefaultUnit()
unit.className = 'Pr 949A Antey'
unit.unitName = "K-148 Krasnodar"
unit.SetPosition(-21.749936, 63.431814, -71.7)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '3M45 Granit', 24)
SM.SetUnitLauncherItem(unit.unitName, 1, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 6, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 7, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 8, '65-76 Kit', 1)
UI = SM.GetUnitInterface(unit.unitName)
# Torpedo reloads.
SM.AddToUnitMagazine("K-148 Krasnodar", 'SET-65M', 8)
SM.AddToUnitMagazine("K-148 Krasnodar", '65-76 Kit', 8)
# Sensors 0 and 4 switched off — presumably emission control while on
# patrol; verify sensor indices against the platform database.
UI.SetSensorState(0, 0)
UI.SetSensorState(4, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('MissileWarning', 0.000000, 0)
# Waypoint coordinates here are in radians (no 57.296 conversion applied).
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.390969, 1.104553, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.370938, 1.110084, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.388578, 1.110532, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# --- Russian HQ Reykjavik: static command bunker (alliance 2), no weapons ---
unit = SM.GetDefaultUnit()
unit.className = 'Army HQ Bunker'
unit.unitName = "Russian HQ Reykjavik"
unit.SetPosition(-21.565616, 64.095586, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- S-200 (1): long-range SAM site defending Reykjavik (alliance 2) ---
unit = SM.GetDefaultUnit()
unit.className = 'S-200D Dubna'
unit.unitName = "S-200 (1)"
unit.SetPosition(-21.578450, 64.102863, 0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
# Six single-rail launchers plus 12 magazine reloads.
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-880M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-880M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-880M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-880M', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, 'V-880M', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, 'V-880M', 1)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-200 (1)", 'V-880M', 12)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
# --- ZSU-23 (6) and (5): Shilka self-propelled AAA near Reykjavik ---
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4V Shilka'
unit.unitName = "ZSU-23 (6)"
unit.SetPosition(-21.537025, 64.066136, 183.7)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4V Shilka'
unit.unitName = "ZSU-23 (5)"
unit.SetPosition(-21.614203, 64.065677, 0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
# --- Static infrastructure targets at Reykjavik: fuel tanks and ammo dump ---
unit = SM.GetDefaultUnit()
unit.className = 'Fuel Tanks'
unit.unitName = "Fuel Tanks Reykjavik"
unit.SetPosition(-21.551120, 64.097247, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Ammunition Bunker'
unit.unitName = "Ammo Dump Reykjavik"
unit.SetPosition(-21.604806, 64.096789, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- S-125 Pechora medium-range SAM ring around Reykjavik: sites 3,2,4,1 ---
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-M'
unit.unitName = "S-125 (3)"
unit.SetPosition(-21.575242, 64.116728, 549.6)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601PD', 4)
UI = SM.GetUnitInterface(unit.unitName)
# NOTE(review): unlike sites (1), (2) and (4) below, no V-601PD magazine
# reloads are added for site (3) — confirm whether that is intentional.
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-M'
unit.unitName = "S-125 (2)"
unit.SetPosition(-21.497262, 64.086590, 320.6)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601PD', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-125 (2)", 'V-601PD', 16)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-M'
unit.unitName = "S-125 (4)"
unit.SetPosition(-21.641475, 64.083611, 180.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601PD', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-125 (4)", 'V-601PD', 16)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-M'
unit.unitName = "S-125 (1)"
unit.SetPosition(-21.572950, 64.060979, 0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601PD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601PD', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-125 (1)", 'V-601PD', 16)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
# --- Ground support elements: supply truck and self-propelled artillery ---
unit = SM.GetDefaultUnit()
unit.className = 'ZIL-157 Truck'
unit.unitName = "Truck"
unit.SetPosition(-22.181202, 63.937850, -0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Generic Mobile Artillery 155mm'
unit.unitName = "MA 155mm SP (1)"
unit.SetPosition(-22.221595, 63.939970, 0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '12.7mm B-32 APi', 17)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Gen155ART', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- T-80 MBT platoon (5 tanks) around Keflavik; no AI tasks assigned ---
unit = SM.GetDefaultUnit()
unit.className = 'T-80 MBT'
unit.unitName = "T-80 MBT (5)"
unit.SetPosition(-22.175186, 63.930918, 784.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Gen105AT', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'T-80 MBT'
unit.unitName = "T-80 MBT (4)"
unit.SetPosition(-22.224632, 63.931261, 550.2)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Gen105AT', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'T-80 MBT'
unit.unitName = "T-80 MBT (3)"
unit.SetPosition(-22.201141, 63.943583, 384.5)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Gen105AT', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'T-80 MBT'
unit.unitName = "T-80 MBT (2)"
unit.SetPosition(-22.199937, 63.919800, 201.9)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Gen105AT', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'T-80 MBT'
unit.unitName = "T-80 MBT (1)"
unit.SetPosition(-22.202458, 63.932006, 0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Gen105AT', 40)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- Keflavik-area air defence and command units ---
# ZSU-23-4M4 Biryusa AAA (gun + Strela-2 MANPADS), HQ bunker, comms station.
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4M4 Biryusa'
unit.unitName = "ZSU-23 (4)"
unit.SetPosition(-22.303700, 63.914130, 0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M32 Strela 2', 4)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Army HQ Bunker'
unit.unitName = "Russian HQ Kevlavik"
unit.SetPosition(-22.319628, 63.915562, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4M4 Biryusa'
unit.unitName = "ZSU-23 (3)"
unit.SetPosition(-22.368387, 63.860787, 121.9)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M32 Strela 2', 4)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4M4 Biryusa'
unit.unitName = "ZSU-23 (2)"
unit.SetPosition(-21.992469, 63.843713, 81.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M32 Strela 2', 4)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'ZSU-23-4M4 Biryusa'
unit.unitName = "ZSU-23 (1)"
unit.SetPosition(-22.336588, 63.884737, -0.0)
unit.heading = 90.00
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '23mm OFZ', 25)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M32 Strela 2', 4)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Communications Station'
unit.unitName = "Communications Stations"
unit.SetPosition(-22.293043, 63.916135, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- SSC-3 coastal anti-ship missile batteries (SSC-5, SSC-4, SSC-3) ---
# Each battery: six launchers of six P-20K rounds plus 36 magazine reloads.
unit = SM.GetDefaultUnit()
unit.className = 'SSC-3(36)'
unit.unitName = "SSC-5"
unit.SetPosition(-21.976541, 63.846406, 117.7)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 1, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 2, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 3, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 4, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 5, 'P-20K', 6)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SSC-5", 'P-20K', 36)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'SSC-3(36)'
unit.unitName = "SSC-4"
unit.SetPosition(-21.623370, 63.844172, 117.7)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 1, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 2, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 3, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 4, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 5, 'P-20K', 6)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SSC-4", 'P-20K', 36)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'SSC-3(36)'
unit.unitName = "SSC-3"
unit.SetPosition(-22.348792, 63.866231, -0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 1, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 2, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 3, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 4, 'P-20K', 6)
SM.SetUnitLauncherItem(unit.unitName, 5, 'P-20K', 6)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SSC-3", 'P-20K', 36)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
# --- Static infrastructure targets at Keflavik: fuel tanks and ammo dump ---
unit = SM.GetDefaultUnit()
unit.className = 'Fuel Tanks'
unit.unitName = "Fuel Tanks"
unit.SetPosition(-22.356527, 63.920318, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Ammunition Bunker'
unit.unitName = "Ammo Dump"
unit.SetPosition(-22.338593, 63.921750, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
BB = UI.GetBlackboardInterface()
# --- SA-10 Site 2: S-300PMU long-range SAM battery (alliance 2) ---
unit = SM.GetDefaultUnit()
unit.className = 'S-300PMU'
unit.unitName = "SA-10 Site 2"
unit.SetPosition(-21.582002, 63.940772, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
# Four 4-round launchers plus 70 magazine reloads.
SM.SetUnitLauncherItem(unit.unitName, 0, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, '5V55RUD', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SA-10 Site 2", '5V55RUD', 70)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
# --- Reykjavik airstrip (alliance 2): hosts the MiG-31/Su-27/A-50 wings ---
unit = SM.GetDefaultUnit()
unit.className = 'Airstrip'
unit.unitName = "Reykjavik"
unit.SetPosition(-21.578507, 64.086705, -0.0)
unit.heading = 270.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
# Base magazines: fuel, tanks, gun ammo, countermeasures, and the full
# air-to-air / anti-radiation missile stock for the resident squadrons.
SM.AddToUnitMagazine("Reykjavik", 'Fuel', 1000000)
SM.AddToUnitMagazine("Reykjavik", '2000 liter tank', 500)
SM.AddToUnitMagazine("Reykjavik", '23mm AM-23', 400)
SM.AddToUnitMagazine("Reykjavik", 'Chaff-1', 400)
SM.AddToUnitMagazine("Reykjavik", 'Flare-1', 400)
SM.AddToUnitMagazine("Reykjavik", 'R-33', 200)
SM.AddToUnitMagazine("Reykjavik", 'R-40T', 200)
SM.AddToUnitMagazine("Reykjavik", 'R-60', 200)
SM.AddToUnitMagazine("Reykjavik", 'R-27R', 256)
SM.AddToUnitMagazine("Reykjavik", 'R-73M', 100)
SM.AddToUnitMagazine("Reykjavik", 'Kh-31P', 726)
SM.AddToUnitMagazine("Reykjavik", 'R-77', 300)
SM.AddToUnitMagazine("Reykjavik", 'Kh-29T', 300)
# Sensor 0 switched on; base automatically engages hostile aircraft and
# turns its aircraft around.
UI.SetSensorState(0, 1)
UI.AddTask('EngageAllAir', 2.000000, 0)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Station the MiG-31 interceptor squadron (Foxhound-1 .. Foxhound-24) on the
# Reykjavik flight deck, one airframe per flight, all with the same
# intercept loadout.  The generated script repeated the Add/SetLoadout pair
# 24 times; the loop issues the identical call sequence without duplication.
FOXHOUND_LOADOUT = ('6 23mm AM-23;4 R-33;4 R-33;2 2000 liter tank;'
                    '25 Flare-1;25 Chaff-1;')
for flight_num in range(1, 25):
    flight_name = 'Foxhound-%d' % flight_num
    SM.AddUnitToFlightDeck('Reykjavik', 'MiG-31', flight_name, 1)
    SM.SetFlightDeckUnitLoadout('Reykjavik', flight_name, FOXHOUND_LOADOUT)
# Station the Su-27 squadron (Curtain-1 .. Curtain-27) on the Reykjavik
# flight deck.  Flights 1-4 embark two airframes, the rest one; flights
# 1-18 carry the air-to-air loadout, flights 19-27 the Kh-31P
# anti-radiation loadout.  Per-flight call order (Add, then SetLoadout)
# matches the original generated script exactly.
CURTAIN_AAM_LOADOUT = '4 R-27R;2 R-77;4 R-27R;25 Flare-1;25 Chaff-1;'
CURTAIN_SEAD_LOADOUT = '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;'
for flight_num in range(1, 28):
    flight_name = 'Curtain-%d' % flight_num
    quantity = 2 if flight_num <= 4 else 1
    loadout = CURTAIN_AAM_LOADOUT if flight_num <= 18 else CURTAIN_SEAD_LOADOUT
    SM.AddUnitToFlightDeck('Reykjavik', 'Su-27', flight_name, quantity)
    SM.SetFlightDeckUnitLoadout('Reykjavik', flight_name, loadout)
SM.AddUnitToFlightDeck('Reykjavik', 'A-50', 'AEW Reykjavik-1', 1)
SM.SetFlightDeckUnitLoadout('Reykjavik', 'AEW Reykjavik-1', '')
SM.AddUnitToFlightDeck('Reykjavik', 'A-50', 'AEW Reykjavik-2', 1)
SM.SetFlightDeckUnitLoadout('Reykjavik', 'AEW Reykjavik-2', '')
SM.AddUnitToFlightDeck('Reykjavik', 'A-50', 'AEW Reykjavik-3', 1)
SM.SetFlightDeckUnitLoadout('Reykjavik', 'AEW Reykjavik-3', '')
# Reykjavik AEW patrol mission: all three A-50s orbit a patrol box anchored
# on the base, with a form-up waypoint followed by the patrol waypoint.
FP = UI.GetFlightPortInfo()
base_track = UI.GetTrackById(UI.GetPlatformId())
mission_id = FP.AddGenericMission()
for n in range(1, 4):
    FP.AddAircraftToMission(mission_id, 'AEW Reykjavik-%d' % n)
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 1)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '0.0011194,-0.0011487,0.0005687,-0.0009476,0.0005604,0.0003021,0.0011687,0.0004100,')
FP.SetMissionPatrolAnchor(mission_id, 'Reykjavik', 2)
FP.AddMissionWaypointAdvanced(mission_id, -0.3758280, 1.1189780, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3771100, 1.1188869, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
# Large CAP mission: Curtain-1..18 plus Foxhound-1..12 patrol in waves of 6.
mission_id = FP.AddGenericMission()
cap_roster = (['Curtain-%d' % n for n in range(1, 19)] +
              ['Foxhound-%d' % n for n in range(1, 13)])
for callsign in cap_roster:
    FP.AddAircraftToMission(mission_id, callsign)
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 6)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '-0.3846892,1.1138377,-0.3695370,1.1137897,-0.3693123,1.1122327,-0.3850102,1.1125697,')
FP.AddMissionWaypointAdvanced(mission_id, -0.3756160, 1.1195240, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3778050, 1.1141660, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
# Secondary CAP mission: Foxhound-13..24 patrol a tighter box in waves of 4.
mission_id = FP.AddGenericMission()
for n in range(13, 25):
    FP.AddAircraftToMission(mission_id, 'Foxhound-%d' % n)
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '-0.3802805,1.1183320,-0.3722472,1.1166679,-0.3738498,1.1163906,-0.3800956,1.1175718,')
FP.AddMissionWaypointAdvanced(mission_id, -0.3756160, 1.1195240, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3768290, 1.1176130, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
# Anti-surface standby mission: the Kh-31P-armed Curtain-19..27 launch as
# one wave of 9 when tasked (mission type 'Standby-ASuW').
mission_id = FP.AddGenericMission()
for n in range(19, 28):
    FP.AddAircraftToMission(mission_id, 'Curtain-%d' % n)
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 9)
FP.SetMissionType(mission_id, 'Standby-ASuW')
# Airborne Tu-22ME strike flight "Shipwreck 2": four bombers with Kh-22
# missiles, each flying its own looped nav route back toward Kevlavik.
# Tuple layout: (name, lon, lat, alt_m, heading, speed,
#                tube-0 weapon, tube-1 weapon, nav waypoints, stat lat, stat lon)
shipwreck2_data = [
    ("Shipwreck 2-17", -22.330991, 63.902230, 10.0, 164.16, 485.6,
     'Kh-22MP', 'Kh-22M',
     [(-0.429478, 1.050304), (-0.300895, 1.067299)],
     '1.066276', '-0.365281'),
    ("Shipwreck 2-18", -22.332575, 63.902033, 10.0, 165.82, 485.8,
     'Kh-22M', 'Kh-22MP',
     [(-0.415181, 1.089890), (-0.332888, 1.086096), (-0.370116, 1.097985)],
     '1.067410', '-0.361880'),
    ("Shipwreck 2-19", -22.354776, 63.903298, 10.0, 205.47, 485.5,
     'Kh-22M', 'Kh-22MP',
     [(-0.405384, 1.097732), (-0.332398, 1.098238)],
     '1.051256', '-0.427064'),
    ("Shipwreck 2-20", -22.315363, 63.906996, 3337.8, 129.48, 485.8,
     'Kh-22M', 'Kh-22MP',
     [(-0.425468, 1.095961), (-0.331908, 1.094696)],
     '1.068260', '-0.287627'),
]
for (name, lon, lat, alt, hdg, spd, tube0, tube1, wps, slat, slon) in shipwreck2_data:
    unit = SM.GetDefaultUnit()
    unit.className = 'Tu-22ME'
    unit.unitName = name
    unit.SetPosition(lon, lat, alt)
    unit.heading = hdg
    unit.speed = spd
    SM.AddUnitToAlliance(unit, 2)
    SM.SetUnitLauncherItem(unit.unitName, 0, tube0, 2)
    SM.SetUnitLauncherItem(unit.unitName, 1, tube1, 1)
    SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
    SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
    UI = SM.GetUnitInterface(unit.unitName)
    UI.AddTask('AirEvade', 3.000000, 3)
    UI.AddTask('EngageAll', 2.000000, 0)
    UI.AddTask('JetTakeoff', 2.000000, 0)
    UI.AddTask('Nav', 1.000000, 0)
    for wp_lon, wp_lat in wps:
        UI.AddNavWaypointAdvanced(wp_lon, wp_lat, 0.000000, 0.000000)
    UI.SetNavLoopState(1)
    UI.AddTask('PatrolCircle', 1.000000, 0)
    UI.AddTask('RTB', 2.000000, 3)
    BB = UI.GetBlackboardInterface()
    BB.Write('Home', 'Kevlavik')
    BB.Write('StationLatitude', slat)
    BB.Write('StationLongitude', slon)
    UI.SetThrottle(2.000000)
# MiG-29 escort "Shadow-1": standard air-to-air fit, looped two-point
# nav route, then patrol on station.
unit = SM.GetDefaultUnit()
unit.className = 'MiG-29'
unit.unitName = "Shadow-1"
unit.SetPosition(-22.326439, 63.901946, 10.0)
unit.heading = 158.81
unit.speed = 562.1
SM.AddUnitToAlliance(unit, 2)
# (launcher index, stores item, quantity)
for station, item, count in ((0, '1520 Liter Tank', 1), (1, 'R-27R', 2),
                             (2, 'R-73M', 2), (3, 'R-73M', 2),
                             (4, '30mm NR-30 HEI', 20), (5, 'Chaff-1', 30),
                             (6, 'Flare-1', 30)):
    SM.SetUnitLauncherItem(unit.unitName, station, item, count)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.408323, 1.098491, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.369626, 1.098238, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
BB.Write('StationLatitude', '1.077329')
BB.Write('StationLongitude', '-0.381719')
UI.SetThrottle(2.000000)
# Lone Tu-22ME "Shipwreck 1-19": Kh-22 fit, looped two-point nav route.
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-19"
unit.SetPosition(-22.358976, 63.904503, 10.0)
unit.heading = 214.75
unit.speed = 485.5
SM.AddUnitToAlliance(unit, 2)
# (launcher index, stores item, quantity)
for station, item, count in ((0, 'Kh-22M', 2), (1, 'Kh-22MP', 1),
                             (2, 'Flare-1', 25), (3, 'Chaff-1', 25)):
    SM.SetUnitLauncherItem(unit.unitName, station, item, count)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.412242, 1.093938, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.337786, 1.095961, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
BB.Write('StationLatitude', '1.047572')
BB.Write('StationLongitude', '-0.437267')
UI.SetThrottle(2.000000)
# MiG-29 CAP flight "Shadow" (4-14, gaps are on the flight deck): identical
# air-to-air fit and tasking; only spawn point, heading, speed and patrol
# station differ. All spawn at 10 m altitude.
# Tuple layout: (name, lon, lat, heading, speed, station lat, station lon)
shadow_cap = [
    ("Shadow-4", -22.320282, 63.903540, 145.01, 561.6, '1.072228', '-0.333540'),
    ("Shadow-5", -22.309154, 63.909874, 114.64, 562.1, '1.057491', '-0.287627'),
    ("Shadow-6", -22.322147, 63.902964, 150.39, 562.1, '1.071094', '-0.359613'),
    ("Shadow-7", -22.332528, 63.901040, 166.69, 562.2, '1.067410', '-0.363581'),
    ("Shadow-9", -22.358212, 63.903078, 210.36, 561.5, '1.074779', '-0.420263'),
    ("Shadow-10", -22.311060, 63.908172, 122.07, 561.1, '1.055790', '-0.246250'),
    ("Shadow-12", -22.349090, 63.901255, 193.04, 561.1, '1.053523', '-0.398723'),
    ("Shadow-13", -22.328796, 63.901551, 161.06, 561.2, '1.067977', '-0.359046'),
    ("Shadow-14", -22.342112, 63.900723, 180.75, 561.3, '1.102269', '-0.378885'),
]
for (name, lon, lat, hdg, spd, slat, slon) in shadow_cap:
    unit = SM.GetDefaultUnit()
    unit.className = 'MiG-29'
    unit.unitName = name
    unit.SetPosition(lon, lat, 10.0)
    unit.heading = hdg
    unit.speed = spd
    SM.AddUnitToAlliance(unit, 2)
    SM.SetUnitLauncherItem(unit.unitName, 0, '1520 Liter Tank', 1)
    SM.SetUnitLauncherItem(unit.unitName, 1, 'R-27R', 2)
    SM.SetUnitLauncherItem(unit.unitName, 2, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 3, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 4, '30mm NR-30 HEI', 20)
    SM.SetUnitLauncherItem(unit.unitName, 5, 'Chaff-1', 30)
    SM.SetUnitLauncherItem(unit.unitName, 6, 'Flare-1', 30)
    UI = SM.GetUnitInterface(unit.unitName)
    UI.AddTask('AirEvade', 3.000000, 3)
    UI.AddTask('EngageAll', 2.000000, 0)
    UI.AddTask('JetTakeoff', 2.000000, 0)
    UI.AddTask('PatrolCircle', 1.000000, 0)
    UI.AddTask('RTB', 2.000000, 3)
    BB = UI.GetBlackboardInterface()
    BB.Write('Home', 'Kevlavik')
    BB.Write('StationLatitude', slat)
    BB.Write('StationLongitude', slon)
    UI.SetThrottle(2.000000)
# Already-airborne MiG-29 pickets "Fulcrum 8/7/6/5": same fit and tasking
# as the Shadow CAP but no takeoff task and no 'Home' blackboard entry
# (they RTB wherever available), cruising at low throttle.
# Tuple layout: (name, lon, lat, alt_m, heading, speed, station lat, station lon)
fulcrum_units = [
    ("Fulcrum 8", -22.686536, 62.576364, 4000.0, 94.57, 237.2, '1.090046', '-0.388112'),
    ("Fulcrum 7", -23.156456, 62.607014, 4000.0, 94.60, 237.2, '1.088441', '-0.413788'),
    ("Fulcrum 6", -20.581955, 63.107612, 50.0, 94.60, 230.1, '1.097535', '-0.357800'),
    ("Fulcrum 5", -21.154027, 63.148470, 50.0, 94.53, 230.1, '1.099139', '-0.374561'),
]
for (name, lon, lat, alt, hdg, spd, slat, slon) in fulcrum_units:
    unit = SM.GetDefaultUnit()
    unit.className = 'MiG-29'
    unit.unitName = name
    unit.SetPosition(lon, lat, alt)
    unit.heading = hdg
    unit.speed = spd
    SM.AddUnitToAlliance(unit, 2)
    SM.SetUnitLauncherItem(unit.unitName, 0, '1520 Liter Tank', 1)
    SM.SetUnitLauncherItem(unit.unitName, 1, 'R-27R', 2)
    SM.SetUnitLauncherItem(unit.unitName, 2, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 3, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 4, '30mm NR-30 HEI', 20)
    SM.SetUnitLauncherItem(unit.unitName, 5, 'Chaff-1', 30)
    SM.SetUnitLauncherItem(unit.unitName, 6, 'Flare-1', 30)
    UI = SM.GetUnitInterface(unit.unitName)
    UI.AddTask('AirEvade', 3.000000, 3)
    UI.AddTask('EngageAll', 2.000000, 0)
    UI.AddTask('PatrolCircle', 1.000000, 0)
    UI.AddTask('RTB', 2.000000, 3)
    BB = UI.GetBlackboardInterface()
    BB.Write('StationLatitude', slat)
    BB.Write('StationLongitude', slon)
    UI.SetThrottle(0.189654)
# Airborne A-50 AEW orbits "Sharp Eye 2/1" at 10 km, each on a looped
# two-point racetrack.
# Tuple layout: (name, lon, lat, heading, nav waypoints, station lat, station lon)
sharp_eye_units = [
    ("Sharp Eye 2", -20.989851, 63.693939, 94.60,
     [(-0.372247, 1.109279), (-0.362767, 1.109304)], '1.109659', '-0.375987'),
    ("Sharp Eye 1", -22.999329, 63.536784, 94.50,
     [(-0.413856, 1.108093), (-0.390344, 1.108240)], '1.102170', '-0.410579'),
]
for (name, lon, lat, hdg, wps, slat, slon) in sharp_eye_units:
    unit = SM.GetDefaultUnit()
    unit.className = 'A-50'
    unit.unitName = name
    unit.SetPosition(lon, lat, 10000.0)
    unit.heading = hdg
    unit.speed = 100.0
    SM.AddUnitToAlliance(unit, 2)
    UI = SM.GetUnitInterface(unit.unitName)
    UI.AddTask('AirEvade', 3.000000, 3)
    UI.AddTask('Nav', 1.000000, 0)
    for wp_lon, wp_lat in wps:
        UI.AddNavWaypointAdvanced(wp_lon, wp_lat, 0.000000, 0.000000)
    UI.SetNavLoopState(1)
    UI.AddTask('PatrolCircle', 1.000000, 0)
    UI.AddTask('RTB', 2.000000, 3)
    BB = UI.GetBlackboardInterface()
    BB.Write('StationLatitude', slat)
    BB.Write('StationLongitude', slon)
# Second group of airborne MiG-29 pickets "Fulcrum 4/3/2/1": all at 4000 m
# and 237.2 m/s, same fit and low-throttle patrol tasking as Fulcrum 8-5.
# Tuple layout: (name, lon, lat, heading, station lat, station lon)
fulcrum_west = [
    ("Fulcrum 4", -20.985444, 62.633379, 94.50, '1.089154', '-0.362793'),
    ("Fulcrum 3", -21.480976, 62.663110, 94.57, '1.089511', '-0.375987'),
    ("Fulcrum 2", -22.869076, 61.751308, 94.53, '1.075425', '-0.391678'),
    ("Fulcrum 1", -23.265512, 61.741395, 94.53, '1.072572', '-0.405229'),
]
for (name, lon, lat, hdg, slat, slon) in fulcrum_west:
    unit = SM.GetDefaultUnit()
    unit.className = 'MiG-29'
    unit.unitName = name
    unit.SetPosition(lon, lat, 4000.0)
    unit.heading = hdg
    unit.speed = 237.2
    SM.AddUnitToAlliance(unit, 2)
    SM.SetUnitLauncherItem(unit.unitName, 0, '1520 Liter Tank', 1)
    SM.SetUnitLauncherItem(unit.unitName, 1, 'R-27R', 2)
    SM.SetUnitLauncherItem(unit.unitName, 2, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 3, 'R-73M', 2)
    SM.SetUnitLauncherItem(unit.unitName, 4, '30mm NR-30 HEI', 20)
    SM.SetUnitLauncherItem(unit.unitName, 5, 'Chaff-1', 30)
    SM.SetUnitLauncherItem(unit.unitName, 6, 'Flare-1', 30)
    UI = SM.GetUnitInterface(unit.unitName)
    UI.AddTask('AirEvade', 3.000000, 3)
    UI.AddTask('EngageAll', 2.000000, 0)
    UI.AddTask('PatrolCircle', 1.000000, 0)
    UI.AddTask('RTB', 2.000000, 3)
    BB = UI.GetBlackboardInterface()
    BB.Write('StationLatitude', slat)
    BB.Write('StationLongitude', slon)
    UI.SetThrottle(0.189654)
# Pr 670 Skat submarine K-313: four SET-65 tubes, two 53-65M tubes and a
# P-70 Ametist battery; starts submerged at 76.7 m with active sensors
# 0/2/4 switched off, running a three-leg nav route.
unit = SM.GetDefaultUnit()
unit.className = 'Pr 670 Skat'
unit.unitName = "K-313"
unit.SetPosition(-22.721245, 61.097742, -76.7)
unit.heading = 121.60
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
for tube in range(4):
    SM.SetUnitLauncherItem(unit.unitName, tube, 'SET-65', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, '53-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, '53-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 6, 'P-70 Ametist', 8)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("K-313", 'SET-65', 8)
SM.AddToUnitMagazine("K-313", '53-65M', 4)
for sensor in (0, 2, 4):
    UI.SetSensorState(sensor, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.353112, 1.068079, 0.000000, 10.000000)
UI.AddNavWaypointAdvanced(-0.421442, 1.072985, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.378554, 1.104787, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# Pr 877M Paltus submarine B-459: spawns at a randomized point inside a
# candidate box. NOTE: the two candidate boxes are identical, so the
# int(2*random()) pick has no effect; the three random() draws are kept in
# the original order to preserve any seeded RNG stream.
unit = SM.GetDefaultUnit()
unit.className = 'Pr 877M Paltus'
unit.unitName = "B-459"
boxes = [[-20.9063, -20.8663, 60.3468, 60.3868],
         [-20.9063, -20.8663, 60.3468, 60.3868]]
box = boxes[int(2*random())]
lon_deg = box[0] + random()*(box[1]-box[0])
lat_deg = box[2] + random()*(box[3]-box[2])
unit.SetPosition(lon_deg, lat_deg, -310.2)
unit.heading = 242.25
unit.speed = 6.3
SM.AddUnitToAlliance(unit, 2)
for tube in range(4):
    SM.SetUnitLauncherItem(unit.unitName, tube, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, '53-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, '53-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 6, 'Igla-M SAM', 8)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("B-459", 'SET-65M', 12)
SM.AddToUnitMagazine("B-459", '53-65M', 6)
SM.AddToUnitMagazine("B-459", 'Igla-M SAM', 4)
for sensor in (0, 4):
    UI.SetSensorState(sensor, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.377827, 1.050087, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.365106, 1.075711, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.387640, 1.109512, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# ---------------------------------------------------------------------------
# Airstrip "Kevlavik" (Iceland): the side-2 land air base.  Stocks the
# magazine that feeds the flight-deck aircraft added just below.
# ---------------------------------------------------------------------------
unit = SM.GetDefaultUnit()
unit.className = 'Airstrip'
unit.unitName = "Kevlavik"
unit.SetPosition(-22.339453, 63.910749, -0.0)
unit.heading = 168.97  # runway orientation, presumably -- confirm
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
# Base magazine: fuel, air-to-surface ordnance, countermeasures, AAMs.
SM.AddToUnitMagazine("Kevlavik", 'Fuel', 1000000)
SM.AddToUnitMagazine("Kevlavik", 'Kh-22MP', 610)
SM.AddToUnitMagazine("Kevlavik", 'Chaff-1', 400)
SM.AddToUnitMagazine("Kevlavik", 'FAB-500', 400)
SM.AddToUnitMagazine("Kevlavik", 'Flare-1', 400)
SM.AddToUnitMagazine("Kevlavik", 'Kh-31P', 407)
SM.AddToUnitMagazine("Kevlavik", '1520 Liter Tank', 300)
SM.AddToUnitMagazine("Kevlavik", 'FAB-100', 300)
SM.AddToUnitMagazine("Kevlavik", 'FAB-250', 300)
SM.AddToUnitMagazine("Kevlavik", 'Kh-22M', 300)
SM.AddToUnitMagazine("Kevlavik", 'R-27R', 884)
SM.AddToUnitMagazine("Kevlavik", 'R-73', 292)
SM.AddToUnitMagazine("Kevlavik", 'R-77', 200)
UI.SetSensorState(0, 1)  # base sensor active from the start
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# ---------------------------------------------------------------------------
# Kevlavik flight deck: aircraft parked at the base with per-aircraft
# loadouts.  AddUnitToFlightDeck's last argument is a state/readiness code
# (1 or 2 here) -- exact semantics are engine-defined; confirm before edits.
# ---------------------------------------------------------------------------
# MiG-29 "Shadow" fighters (AAM loadout with drop tank).
SM.AddUnitToFlightDeck('Kevlavik', 'MiG-29', 'Shadow-3', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shadow-3', '1 1520 Liter Tank;2 R-27R;2 R-73;2 R-73M;20 30mm NR-30 HEI;25 Chaff-1;25 Flare-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'MiG-29', 'Shadow-2', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shadow-2', '1 1520 Liter Tank;2 R-27R;2 R-73;2 R-73M;20 30mm NR-30 HEI;25 Chaff-1;25 Flare-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'MiG-29', 'Shadow-8', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shadow-8', '1 1520 Liter Tank;2 R-27R;2 R-73;2 R-73M;20 30mm NR-30 HEI;25 Chaff-1;25 Flare-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'MiG-29', 'Shadow-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shadow-11', '1 1520 Liter Tank;2 R-27R;2 R-73;2 R-73M;20 30mm NR-30 HEI;25 Chaff-1;25 Flare-1;')
# Tu-22ME "Shipwreck" bombers (anti-ship/anti-radar Kh-22MP loadout).
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-15', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-15', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-6', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-6', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-7', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-7', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-8', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-8', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-9', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-9', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-10', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-10', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-11', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-12', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-12', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-13', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-13', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-14', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-14', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-15', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-15', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-16', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-16', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-17', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-17', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-18', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-18', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-16', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-16', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 1-20', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 1-20', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-1', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-1', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-2', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-2', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-3', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-3', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-4', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-4', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-5', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-5', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-6', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-6', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-7', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-7', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-8', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-8', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-9', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-9', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-10', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-10', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-11', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-12', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-12', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-13', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-13', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Tu-22ME', 'Shipwreck 2-14', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Shipwreck 2-14', '2 Kh-22MP;1 Kh-22MP;25 Flare-1;25 Chaff-1;')
# Su-27 "Wall" fighters.  NOTE(review): Wall-1 carries a pure AAM loadout
# (R-27R) while Wall-2..24 carry the Kh-31P anti-radiation loadout --
# probably intentional (flight lead as escort), but worth confirming.
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-1', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-1', '4 R-27R;2 R-77;4 R-27R;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-2', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-2', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-3', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-3', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-4', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-4', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-5', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-5', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-6', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-6', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-7', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-7', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-8', 2)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-8', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-9', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-9', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-10', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-10', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-11', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-11', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-12', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-12', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-13', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-13', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-14', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-14', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-15', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-15', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-16', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-16', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-17', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-17', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-18', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-18', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-19', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-19', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-20', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-20', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-21', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-21', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-22', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-22', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-23', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-23', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
SM.AddUnitToFlightDeck('Kevlavik', 'Su-27', 'Wall-24', 1)
SM.SetFlightDeckUnitLoadout('Kevlavik', 'Wall-24', '3 Kh-31P;2 R-77;4 Kh-31P;25 Flare-1;25 Chaff-1;')
# ---------------------------------------------------------------------------
# Kevlavik flight-port missions: two 12-ship Su-27 patrol missions flown in
# waves of 4, and two Tu-22ME standby anti-surface (ASuW) strike packages.
# Launch-time strings like '13:00:08+0m+R2.0' are engine-formatted
# (base time + offset + randomization) -- confirm format before editing.
# ---------------------------------------------------------------------------
FP = UI.GetFlightPortInfo()
base_track = UI.GetTrackById(UI.GetPlatformId())
# Mission 1: Wall-1..12 patrol (area given as absolute lon/lat radians).
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Wall-1')
FP.AddAircraftToMission(mission_id, 'Wall-2')
FP.AddAircraftToMission(mission_id, 'Wall-3')
FP.AddAircraftToMission(mission_id, 'Wall-4')
FP.AddAircraftToMission(mission_id, 'Wall-5')
FP.AddAircraftToMission(mission_id, 'Wall-6')
FP.AddAircraftToMission(mission_id, 'Wall-7')
FP.AddAircraftToMission(mission_id, 'Wall-8')
FP.AddAircraftToMission(mission_id, 'Wall-9')
FP.AddAircraftToMission(mission_id, 'Wall-10')
FP.AddAircraftToMission(mission_id, 'Wall-11')
FP.AddAircraftToMission(mission_id, 'Wall-12')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '-0.3966131,1.1144481,-0.3817177,1.1139344,-0.3817177,1.1113020,-0.3957142,1.1115075,')
FP.AddMissionWaypointAdvanced(mission_id, -0.3888970, 1.1164531, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3895190, 1.1136150, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
# Mission 2: Wall-13..24 patrol; area is anchored relative to Kevlavik.
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Wall-13')
FP.AddAircraftToMission(mission_id, 'Wall-14')
FP.AddAircraftToMission(mission_id, 'Wall-15')
FP.AddAircraftToMission(mission_id, 'Wall-16')
FP.AddAircraftToMission(mission_id, 'Wall-17')
FP.AddAircraftToMission(mission_id, 'Wall-18')
FP.AddAircraftToMission(mission_id, 'Wall-19')
FP.AddAircraftToMission(mission_id, 'Wall-20')
FP.AddAircraftToMission(mission_id, 'Wall-21')
FP.AddAircraftToMission(mission_id, 'Wall-22')
FP.AddAircraftToMission(mission_id, 'Wall-23')
FP.AddAircraftToMission(mission_id, 'Wall-24')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, '')
FP.SetMissionPatrolArea(mission_id, '0.0035024,0.0001601,0.0035684,-0.0008994,-0.0032615,0.0005575,-0.0024091,0.0014547,')
FP.SetMissionPatrolAnchor(mission_id, 'Kevlavik', 2)
FP.AddMissionWaypointAdvanced(mission_id, -0.3888970, 1.1164531, 2000.0, 200.0)
FP.SetMissionWaypointTasks(mission_id, 0, 'WaitForGroup,EngageAll')
FP.AddMissionWaypointAdvanced(mission_id, -0.3901000, 1.1150531, 3000.0, 300.0)
FP.SetMissionWaypointTasks(mission_id, 1, 'AirPatrolArea,EngageAll')
# Mission 3: first standby ASuW strike package (waves of 4).
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-15')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-6')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-7')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-8')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-9')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-10')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-11')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-12')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-13')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-14')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-15')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-16')
FP.SetMissionLaunchTime(mission_id, '13:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 4)
FP.SetMissionType(mission_id, 'Standby-ASuW')
# Mission 4: second standby ASuW strike package (earlier launch, waves of 8).
mission_id = FP.AddGenericMission()
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-16')
FP.AddAircraftToMission(mission_id, 'Shipwreck 1-20')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-1')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-2')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-3')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-4')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-5')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-6')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-7')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-8')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-9')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-10')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-11')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-12')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-13')
FP.AddAircraftToMission(mission_id, 'Shipwreck 2-14')
FP.SetMissionLaunchTime(mission_id, '12:00:08+0m+R2.0')
FP.SetMissionDatum(mission_id, 0.0000000, 0.0000000)
FP.SetMissionLandingTarget(mission_id, '')
FP.SetMissionWaveQuantity(mission_id, 8)
FP.SetMissionType(mission_id, 'Standby-ASuW')
# ---------------------------------------------------------------------------
# Twelve airborne Tu-22ME "Hammer" bombers in three geographic groups
# (headings 66.29, 312.62, 253.54).  All share the same template: Kh-22M
# loadout, AirEvade/EngageAll/PatrolCircle/RTB task stack, a blackboard
# patrol station (radians), and throttle 0.338669.  Only position, heading
# and station differ per unit.
# ---------------------------------------------------------------------------
# Hammer 1
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 1"
unit.SetPosition(-32.584050, 55.516823, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.030556')
BB.Write('StationLongitude', '-0.392433')
UI.SetThrottle(0.338669)
# Hammer 2
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 2"
unit.SetPosition(-32.168371, 55.198724, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.028579')
BB.Write('StationLongitude', '-0.377806')
UI.SetThrottle(0.338669)
# Hammer 4
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 4"
unit.SetPosition(-32.128469, 55.697659, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.036880')
BB.Write('StationLongitude', '-0.400791')
UI.SetThrottle(0.338669)
# Hammer 3
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 3"
unit.SetPosition(-31.673336, 55.422092, 14000.0)
unit.heading = 66.29
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.032137')
BB.Write('StationLongitude', '-0.404274')
UI.SetThrottle(0.338669)
# Hammer 7 -- second group, heading 312.62
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 7"
unit.SetPosition(-16.107389, 55.375940, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.030348')
BB.Write('StationLongitude', '-0.333093')
UI.SetThrottle(0.338669)
# Hammer 5
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 5"
unit.SetPosition(-16.225302, 54.909432, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.026914')
BB.Write('StationLongitude', '-0.337174')
UI.SetThrottle(0.338669)
# Hammer 8
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 8"
unit.SetPosition(-15.432983, 55.408261, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.033322')
BB.Write('StationLongitude', '-0.301887')
UI.SetThrottle(0.338669)
# Hammer 6
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 6"
unit.SetPosition(-15.233573, 54.814413, 14000.0)
unit.heading = 312.62
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.025417')
BB.Write('StationLongitude', '-0.294922')
UI.SetThrottle(0.338669)
# Hammer 11 -- third group, heading 253.54
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 11"
unit.SetPosition(-9.785666, 64.295426, 14000.0)
unit.heading = 253.54
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.066212')
BB.Write('StationLongitude', '-0.358760')
UI.SetThrottle(0.338669)
# Hammer 9
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 9"
unit.SetPosition(-9.529320, 64.052962, 14000.0)
unit.heading = 253.54
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.063091')
BB.Write('StationLongitude', '-0.357678')
UI.SetThrottle(0.338669)
# Hammer 10
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 10"
unit.SetPosition(-9.056117, 63.989523, 14000.0)
unit.heading = 253.54
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.061956')
BB.Write('StationLongitude', '-0.340381')
UI.SetThrottle(0.338669)
# Hammer 12
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Hammer 12"
unit.SetPosition(-9.117420, 64.294177, 14000.0)
unit.heading = 253.54
unit.speed = 877.7
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('PatrolCircle', 1.000000, 0)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('StationLatitude', '1.067347')
BB.Write('StationLongitude', '-0.342543')
UI.SetThrottle(0.338669)
# ---------------------------------------------------------------------------
# Five airborne Tu-22ME "Shipwreck 1-x" bombers climbing out of Kevlavik
# (positions are right at the base; 1-5 is still at 10 m with the
# JetTakeoff task pending).  Each flies a looping two-waypoint nav route
# and has 'Home' set to Kevlavik for the RTB task.
# ---------------------------------------------------------------------------
# Shipwreck 1-1
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-1"
unit.SetPosition(-22.333016, 63.902860, 2196.9)
unit.heading = 166.10
unit.speed = 467.6
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22MP', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.428529, 1.095929, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.407465, 1.097644, 0.000000, 0.000000)
UI.SetNavLoopState(1)  # repeat the route until retasked
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
UI.SetThrottle(2.000000)
# Shipwreck 1-2
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-2"
unit.SetPosition(-22.331662, 63.903010, 2200.2)
unit.heading = 163.35
unit.speed = 468.1
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22MP', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.333010, 1.095685, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.366809, 1.097399, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
UI.SetThrottle(2.000000)
# Shipwreck 1-3
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-3"
unit.SetPosition(-22.331827, 63.902989, 2200.2)
unit.heading = 163.69
unit.speed = 468.1
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22MP', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.401097, 1.097154, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.335459, 1.095685, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
UI.SetThrottle(2.000000)
# Shipwreck 1-4
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-4"
unit.SetPosition(-22.331335, 63.903067, 2201.7)
unit.heading = 162.66
unit.speed = 467.6
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22MP', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.424120, 1.092256, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.366319, 1.088827, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
UI.SetThrottle(2.000000)
# Shipwreck 1-5 -- still on/near the runway (altitude 10 m)
unit = SM.GetDefaultUnit()
unit.className = 'Tu-22ME'
unit.unitName = "Shipwreck 1-5"
unit.SetPosition(-22.333956, 63.901631, 10.0)
unit.heading = 168.97
unit.speed = 494.4
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'Kh-22M', 2)
SM.SetUnitLauncherItem(unit.unitName, 1, 'Kh-22MP', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'Flare-1', 25)
SM.SetUnitLauncherItem(unit.unitName, 3, 'Chaff-1', 25)
UI = SM.GetUnitInterface(unit.unitName)
UI.AddTask('AirEvade', 3.000000, 3)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('JetTakeoff', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.416772, 1.099113, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.334479, 1.094950, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('RTB', 2.000000, 3)
BB = UI.GetBlackboardInterface()
BB.Write('Home', 'Kevlavik')
UI.SetThrottle(2.000000)
# ---------------------------------------------------------------------------
# Submarine "K-284 Puma" (Pr 971 Shchuka-B / Akula class): slow patrol at
# 100 m on a looping three-leg route.
# ---------------------------------------------------------------------------
unit = SM.GetDefaultUnit()
unit.className = 'Pr 971 Shchuka-B'
unit.unitName = "K-284 Puma"
unit.SetPosition(-23.432720, 63.459222, -100.0)
unit.heading = 142.04
unit.speed = 3.0
SM.AddUnitToAlliance(unit, 2)
# Tubes: 4x SET-65M, 4x 65-76 Kit heavyweight torpedoes, plus Igla-M SAMs.
SM.SetUnitLauncherItem(unit.unitName, 0, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 6, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 7, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 8, 'Igla-M SAM', 18)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("K-284 Puma", 'SET-65M', 8)
SM.AddToUnitMagazine("K-284 Puma", '65-76 Kit', 8)
UI.SetSensorState(0, 0)
UI.SetSensorState(7, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.379901, 1.107412, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.361914, 1.106360, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.396739, 1.104638, 0.000000, 0.000000)
UI.SetNavLoopState(1)  # patrol loop
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# ---------------------------------------------------------------------------
# Submarine "K-239 Karp" (Pr 945 Barrakuda / Sierra class): randomized spawn
# inside a single box, deep (700 m) fast transit with RPK-7 ASW missiles.
# ---------------------------------------------------------------------------
unit = SM.GetDefaultUnit()
unit.className = 'Pr 945 Barrakuda'
unit.unitName = "K-239 Karp"
# One spawn box: [lon_min, lon_max, lat_min, lat_max] in degrees; the
# int(1*random()) index is always 0.
boxes = [[-18.4605, -18.4205, 62.6356, 62.6756]]
box = boxes[int(1*random())]
lon_deg = random()*(box[1]-box[0]) + box[0]
lat_deg = random()*(box[3]-box[2]) + box[2]
unit.SetPosition(lon_deg, lat_deg, -700.0)
unit.heading = 231.61
unit.speed = 27.2
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, 'RPK-7 Veter', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, 'RPK-7 Veter', 1)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("K-239 Karp", 'SET-65M', 6)
SM.AddToUnitMagazine("K-239 Karp", '65-76 Kit', 6)
SM.AddToUnitMagazine("K-239 Karp", 'RPK-7 Veter', 6)
UI.SetSensorState(0, 0)
UI.SetSensorState(5, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.367369, 1.068139, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.409465, 1.099591, 0.000000, 0.000000)
UI.SetNavLoopState(1)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
# ---------------------------------------------------------------------------
# Submarine "K-448" (Pr 671RTMK Shchuka / Victor III): fast transit north
# at 350 m on a two-leg, non-looping route.
# ---------------------------------------------------------------------------
unit = SM.GetDefaultUnit()
unit.className = 'Pr 671RTMK Shchuka'
unit.unitName = "K-448"
unit.SetPosition(-20.816359, 58.863220, -350.0)
unit.heading = 356.71
unit.speed = 22.5
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 2, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 3, 'SET-65M', 1)
SM.SetUnitLauncherItem(unit.unitName, 4, '65-76 Kit', 1)
SM.SetUnitLauncherItem(unit.unitName, 5, '65-76 Kit', 1)
UI = SM.GetUnitInterface(unit.unitName)
# NOTE(review): tubes 4-5 above hold '65-76 Kit', but the magazine stocks
# '53-65M' reloads and no '65-76 Kit' -- inconsistent with the otherwise
# matching launcher/magazine pairs elsewhere in this file (e.g. B-459,
# K-284 Puma).  Looks like a generator copy/paste slip; confirm against the
# scenario editor before changing the string.
SM.AddToUnitMagazine("K-448", 'SET-65M', 12)
SM.AddToUnitMagazine("K-448", '53-65M', 6)
UI.SetSensorState(6, 0)
UI.SetSensorState(7, 0)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.365433, 1.062333, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.382369, 1.107816, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Pr 956 Sarych'
unit.unitName = "DDG Sovremenny"
unit.SetPosition(-21.049097, 60.881672, 0.0)
unit.heading = -17.88
unit.speed = 16.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '9M38M1', 22)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M38M1', 22)
SM.SetUnitLauncherItem(unit.unitName, 2, '30mm OF-84 HE-FRAG AK-630M', 59)
SM.SetUnitLauncherItem(unit.unitName, 3, '30mm OF-84 HE-FRAG AK-630M', 59)
SM.SetUnitLauncherItem(unit.unitName, 4, '30mm OF-84 HE-FRAG AK-630M', 59)
SM.SetUnitLauncherItem(unit.unitName, 5, '30mm OF-84 HE-FRAG AK-630M', 59)
SM.SetUnitLauncherItem(unit.unitName, 6, '3M80M Moskit-M', 8)
SM.SetUnitLauncherItem(unit.unitName, 7, '130mm F-44 HE', 100)
SM.SetUnitLauncherItem(unit.unitName, 8, '130mm F-44 HE', 100)
SM.SetUnitLauncherItem(unit.unitName, 9, '53-65M', 2)
SM.SetUnitLauncherItem(unit.unitName, 10, '53-65M', 2)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("DDG Sovremenny", 'Fuel', 45900)
SM.AddToUnitMagazine("DDG Sovremenny", 'AT-1', 22)
SM.AddToUnitMagazine("DDG Sovremenny", 'DICASS (80) Sonobuoy', 135)
SM.AddToUnitMagazine("DDG Sovremenny", 'LOFAR (80) Sonobuoy', 135)
SM.AddToUnitMagazine("DDG Sovremenny", 'DIFAR (80) Sonobuoy', 378)
SM.AddToUnitMagazine("DDG Sovremenny", '30mm OF-84 HE-FRAG AK-630M', 944)
SM.AddToUnitMagazine("DDG Sovremenny", '130mm F-44 HE', 1000)
SM.AddToUnitMagazine("DDG Sovremenny", '53-65M', 12)
UI.AddTask('EngageAll', 2.000000, 0)
UI.AddTask('MissileWarning', 0.000000, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.376449, 1.079130, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.374741, 1.111940, 0.000000, 0.000000)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
SM.AddUnitToFlightDeck('DDG Sovremenny', 'Ka-27A', 'Sarych Ka-27 1', 1)
SM.SetFlightDeckUnitLoadout('DDG Sovremenny', 'Sarych Ka-27 1', '5 LOFAR (80) Sonobuoy;5 DICASS (80) Sonobuoy;14 DIFAR (80) Sonobuoy;2 AT-1;')
unit = SM.GetDefaultUnit()
unit.className = 'S-300PMU'
unit.unitName = "SA-10 Site 1"
unit.SetPosition(-22.553051, 63.875283, 1.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, '5V55RUD', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, '5V55RUD', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SA-10 Site 1", '5V55RUD', 72)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-2M'
unit.unitName = "S-125 Reykjavik (1)"
unit.SetPosition(-21.537312, 64.082465, -0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601P', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-125 Reykjavik (1)", 'V-601P', 16)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'S-125 Pechora-2M'
unit.unitName = "S-125 Reykjavik (2)"
unit.SetPosition(-21.566533, 64.045738, 204.5)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, 'V-601P', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, 'V-601P', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("S-125 Reykjavik (2)", 'V-601P', 16)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = '9K37 Buk'
unit.unitName = "SA-11 Kevlavik (1)"
unit.SetPosition(-22.342833, 63.933439, 0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
SM.SetUnitLauncherItem(unit.unitName, 0, '9M38', 4)
SM.SetUnitLauncherItem(unit.unitName, 1, '9M38', 4)
SM.SetUnitLauncherItem(unit.unitName, 2, '9M38', 4)
SM.SetUnitLauncherItem(unit.unitName, 3, '9M38', 4)
SM.SetUnitLauncherItem(unit.unitName, 4, '9M38', 4)
SM.SetUnitLauncherItem(unit.unitName, 5, '9M38', 4)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("SA-11 Kevlavik (1)", '9M38', 48)
UI.AddTask('EngageAll', 2.000000, 0)
BB = UI.GetBlackboardInterface()
unit = SM.GetDefaultUnit()
unit.className = 'Airstrip'
unit.unitName = "Kevlavik 2"
unit.SetPosition(-22.324155, 63.921693, 0.0)
unit.heading = 90.00
unit.speed = 0.0
SM.AddUnitToAlliance(unit, 2)
UI = SM.GetUnitInterface(unit.unitName)
SM.AddToUnitMagazine("Kevlavik 2", 'R-73', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'Kh-22M', 200)
SM.AddToUnitMagazine("Kevlavik 2", 'Kh-22MP', 200)
SM.AddToUnitMagazine("Kevlavik 2", 'Kh-29T', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-77', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'Kh-31P', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-73M', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-27R', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-60', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-40T', 100)
SM.AddToUnitMagazine("Kevlavik 2", 'R-33', 100)
UI.AddTask('RefuelAllAircraft', 3.000000, 3)
BB = UI.GetBlackboardInterface()
##############################
### Alliance 3 units
##############################
unit = SM.GetDefaultUnit()
unit.className = 'Generic submarine'
unit.unitName = "Swedish Sub"
unit.SetPosition(-25.241260, 62.169770, -100.0)
unit.heading = 88.49
unit.speed = 16.0
SM.AddUnitToAlliance(unit, 3)
SM.SetUnitLauncherItem(unit.unitName, 0, 'MK-T1', 1)
SM.SetUnitLauncherItem(unit.unitName, 1, 'UGM-84C Harpoon', 8)
UI = SM.GetUnitInterface(unit.unitName)
UI.SetSensorState(0, 0)
UI.SetSensorState(1, 0)
UI.SetSensorState(4, 0)
UI.AddTask('Nav', 1.000000, 0)
UI.AddNavWaypointAdvanced(-0.310398, 1.087291, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.295234, 1.052913, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.415467, 1.040089, 0.000000, 0.000000)
UI.AddNavWaypointAdvanced(-0.507537, 1.065736, 0.000000, 0.000000)
UI.AddTask('SubEvade', 3.000000, 3)
BB = UI.GetBlackboardInterface()
##############################
### Alliance 1 goals
##############################
goal_temp = SM.ProtectGoal('')
goal_temp.AddTarget('USS Carl Vinson')
goal_temp.SetQuantity(1)
goal_0_0 = goal_temp
goal_temp = SM.DestroyGoal('')
goal_temp.AddTarget('Russian HQ Kevlavik')
goal_temp.SetQuantity(1)
goal_0_1 = goal_temp
goal_temp = SM.DestroyGoal('')
goal_temp.AddTarget('Russian HQ Reykjavik')
goal_temp.SetQuantity(1)
goal_0_2 = goal_temp
goal_temp = SM.CompoundGoal(0)
goal_temp.AddGoal(goal_0_0)
goal_temp.AddGoal(goal_0_1)
goal_temp.AddGoal(goal_0_2)
SM.SetAllianceGoal(1, goal_temp)
SM.SetAllianceROEByType(1, 2, 2, 2, 2)
##############################
### Alliance 2 goals
##############################
goal_temp = SM.DestroyGoal('')
goal_temp.AddTarget('USS Carl Vinson')
goal_temp.SetQuantity(1)
goal_1_0 = goal_temp
goal_temp = SM.ProtectGoal('')
goal_temp.AddTarget('Russian HQ Reykjavik')
goal_temp.SetQuantity(1)
goal_1_1 = goal_temp
goal_temp = SM.ProtectGoal('')
goal_temp.AddTarget('Russian HQ Kevlavik')
goal_temp.SetQuantity(1)
goal_1_2 = goal_temp
goal_temp = SM.CompoundGoal(0)
goal_temp.AddGoal(goal_1_0)
goal_temp.AddGoal(goal_1_1)
goal_temp.AddGoal(goal_1_2)
SM.SetAllianceGoal(2, goal_temp)
SM.SetAllianceROEByType(2, 2, 2, 2, 2)
##############################
### Alliance 3 goals
##############################
goal_temp = SM.TimeGoal()
goal_temp.SetPassTimeout(599940.0)
goal_temp.SetFailTimeout(599940.0)
SM.SetAllianceGoal(3, goal_temp)
SM.SetAllianceROEByType(3, 0, 0, 0, 0)
##############################
### Overlay Graphics
##############################
##############################
### Randomization Info
##############################
SM.SetIncludeProbability('B-459', 1.000000)
SM.AddRandomBox('B-459', -20.9063, -20.8663, 60.3468, 60.3868)
SM.AddRandomBox('B-459', -20.9063, -20.8663, 60.3468, 60.3868)
SM.SetIncludeProbability('K-239 Karp', 1.000000)
SM.AddRandomBox('K-239 Karp', -18.4605, -18.4205, 62.6356, 62.6756)
|
gcblue/gcblue
|
scenarios/SinglePlayer/ColdWar/Backfires.py
|
Python
|
bsd-3-clause
| 149,535
|
[
"COLUMBUS"
] |
1bfa7b6f9ccc86e28d1b79f186ccda87eda7f4ee1081eb8bb2c502a03187fdf6
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides the default implementation for flat review for Orca."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import re
from . import braille
from . import debug
from . import eventsynthesizer
from . import messages
from . import object_properties
from . import orca_state
from . import settings
from .braille_generator import BrailleGenerator
from .orca_i18n import _
# [[[WDW - HACK Regular expression to split strings on whitespace
# boundaries, which is what we'll use for word dividers instead of
# living at the whim of whomever decided to implement the AT-SPI
# interfaces for their toolkit or app.]]]
#
# The capturing group keeps the whitespace runs in the split result so the
# inter-word gaps become their own (whitespace) Words.
whitespace_re = re.compile(r'(\s+)', re.DOTALL | re.IGNORECASE | re.M)
# U+FFFC OBJECT REPLACEMENT CHARACTER: AT-SPI's placeholder for an embedded
# child object (e.g. a link inside a paragraph).
EMBEDDED_OBJECT_CHARACTER = '\ufffc'
class Char:
    """One character of an Accessibility_Text object, with its on-screen
    extents."""

    def __init__(self,
                 word,
                 index,
                 string,
                 x, y, width, height):
        """Create a Char.

        Arguments:
        - word: the Word instance this character belongs to
        - index: position of this character within the word
        - string: the character itself
        - x, y, width, height: screen extents of the character
        """
        # Plain value holder: record the owning word, the character data,
        # and its bounding box.
        self.word = word
        self.index = index
        self.string = string
        self.x, self.y = x, y
        self.width, self.height = width, height
class Word:
    """Represents a single word of an Accessibility_Text object, or
    the entire name of an Image or Component if the associated object
    does not implement the Accessibility_Text interface.  As a rule of
    thumb, all words derived from an Accessibility_Text interface will
    start with the word and will end with all chars up to the
    beginning of the next word.  That is, whitespace and punctuation
    will usually be tacked on to the end of words."""

    def __init__(self,
                 zone,
                 index,
                 startOffset,
                 string,
                 x, y, width, height):
        """Creates a new Word.

        Arguments:
        - zone: the Zone instance this belongs to
        - index: the index of this word in the Zone
        - startOffset: offset of this word in the accessible's text
        - string: the actual string
        - x, y, width, height: the extents of this word on the screen"""
        self.zone = zone
        self.index = index
        self.startOffset = startOffset
        self.string = string
        self.length = len(string)
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __getattr__(self, attr):
        """Used for lazily determining the chars of a word.  We do
        this to reduce the total number of round trip calls to the app,
        and to also spread the round trip calls out over the lifetime
        of a flat review context.

        Arguments:
        - attr: a string indicating the attribute name to retrieve

        Returns the value of the given attribute.
        """

        if attr == "chars":
            if isinstance(self.zone, TextZone):
                text = self.zone.accessible.queryText()

                # pylint: disable-msg=W0201
                self.chars = []
                i = 0
                while i < self.length:
                    [char, startOffset, endOffset] = text.getTextAtOffset(
                        self.startOffset + i,
                        pyatspi.TEXT_BOUNDARY_CHAR)
                    # Sometimes we get more than a character's worth.  See
                    # Bug #495303.  We can try to correct this by keeping
                    # only the first character.
                    #
                    # BUG FIX: this was previously the bare expression
                    # 'char[0]' -- a no-op that discarded the sliced
                    # character and stored the multi-character string.
                    if len(char):
                        char = char[0]
                    [x, y, width, height] = text.getRangeExtents(
                        startOffset,
                        startOffset + 1,
                        0)
                    self.chars.append(Char(self,
                                           i,
                                           char,
                                           x, y, width, height))
                    i += 1
            else:
                # Non-text zones have no per-character breakdown.
                self.chars = None
            return self.chars
        elif attr.startswith('__') and attr.endswith('__'):
            raise AttributeError(attr)
        else:
            # NOTE: an unknown non-dunder attribute raises KeyError (not
            # AttributeError); preserved for compatibility with callers.
            return self.__dict__[attr]
class Zone:
    """A horizontal slice of screen content belonging to one accessible."""

    def __init__(self,
                 accessible,
                 string,
                 x, y,
                 width, height,
                 role=None):
        """Create a Zone covering one horizontal region.

        Arguments:
        - accessible: the Accessible associated with this Zone
        - string: the text displayed for this Zone
        - x, y, width, height: screen extents of the Zone
        - role: optional role overriding the accessible's own role
        """
        self.accessible = accessible
        self.string = string
        self.length = len(string)
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Only query the accessible when the caller did not supply a role.
        self.role = role or accessible.getRole()

    def __getattr__(self, attr):
        """Lazily create the (always empty) word list of a base Zone.

        Arguments:
        - attr: the attribute name being looked up

        Returns the value of the given attribute.
        """
        if attr == "words":
            # Base zones carry no text breakdown; cache the empty list so
            # later lookups bypass __getattr__ entirely.
            # pylint: disable-msg=W0201
            self.words = []
            return self.words
        if attr.startswith('__') and attr.endswith('__'):
            raise AttributeError(attr)
        # NOTE: an unknown non-dunder attribute raises KeyError here;
        # preserved for compatibility with callers.
        return self.__dict__[attr]

    def onSameLine(self, zone):
        """Return True if this Zone and *zone* overlap vertically by more
        than 25% of the shorter zone's height."""
        top = max(self.y, zone.y)
        bottom = min(self.y + self.height, zone.y + zone.height)
        if top >= bottom:
            return False
        overlap = bottom - top
        return (1.0 * overlap) / min(self.height, zone.height) > 0.25

    def getWordAtOffset(self, charOffset):
        """Return [word, offsetWithinWord] for the character at
        *charOffset*; if the offset is past the last word, returns the
        last word (or None) and the total length consumed."""
        consumed = 0
        found = None
        for candidate in self.words:
            found = candidate
            end = consumed + len(candidate.string)
            if end > charOffset:
                return [found, charOffset - consumed]
            consumed = end
        return [found, consumed]
class TextZone(Zone):
    """Represents Accessibility_Text that is a portion of a single
    horizontal line."""

    def __init__(self,
                 accessible,
                 startOffset,
                 string,
                 x, y,
                 width, height):
        """Creates a new Zone, which is a horizontal region of text.

        Arguments:
        - accessible: the Accessible associated with this Zone
        - startOffset: the index of the char in the Accessibility_Text
          interface where this Zone starts
        - string: the string being displayed for this Zone
        - extents: x, y, width, height in screen coordinates
        """
        Zone.__init__(self, accessible, string, x, y, width, height)
        self.startOffset = startOffset

    def __getattr__(self, attr):
        """Used for lazily determining the words in a Zone.  The words
        will either be all whitespace (interword boundaries) or actual
        words.  To determine if a Word is whitespace, use
        word.string.isspace()

        Arguments:
        - attr: a string indicating the attribute name to retrieve

        Returns the value of the given attribute.
        """

        if attr == "words":
            text = self.accessible.queryText()

            # pylint: disable-msg=W0201
            self.words = []
            wordIndex = 0
            offset = self.startOffset
            # whitespace_re captures the separators, so whitespace runs
            # become Words of their own; empty fragments are skipped.
            for string in whitespace_re.split(self.string):
                if len(string):
                    endOffset = offset + len(string)
                    # One extents round-trip per word, deferred until the
                    # first time 'words' is accessed.
                    [x, y, width, height] = text.getRangeExtents(
                        offset,
                        endOffset,
                        0)
                    word = Word(self,
                                wordIndex,
                                offset,
                                string,
                                x, y, width, height)
                    self.words.append(word)
                    wordIndex += 1
                    offset = endOffset
            return self.words
        elif attr.startswith('__') and attr.endswith('__'):
            raise AttributeError(attr)
        else:
            # NOTE: unknown non-dunder attributes raise KeyError here.
            return self.__dict__[attr]
class StateZone(Zone):
    """Represents a Zone for an accessible that shows a state using
    a graphical indicator, such as a checkbox or radio button."""

    def __init__(self,
                 accessible,
                 x, y,
                 width, height,
                 role=None):
        Zone.__init__(self, accessible, "", x, y, width, height, role)

        # Force the use of __getattr__ so we get the actual state
        # of the accessible each time we look at the 'string' field.
        #
        del self.string

    def __getattr__(self, attr):
        # 'string'/'length' are the spoken form; 'brailleString' the
        # brailled form.  All three are recomputed from the live AT-SPI
        # state set on every access.
        if attr in ["string", "length", "brailleString"]:
            # stateCount is used as a boolean and as an index:
            # 0 = unchecked, 1 = checked, 2 = indeterminate/partial.
            stateset = self.accessible.getState()
            if stateset.contains(pyatspi.STATE_INDETERMINATE):
                stateCount = 2
            elif stateset.contains(pyatspi.STATE_CHECKED):
                stateCount = 1
            else:
                stateCount = 0
            if self.role in [pyatspi.ROLE_CHECK_BOX,
                             pyatspi.ROLE_CHECK_MENU_ITEM,
                             pyatspi.ROLE_TABLE_CELL]:
                if stateCount == 2:
                    speechState = object_properties.STATE_PARTIALLY_CHECKED
                elif stateCount == 1:
                    speechState = object_properties.STATE_CHECKED
                else:
                    speechState = object_properties.STATE_NOT_CHECKED
                brailleState = \
                    object_properties.CHECK_BOX_INDICATORS_BRAILLE[stateCount]
            elif self.role == pyatspi.ROLE_TOGGLE_BUTTON:
                if stateCount:
                    speechState = object_properties.STATE_PRESSED
                else:
                    speechState = object_properties.STATE_NOT_PRESSED
                # NOTE(review): toggle buttons reuse the radio-button
                # braille indicators -- confirm this is intended.
                brailleState = \
                    object_properties.RADIO_BUTTON_INDICATORS_BRAILLE[stateCount]
            else:
                # Radio buttons and radio menu items.
                if stateCount:
                    speechState = object_properties.STATE_SELECTED_RADIO_BUTTON
                else:
                    speechState = object_properties.STATE_UNSELECTED_RADIO_BUTTON
                brailleState = \
                    object_properties.RADIO_BUTTON_INDICATORS_BRAILLE[stateCount]

            if attr == "string":
                return speechState
            elif attr == "length":
                return len(speechState)
            elif attr == "brailleString":
                return brailleState
        else:
            return Zone.__getattr__(self, attr)
class ValueZone(Zone):
    """Represents a Zone for an accessible that shows a value using
    a graphical indicator, such as a progress bar or slider."""

    def __init__(self,
                 accessible,
                 x, y,
                 width, height,
                 role=None):
        """Creates a new ValueZone.

        Arguments:
        - accessible: the Accessible associated with this Zone
        - x, y, width, height: on-screen extents of the Zone
        - role: optional role overriding the accessible's own role
        """
        Zone.__init__(self, accessible, "", x, y, width, height, role)

        # Force the use of __getattr__ so we get the actual value
        # of the accessible each time we look at the 'string' field.
        #
        del self.string

    def __getattr__(self, attr):
        if attr in ["string", "length", "brailleString"]:
            # Sliders and scroll bars get a horizontal/vertical prefix
            # when the accessible reports an orientation state.
            orientation = None
            if self.role in [pyatspi.ROLE_SLIDER,
                             pyatspi.ROLE_SCROLL_BAR]:
                stateset = self.accessible.getState()
                if stateset.contains(pyatspi.STATE_HORIZONTAL):
                    orientation = object_properties.STATE_HORIZONTAL
                elif stateset.contains(pyatspi.STATE_VERTICAL):
                    orientation = object_properties.STATE_VERTICAL

            # BUG FIX: 'value' used to be left unbound when the accessible
            # did not implement the Value interface; the bare 'except'
            # below then silently swallowed the resulting NameError.  Bind
            # it to None explicitly so the 0% fallback is deliberate.
            value = None
            try:
                value = self.accessible.queryValue()
            except NotImplementedError:
                debug.println(debug.LEVEL_FINE,
                              'ValueZone does not implement Value interface')
            try:
                percentValue = int((value.currentValue /
                                    (value.maximumValue - value.minimumValue))
                                   * 100.0)
            except Exception:
                # No Value interface, a zero-width range, or bogus values:
                # present the widget as 0%.
                percentValue = 0

            rolename = self.accessible.getLocalizedRoleName()
            if orientation:
                speechValue = orientation + " " + rolename
            else:
                speechValue = rolename
            speechValue = speechValue + " " + messages.percentage(percentValue)

            # NOTE(review): getLocalizedRoleName is invoked unbound with
            # the accessible in place of 'self' -- confirm this matches
            # the BrailleGenerator API.
            rolename = BrailleGenerator.getLocalizedRoleName(self.accessible)
            if orientation:
                brailleValue = "%s %s %d%%" % (orientation,
                                               rolename,
                                               percentValue)
            else:
                brailleValue = "%s %d%%" % (rolename, percentValue)

            if attr == "string":
                return speechValue
            elif attr == "length":
                return len(speechValue)
            elif attr == "brailleString":
                return brailleValue
        else:
            return Zone.__getattr__(self, attr)
class Line:
    """A Line is a single line across a window and is composed of Zones."""

    def __init__(self,
                 index,
                 zones):
        """Creates a new Line, which is a horizontal region of text.

        Arguments:
        - index: the index of this Line in the window
        - zones: the Zones that make up this line
        """
        self.index = index
        self.zones = zones
        # Rebuilt by getBrailleRegions(); kept as an attribute so the
        # regions remain reachable for cursor routing.
        self.brailleRegions = None

    def __getattr__(self, attr):
        # We dynamically create the string each time to handle
        # StateZone and ValueZone zones.
        #
        if attr in ["string", "length", "x", "y", "width", "height"]:
            # Single pass over the zones accumulating both the union
            # bounding box and the space-joined text of the line.
            bounds = None
            string = ""
            for zone in self.zones:
                if not bounds:
                    bounds = [zone.x, zone.y,
                              zone.x + zone.width, zone.y + zone.height]
                else:
                    bounds[0] = min(bounds[0], zone.x)
                    bounds[1] = min(bounds[1], zone.y)
                    bounds[2] = max(bounds[2], zone.x + zone.width)
                    bounds[3] = max(bounds[3], zone.y + zone.height)
                if len(zone.string):
                    if len(string):
                        string += " "
                    string += zone.string
            if not bounds:
                # Empty line: degenerate extents.
                bounds = [-1, -1, -1, -1]

            if attr == "string":
                return string
            elif attr == "length":
                return len(string)
            elif attr == "x":
                return bounds[0]
            elif attr == "y":
                return bounds[1]
            elif attr == "width":
                return bounds[2] - bounds[0]
            elif attr == "height":
                return bounds[3] - bounds[1]
        elif attr.startswith('__') and attr.endswith('__'):
            raise AttributeError(attr)
        else:
            # NOTE: unknown non-dunder attributes raise KeyError here.
            return self.__dict__[attr]

    def getBrailleRegions(self):
        """Return the braille regions for this line: one region per zone,
        separated by single-space pads, with an optional end-of-line
        marker appended."""
        # [[[WDW - We'll always compute the braille regions.  This
        # allows us to handle StateZone and ValueZone zones whose
        # states might be changing on us.]]]
        #
        if True or not self.brailleRegions:
            self.brailleRegions = []
            brailleOffset = 0
            for zone in self.zones:
                # The 'isinstance(zone, TextZone)' test is a sanity check
                # to handle problems with Java text. See Bug 435553.
                if isinstance(zone, TextZone) and \
                   ((zone.accessible.getRole() in
                         (pyatspi.ROLE_TEXT,
                          pyatspi.ROLE_PASSWORD_TEXT,
                          pyatspi.ROLE_TERMINAL)) or
                    # [[[TODO: Eitan - HACK:
                    # This is just to get FF3 cursor key routing support.
                    # We really should not be determining all this stuff here,
                    # it should be in the scripts.
                    # Same applies to roles above.]]]
                    (zone.accessible.getRole() in
                         (pyatspi.ROLE_PARAGRAPH,
                          pyatspi.ROLE_HEADING,
                          pyatspi.ROLE_LINK))):
                    region = braille.ReviewText(zone.accessible,
                                                zone.string,
                                                zone.startOffset,
                                                zone)
                else:
                    # State/Value zones expose 'brailleString'; plain zones
                    # fall back to their spoken string.
                    try:
                        brailleString = zone.brailleString
                    except:
                        brailleString = zone.string
                    region = braille.ReviewComponent(zone.accessible,
                                                     brailleString,
                                                     0, # cursor offset
                                                     zone)
                if len(self.brailleRegions):
                    # Single-space pad between adjacent zones.
                    pad = braille.Region(" ")
                    pad.brailleOffset = brailleOffset
                    self.brailleRegions.append(pad)
                    brailleOffset += 1
                zone.brailleRegion = region
                region.brailleOffset = brailleOffset
                self.brailleRegions.append(region)
                regionString = region.string
                brailleOffset += len(regionString)
            if not settings.disableBrailleEOL:
                if len(self.brailleRegions):
                    pad = braille.Region(" ")
                    pad.brailleOffset = brailleOffset
                    self.brailleRegions.append(pad)
                    brailleOffset += 1
                # "$l" is the braille end-of-line indicator.
                eol = braille.Region("$l")
                eol.brailleOffset = brailleOffset
                self.brailleRegions.append(eol)
        return self.brailleRegions
class Context:
    """Information regarding where a user happens to be exploring
    right now.
    """

    # Granularity levels for flat-review navigation, smallest to largest.
    ZONE = 0
    CHAR = 1
    WORD = 2
    LINE = 3 # includes all zones on same line
    WINDOW = 4

    # Bit flags controlling how navigation wraps at line and window
    # boundaries.
    WRAP_NONE = 0
    WRAP_LINE = 1 << 0
    WRAP_TOP_BOTTOM = 1 << 1
    WRAP_ALL = (WRAP_LINE | WRAP_TOP_BOTTOM)
    def __init__(self, script):
        """Create a new Context that will be used for handling flat
        review mode.

        Builds the line/zone model for the script's top-level window and
        positions the review cursor on the zone holding the locus of
        focus (and, for text, at its caret offset).
        """
        self.script = script

        if (not orca_state.locusOfFocus) \
            or (orca_state.locusOfFocus.getApplication() \
                != self.script.app):
            self.lines = []
        else:
            # We want to stop at the window or frame or equivalent level.
            #
            obj = script.utilities.topLevelObject(orca_state.locusOfFocus)
            if obj:
                self.lines = self.clusterZonesByLine(self.getShowingZones(obj))
            else:
                self.lines = []

        currentLineIndex = 0
        currentZoneIndex = 0
        currentWordIndex = 0
        currentCharIndex = 0

        try:
            role = orca_state.locusOfFocus.getRole()
        except:
            role = None
        # For table cells, search for the functional child that actually
        # holds the content rather than the cell itself.
        if role == pyatspi.ROLE_TABLE_CELL:
            searchZone = self.script.\
                utilities.realActiveDescendant(orca_state.locusOfFocus)
        else:
            searchZone = orca_state.locusOfFocus

        # Scan line by line, zone by zone, for the zone whose accessible
        # is the object with focus.
        foundZoneWithFocus = False
        while currentLineIndex < len(self.lines):
            line = self.lines[currentLineIndex]
            currentZoneIndex = 0
            while currentZoneIndex < len(line.zones):
                zone = line.zones[currentZoneIndex]
                if self.script.utilities.isSameObject(
                        zone.accessible, searchZone):
                    foundZoneWithFocus = True
                    break
                else:
                    currentZoneIndex += 1
            if foundZoneWithFocus:
                break
            else:
                currentLineIndex += 1

        # Fallback to the first Zone if we didn't find anything.
        #
        if not foundZoneWithFocus:
            currentLineIndex = 0
            currentZoneIndex = 0
        elif isinstance(zone, TextZone):
            # If we're on an accessible text object, try to set the
            # review cursor to the caret position of that object.
            #
            accessible = zone.accessible
            lineIndex = currentLineIndex
            zoneIndex = currentZoneIndex
            try:
                caretOffset = zone.accessible.queryText().caretOffset
            except NotImplementedError:
                caretOffset = -1
            foundZoneWithCaret = False
            checkForEOF = False
            while lineIndex < len(self.lines):
                line = self.lines[lineIndex]
                while zoneIndex < len(line.zones):
                    zone = line.zones[zoneIndex]
                    if zone.accessible == accessible:
                        if (caretOffset >= zone.startOffset):
                            if (caretOffset \
                                < (zone.startOffset + zone.length)):
                                foundZoneWithCaret = True
                                break
                            elif (caretOffset \
                                  == (zone.startOffset + zone.length)):
                                # Caret sits just past this zone: remember
                                # it as a possible end-of-file position.
                                checkForEOF = True
                                lineToCheck = lineIndex
                                zoneToCheck = zoneIndex
                    zoneIndex += 1
                if foundZoneWithCaret:
                    # Walk the zone's words to convert the caret offset
                    # into (word index, char index) coordinates.
                    currentLineIndex = lineIndex
                    currentZoneIndex = zoneIndex
                    currentWordIndex = 0
                    currentCharIndex = 0
                    offset = zone.startOffset
                    while currentWordIndex < len(zone.words):
                        word = zone.words[currentWordIndex]
                        if (word.length + offset) > caretOffset:
                            currentCharIndex = caretOffset - offset
                            break
                        else:
                            currentWordIndex += 1
                            offset += word.length
                    break
                else:
                    zoneIndex = 0
                    lineIndex += 1
            atEOF = not foundZoneWithCaret and checkForEOF
            if atEOF:
                # Caret is at the very end of the text: park the review
                # cursor on the last character of the last word.
                line = self.lines[lineToCheck]
                zone = line.zones[zoneToCheck]
                currentLineIndex = lineToCheck
                currentZoneIndex = zoneToCheck
                if caretOffset and zone.words:
                    currentWordIndex = len(zone.words) - 1
                    currentCharIndex = \
                        zone.words[currentWordIndex].length - 1

        self.lineIndex = currentLineIndex
        self.zoneIndex = currentZoneIndex
        self.wordIndex = currentWordIndex
        self.charIndex = currentCharIndex

        # This is used to tell us where we should strive to move to
        # when going up and down lines to the closest character.
        # The targetChar is the character where we initially started
        # moving from, and does not change when one moves up or down
        # by line.
        #
        self.targetCharInfo = None
def clip(self,
ax, ay, awidth, aheight,
bx, by, bwidth, bheight):
"""Clips region 'a' by region 'b' and returns the new region as
a list: [x, y, width, height].
"""
x = max(ax, bx)
x2 = min(ax + awidth, bx + bwidth)
width = x2 - x
y = max(ay, by)
y2 = min(ay + aheight, by + bheight)
height = y2 - y
return [x, y, width, height]
    def splitTextIntoZones(self, accessible, string, startOffset, cliprect):
        """Traverses the string, splitting it up into separate zones if the
        string contains the EMBEDDED_OBJECT_CHARACTER, which is used by apps
        such as Firefox to handle containment of things such as links in
        paragraphs.

        Arguments:
        - accessible: the accessible
        - string: a substring from the accessible's text specialization
        - startOffset: the starting character offset of the string
        - cliprect: the extents that the Zones must fit inside.

        Returns a list of Zones for the visible text or None if nothing is
        visible.
        """

        # We convert the string to unicode and walk through it.  While doing
        # this, we keep two sets of offsets:
        #
        # substring{Start,End}Offset: where in the accessible text
        # implementation we are
        #
        # unicodeStartOffset: where we are in the unicodeString
        #
        anyVisible = False
        zones = []
        text = accessible.queryText()
        substringStartOffset = startOffset
        substringEndOffset = startOffset
        unicodeStartOffset = 0
        unicodeString = string
        #print "LOOKING AT '%s'" % unicodeString
        # Iterate one past the end so the final substring is flushed by the
        # same 'else' branch that handles an embedded-object boundary.
        for i in range(0, len(unicodeString) + 1):
            if (i != len(unicodeString)) \
               and (unicodeString[i] != EMBEDDED_OBJECT_CHARACTER):
                # Ordinary character: extend the current substring.
                substringEndOffset += 1
            elif (substringEndOffset == substringStartOffset):
                # Boundary with an empty substring (e.g. consecutive
                # embedded objects): just skip past it.
                substringStartOffset += 1
                substringEndOffset = substringStartOffset
                unicodeStartOffset = i + 1
            else:
                # Boundary (or end of string): emit a TextZone for the
                # accumulated substring if any of it is on screen.
                [x, y, width, height] = text.getRangeExtents(
                        substringStartOffset, substringEndOffset, 0)
                if self.script.utilities.containsRegion(
                        x, y, width, height,
                        cliprect.x, cliprect.y,
                        cliprect.width, cliprect.height):
                    anyVisible = True
                    clipping = self.clip(x, y, width, height,
                                         cliprect.x, cliprect.y,
                                         cliprect.width, cliprect.height)

                    # [[[TODO: WDW - HACK it would be nice to clip the
                    # the text by what is really showing on the screen,
                    # but this seems to hang Orca and the client.  Logged
                    # as bugzilla bug 319770.]]]
                    #
                    #ranges = text.getBoundedRanges(\
                    #    clipping[0],
                    #    clipping[1],
                    #    clipping[2],
                    #    clipping[3],
                    #    0,
                    #    pyatspi.TEXT_CLIP_BOTH,
                    #    pyatspi.TEXT_CLIP_BOTH)
                    #
                    #print
                    #print "HERE!"
                    #for range in ranges:
                    #    print range.startOffset
                    #    print range.endOffset
                    #    print range.content

                    substring = unicodeString[unicodeStartOffset:i]
                    #print "  SUBSTRING '%s'" % substring
                    zones.append(TextZone(accessible,
                                          substringStartOffset,
                                          substring,
                                          clipping[0],
                                          clipping[1],
                                          clipping[2],
                                          clipping[3]))
                # Skip the embedded-object character itself and start the
                # next substring after it.
                substringStartOffset = substringEndOffset + 1
                substringEndOffset = substringStartOffset
                unicodeStartOffset = i + 1
        if anyVisible:
            return zones
        else:
            return None
    def getZonesFromText(self, accessible, cliprect):
        """Gets a list of Zones from an object that implements the
        AccessibleText specialization.

        Arguments:
        - accessible: the accessible
        - cliprect: the extents that the Zones must fit inside.

        Returns a list of Zones.
        """

        debug.println(debug.LEVEL_FINEST, "  looking at text:")

        try:
            text = accessible.queryText()
        except NotImplementedError:
            return []
        else:
            zones = []
            offset = 0
            lastEndOffset = -1
            upperMax = lowerMax = text.characterCount
            upperMid = lowerMid = int(upperMax / 2)
            upperMin = lowerMin = 0
            upperY = lowerY = 0
            oldMid = 0

            # performing binary search to locate first line inside clipped area
            while oldMid != upperMid:
                oldMid = upperMid
                [x, y, width, height] = text.getRangeExtents(upperMid,
                                                             upperMid+1,
                                                             0)
                upperY = y
                if y > cliprect.y:
                    upperMax = upperMid
                else:
                    upperMin = upperMid
                upperMid = int((upperMax - upperMin) / 2) + upperMin

            # performing binary search to locate last line inside clipped area
            oldMid = 0
            limit = cliprect.y+cliprect.height
            while oldMid != lowerMid:
                oldMid = lowerMid
                [x, y, width, height] = text.getRangeExtents(lowerMid,
                                                             lowerMid+1,
                                                             0)
                lowerY = y
                if y > limit:
                    lowerMax = lowerMid
                else:
                    lowerMin = lowerMid
                lowerMid = int((lowerMax - lowerMin) / 2) + lowerMin

            # finding out the zones: walk line by line between the two
            # binary-search bounds.
            offset = upperMin
            length = lowerMax
            while offset < length:
                [string, startOffset, endOffset] = text.getTextAtOffset(
                    offset,
                    pyatspi.TEXT_BOUNDARY_LINE_START)

                debug.println(debug.LEVEL_FINEST,
                              "    line at %d is (start=%d end=%d): '%s'" \
                              % (offset, startOffset, endOffset, string))

                # [[[WDW - HACK: well...gnome-terminal sometimes wants to
                # give us outrageous values back from getTextAtOffset
                # (see http://bugzilla.gnome.org/show_bug.cgi?id=343133),
                # so we try to handle it.  Evolution does similar things.]]]
                #
                if (startOffset < 0) \
                   or (endOffset < 0) \
                   or (startOffset > offset) \
                   or (endOffset < offset) \
                   or (startOffset > endOffset) \
                   or (abs(endOffset - startOffset) > 666e3):
                    debug.println(debug.LEVEL_WARNING,
                                  "flat_review:getZonesFromText detected "\
                                  "garbage from getTextAtOffset for accessible "\
                                  "name='%s' role'='%s': offset used=%d, "\
                                  "start/end offset returned=(%d,%d), string='%s'"\
                                  % (accessible.name, accessible.getRoleName(),
                                     offset, startOffset, endOffset, string))
                    break

                # [[[WDW - HACK: this is here because getTextAtOffset
                # tends not to be implemented consistently across toolkits.
                # Sometimes it behaves properly (i.e., giving us an endOffset
                # that is the beginning of the next line), sometimes it
                # doesn't (e.g., giving us an endOffset that is the end of
                # the current line).  So...we hack.  The whole 'max' deal
                # is to account for lines that might be a brazillion lines
                # long.]]]
                #
                if endOffset == lastEndOffset:
                    offset = max(offset + 1, lastEndOffset + 1)
                    lastEndOffset = endOffset
                    continue
                else:
                    offset = endOffset
                    lastEndOffset = endOffset

                textZones = self.splitTextIntoZones(
                    accessible, string, startOffset, cliprect)

                # We need to account for the fact that newlines at the end of
                # text are treated as being on the same line when they in fact
                # are a whole separate blank line.  So, we check for this and
                # make up a new text zone for these cases.  See bug 434654.
                #
                if (endOffset == length) and (string[-1:] == "\n"):
                    [x, y, width, height] = text.getRangeExtents(startOffset,
                                                                 endOffset,
                                                                 0)
                    if not textZones:
                        textZones = []
                    textZones.append(TextZone(accessible,
                                              endOffset,
                                              "",
                                              x, y + height, 0, height))

                if textZones:
                    zones.extend(textZones)
                elif len(zones):
                    # We'll break out of searching all the text - the idea
                    # here is that we'll at least try to optimize for when
                    # we gone below the visible clipping area.
                    #
                    # [[[TODO: WDW - would be nice to optimize this better.
                    # for example, perhaps we can assume the caret will always
                    # be visible, and we can start our text search from there.
                    # Logged as bugzilla bug 319771.]]]
                    #
                    break

            # We might have a zero length text area.  In that case, well,
            # lets hack if this is something whose sole purpose is to
            # act as a text entry area.
            #
            if len(zones) == 0:
                if (accessible.getRole() == pyatspi.ROLE_TEXT) \
                   or ((accessible.getRole() == pyatspi.ROLE_ENTRY)) \
                   or ((accessible.getRole() == pyatspi.ROLE_PASSWORD_TEXT)):
                    extents = accessible.queryComponent().getExtents(0)
                    zones.append(TextZone(accessible,
                                          0,
                                          "",
                                          extents.x, extents.y,
                                          extents.width, extents.height))

            return zones
    def _insertStateZone(self, zones, accessible, role=None):
        """If the accessible presents non-textual state, such as a
        checkbox or radio button, insert a StateZone representing
        that state.

        Arguments:
        - zones: the list of Zones for this accessible (modified in place)
        - accessible: the accessible whose state indicator to represent
        - role: optional role override; defaults to accessible.getRole()
        """
        zone = None
        stateOnLeft = True
        role = role or accessible.getRole()
        if role in [pyatspi.ROLE_CHECK_BOX,
                    pyatspi.ROLE_CHECK_MENU_ITEM,
                    pyatspi.ROLE_RADIO_BUTTON,
                    pyatspi.ROLE_RADIO_MENU_ITEM]:

            # Attempt to infer if the indicator is to the left or
            # right of the text.
            #
            extents = accessible.queryComponent().getExtents(0)
            stateX = extents.x
            stateY = extents.y
            stateWidth = 1
            stateHeight = extents.height

            try:
                text = accessible.queryText()
            except NotImplementedError:
                pass
            else:
                [x, y, width, height] = \
                    text.getRangeExtents( \
                        0, text.characterCount, 0)
                textToLeftEdge = x - extents.x
                textToRightEdge = (extents.x + extents.width) - (x + width)
                # Heuristic: a gap of more than 20 pixels before the
                # text means the indicator sits on the left.
                stateOnLeft = textToLeftEdge > 20
                if stateOnLeft:
                    stateWidth = textToLeftEdge
                else:
                    stateX = x + width
                    stateWidth = textToRightEdge

            zone = StateZone(accessible,
                             stateX, stateY, stateWidth, stateHeight)

        elif role == pyatspi.ROLE_TOGGLE_BUTTON:
            # [[[TODO: WDW - This is a major hack.  We make up an
            # indicator for a toggle button to let the user know
            # whether a toggle button is pressed or not.]]]
            #
            extents = accessible.queryComponent().getExtents(0)
            zone = StateZone(accessible,
                             extents.x, extents.y, 1, extents.height)

        elif role == pyatspi.ROLE_TABLE_CELL:
            # Handle table cells that act like check boxes.
            #
            try:
                action = accessible.queryAction()
            except NotImplementedError:
                action = None

            if action:
                hasToggle = False
                for i in range(0, action.nActions):
                    # Translators: this is the action name for
                    # the 'toggle' action. It must be the same
                    # string used in the *.po file for gail.
                    #
                    if action.getName(i) in ["toggle", _("toggle")]:
                        hasToggle = True
                        break
                if hasToggle:
                    # Recurse with a check-box role so the indicator
                    # heuristics above apply to this cell.
                    self._insertStateZone(zones, accessible,
                                          pyatspi.ROLE_CHECK_BOX)

        if zone:
            if stateOnLeft:
                zones.insert(0, zone)
            else:
                zones.append(zone)
    def getZonesFromAccessible(self, accessible, cliprect):
        """Returns a list of Zones for the given accessible.

        Arguments:
        - accessible: the accessible
        - cliprect: the extents that the Zones must fit inside.

        Returns: a list of Zone (and possibly StateZone/ValueZone)
        instances clipped to cliprect; empty if the accessible is
        outside cliprect or exposes nothing presentable.
        """
        icomponent = accessible.queryComponent()
        if not icomponent:
            return []

        # Get the component extents in screen coordinates.
        #
        extents = icomponent.getExtents(0)

        if not self.script.utilities.containsRegion(
            extents.x, extents.y,
            extents.width, extents.height,
            cliprect.x, cliprect.y,
            cliprect.width, cliprect.height):
            return []

        debug.println(
            debug.LEVEL_FINEST,
            "flat_review.getZonesFromAccessible (name=%s role=%s)" \
            % (accessible.name, accessible.getRoleName()))

        # Now see if there is any accessible text.  If so, find new zones,
        # where each zone represents a line of this text object.  When
        # creating the zone, only keep track of the text that is actually
        # showing on the screen.
        #
        try:
            accessible.queryText()
        except NotImplementedError:
            zones = []
        else:
            zones = self.getZonesFromText(accessible, cliprect)

        # We really want the accessible text information.  But, if we have
        # an image, and it has a description, we can fall back on it.
        # First, try to get the image interface.
        try:
            iimage = accessible.queryImage()
        except NotImplementedError:
            iimage = None

        if (len(zones) == 0) and iimage:
            # Check for accessible.name, if it exists and has len > 0, use it
            # Otherwise, do the same for accessible.description
            # Otherwise, do the same for accessible.image.description
            imageName = ""
            if accessible.name and len(accessible.name):
                imageName = accessible.name
            elif accessible.description and len(accessible.description):
                imageName = accessible.description
            elif iimage.imageDescription and \
                     len(iimage.imageDescription):
                imageName = iimage.imageDescription

            [x, y] = iimage.getImagePosition(0)
            [width, height] = iimage.getImageSize()

            if width != 0 and height != 0 \
               and self.script.utilities.containsRegion(
                    x, y, width, height,
                    cliprect.x, cliprect.y,
                    cliprect.width, cliprect.height):

                clipping = self.clip(x, y, width, height,
                                     cliprect.x, cliprect.y,
                                     cliprect.width, cliprect.height)

                # Only keep the image zone if something survived clipping.
                if (clipping[2] != 0) or (clipping[3] != 0):
                    zones.append(Zone(accessible,
                                      imageName,
                                      clipping[0],
                                      clipping[1],
                                      clipping[2],
                                      clipping[3]))

        # If the accessible is a parent, we really only looked at it for
        # its accessible text.  So...we'll skip the hacking here if that's
        # the case.  [[[TODO: WDW - HACK That is, except in the case of
        # combo boxes, which don't implement the accesible text
        # interface.  We also hack with MENU items for similar reasons.]]]
        #
        # Otherwise, even if we didn't get anything of use, we certainly
        # know there's something there.  If that's the case, we'll just
        # use the component extents and the name or description of the
        # accessible.
        #
        clipping = self.clip(extents.x, extents.y,
                             extents.width, extents.height,
                             cliprect.x, cliprect.y,
                             cliprect.width, cliprect.height)
        role = accessible.getRole()

        if (len(zones) == 0) \
            and role in [pyatspi.ROLE_SCROLL_BAR,
                         pyatspi.ROLE_SLIDER,
                         pyatspi.ROLE_PROGRESS_BAR]:
            zones.append(ValueZone(accessible,
                                   clipping[0],
                                   clipping[1],
                                   clipping[2],
                                   clipping[3]))
        elif (role != pyatspi.ROLE_COMBO_BOX) \
            and (role != pyatspi.ROLE_EMBEDDED) \
            and (role != pyatspi.ROLE_LABEL) \
            and (role != pyatspi.ROLE_MENU) \
            and (role != pyatspi.ROLE_PAGE_TAB) \
            and accessible.childCount > 0:
            # Parents with children are handled by recursing on the
            # children elsewhere; nothing to add here.
            pass
        elif len(zones) == 0:
            string = ""
            if role == pyatspi.ROLE_COMBO_BOX:
                try:
                    selection = accessible[0].querySelection()
                except:
                    string = self.script.utilities.displayedText(accessible[0])
                else:
                    item = selection.getSelectedChild(0)
                    if item:
                        string = item.name

            if not string and accessible.name and len(accessible.name):
                string = accessible.name
            elif accessible.description and len(accessible.description):
                string = accessible.description

            if not string and role == pyatspi.ROLE_ICON:
                string = self.script.utilities.displayedText(accessible)

            # Fall back to the localized role name so the user at least
            # hears something (table cells excepted).
            if (string == "") \
                and (role != pyatspi.ROLE_TABLE_CELL):
                string = accessible.getLocalizedRoleName()

            if len(string) and ((clipping[2] != 0) or (clipping[3] != 0)):
                zones.append(Zone(accessible,
                                  string,
                                  clipping[0],
                                  clipping[1],
                                  clipping[2],
                                  clipping[3]))

        self._insertStateZone(zones, accessible)

        return zones
def getShowingZones(self, root):
"""Returns a list of all interesting, non-intersecting, regions
that are drawn on the screen. Each element of the list is the
Accessible object associated with a given region. The term
'zone' here is inherited from OCR algorithms and techniques.
The Zones are returned in no particular order.
Arguments:
- root: the Accessible object to traverse
Returns: a list of Zones under the specified object
"""
if not root:
return []
zones = []
try:
rootexts = root.queryComponent().getExtents(0)
except:
return []
rootrole = root.getRole()
# If we're at a leaf node, then we've got a good one on our hands.
#
try:
childCount = root.childCount
except (LookupError, RuntimeError):
childCount = -1
if root.childCount <= 0:
return self.getZonesFromAccessible(root, rootexts)
# Handle non-leaf Java JTree nodes. If the node is collapsed,
# treat it as a leaf node. If it's expanded, add it to the
# Zones list.
#
stateset = root.getState()
if stateset.contains(pyatspi.STATE_EXPANDABLE):
if stateset.contains(pyatspi.STATE_COLLAPSED):
return self.getZonesFromAccessible(root, rootexts)
elif stateset.contains(pyatspi.STATE_EXPANDED):
treenode = self.getZonesFromAccessible(root, rootexts)
if treenode:
zones.extend(treenode)
# We'll stop at various objects because, while they do have
# children, we logically think of them as one region on the
# screen. [[[TODO: WDW - HACK stopping at menu bars for now
# because their menu items tell us they are showing even though
# they are not showing. Until I can figure out a reliable way to
# get past these lies, I'm going to ignore them.]]]
#
if (root.parent and (root.parent.getRole() == pyatspi.ROLE_MENU_BAR)) \
or (rootrole == pyatspi.ROLE_COMBO_BOX) \
or (rootrole == pyatspi.ROLE_EMBEDDED) \
or (rootrole == pyatspi.ROLE_TEXT) \
or (rootrole == pyatspi.ROLE_SCROLL_BAR):
return self.getZonesFromAccessible(root, rootexts)
# If this is a status bar, only pursue its children if we cannot
# get non-empty text information from the status bar.
# See bug #506874 for more details.
#
if rootrole == pyatspi.ROLE_STATUS_BAR:
zones = self.getZonesFromText(root, rootexts)
if len(zones):
return zones
# Otherwise, dig deeper.
#
# We'll include page tabs: while they are parents, their extents do
# not contain their children. [[[TODO: WDW - need to consider all
# parents, especially those that implement accessible text. Logged
# as bugzilla bug 319773.]]]
#
if rootrole == pyatspi.ROLE_PAGE_TAB:
zones.extend(self.getZonesFromAccessible(root, rootexts))
try:
root.queryText()
if len(zones) == 0:
zones = self.getZonesFromAccessible(root, rootexts)
except NotImplementedError:
pass
showingDescendants = \
self.script.utilities.showingDescendants(root)
if len(showingDescendants):
for child in showingDescendants:
zones.extend(self.getShowingZones(child))
else:
for i in range(0, root.childCount):
child = root.getChildAtIndex(i)
if child == root:
debug.println(debug.LEVEL_WARNING,
"flat_review.getShowingZones: " +
"WARNING CHILD == PARENT!!!")
continue
elif not child:
debug.println(debug.LEVEL_WARNING,
"flat_review.getShowingZones: " +
"WARNING CHILD IS NONE!!!")
continue
elif child.parent != root:
debug.println(debug.LEVEL_WARNING,
"flat_review.getShowingZones: " +
"WARNING CHILD.PARENT != PARENT!!!")
if self.script.utilities.pursueForFlatReview(child):
zones.extend(self.getShowingZones(child))
return zones
def clusterZonesByLine(self, zones):
"""Given a list of interesting accessible objects (the Zones),
returns a list of lines in order from the top to bottom, where
each line is a list of accessible objects in order from left
to right.
"""
if len(zones) == 0:
return []
# Sort the zones and also find the top most zone - we'll bias
# the clustering to the top of the window. That is, if an
# object can be part of multiple clusters, for now it will
# become a part of the top most cluster.
#
numZones = len(zones)
for i in range(0, numZones):
for j in range(0, numZones - 1 - i):
a = zones[j]
b = zones[j + 1]
if b.y < a.y:
zones[j] = b
zones[j + 1] = a
# Now we cluster the zones. We create the clusters on the
# fly, adding a zone to an existing cluster only if it's
# rectangle horizontally overlaps all other zones in the
# cluster.
#
lineClusters = []
for clusterCandidate in zones:
addedToCluster = False
for lineCluster in lineClusters:
inCluster = True
for zone in lineCluster:
if not zone.onSameLine(clusterCandidate):
inCluster = False
break
if inCluster:
# Add to cluster based on the x position.
#
i = 0
while i < len(lineCluster):
zone = lineCluster[i]
if clusterCandidate.x < zone.x:
break
else:
i += 1
lineCluster.insert(i, clusterCandidate)
addedToCluster = True
break
if not addedToCluster:
lineClusters.append([clusterCandidate])
# Now, adjust all the indeces.
#
lines = []
lineIndex = 0
for lineCluster in lineClusters:
lines.append(Line(lineIndex, lineCluster))
zoneIndex = 0
for zone in lineCluster:
zone.line = lines[lineIndex]
zone.index = zoneIndex
zoneIndex += 1
lineIndex += 1
return lines
def setCurrent(self, lineIndex, zoneIndex, wordIndex, charIndex):
"""Sets the current character of interest.
Arguments:
- lineIndex: index into lines
- zoneIndex: index into lines[lineIndex].zones
- wordIndex: index into lines[lineIndex].zones[zoneIndex].words
- charIndex: index lines[lineIndex].zones[zoneIndex].words[wordIndex].chars
"""
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
#print "Current line=%d zone=%d word=%d char=%d" \
# % (lineIndex, zoneIndex, wordIndex, charIndex)
    def routeToCurrent(self):
        """Routes the mouse pointer to the current accessible.

        Does nothing if there is no current line or zone.
        """
        if (not self.lines) \
           or (not self.lines[self.lineIndex].zones):
            return

        [string, x, y, width, height] = self.getCurrent(Context.CHAR)

        try:
            # We try to move to the left of center.  This is to
            # handle toolkits that will offset the caret position to
            # the right if you click dead on center of a character.
            #
            # NOTE(review): max(x, x + (width / 2) - 1) lands near the
            # character's center (never left of x), which reads as the
            # opposite of "left of center" - confirm the intent.
            #
            x = max(x, x + (width / 2) - 1)
            eventsynthesizer.routeToPoint(x, y + height / 2, "abs")
        except:
            debug.printException(debug.LEVEL_SEVERE)
def clickCurrent(self, button=1):
"""Performs a mouse click on the current accessible."""
if (not self.lines) \
or (not self.lines[self.lineIndex].zones):
return
[string, x, y, width, height] = self.getCurrent(Context.CHAR)
try:
# We try to click to the left of center. This is to
# handle toolkits that will offset the caret position to
# the right if you click dead on center of a character.
#
x = max(x, x + (width / 2) - 1)
eventsynthesizer.clickPoint(x,
y + height / 2,
button)
except:
debug.printException(debug.LEVEL_SEVERE)
def getCurrentAccessible(self):
"""Returns the accessible associated with the current locus of
interest.
"""
if (not self.lines) \
or (not self.lines[self.lineIndex].zones):
return [None, -1, -1, -1, -1]
zone = self.lines[self.lineIndex].zones[self.zoneIndex]
return zone.accessible
    def getCurrent(self, flatReviewType=ZONE):
        """Gets the string, offset, and extent information for the
        current locus of interest.

        Arguments:
        - flatReviewType: one of ZONE, CHAR, WORD, LINE

        Returns: [string, x, y, width, height]; all -1/None when there
        is no current line or zone.
        """
        if (not self.lines) \
           or (not self.lines[self.lineIndex].zones):
            return [None, -1, -1, -1, -1]

        zone = self.lines[self.lineIndex].zones[self.zoneIndex]

        if flatReviewType == Context.ZONE:
            return [zone.string,
                    zone.x,
                    zone.y,
                    zone.width,
                    zone.height]
        elif flatReviewType == Context.CHAR:
            if isinstance(zone, TextZone):
                words = zone.words
                if words:
                    chars = zone.words[self.wordIndex].chars
                    if chars:
                        char = chars[self.charIndex]
                        return [char.string,
                                char.x,
                                char.y,
                                char.width,
                                char.height]
                    else:
                        # No per-character data: fall back to the whole
                        # word's extents.
                        word = words[self.wordIndex]
                        return [word.string,
                                word.x,
                                word.y,
                                word.width,
                                word.height]
            # Non-text zones (and text zones with no words) degrade to
            # the zone's own extents.
            return self.getCurrent(Context.ZONE)
        elif flatReviewType == Context.WORD:
            if isinstance(zone, TextZone):
                words = zone.words
                if words:
                    word = words[self.wordIndex]
                    return [word.string,
                            word.x,
                            word.y,
                            word.width,
                            word.height]
            return self.getCurrent(Context.ZONE)
        elif flatReviewType == Context.LINE:
            line = self.lines[self.lineIndex]
            return [line.string,
                    line.x,
                    line.y,
                    line.width,
                    line.height]
        else:
            raise Exception("Invalid type: %d" % flatReviewType)
    def getCurrentBrailleRegions(self):
        """Gets the braille for the entire current line.

        Returns [regions, regionWithFocus]; both elements are None when
        there is no current line or zone.  regionWithFocus has its
        cursorOffset positioned at the current character.
        """
        if (not self.lines) \
           or (not self.lines[self.lineIndex].zones):
            return [None, None]

        regionWithFocus = None
        line = self.lines[self.lineIndex]
        regions = line.getBrailleRegions()

        # Now find the current region and the current character offset
        # into that region.
        #
        for zone in line.zones:
            if zone.index == self.zoneIndex:
                regionWithFocus = zone.brailleRegion
                regionWithFocus.cursorOffset = 0
                if zone.words:
                    # Sum the lengths of the words preceding the
                    # current one to locate the cursor in the region.
                    for wordIndex in range(0, self.wordIndex):
                        regionWithFocus.cursorOffset += \
                            len(zone.words[wordIndex].string)
                regionWithFocus.cursorOffset += self.charIndex
                regionWithFocus.repositionCursor()
                break

        return [regions, regionWithFocus]
def goBegin(self, flatReviewType=WINDOW):
"""Moves this context's locus of interest to the first char
of the first relevant zone.
Arguments:
- flatReviewType: one of ZONE, LINE or WINDOW
Returns True if the locus of interest actually changed.
"""
if (flatReviewType == Context.LINE) or (flatReviewType == Context.ZONE):
lineIndex = self.lineIndex
elif flatReviewType == Context.WINDOW:
lineIndex = 0
else:
raise Exception("Invalid type: %d" % flatReviewType)
if flatReviewType == Context.ZONE:
zoneIndex = self.zoneIndex
else:
zoneIndex = 0
wordIndex = 0
charIndex = 0
moved = (self.lineIndex != lineIndex) \
or (self.zoneIndex != zoneIndex) \
or (self.wordIndex != wordIndex) \
or (self.charIndex != charIndex) \
if moved:
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
return moved
def goEnd(self, flatReviewType=WINDOW):
"""Moves this context's locus of interest to the last char
of the last relevant zone.
Arguments:
- flatReviewType: one of ZONE, LINE, or WINDOW
Returns True if the locus of interest actually changed.
"""
if (flatReviewType == Context.LINE) or (flatReviewType == Context.ZONE):
lineIndex = self.lineIndex
elif flatReviewType == Context.WINDOW:
lineIndex = len(self.lines) - 1
else:
raise Exception("Invalid type: %d" % flatReviewType)
if flatReviewType == Context.ZONE:
zoneIndex = self.zoneIndex
else:
zoneIndex = len(self.lines[lineIndex].zones) - 1
zone = self.lines[lineIndex].zones[zoneIndex]
if zone.words:
wordIndex = len(zone.words) - 1
chars = zone.words[wordIndex].chars
if chars:
charIndex = len(chars) - 1
else:
charIndex = 0
else:
wordIndex = 0
charIndex = 0
moved = (self.lineIndex != lineIndex) \
or (self.zoneIndex != zoneIndex) \
or (self.wordIndex != wordIndex) \
or (self.charIndex != charIndex) \
if moved:
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
return moved
    def goPrevious(self, flatReviewType=ZONE,
                   wrap=WRAP_ALL, omitWhitespace=True):
        """Moves this context's locus of interest to the first char
        of the previous type.

        Arguments:
        - flatReviewType: one of ZONE, CHAR, WORD, LINE
        - wrap: if True, will cross boundaries, including top and
          bottom; if False, will stop on boundaries.
        - omitWhitespace: if True, WORD moves skip over whitespace-only
          words, continuing the search into neighboring zones.

        Returns True if the locus of interest actually changed.
        """
        if not self.lines:
            debug.println(debug.LEVEL_FINE, 'goPrevious(): no lines in context')
            return False

        moved = False

        if flatReviewType == Context.ZONE:
            if self.zoneIndex > 0:
                self.zoneIndex -= 1
                self.wordIndex = 0
                self.charIndex = 0
                moved = True
            elif wrap & Context.WRAP_LINE:
                if self.lineIndex > 0:
                    self.lineIndex -= 1
                    self.zoneIndex = len(self.lines[self.lineIndex].zones) - 1
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif wrap & Context.WRAP_TOP_BOTTOM:
                    # Wrap from the first zone of the first line around
                    # to the last zone of the last line.
                    self.lineIndex = len(self.lines) - 1
                    self.zoneIndex = len(self.lines[self.lineIndex].zones) - 1
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        elif flatReviewType == Context.CHAR:
            if self.charIndex > 0:
                self.charIndex -= 1
                moved = True
            else:
                # At the start of a word: step back a word (without
                # whitespace skipping, so every character is visited)
                # and land on that word's last character.
                moved = self.goPrevious(Context.WORD, wrap, False)
                if moved:
                    zone = self.lines[self.lineIndex].zones[self.zoneIndex]
                    if zone.words:
                        chars = zone.words[self.wordIndex].chars
                        if chars:
                            self.charIndex = len(chars) - 1
        elif flatReviewType == Context.WORD:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            accessible = zone.accessible
            # Save the current position so we can back out if the
            # search below dead-ends.
            lineIndex = self.lineIndex
            zoneIndex = self.zoneIndex
            wordIndex = self.wordIndex
            charIndex = self.charIndex
            if self.wordIndex > 0:
                self.wordIndex -= 1
                self.charIndex = 0
                moved = True
            else:
                moved = self.goPrevious(Context.ZONE, wrap)
                if moved:
                    zone = self.lines[self.lineIndex].zones[self.zoneIndex]
                    if zone.words:
                        self.wordIndex = len(zone.words) - 1

            # If we landed on a whitespace word or something with no words,
            # we might need to move some more.
            #
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if omitWhitespace \
               and moved \
               and ((len(zone.string) == 0) \
                    or (len(zone.words) \
                        and zone.words[self.wordIndex].string.isspace())):
                hasMoreText = False
                if self.lineIndex > 0 and isinstance(zone, TextZone):
                    # The same accessible's text can continue on the
                    # previous line (wrapped text).
                    prevZone = self.lines[self.lineIndex - 1].zones[-1]
                    if prevZone.accessible == zone.accessible:
                        hasMoreText = True

                # If we're on whitespace in the same zone, then let's
                # try to move on.  If not, we've definitely moved
                # across accessibles.  If that's the case, let's try
                # to find the first 'real' word in the accessible.
                # If we cannot, then we're just stuck on an accessible
                # with no words and we should do our best to announce
                # this to the user (e.g., "whitespace" or "blank").
                #
                if zone.accessible == accessible or hasMoreText:
                    moved = self.goPrevious(Context.WORD, wrap)
                else:
                    wordIndex = self.wordIndex - 1
                    while wordIndex >= 0:
                        if (not zone.words[wordIndex].string) \
                            or not len(zone.words[wordIndex].string) \
                            or zone.words[wordIndex].string.isspace():
                            wordIndex -= 1
                        else:
                            break
                    if wordIndex >= 0:
                        self.wordIndex = wordIndex
            if not moved:
                # Dead end: restore the saved indices (note that
                # wordIndex may have been advanced by the scan above).
                self.lineIndex = lineIndex
                self.zoneIndex = zoneIndex
                self.wordIndex = wordIndex
                self.charIndex = charIndex
        elif flatReviewType == Context.LINE:
            if wrap & Context.WRAP_LINE:
                if self.lineIndex > 0:
                    self.lineIndex -= 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif (wrap & Context.WRAP_TOP_BOTTOM) \
                     and (len(self.lines) != 1):
                    self.lineIndex = len(self.lines) - 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        else:
            raise Exception("Invalid type: %d" % flatReviewType)

        if moved and (flatReviewType != Context.LINE):
            self.targetCharInfo = self.getCurrent(Context.CHAR)

        return moved
    def goNext(self, flatReviewType=ZONE, wrap=WRAP_ALL, omitWhitespace=True):
        """Moves this context's locus of interest to first char of
        the next type.

        Arguments:
        - flatReviewType: one of ZONE, CHAR, WORD, LINE
        - wrap: if True, will cross boundaries, including top and
          bottom; if False, will stop on boundaries.
        - omitWhitespace: if True, WORD moves skip over whitespace-only
          words, continuing the search into neighboring zones.

        Returns True if the locus of interest actually changed.
        """
        if not self.lines:
            debug.println(debug.LEVEL_FINE, 'goNext(): no lines in context')
            return False

        moved = False

        if flatReviewType == Context.ZONE:
            if self.zoneIndex < (len(self.lines[self.lineIndex].zones) - 1):
                self.zoneIndex += 1
                self.wordIndex = 0
                self.charIndex = 0
                moved = True
            elif wrap & Context.WRAP_LINE:
                if self.lineIndex < (len(self.lines) - 1):
                    self.lineIndex += 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif wrap & Context.WRAP_TOP_BOTTOM:
                    # Wrap from the last zone of the last line around
                    # to the first zone of the first line.
                    self.lineIndex = 0
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        elif flatReviewType == Context.CHAR:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if zone.words:
                chars = zone.words[self.wordIndex].chars
                if chars:
                    if self.charIndex < (len(chars) - 1):
                        self.charIndex += 1
                        moved = True
                    else:
                        # End of word: advance a word without whitespace
                        # skipping so every character is visited.
                        moved = self.goNext(Context.WORD, wrap, False)
                else:
                    moved = self.goNext(Context.WORD, wrap)
            else:
                moved = self.goNext(Context.ZONE, wrap)
        elif flatReviewType == Context.WORD:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            accessible = zone.accessible
            # Save the current position so we can back out if the
            # search below dead-ends.
            lineIndex = self.lineIndex
            zoneIndex = self.zoneIndex
            wordIndex = self.wordIndex
            charIndex = self.charIndex
            if zone.words:
                if self.wordIndex < (len(zone.words) - 1):
                    self.wordIndex += 1
                    self.charIndex = 0
                    moved = True
                else:
                    moved = self.goNext(Context.ZONE, wrap)
            else:
                moved = self.goNext(Context.ZONE, wrap)

            # If we landed on a whitespace word or something with no words,
            # we might need to move some more.
            #
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if omitWhitespace \
               and moved \
               and ((len(zone.string) == 0) \
                    or (len(zone.words) \
                        and zone.words[self.wordIndex].string.isspace())):

                # If we're on whitespace in the same zone, then let's
                # try to move on.  If not, we've definitely moved
                # across accessibles.  If that's the case, let's try
                # to find the first 'real' word in the accessible.
                # If we cannot, then we're just stuck on an accessible
                # with no words and we should do our best to announce
                # this to the user (e.g., "whitespace" or "blank").
                #
                if zone.accessible == accessible:
                    moved = self.goNext(Context.WORD, wrap)
                else:
                    wordIndex = self.wordIndex + 1
                    while wordIndex < len(zone.words):
                        if (not zone.words[wordIndex].string) \
                            or not len(zone.words[wordIndex].string) \
                            or zone.words[wordIndex].string.isspace():
                            wordIndex += 1
                        else:
                            break
                    if wordIndex < len(zone.words):
                        self.wordIndex = wordIndex
            if not moved:
                # Dead end: restore the saved indices (note that
                # wordIndex may have been advanced by the scan above).
                self.lineIndex = lineIndex
                self.zoneIndex = zoneIndex
                self.wordIndex = wordIndex
                self.charIndex = charIndex
        elif flatReviewType == Context.LINE:
            if wrap & Context.WRAP_LINE:
                if self.lineIndex < (len(self.lines) - 1):
                    self.lineIndex += 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif (wrap & Context.WRAP_TOP_BOTTOM) \
                     and (self.lineIndex != 0):
                    self.lineIndex = 0
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        else:
            raise Exception("Invalid type: %d" % flatReviewType)

        if moved and (flatReviewType != Context.LINE):
            self.targetCharInfo = self.getCurrent(Context.CHAR)

        return moved
    def goAbove(self, flatReviewType=LINE, wrap=WRAP_ALL):
        """Moves this context's locus of interest to first char
        of the type that's closest to and above the current locus of
        interest.

        Arguments:
        - flatReviewType: one of CHAR or LINE
        - wrap: if True, will cross top/bottom boundaries; if False, will
          stop on top/bottom boundaries.

        Returns True if the locus of interest actually changed.
        """
        moved = False
        if flatReviewType == Context.CHAR:
            # We want to shoot for the closest character, which we've
            # saved away as self.targetCharInfo, which is the list
            # [string, x, y, width, height].
            #
            if not self.targetCharInfo:
                self.targetCharInfo = self.getCurrent(Context.CHAR)
            target = self.targetCharInfo

            [string, x, y, width, height] = target
            middleTargetX = x + (width / 2)
            moved = self.goPrevious(Context.LINE, wrap)
            if moved:
                # Walk right along the new line until a character
                # reaches the target's horizontal midpoint.
                while True:
                    [string, bx, by, bwidth, bheight] = \
                        self.getCurrent(Context.CHAR)
                    # NOTE(review): this compares against the *target*
                    # character's width rather than bwidth; confirm
                    # whether that bias is intentional.
                    if (bx + width) >= middleTargetX:
                        break
                    elif not self.goNext(Context.CHAR, Context.WRAP_NONE):
                        break

            # Moving around might have reset the current targetCharInfo,
            # so we reset it to our saved value.
            #
            self.targetCharInfo = target
        elif flatReviewType == Context.LINE:
            return self.goPrevious(flatReviewType, wrap)
        else:
            raise Exception("Invalid type: %d" % flatReviewType)

        return moved
    def goBelow(self, flatReviewType=LINE, wrap=WRAP_ALL):
        """Moves this context's locus of interest to the first
        char of the type that's closest to and below the current
        locus of interest.

        Arguments:
        - flatReviewType: one of CHAR or LINE
        - wrap: if True, will cross top/bottom boundaries; if False, will
          stop on top/bottom boundaries.

        Returns True if the locus of interest actually changed.
        """
        moved = False
        if flatReviewType == Context.CHAR:
            # We want to shoot for the closest character, which we've
            # saved away as self.targetCharInfo, which is the list
            # [string, x, y, width, height].
            #
            if not self.targetCharInfo:
                self.targetCharInfo = self.getCurrent(Context.CHAR)
            target = self.targetCharInfo

            [string, x, y, width, height] = target
            middleTargetX = x + (width / 2)
            moved = self.goNext(Context.LINE, wrap)
            if moved:
                # Walk right along the new line until a character
                # reaches the target's horizontal midpoint.
                while True:
                    [string, bx, by, bwidth, bheight] = \
                        self.getCurrent(Context.CHAR)
                    # NOTE(review): this compares against the *target*
                    # character's width rather than bwidth; confirm
                    # whether that bias is intentional.
                    if (bx + width) >= middleTargetX:
                        break
                    elif not self.goNext(Context.CHAR, Context.WRAP_NONE):
                        break

            # Moving around might have reset the current targetCharInfo,
            # so we reset it to our saved value.
            #
            self.targetCharInfo = target
        elif flatReviewType == Context.LINE:
            moved = self.goNext(flatReviewType, wrap)
        else:
            raise Exception("Invalid type: %d" % flatReviewType)

        return moved
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/flat_review.py
|
Python
|
gpl-3.0
| 76,473
|
[
"ORCA"
] |
d23d6bd1eec573c2ed990fd8a9b696544183b6a47a02bb1f26c90f1e9abb0d70
|
import numpy as np
import scipy as sp
from scipy.optimize import minimize
from abc import ABC, abstractmethod
###################
#Gaussian processes
###################
class GP(ABC):
    '''
    General class to hold parameters common to both GPR and GPC.

    Attributes:
    - kernel: object providing get_cov_mat() and hyperparameter accessors
    - K, K_inv: covariance matrix over the training inputs and its inverse
    - x, y: training inputs and targets
    '''

    def __init__(self, kernel):
        self.kernel = kernel
        self.K = np.array([])
        self.K_inv = np.array([])
        self.x = np.array([])
        self.y = np.array([])

    def compute_K(self, x1, x2=None):
        '''
        Compute the covariance matrix over x1 (or the cross-covariance
        between x1 and x2) plus its inverse, and return both.
        '''
        # Compute the covariance matrix and make sure it is positive
        # definite before inverting.
        if x2 is None:
            K = self.kernel.get_cov_mat(x1)
        else:
            K = self.kernel.get_cov_mat(x1, x2)
        K = self.numeric_fix(K)
        # Compute the inverse of the covariance matrix.
        K_inv = np.linalg.inv(K)
        return K, K_inv

    def set_K(self):
        '''
        Recompute K, K_inv and set the attributes to the result.
        '''
        self.K, self.K_inv = self.compute_K(self.x)

    def set_K_all(self):
        '''
        Hook for subclasses (e.g. multiclass GPC) that maintain a
        block-diagonal covariance over all classes; no-op here.
        '''
        pass

    def positive_definite(self, K):
        '''
        Check whether a matrix is positive definite, i.e. whether every
        eigenvalue is strictly positive.  A matrix must be positive
        definite in order to compute the Cholesky decomposition.
        '''
        return all(eigval > 0 for eigval in np.linalg.eigvals(K))

    def numeric_fix(self, K):
        '''
        Add small multiples of the identity matrix to the covariance
        matrix until it becomes positive definite.  This helps the
        Cholesky decomposition succeed despite round-off error.
        '''
        I = np.matrix(np.eye(K.shape[0]))
        # Initial jitter magnitude.
        epsilon = 1e-7
        # Growth rate: iteration i adds (alpha**i)*epsilon*I, so the
        # cumulative jitter grows geometrically until K passes the test.
        alpha = 2
        # Cap on the number of attempts before giving up.
        maxIter = int(1e9)
        for i in range(0, maxIter, 1):
            if self.positive_definite(K):
                # Already (or now) positive definite - stop adding jitter.
                break
            K += (alpha ** i) * epsilon * I
        return K

    def optimize(self, method='SLSQP'):
        '''
        Optimize the kernel hyperparameters by minimizing the negative
        log marginal likelihood, starting from the current values.
        '''
        hparams0 = np.array(self.kernel.get_hyperparameters())
        hparams_bounds = self.kernel.get_hyperparameters_bounds()
        res = minimize(self.lml, hparams0, method=method,
                       bounds=hparams_bounds, tol=1e-6)
        # Update the covariance matrices for the training data (test
        # matrices are updated in the predict method).
        self.set_K()
        self.set_K_all()
        # The SciPy OptimizeResult is returned for inspection.
        return res

    def get_samples(self, m, K, n=1):
        '''
        Draw samples from a multivariate normal distribution N~(m, K).

        Arguments:
        - m: mean as a column vector (size x 1)
        - K: covariance matrix (size x size)
        - n: number of samples to draw

        Returns: a (size x n) array of samples.

        Raises: TypeError if K is not a square 2-D matrix.
        '''
        if (K.ndim != 2) or (K.shape[0] != K.shape[1]):
            errMsg = "The number of rows and columns of this matrix differ ({},{}).".format(K.shape[0], K.shape[1])
            raise TypeError(errMsg)
        # Ensure the covariance matrix is positive definite so the
        # Cholesky decomposition is well defined.
        K = self.numeric_fix(K)
        L = np.linalg.cholesky(K)
        # Draw all n standard-normal vectors at once.  Filling an
        # (n, size) array row by row consumes the RNG in the same order
        # as drawing each sample separately, so transposing reproduces
        # the previous column-by-column loop without Python-level work.
        v = np.random.normal(0, 1, (n, m.size)).T
        # Colour the samples: m + L*v is distributed N~(m, K).
        return m + np.dot(L, v)
############################
#Gaussian process regression
############################
class GPR(GP):
    '''
    General class for performing Gaussian process regression.
    '''

    def __init__(self, kernel):
        GP.__init__(self, kernel)

    def train(self, x, y):
        '''
        Store the training data and (re)compute the training covariance
        matrix and its inverse.
        '''
        self.x = x
        self.y = y
        self.set_K()

    def predict(self, x_star, returnCov=True):
        '''
        Predict the posterior mean and covariance (or variance) of the
        latent function at the test inputs x_star.

        Arguments:
        - x_star: test inputs
        - returnCov: if True return the full posterior covariance
          matrix, otherwise return only its diagonal (the variances)

        Returns: (mean, covariance) or (mean, variance)
        '''
        # Covariance between the test inputs and the training inputs.
        K_star = self.kernel.get_cov_mat(self.x, x_star)
        # Covariance between the test inputs and themselves.
        K_star_star = self.kernel.get_cov_mat(x_star)
        # Posterior mean: K* K^-1 y.
        y_star_mean = np.dot(np.dot(K_star, self.K_inv), self.y)
        # Posterior covariance: K** - K* K^-1 K*^T.
        y_star_cov = K_star_star - np.dot(np.dot(K_star, self.K_inv),
                                          np.transpose(K_star))
        if returnCov:
            return y_star_mean, y_star_cov
        # The variances are the diagonal of the posterior covariance.
        return y_star_mean, np.diag(y_star_cov)

    def sample_from_prior(self, x_star, n=1):
        '''
        Draw n samples from the GP prior at x_star: zero mean, kernel
        covariance, no training data involved.
        '''
        m = np.zeros(x_star.size)[:, np.newaxis]
        K = self.kernel.get_cov_mat(x_star)
        return self.get_samples(m, K, n)

    def sample_from_posterior(self, x_star, n=1):
        '''
        Draw n samples from the GP posterior at x_star, conditioned on
        the training data.
        '''
        m, K = self.predict(x_star)
        # get_samples expects the mean as a column vector.
        m = m[:, np.newaxis]
        return self.get_samples(m, K, n)

    def lml(self, hparams=None):
        '''
        Negative log marginal likelihood of the training data under the
        current kernel (up to additive constants):

            y^T K^-1 y + log|K|

        Bug fixes: the previous version used det(K) instead of
        log det(K), which has a different minimizer, and relied on
        np.asscalar, which was removed in NumPy 1.23.  slogdet is used
        for numerical stability; K is positive definite after
        numeric_fix, so its determinant's sign is positive.
        '''
        if hparams is not None:
            # Reassign hyperparameters when called by the optimizer.
            self.kernel.set_hyperparameters(hparams)
        # The covariance matrix must be recomputed and inverted every
        # time the hyperparameters change.
        K, K_inv = self.compute_K(self.x)
        y = np.asarray(self.y).ravel()
        quad = float(np.dot(y, np.dot(np.asarray(K_inv), y)))
        _, logdet = np.linalg.slogdet(K)
        return quad + logdet
#################################
#Gaussian process classification
#################################
class GeneralGPC(GP):
    '''
    Shared machinery for Gaussian process classification; the binary and
    multiclass classifiers both inherit from this class.
    '''
    def __init__(self,kernel):
        GP.__init__(self,kernel)
        #Class count and per-class point count: -1 until train() runs.
        self.C = -1
        self.n = -1
        #Stacked inputs plus the all-class covariance and its inverse.
        self.x_all = np.array([])
        self.K_all = np.array([])
        self.K_all_inv = np.array([])
    def compute_K_all(self,K):
        '''
        Build the block-diagonal covariance over all C classes (one copy
        of K per class) together with its inverse.  A pair of empty
        arrays is returned when fewer than two classes are configured.
        '''
        if self.C <= 1:
            return np.array([]),np.array([])
        #C copies of K along the diagonal, then the usual numeric fix-up.
        stacked = sp.linalg.block_diag(*([K] * self.C))
        stacked = self.numeric_fix(stacked)
        return stacked, np.linalg.inv(stacked)
    def set_K_all(self):
        '''Cache the all-class covariance matrix and its inverse.'''
        self.K_all,self.K_all_inv = self.compute_K_all(self.K)
    def train(self,x,y,C=1):
        '''
        Store the training data and precompute the covariance matrices.

        x holds the inputs repeated once per class; y the stacked targets.
        '''
        self.C = C
        self.n = int(y.size/C)
        #x repeats the per-class inputs C times, so keep one copy as well.
        self.x = x[0:self.n]
        self.x_all = x
        self.y = y
        #Single-class covariance, then its C-class block-diagonal version.
        self.set_K()
        self.set_K_all()
    @abstractmethod
    def f_new(self):
        '''
        Single Newton update of the latent function.  Subclasses must
        implement this since newton_method depends on it.
        '''
        pass
    def newton_method(self,f_hat_guess,y,K_inv,tol=1e-5):
        '''
        Iterate f_new() until successive latent-function estimates move
        by less than tol (Euclidean norm); return the converged f_hat.
        '''
        f_hat = f_hat_guess
        while True:
            updated = self.f_new(f_hat,y,K_inv)
            step = np.linalg.norm(updated - f_hat)
            f_hat = updated
            if step <= tol:
                break
        return f_hat
class GPCB(GeneralGPC):
    '''
    Gaussian process classification for the binary (two-class) case.
    '''
    def __init__(self,kernel):
        GeneralGPC.__init__(self,kernel)
    def train(self,x,y):
        '''
        Train the binary classifier.  The class count is fixed
        internally, so the caller cannot supply one.
        '''
        GeneralGPC.train(self,x,y)
    def predict(self,x_star,map_prediction=True):
        '''
        Predict class probabilities at the test inputs x_star.

        With map_prediction=True the sigmoid of the latent posterior
        mean is returned (MAP estimate); otherwise MacKay's
        approximation of the averaged predictive probability is used.
        '''
        #Find the mode of the latent posterior with Newton's method.
        f_hat = self.newton_method(np.zeros(self.y.size),self.y,self.K_inv)
        #Cross-covariance and test-covariance matrices.
        K_star = self.kernel.get_cov_mat(self.x,x_star)
        K_star_star = self.kernel.get_cov_mat(x_star)
        #Latent posterior mean at the test points.
        f_star_mean = np.dot( np.dot(K_star,self.K_inv) , f_hat )
        if map_prediction is True:
            #MAP prediction: squash the latent mean directly.
            return self.pi(f_star_mean)
        #Latent posterior (co)variance via (K + W^{-1})^{-1}.
        W = -np.diag(self.ddll2(f_hat))
        K_prime_inv = np.linalg.inv(self.K + np.linalg.inv(W))
        f_star_cov = K_star_star - np.dot( np.dot(K_star,K_prime_inv) , np.transpose(K_star) )
        f_star_var = np.diag(f_star_cov)
        #MacKay's approximation to the integral over the latent posterior.
        return self.pi(self.kappa(f_star_var)*f_star_mean)
    def kappa(self,f_star_var):
        '''MacKay's variance correction factor for the predictive mean.'''
        return np.sqrt( (1+np.pi*f_star_var/8)**(-1) )
    def pi(self,f):
        '''Logistic sigmoid of the latent function.'''
        return 1.0/(1.0+np.exp(-f))
    def dll2(self,f,y):
        '''First derivative of the log likelihood w.r.t. the latent f.'''
        return (y+1)/2.0 - self.pi(f)
    def ddll2(self,f):
        '''Second derivative of the log likelihood w.r.t. the latent f.'''
        sigma = self.pi(f)
        return -sigma*(1.0-sigma)
    def f_new(self,f,y,K_inv):
        '''One Newton step: f <- (K^{-1}+W)^{-1} (W f + d log p(y|f)).'''
        W = -np.diag(self.ddll2(f))
        rhs = np.matmul(W,f) + self.dll2(f,y)
        return np.dot(np.linalg.inv(K_inv+W), rhs)
class GPC(GeneralGPC):
    '''
    Gaussian process classification (multiclass case).

    Uses the Laplace approximation: the mode of the latent posterior is
    found with Newton's method, and predictive probabilities are
    obtained by Monte-Carlo averaging the softmax over samples drawn
    from the Gaussian approximation of the latent posterior.
    '''
    def __init__(self,kernel):
        GeneralGPC.__init__(self,kernel)
    def predict(self,x_star,class_number=None):
        '''
        Estimate class probabilities at the test inputs x_star.

        Returns the stacked probabilities for all C classes, or only the
        block for `class_number` (1-based) when it is given.
        '''
        n_star = len(x_star)
        #Obtain the mode of the latent posterior over all classes.
        f_hat = np.zeros(self.y.size)
        f_hat = self.newton_method(f_hat,self.y,self.K_all_inv)
        #Covariance between test and training inputs / test with itself.
        K_star = self.kernel.get_cov_mat(self.x,x_star)
        K_star_star = self.kernel.get_cov_mat(x_star)
        #Expand both to block-diagonal form, one block per class.
        #Note that Q_star here is the transpose of Q_star in Rasmussen.
        Q_star = K_star
        K_star_star_all = K_star_star
        for i in range(1,self.C,1):
            Q_star = sp.linalg.block_diag(Q_star,K_star)
            K_star_star_all = sp.linalg.block_diag(K_star_star_all,K_star_star)
        #Softmax and the Hessian-related matrix W at the posterior mode.
        pi_hat = self.softmax(f_hat)
        W = self.compute_W(pi_hat)
        W = self.numeric_fix(W)
        #(K + W^{-1})^{-1} appears in the Laplace predictive covariance.
        K_prime = self.K_all + np.linalg.inv(W)
        K_prime = self.numeric_fix(K_prime)
        K_prime_inv = np.linalg.inv(K_prime)
        #Latent predictive mean and covariance for all classes.
        f_star_mean_all = np.dot(Q_star,self.y-pi_hat)
        f_star_cov = K_star_star_all - np.dot( np.dot(Q_star,K_prime_inv) , np.transpose(Q_star) )
        #Monte-Carlo estimate of E[softmax(f_star)] under N(mean,cov).
        n_samples = 100
        samples = self.get_samples(f_star_mean_all[:,np.newaxis],f_star_cov,n_samples)
        #Initialize with the softmax of the first sample, then average.
        pi_star_mean = self.softmax(samples[:,0],n_points=n_star)
        for i in range(1,n_samples,1):
            pi_star_mean += self.softmax(samples[:,i],n_points=n_star)
        pi_star_mean /= n_samples
        if class_number is None:
            return pi_star_mean
        else:
            #Return only the requested (1-based) class's block.
            index_shift = (class_number-1)*n_star
            return pi_star_mean[index_shift:index_shift+n_star]
    def evaluate(self,pi_star_mean,test_data):
        '''
        Print "correct/total" for predictions against labelled data.

        test_data is a sequence of (input, label) pairs; the predicted
        class is the argmax over the C per-class probabilities.
        '''
        n_test = len(test_data)
        pi_star_mean = np.reshape(pi_star_mean,(self.C,n_test))
        test_results = []
        for i in range(0,n_test,1):
            res = pi_star_mean[:,i]
            test_results.append( (np.argmax(res), test_data[i][1]) )
        n_correct = sum(int(x == y) for (x, y) in test_results)
        print("{0}/{1}".format(n_correct,len(test_data)))
    def softmax(self,f,class_number=None,n_points=None):
        '''
        Softmax over classes for the stacked latent vector f.

        f is laid out class-by-class (C blocks of n points each).  When
        class_number (1-based) is given, only that class's block of
        probabilities is returned.
        TODO: vectorize; the per-element loop is slow.
        '''
        if n_points is None:
            n = self.n
        else:
            n = n_points
        f_m = np.reshape(f,(self.C,n))
        pi = np.array([])
        for i in range(0,f.size,1):
            index = (i)%(n) #index of the i-th data point
            f_i = f_m[:,index] #latent values for that point across all C classes
            num = np.exp(f[i])
            den = np.sum( np.exp(f_i) )
            pi = np.append(pi,num/den)
        if class_number is None or class_number<1:
            return pi
        else:
            index_shift = (class_number-1)*n
            return pi[index_shift:index_shift+n]
    def Pi(self,pi):
        '''Stack C diagonal matrices (one per class) into the (Cn x n) Pi matrix.'''
        pi = np.reshape(pi,(self.C,self.n))
        Pi = np.diag(pi[0,:]) #first class initializes the stack
        for i in range(1,pi.shape[0],1):
            Pi = np.vstack((Pi,np.diag(pi[i,:])))
        return Pi
    def PiPiT(self,pi):
        '''Return Pi Pi^T for the stacked softmax probabilities.'''
        BigPi = self.Pi(pi)
        return np.dot(BigPi,np.transpose(BigPi))
    def compute_W(self,pi):
        '''W = diag(pi) - Pi Pi^T, the negative Hessian of the log likelihood.'''
        BigPiPiT = self.PiPiT(pi)
        return np.diag(pi)-BigPiPiT
    def f_new(self,f,y,K_inv):
        '''One Newton step for the multiclass latent posterior mode.'''
        pi = self.softmax(f)
        W = self.compute_W(pi)
        term1 = np.linalg.inv( (K_inv+W) )
        term2 = np.dot(W,f) + y - pi
        return np.dot(term1,term2)
    def lml(self,hparams=None):
        '''
        Negative log marginal likelihood (Laplace approximation).

        If hparams is given, the kernel hyperparameters are reassigned
        first, and all covariance matrices are recomputed for them.
        '''
        #Check to see if an array of hyperparameters is passed.
        if hparams is not None:
            #Reassign hyperparameters if an array of hyperparameters is passed.
            self.kernel.set_hyperparameters(hparams)
        #Covariance matrices must be recomputed and inverted every time!
        K,K_inv = self.compute_K(self.x,self.x)
        K_all,K_all_inv = self.compute_K_all(K)
        f_hat = np.zeros(self.y.size)
        #BUG FIX: pass the freshly computed K_all_inv; the original used
        #the cached self.K_all_inv, which ignores any hyperparameters
        #reassigned above, so the mode was found under stale kernels.
        f_hat = self.newton_method(f_hat,self.y,K_all_inv)
        pi_hat = self.softmax(f_hat)
        W = self.compute_W(pi_hat)
        W = self.numeric_fix(W)
        #Fit term and log-likelihood of the targets at the mode.
        term1 = -0.5*np.dot( np.dot(f_hat,K_all_inv),f_hat ) + np.dot(self.y,f_hat)
        term2 = 0
        f_hat_m = np.reshape(f_hat,(self.C,self.n))
        for i in range(0,self.n,1):
            f_i = f_hat_m[:,i]
            term2 += -np.log( np.sum( np.exp(f_i) ) )
        #Complexity penalty -0.5*log|I + W^{1/2} K W^{1/2}|.
        tmpterm1 = np.dot(sp.linalg.sqrtm(W),K_all)
        tmpterm2 = np.dot( tmpterm1 , sp.linalg.sqrtm(W) )
        term3 = -0.5*np.log(np.linalg.det(np.eye(self.C*self.n)+tmpterm2))
        return -(term1 + term2 + term3)
|
mark-r-anderson/GaussianProcesses
|
GPs/GP.py
|
Python
|
mit
| 18,385
|
[
"Gaussian"
] |
bff0afc6adec19fb26dda44a48c878951bd2cd1d91be275b55ffe78f300d8ac1
|
import pandas as pd
import nmrpystar
import mdtraj as md
# Compare predicted NMR chemical shifts from two force-field trajectory sets
# against experimental shifts (BMRB entry 16664), reporting the ratio of
# normalized RMS errors between the two force fields.
# NOTE(review): all file paths are hard-coded relative paths -- this script
# assumes the trajectories, topology and 16664.str are in the working directory.
stride = 100
# Subsample each trajectory set to every `stride`-th frame before prediction.
t0 = md.load(["./Trajectories_ff99sbnmr/1am7_%d.dcd" % i for i in range(10)], top="./1am7_fixed.pdb")[::stride]
t1 = md.load(["./Trajectories/1am7_%d.dcd" % i for i in range(15)], top="./1am7_fixed.pdb")[::stride]
# Alternative shift predictors (ShiftX2, SPARTA+) left disabled; PPM is used.
#full_prediction0 = md.nmr.chemical_shifts_shiftx2(t0)
#full_prediction1 = md.nmr.chemical_shifts_shiftx2(t1)
#full_prediction0 = md.nmr.chemical_shifts_spartaplus(t0)
#full_prediction1 = md.nmr.chemical_shifts_spartaplus(t1)
full_prediction0 = md.nmr.chemical_shifts_ppm(t0)
full_prediction1 = md.nmr.chemical_shifts_ppm(t1)
# Parse the experimental chemical shifts from the NMR-STAR file.
parsed = nmrpystar.parse(open("./16664.str").read())
print(parsed.status)
# Pull the assigned chemical-shift loop into a DataFrame, keeping only the
# residue number, atom name and shift value columns.
q = parsed.value.saves["assigned_chem_shift_list_1"].loops[1]
x = pd.DataFrame(q.rows, columns=q.keys)
x = x[["Atom_chem_shift.Seq_ID", "Atom_chem_shift.Atom_ID", "Atom_chem_shift.Val"]]
x.rename(columns={"Atom_chem_shift.Seq_ID":"resSeq", "Atom_chem_shift.Atom_ID":"name", "Atom_chem_shift.Val":"value"}, inplace=True)
# Need to make dtypes match to do eventual comparison.
x["resSeq"] = x["resSeq"].astype('int')
x["value"] = x["value"].astype('float')
# Index experimental shifts by (residue, atom name) for alignment with predictions.
expt = x.set_index(["resSeq", "name"]).value
prediction0 = full_prediction0.mean(1)  # Average over time dimensions
prediction0.name = "value"
prediction1 = full_prediction1.mean(1)  # Average over time dimensions
prediction1.name = "value"
# Per-nucleus reference uncertainties used to normalize the RMS errors.
# NOTE(review): presumably the predictor's published per-atom RMS errors --
# confirm against the PPM/ShiftX2 papers before relying on the scale.
sigma = pd.Series({"C":0.8699, "CA":0.7743, "H":0.3783, "HA":0.1967, "HA2":0.1967, "HA3":0.1967, "N":2.0862})
#sigma = pd.Series({"C":0.8699, "CA":0.7743, "H":0.3783, "HA":0.1967, "N":2.0862})
sigma.name = "value"
# Residuals (experiment - prediction); dropna keeps only shifts present in both.
z0 = ((expt - prediction0)).dropna()
z1 = ((expt - prediction1)).dropna()
# RMS error per atom name, then normalized by the reference uncertainties.
rms0 = (z0 ** 2.).reset_index().groupby("name").value.mean() ** 0.5
rms1 = (z1 ** 2.).reset_index().groupby("name").value.mean() ** 0.5
rms0 = rms0 / sigma
rms1 = rms1 / sigma
# Final result: ratio of normalized RMS errors (force field 0 vs force field 1).
rms0 / rms1
|
choderalab/open-forcefield-group
|
nmr/code/compare_shifts_compare_forcefields.py
|
Python
|
gpl-2.0
| 1,878
|
[
"MDTraj"
] |
5dff9a606c471b402c8c0b5ca1aee3aef5a1e864a2af5eb8b1282e459281160b
|
# Copyright 2007-2010 by Peter Cock. All rights reserved.
# Revisions copyright 2010 by Uri Laserson. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This code is NOT intended for direct use. It provides a basic scanner
# (for use with a event consumer such as Bio.GenBank._FeatureConsumer)
# to parse a GenBank or EMBL file (with their shared INSDC feature table).
#
# It is used by Bio.GenBank to parse GenBank files
# It is also used by Bio.SeqIO to parse GenBank and EMBL files
#
# Feature Table Documentation:
# http://www.insdc.org/files/feature_table.html
# http://www.ncbi.nlm.nih.gov/projects/collab/FT/index.html
# ftp://ftp.ncbi.nih.gov/genbank/docs/
#
# 17-MAR-2009: added wgs, wgs_scafld for GenBank whole genome shotgun master records.
# These are GenBank files that summarize the content of a project, and provide lists of
# scaffold and contig files in the project. These will be in annotations['wgs'] and
# annotations['wgs_scafld']. These GenBank files do not have sequences. See
# http://groups.google.com/group/bionet.molbio.genbank/browse_thread/thread/51fb88bf39e7dc36
# http://is.gd/nNgk
# for more details of this format, and an example.
# Added by Ying Huang & Iddo Friedberg
import warnings
import os
import re
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_alphabet, generic_protein
class InsdcScanner(object):
    """Basic functions for breaking up a GenBank/EMBL file into sub sections.
    The International Nucleotide Sequence Database Collaboration (INSDC)
    between the DDBJ, EMBL, and GenBank. These organisations all use the
    same "Feature Table" layout in their plain text flat file formats.
    However, the header and sequence sections of an EMBL file are very
    different in layout to those produced by GenBank/DDBJ."""
    #These constants get redefined with sensible values in the sub classes:
    RECORD_START = "XXX" # "LOCUS " or "ID "
    HEADER_WIDTH = 3 # 12 or 5
    FEATURE_START_MARKERS = ["XXX***FEATURES***XXX"]
    FEATURE_END_MARKERS = ["XXX***END FEATURES***XXX"]
    FEATURE_QUALIFIER_INDENT = 0
    FEATURE_QUALIFIER_SPACER = ""
    SEQUENCE_HEADERS=["XXX"] #with right hand side spaces removed
    def __init__(self, debug=0):
        #Sanity-check that the subclass constants are mutually consistent.
        assert len(self.RECORD_START)==self.HEADER_WIDTH
        for marker in self.SEQUENCE_HEADERS:
            assert marker==marker.rstrip()
        assert len(self.FEATURE_QUALIFIER_SPACER)==self.FEATURE_QUALIFIER_INDENT
        self.debug = debug
        #self.line holds one line of look-ahead throughout the parse.
        self.line = None
    def set_handle(self, handle):
        #Attach the file handle and reset the look-ahead line.
        self.handle = handle
        self.line = ""
    def find_start(self):
        """Read in lines until find the ID/LOCUS line, which is returned.
        Any preamble (such as the header used by the NCBI on *.seq.gz archives)
        will we ignored."""
        while True:
            #Consume any stored look-ahead line first.
            if self.line:
                line = self.line
                self.line = ""
            else:
                line = self.handle.readline()
            if not line:
                if self.debug : print "End of file"
                return None
            if line[:self.HEADER_WIDTH]==self.RECORD_START:
                if self.debug > 1: print "Found the start of a record:\n" + line
                break
            line = line.rstrip()
            if line == "//":
                if self.debug > 1: print "Skipping // marking end of last record"
            elif line == "":
                if self.debug > 1: print "Skipping blank line before record"
            else:
                #Ignore any header before the first ID/LOCUS line.
                if self.debug > 1:
                    print "Skipping header line before record:\n" + line
        self.line = line
        return line
    def parse_header(self):
        """Return list of strings making up the header
        New line characters are removed.
        Assumes you have just read in the ID/LOCUS line.
        """
        assert self.line[:self.HEADER_WIDTH]==self.RECORD_START, \
               "Not at start of record"
        header_lines = []
        while True:
            line = self.handle.readline()
            if not line:
                raise ValueError("Premature end of line during sequence data")
            line = line.rstrip()
            #Stop at the start of the feature table or the sequence section.
            if line in self.FEATURE_START_MARKERS:
                if self.debug : print "Found header table"
                break
            #if line[:self.HEADER_WIDTH]==self.FEATURE_START_MARKER[:self.HEADER_WIDTH]:
            #    if self.debug : print "Found header table (?)"
            #    break
            if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
                if self.debug : print "Found start of sequence"
                break
            if line == "//":
                raise ValueError("Premature end of sequence data marker '//' found")
            header_lines.append(line)
        self.line = line
        return header_lines
    def parse_features(self, skip=False):
        """Return list of tuples for the features (if present)
        Each feature is returned as a tuple (key, location, qualifiers)
        where key and location are strings (e.g. "CDS" and
        "complement(join(490883..490885,1..879))") while qualifiers
        is a list of two string tuples (feature qualifier keys and values).
        Assumes you have already read to the start of the features table.
        """
        if self.line.rstrip() not in self.FEATURE_START_MARKERS:
            if self.debug : print "Didn't find any feature table"
            return []
        while self.line.rstrip() in self.FEATURE_START_MARKERS:
            self.line = self.handle.readline()
        features = []
        line = self.line
        while True:
            if not line:
                raise ValueError("Premature end of line during features table")
            if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
                if self.debug : print "Found start of sequence"
                break
            line = line.rstrip()
            if line == "//":
                raise ValueError("Premature end of features table, marker '//' found")
            if line in self.FEATURE_END_MARKERS:
                if self.debug : print "Found end of features"
                line = self.handle.readline()
                break
            if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "":
                #This is an empty feature line between qualifiers. Empty
                #feature lines within qualifiers are handled below (ignored).
                line = self.handle.readline()
                continue
            if skip:
                #Fast path: consume the feature's lines without recording them.
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER:
                    line = self.handle.readline()
            else:
                #Build up a list of the lines making up this feature:
                if line[self.FEATURE_QUALIFIER_INDENT]!=" " \
                and " " in line[self.FEATURE_QUALIFIER_INDENT:]:
                    #The feature table design enforces a length limit on the feature keys.
                    #Some third party files (e.g. IGMT's EMBL like files) solve this by
                    #over indenting the location and qualifiers.
                    feature_key, line = line[2:].strip().split(None,1)
                    feature_lines = [line]
                    warnings.warn("Overindented %s feature?" % feature_key)
                else:
                    feature_key = line[2:self.FEATURE_QUALIFIER_INDENT].strip()
                    feature_lines = [line[self.FEATURE_QUALIFIER_INDENT:]]
                line = self.handle.readline()
                while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \
                or line.rstrip() == "" : # cope with blank lines in the midst of a feature
                    #Use strip to remove any harmless trailing white space AND and leading
                    #white space (e.g. out of spec files with too much intentation)
                    feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip())
                    line = self.handle.readline()
                features.append(self.parse_feature(feature_key, feature_lines))
        self.line = line
        return features
    def parse_feature(self, feature_key, lines):
        """Expects a feature as a list of strings, returns a tuple (key, location, qualifiers)
        For example given this GenBank feature:
             CDS             complement(join(490883..490885,1..879))
                             /locus_tag="NEQ001"
                             /note="conserved hypothetical [Methanococcus jannaschii];
                             COG1583:Uncharacterized ACR; IPR001472:Bipartite nuclear
                             localization signal; IPR002743: Protein of unknown
                             function DUF57"
                             /codon_start=1
                             /transl_table=11
                             /product="hypothetical protein"
                             /protein_id="NP_963295.1"
                             /db_xref="GI:41614797"
                             /db_xref="GeneID:2732620"
                             /translation="MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK
                             EKYFNFTLIPKKDIIENKRYYLIISSPDKRFIEVLHNKIKDLDIITIGLAQFQLRKTK
                             KFDPKLRFPWVTITPIVLREGKIVILKGDKYYKVFVKRLEELKKYNLIKKKEPILEEP
                             IEISLNQIKDGWKIIDVKDRYYDFRNKSFSAFSNWLRDLKEQSLRKYNNFCGKNFYFE
                             EAIFEGFTFYKTVSIRIRINRGEAVYIGTLWKELNVYRKLDKEEREFYKFLYDCGLGS
                             LNSMGFGFVNTKKNSAR"
        Then should give input key="CDS" and the rest of the data as a list of strings
        lines=["complement(join(490883..490885,1..879))", ..., "LNSMGFGFVNTKKNSAR"]
        where the leading spaces and trailing newlines have been removed.
        Returns tuple containing: (key as string, location string, qualifiers as list)
        as follows for this example:
        key = "CDS", string
        location = "complement(join(490883..490885,1..879))", string
        qualifiers = list of string tuples:
        [('locus_tag', '"NEQ001"'),
         ('note', '"conserved hypothetical [Methanococcus jannaschii];\nCOG1583:..."'),
         ('codon_start', '1'),
         ('transl_table', '11'),
         ('product', '"hypothetical protein"'),
         ('protein_id', '"NP_963295.1"'),
         ('db_xref', '"GI:41614797"'),
         ('db_xref', '"GeneID:2732620"'),
         ('translation', '"MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK\nEKYFNFT..."')]
        In the above example, the "note" and "translation" were edited for compactness,
        and they would contain multiple new line characters (displayed above as \n)
        If a qualifier is quoted (in this case, everything except codon_start and
        transl_table) then the quotes are NOT removed.
        Note that no whitespace is removed.
        """
        #Skip any blank lines
        iterator = iter(filter(None, lines))
        try:
            line = iterator.next()
            feature_location = line.strip()
            while feature_location[-1:]==",":
                #Multiline location, still more to come!
                line = iterator.next()
                feature_location += line.strip()
            qualifiers=[]
            for line in iterator:
                if line[0]=="/":
                    #New qualifier
                    i = line.find("=")
                    key = line[1:i] #does not work if i==-1
                    value = line[i+1:] #we ignore 'value' if i==-1
                    if i==-1:
                        #Qualifier with no key, e.g. /pseudo
                        key = line[1:]
                        qualifiers.append((key,None))
                    elif not value:
                        #ApE can output /note=
                        qualifiers.append((key,""))
                    elif value[0]=='"':
                        #Quoted...
                        if value[-1]!='"' or value!='"':
                            #No closing quote on the first line...
                            while value[-1] != '"':
                                value += "\n" + iterator.next()
                        else:
                            #One single line (quoted)
                            assert value == '"'
                        if self.debug : print "Quoted line %s:%s" % (key, value)
                        #DO NOT remove the quotes...
                        qualifiers.append((key,value))
                    else:
                        #Unquoted
                        #if debug : print "Unquoted line %s:%s" % (key,value)
                        qualifiers.append((key,value))
                else:
                    #Unquoted continuation
                    assert len(qualifiers) > 0
                    assert key==qualifiers[-1][0]
                    #if debug : print "Unquoted Cont %s:%s" % (key, line)
                    qualifiers[-1] = (key, qualifiers[-1][1] + "\n" + line)
            return (feature_key, feature_location, qualifiers)
        except StopIteration:
            #Bummer
            raise ValueError("Problem with '%s' feature:\n%s" \
                             % (feature_key, "\n".join(lines)))
    def parse_footer(self):
        """returns a tuple containing a list of any misc strings, and the sequence"""
        #This is a basic bit of code to scan and discard the sequence,
        #which was useful when developing the sub classes.
        if self.line in self.FEATURE_END_MARKERS:
            while self.line[:self.HEADER_WIDTH].rstrip() not in self.SEQUENCE_HEADERS:
                self.line = self.handle.readline()
                if not self.line:
                    raise ValueError("Premature end of file")
                self.line = self.line.rstrip()
        assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
               "Not at start of sequence"
        while True:
            line = self.handle.readline()
            if not line : raise ValueError("Premature end of line during sequence data")
            line = line.rstrip()
            if line == "//" : break
        self.line = line
        return ([],"") #Dummy values!
    def _feed_first_line(self, consumer, line):
        """Handle the LOCUS/ID line, passing data to the comsumer
        This should be implemented by the EMBL / GenBank specific subclass
        Used by the parse_records() and parse() methods.
        """
        pass
    def _feed_header_lines(self, consumer, lines):
        """Handle the header lines (list of strings), passing data to the comsumer
        This should be implemented by the EMBL / GenBank specific subclass
        Used by the parse_records() and parse() methods.
        """
        pass
    def _feed_feature_table(self, consumer, feature_tuples):
        """Handle the feature table (list of tuples), passing data to the comsumer
        Used by the parse_records() and parse() methods.
        """
        consumer.start_feature_table()
        for feature_key, location_string, qualifiers in feature_tuples:
            consumer.feature_key(feature_key)
            consumer.location(location_string)
            for q_key, q_value in qualifiers:
                consumer.feature_qualifier_name([q_key])
                if q_value is not None:
                    consumer.feature_qualifier_description(q_value.replace("\n"," "))
    def _feed_misc_lines(self, consumer, lines):
        """Handle any lines between features and sequence (list of strings), passing data to the consumer
        This should be implemented by the EMBL / GenBank specific subclass
        Used by the parse_records() and parse() methods.
        """
        pass
    def feed(self, handle, consumer, do_features=True):
        """Feed a set of data into the consumer.
        This method is intended for use with the "old" code in Bio.GenBank
        Arguments:
        handle - A handle with the information to parse.
        consumer - The consumer that should be informed of events.
        do_features - Boolean, should the features be parsed?
                      Skipping the features can be much faster.
        Return values:
        true  - Passed a record
        false - Did not find a record
        """
        #Should work with both EMBL and GenBank files provided the
        #equivalent Bio.GenBank._FeatureConsumer methods are called...
        self.set_handle(handle)
        if not self.find_start():
            #Could not find (another) record
            consumer.data=None
            return False
        #We use the above class methods to parse the file into a simplified format.
        #The first line, header lines and any misc lines after the features will be
        #dealt with by GenBank / EMBL specific derived classes.
        #First line and header:
        self._feed_first_line(consumer, self.line)
        self._feed_header_lines(consumer, self.parse_header())
        #Features (common to both EMBL and GenBank):
        if do_features:
            self._feed_feature_table(consumer, self.parse_features(skip=False))
        else:
            self.parse_features(skip=True) # ignore the data
        #Footer and sequence
        misc_lines, sequence_string = self.parse_footer()
        self._feed_misc_lines(consumer, misc_lines)
        consumer.sequence(sequence_string)
        #Calls to consumer.base_number() do nothing anyway
        consumer.record_end("//")
        assert self.line == "//"
        #And we are done
        return True
    def parse(self, handle, do_features=True):
        """Returns a SeqRecord (with SeqFeatures if do_features=True)
        See also the method parse_records() for use on multi-record files.
        """
        from Bio.GenBank import _FeatureConsumer
        from Bio.GenBank.utils import FeatureValueCleaner
        consumer = _FeatureConsumer(use_fuzziness = 1,
                    feature_cleaner = FeatureValueCleaner())
        if self.feed(handle, consumer, do_features):
            return consumer.data
        else:
            return None
    def parse_records(self, handle, do_features=True):
        """Returns a SeqRecord object iterator
        Each record (from the ID/LOCUS line to the // line) becomes a SeqRecord
        The SeqRecord objects include SeqFeatures if do_features=True
        This method is intended for use in Bio.SeqIO
        """
        #This is a generator function
        while True:
            record = self.parse(handle, do_features)
            if record is None : break
            assert record.id is not None
            assert record.name != "<unknown name>"
            assert record.description != "<unknown description>"
            yield record
    def parse_cds_features(self, handle,
                           alphabet=generic_protein,
                           tags2id=('protein_id','locus_tag','product')):
        """Returns SeqRecord object iterator
        Each CDS feature becomes a SeqRecord.
        alphabet - Used for any sequence found in a translation field.
        tags2id  - Tupple of three strings, the feature keys to use
                   for the record id, name and description,
        This method is intended for use in Bio.SeqIO
        """
        self.set_handle(handle)
        while self.find_start():
            #Got an EMBL or GenBank record...
            self.parse_header() # ignore header lines!
            feature_tuples = self.parse_features()
            #self.parse_footer() # ignore footer lines!
            while True:
                line = self.handle.readline()
                if not line : break
                if line[:2]=="//" : break
            self.line = line.rstrip()
            #Now go though those features...
            for key, location_string, qualifiers in feature_tuples:
                if key=="CDS":
                    #Create SeqRecord
                    #================
                    #SeqRecord objects cannot be created with annotations, they
                    #must be added afterwards.  So create an empty record and
                    #then populate it:
                    record = SeqRecord(seq=None)
                    annotations = record.annotations
                    #Should we add a location object to the annotations?
                    #I *think* that only makes sense for SeqFeatures with their
                    #sub features...
                    annotations['raw_location'] = location_string.replace(' ','')
                    for (qualifier_name, qualifier_data) in qualifiers:
                        if qualifier_data is not None \
                        and qualifier_data[0]=='"' and qualifier_data[-1]=='"':
                            #Remove quotes
                            qualifier_data = qualifier_data[1:-1]
                        #Append the data to the annotation qualifier...
                        if qualifier_name == "translation":
                            assert record.seq is None, "Multiple translations!"
                            record.seq = Seq(qualifier_data.replace("\n",""), alphabet)
                        elif qualifier_name == "db_xref":
                            #its a list, possibly empty.  Its safe to extend
                            record.dbxrefs.append(qualifier_data)
                        else:
                            if qualifier_data is not None:
                                qualifier_data = qualifier_data.replace("\n"," ").replace("  "," ")
                            try:
                                annotations[qualifier_name] += " " + qualifier_data
                            except KeyError:
                                #Not an addition to existing data, its the first bit
                                annotations[qualifier_name]= qualifier_data
                    #Fill in the ID, Name, Description
                    #=================================
                    try:
                        record.id = annotations[tags2id[0]]
                    except KeyError:
                        pass
                    try:
                        record.name = annotations[tags2id[1]]
                    except KeyError:
                        pass
                    try:
                        record.description = annotations[tags2id[2]]
                    except KeyError:
                        pass
                    yield record
class EmblScanner(InsdcScanner):
"""For extracting chunks of information in EMBL files"""
RECORD_START = "ID "
HEADER_WIDTH = 5
FEATURE_START_MARKERS = ["FH Key Location/Qualifiers","FH"]
FEATURE_END_MARKERS = ["XX"] #XX can also mark the end of many things!
FEATURE_QUALIFIER_INDENT = 21
FEATURE_QUALIFIER_SPACER = "FT" + " " * (FEATURE_QUALIFIER_INDENT-2)
SEQUENCE_HEADERS=["SQ", "CO"] #Remove trailing spaces
def parse_footer(self):
"""returns a tuple containing a list of any misc strings, and the sequence"""
assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
"Eh? '%s'" % self.line
#Note that the SQ line can be split into several lines...
misc_lines = []
while self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
misc_lines.append(self.line)
self.line = self.handle.readline()
if not self.line:
raise ValueError("Premature end of file")
self.line = self.line.rstrip()
assert self.line[:self.HEADER_WIDTH] == " " * self.HEADER_WIDTH \
or self.line.strip() == '//', repr(self.line)
seq_lines = []
line = self.line
while True:
if not line:
raise ValueError("Premature end of file in sequence data")
line = line.strip()
if not line:
raise ValueError("Blank line in sequence data")
if line=='//':
break
assert self.line[:self.HEADER_WIDTH] == " " * self.HEADER_WIDTH, \
repr(self.line)
#Remove tailing number now, remove spaces later
seq_lines.append(line.rsplit(None,1)[0])
line = self.handle.readline()
self.line = line
return (misc_lines, "".join(seq_lines).replace(" ", ""))
def _feed_first_line(self, consumer, line):
assert line[:self.HEADER_WIDTH].rstrip() == "ID"
if line[self.HEADER_WIDTH:].count(";") == 6:
#Looks like the semi colon separated style introduced in 2006
self._feed_first_line_new(consumer, line)
elif line[self.HEADER_WIDTH:].count(";") == 3:
#Looks like the pre 2006 style
self._feed_first_line_old(consumer, line)
else:
raise ValueError('Did not recognise the ID line layout:\n' + line)
def _feed_first_line_old(self, consumer, line):
#Expects an ID line in the style before 2006, e.g.
#ID SC10H5 standard; DNA; PRO; 4870 BP.
#ID BSUB9999 standard; circular DNA; PRO; 4214630 BP.
assert line[:self.HEADER_WIDTH].rstrip() == "ID"
fields = [line[self.HEADER_WIDTH:].split(None,1)[0]]
fields.extend(line[self.HEADER_WIDTH:].split(None,1)[1].split(";"))
fields = [entry.strip() for entry in fields]
"""
The tokens represent:
0. Primary accession number
(space sep)
1. ??? (e.g. standard)
(semi-colon)
2. Topology and/or Molecule type (e.g. 'circular DNA' or 'DNA')
3. Taxonomic division (e.g. 'PRO')
4. Sequence length (e.g. '4639675 BP.')
"""
consumer.locus(fields[0]) #Should we also call the accession consumer?
consumer.residue_type(fields[2])
consumer.data_file_division(fields[3])
self._feed_seq_length(consumer, fields[4])
def _feed_first_line_new(self, consumer, line):
    """Parse an EMBL ID line using the layout introduced in 2006 (PRIVATE).

    Example lines::

        ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
        ID   CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP.
    """
    assert line[:self.HEADER_WIDTH].rstrip() == "ID"
    fields = [part.strip() for part in line[self.HEADER_WIDTH:].strip().split(";")]
    assert len(fields) == 7
    #The fields represent:
    #   0. Primary accession number
    #   1. Sequence version number
    #   2. Topology: 'circular' or 'linear'
    #   3. Molecule type (e.g. 'genomic DNA')
    #   4. Data class (e.g. 'STD')
    #   5. Taxonomic division (e.g. 'PRO')
    #   6. Sequence length (e.g. '4639675 BP.')
    consumer.locus(fields[0])
    #Call the accession consumer now, to make sure we record
    #something as the record.id, in case there is no AC line
    consumer.accession(fields[0])
    #TODO - How to deal with the version field?  At the moment the consumer
    #will try and use this for the ID which isn't ideal for EMBL files.
    sv = fields[1].split()
    if len(sv) == 2 and sv[0] == "SV" and sv[1].isdigit():
        consumer.version_suffix(sv[1])
    #Based on how the old GenBank parser worked, merge these two:
    consumer.residue_type(" ".join(fields[2:4]))  #TODO - Store as two fields?
    #TODO - What should we do with the data class, fields[4]?
    consumer.data_file_division(fields[5])
    self._feed_seq_length(consumer, fields[6])
def _feed_seq_length(self, consumer, text):
length_parts = text.split()
assert len(length_parts) == 2
assert length_parts[1].upper() in ["BP", "BP.", "AA."]
consumer.size(length_parts[0])
def _feed_header_lines(self, consumer, lines):
    """Feed the EMBL header lines (everything before FH/FT) to the consumer (PRIVATE).

    Simple two-letter line types are dispatched via consumer_dict below;
    line types whose data must be reformatted to suit the GenBank based
    consumer (RN, RP, RT, RX, CC, DR, RA, PR) are handled individually.
    """
    EMBL_INDENT = self.HEADER_WIDTH
    EMBL_SPACER = " " * EMBL_INDENT
    #Maps an EMBL two-letter line code directly to a consumer method name:
    consumer_dict = {
        'AC' : 'accession',
        'SV' : 'version', # SV line removed in June 2006, now part of ID line
        'DE' : 'definition',
        #'RN' : 'reference_num',
        #'RC' : reference comment... TODO
        #'RP' : 'reference_bases',
        #'RX' : reference cross reference... DOI or Pubmed
        'RG' : 'consrtm', #optional consortium
        #'RA' : 'authors',
        #'RT' : 'title',
        'RL' : 'journal',
        'OS' : 'organism',
        'OC' : 'taxonomy',
        #'DR' : data reference
        'CC' : 'comment',
        #'XX' : splitter
    }
    #We have to handle the following specially:
    #RX (depending on reference type...)
    for line in lines:
        line_type = line[:EMBL_INDENT].strip()
        data = line[EMBL_INDENT:].strip()
        if line_type == 'XX':
            #XX lines are just spacers between sections; ignore them.
            pass
        elif line_type == 'RN':
            # Reformat reference numbers for the GenBank based consumer
            # e.g. '[1]' becomes '1'
            if data[0] == "[" and data[-1] == "]" : data = data[1:-1]
            consumer.reference_num(data)
        elif line_type == 'RP':
            # Reformat reference numbers for the GenBank based consumer
            # e.g. '1-4639675' becomes '(bases 1 to 4639675)'
            # and '160-550, 904-1055' becomes '(bases 160 to 550; 904 to 1055)'
            parts = [bases.replace("-"," to ").strip() for bases in data.split(",")]
            consumer.reference_bases("(bases %s)" % "; ".join(parts))
        elif line_type == 'RT':
            #Remove the enclosing quotes and trailing semi colon.
            #Note the title can be split over multiple lines.
            if data.startswith('"'):
                data = data[1:]
            if data.endswith('";'):
                data = data[:-2]
            consumer.title(data)
        elif line_type == 'RX':
            # EMBL support three reference types at the moment:
            # - PUBMED    PUBMED bibliographic database (NLM)
            # - DOI       Digital Object Identifier (International DOI Foundation)
            # - AGRICOLA  US National Agriculture Library (NAL) of the US Department
            #             of Agriculture (USDA)
            #
            # Format:
            # RX  resource_identifier; identifier.
            #
            # e.g.
            # RX   DOI; 10.1016/0024-3205(83)90010-3.
            # RX   PUBMED; 264242.
            #
            # Currently our reference object only supports PUBMED and MEDLINE
            # (as these were in GenBank files?).
            key, value = data.split(";",1)
            if value.endswith(".") : value = value[:-1]
            value = value.strip()
            if key == "PUBMED":
                consumer.pubmed_id(value)
            #TODO - Handle other reference types (here and in BioSQL bindings)
        elif line_type == 'CC':
            # Have to pass a list of strings for this one (not just a string)
            consumer.comment([data])
        elif line_type == 'DR':
            # Database Cross-reference, format:
            # DR   database_identifier; primary_identifier; secondary_identifier.
            #
            # e.g.
            # DR   MGI; 98599; Tcrb-V4.
            #
            # TODO - How should we store any secondary identifier?
            parts = data.rstrip(".").split(";")
            #Turn it into "database_identifier:primary_identifier" to
            #mimic the GenBank parser. e.g. "MGI:98599"
            consumer.dblink("%s:%s" % (parts[0].strip(),
                                       parts[1].strip()))
        elif line_type == 'RA':
            # Remove trailing ; at end of authors list
            consumer.authors(data.rstrip(";"))
        elif line_type == 'PR':
            # Remove trailing ; at end of the project reference
            # In GenBank files this corresponds to the old PROJECT
            # line which is being replaced with the DBLINK line.
            consumer.project(data.rstrip(";"))
        elif line_type in consumer_dict:
            #Its a semi-automatic entry!
            getattr(consumer, consumer_dict[line_type])(data)
        else:
            if self.debug:
                print "Ignoring EMBL header line:\n%s" % line
def _feed_misc_lines(self, consumer, lines):
    """Feed the EMBL lines between the feature table and the sequence (PRIVATE).

    Currently only CO (contig) lines are acted upon; any continuation
    CO lines are concatenated before being passed to the consumer.
    """
    #TODO - Should we do something with the information on the SQ line(s)?
    lines.append("")  #sentinel so line_iter.next() cannot raise mid-record
    line_iter = iter(lines)
    try:
        for line in line_iter:
            #NOTE - the CO prefix is the two letter code padded to five
            #characters ("CO" plus three spaces), matching the line[5:]
            #slices below.
            if line.startswith("CO   "):
                line = line[5:].strip()
                contig_location = line
                while True:
                    line = line_iter.next()
                    if not line:
                        break
                    elif line.startswith("CO   "):
                        #Don't need to preserve the whitespace here.
                        contig_location += line[5:].strip()
                    else:
                        raise ValueError('Expected CO (contig) continuation line, got:\n' + line)
                consumer.contig_location(contig_location)
        return
    except StopIteration:
        raise ValueError("Problem in misc lines before sequence")
class _ImgtScanner(EmblScanner):
"""For extracting chunks of information in IMGT (EMBL like) files (PRIVATE).
IMGT files are like EMBL files but in order to allow longer feature types
the features should be indented by 25 characters not 21 characters. In
practice the IMGT flat files tend to use either 21 or 25 characters, so we
must cope with both.
This is private to encourage use of Bio.SeqIO rather than Bio.GenBank.
"""
FEATURE_START_MARKERS = ["FH Key Location/Qualifiers",
"FH Key Location/Qualifiers (from EMBL)",
"FH Key Location/Qualifiers",
"FH"]
def parse_features(self, skip=False):
"""Return list of tuples for the features (if present)
Each feature is returned as a tuple (key, location, qualifiers)
where key and location are strings (e.g. "CDS" and
"complement(join(490883..490885,1..879))") while qualifiers
is a list of two string tuples (feature qualifier keys and values).
Assumes you have already read to the start of the features table.
"""
if self.line.rstrip() not in self.FEATURE_START_MARKERS:
if self.debug : print "Didn't find any feature table"
return []
while self.line.rstrip() in self.FEATURE_START_MARKERS:
self.line = self.handle.readline()
bad_position_re = re.compile(r'([0-9]+)>{1}')
features = []
line = self.line
while True:
if not line:
raise ValueError("Premature end of line during features table")
if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS:
if self.debug : print "Found start of sequence"
break
line = line.rstrip()
if line == "//":
raise ValueError("Premature end of features table, marker '//' found")
if line in self.FEATURE_END_MARKERS:
if self.debug : print "Found end of features"
line = self.handle.readline()
break
if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "":
#This is an empty feature line between qualifiers. Empty
#feature lines within qualifiers are handled below (ignored).
line = self.handle.readline()
continue
if skip:
line = self.handle.readline()
while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER:
line = self.handle.readline()
else:
assert line[:2] == "FT"
try:
feature_key, location_start = line[2:].strip().split()
except ValueError:
#e.g. "FT TRANSMEMBRANE-REGION2163..2240\n"
#Assume indent of 25 as per IMGT spec, with the location
#start in column 26 (one-based).
feature_key = line[2:25].strip()
location_start = line[25:].strip()
feature_lines = [location_start]
line = self.handle.readline()
while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \
or line.rstrip() == "" : # cope with blank lines in the midst of a feature
#Use strip to remove any harmless trailing white space AND and leading
#white space (copes with 21 or 26 indents and orther variants)
assert line[:2] == "FT"
feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip())
line = self.handle.readline()
feature_key, location, qualifiers = \
self.parse_feature(feature_key, feature_lines)
#Try to handle known problems with IMGT locations here:
if ">" in location:
#Nasty hack for common IMGT bug, should be >123 not 123>
#in a location string. At least here the meaning is clear,
#and since it is so common I don't want to issue a warning
#warnings.warn("Feature location %s is invalid, "
# "moving greater than sign before position"
# % location)
location = bad_position_re.sub(r'>\1',location)
features.append((feature_key, location, qualifiers))
self.line = line
return features
class GenBankScanner(InsdcScanner):
"""For extracting chunks of information in GenBank files"""
RECORD_START = "LOCUS "
HEADER_WIDTH = 12
FEATURE_START_MARKERS = ["FEATURES Location/Qualifiers","FEATURES"]
FEATURE_END_MARKERS = []
FEATURE_QUALIFIER_INDENT = 21
FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT
SEQUENCE_HEADERS=["CONTIG", "ORIGIN", "BASE COUNT", "WGS"] # trailing spaces removed
def parse_footer(self):
"""returns a tuple containing a list of any misc strings, and the sequence"""
assert self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS, \
"Eh? '%s'" % self.line
misc_lines = []
while self.line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS \
or self.line[:self.HEADER_WIDTH] == " "*self.HEADER_WIDTH \
or "WGS" == self.line[:3]:
misc_lines.append(self.line.rstrip())
self.line = self.handle.readline()
if not self.line:
raise ValueError("Premature end of file")
self.line = self.line
assert self.line[:self.HEADER_WIDTH].rstrip() not in self.SEQUENCE_HEADERS, \
"Eh? '%s'" % self.line
#Now just consume the sequence lines until reach the // marker
#or a CONTIG line
seq_lines = []
line = self.line
while True:
if not line:
raise ValueError("Premature end of file in sequence data")
line = line.rstrip()
if not line:
import warnings
warnings.warn("Blank line in sequence data")
line = self.handle.readline()
continue
if line=='//':
break
if line.find('CONTIG')==0:
break
if len(line) > 9 and line[9:10]!=' ':
raise ValueError("Sequence line mal-formed, '%s'" % line)
seq_lines.append(line[10:]) #remove spaces later
line = self.handle.readline()
self.line = line
#Seq("".join(seq_lines), self.alphabet)
return (misc_lines,"".join(seq_lines).replace(" ",""))
def _feed_first_line(self, consumer, line):
"""Scan over and parse GenBank LOCUS line (PRIVATE).
This must cope with several variants, primarily the old and new column
based standards from GenBank. Additionally EnsEMBL produces GenBank
files where the LOCUS line is space separated rather that following
the column based layout.
We also try to cope with GenBank like files with partial LOCUS lines.
"""
#####################################
# LOCUS line #
#####################################
GENBANK_INDENT = self.HEADER_WIDTH
GENBANK_SPACER = " "*GENBANK_INDENT
assert line[0:GENBANK_INDENT] == 'LOCUS ', \
'LOCUS line does not start correctly:\n' + line
#Have to break up the locus line, and handle the different bits of it.
#There are at least two different versions of the locus line...
if line[29:33] in [' bp ', ' aa ',' rc '] and line[55:62] == ' ':
#Old... note we insist on the 55:62 being empty to avoid trying
#to parse space separated LOCUS lines from Ensembl etc, see below.
#
# Positions Contents
# --------- --------
# 00:06 LOCUS
# 06:12 spaces
# 12:?? Locus name
# ??:?? space
# ??:29 Length of sequence, right-justified
# 29:33 space, bp, space
# 33:41 strand type
# 41:42 space
# 42:51 Blank (implies linear), linear or circular
# 51:52 space
# 52:55 The division code (e.g. BCT, VRL, INV)
# 55:62 space
# 62:73 Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
#
#assert line[29:33] in [' bp ', ' aa ',' rc '] , \
# 'LOCUS line does not contain size units at expected position:\n' + line
assert line[41:42] == ' ', \
'LOCUS line does not contain space at position 42:\n' + line
assert line[42:51].strip() in ['','linear','circular'], \
'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line
assert line[51:52] == ' ', \
'LOCUS line does not contain space at position 52:\n' + line
#assert line[55:62] == ' ', \
# 'LOCUS line does not contain spaces from position 56 to 62:\n' + line
if line[62:73].strip():
assert line[64:65] == '-', \
'LOCUS line does not contain - at position 65 in date:\n' + line
assert line[68:69] == '-', \
'LOCUS line does not contain - at position 69 in date:\n' + line
name_and_length_str = line[GENBANK_INDENT:29]
while name_and_length_str.find(' ')!=-1:
name_and_length_str = name_and_length_str.replace(' ',' ')
name_and_length = name_and_length_str.split(' ')
assert len(name_and_length)<=2, \
'Cannot parse the name and length in the LOCUS line:\n' + line
assert len(name_and_length)!=1, \
'Name and length collide in the LOCUS line:\n' + line
#Should be possible to split them based on position, if
#a clear definition of the standard exists THAT AGREES with
#existing files.
consumer.locus(name_and_length[0])
consumer.size(name_and_length[1])
#consumer.residue_type(line[33:41].strip())
if line[33:51].strip() == "" and line[29:33] == ' aa ':
#Amino acids -> protein (even if there is no residue type given)
#We want to use a protein alphabet in this case, rather than a
#generic one. Not sure if this is the best way to achieve this,
#but it works because the scanner checks for this:
consumer.residue_type("PROTEIN")
else:
consumer.residue_type(line[33:51].strip())
consumer.data_file_division(line[52:55])
if line[62:73].strip():
consumer.date(line[62:73])
elif line[40:44] in [' bp ', ' aa ',' rc '] \
and line[54:64].strip() in ['','linear','circular']:
#New... linear/circular/big blank test should avoid EnsEMBL style
#LOCUS line being treated like a proper column based LOCUS line.
#
# Positions Contents
# --------- --------
# 00:06 LOCUS
# 06:12 spaces
# 12:?? Locus name
# ??:?? space
# ??:40 Length of sequence, right-justified
# 40:44 space, bp, space
# 44:47 Blank, ss-, ds-, ms-
# 47:54 Blank, DNA, RNA, tRNA, mRNA, uRNA, snRNA, cDNA
# 54:55 space
# 55:63 Blank (implies linear), linear or circular
# 63:64 space
# 64:67 The division code (e.g. BCT, VRL, INV)
# 67:68 space
# 68:79 Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
#
assert line[40:44] in [' bp ', ' aa ',' rc '] , \
'LOCUS line does not contain size units at expected position:\n' + line
assert line[44:47] in [' ', 'ss-', 'ds-', 'ms-'], \
'LOCUS line does not have valid strand type (Single stranded, ...):\n' + line
assert line[47:54].strip() == "" \
or line[47:54].strip().find('DNA') != -1 \
or line[47:54].strip().find('RNA') != -1, \
'LOCUS line does not contain valid sequence type (DNA, RNA, ...):\n' + line
assert line[54:55] == ' ', \
'LOCUS line does not contain space at position 55:\n' + line
assert line[55:63].strip() in ['','linear','circular'], \
'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line
assert line[63:64] == ' ', \
'LOCUS line does not contain space at position 64:\n' + line
assert line[67:68] == ' ', \
'LOCUS line does not contain space at position 68:\n' + line
if line[68:79].strip():
assert line[70:71] == '-', \
'LOCUS line does not contain - at position 71 in date:\n' + line
assert line[74:75] == '-', \
'LOCUS line does not contain - at position 75 in date:\n' + line
name_and_length_str = line[GENBANK_INDENT:40]
while name_and_length_str.find(' ')!=-1:
name_and_length_str = name_and_length_str.replace(' ',' ')
name_and_length = name_and_length_str.split(' ')
assert len(name_and_length)<=2, \
'Cannot parse the name and length in the LOCUS line:\n' + line
assert len(name_and_length)!=1, \
'Name and length collide in the LOCUS line:\n' + line
#Should be possible to split them based on position, if
#a clear definition of the stand exists THAT AGREES with
#existing files.
consumer.locus(name_and_length[0])
consumer.size(name_and_length[1])
if line[44:54].strip() == "" and line[40:44] == ' aa ':
#Amino acids -> protein (even if there is no residue type given)
#We want to use a protein alphabet in this case, rather than a
#generic one. Not sure if this is the best way to achieve this,
#but it works because the scanner checks for this:
consumer.residue_type(("PROTEIN " + line[54:63]).strip())
else:
consumer.residue_type(line[44:63].strip())
consumer.data_file_division(line[64:67])
if line[68:79].strip():
consumer.date(line[68:79])
elif line[GENBANK_INDENT:].strip().count(" ")==0 :
#Truncated LOCUS line, as produced by some EMBOSS tools - see bug 1762
#
#e.g.
#
# "LOCUS U00096"
#
#rather than:
#
# "LOCUS U00096 4639675 bp DNA circular BCT"
#
# Positions Contents
# --------- --------
# 00:06 LOCUS
# 06:12 spaces
# 12:?? Locus name
if line[GENBANK_INDENT:].strip() != "":
consumer.locus(line[GENBANK_INDENT:].strip())
else:
#Must just have just "LOCUS ", is this even legitimate?
#We should be able to continue parsing... we need real world testcases!
warnings.warn("Minimal LOCUS line found - is this correct?\n:%r" % line)
elif len(line.split())==7 and line.split()[3] in ["aa","bp"]:
#Cope with EnsEMBL genbank files which use space separation rather
#than the expected column based layout. e.g.
#LOCUS HG531_PATCH 1000000 bp DNA HTG 18-JUN-2011
#LOCUS HG531_PATCH 759984 bp DNA HTG 18-JUN-2011
#LOCUS HG506_HG1000_1_PATCH 814959 bp DNA HTG 18-JUN-2011
#LOCUS HG506_HG1000_1_PATCH 1219964 bp DNA HTG 18-JUN-2011
#Notice that the 'bp' can occur in the position expected by either
#the old or the new fixed column standards (parsed above).
splitline = line.split()
consumer.locus(splitline[1])
consumer.size(splitline[2])
consumer.residue_type(splitline[4])
consumer.data_file_division(splitline[5])
consumer.date(splitline[6])
elif len(line.split())>=4 and line.split()[3] in ["aa","bp"]:
#Cope with EMBOSS seqret output where it seems the locus id can cause
#the other fields to overflow. We just IGNORE the other fields!
warnings.warn("Malformed LOCUS line found - is this correct?\n:%r" % line)
consumer.locus(line.split()[1])
consumer.size(line.split()[2])
elif len(line.split())>=4 and line.split()[-1] in ["aa","bp"]:
#Cope with psuedo-GenBank files like this:
# "LOCUS RNA5 complete 1718 bp"
#Treat everything between LOCUS and the size as the identifier.
warnings.warn("Malformed LOCUS line found - is this correct?\n:%r" % line)
consumer.locus(line[5:].rsplit(None,2)[0].strip())
consumer.size(line.split()[-2])
else:
raise ValueError('Did not recognise the LOCUS line layout:\n' + line)
def _feed_header_lines(self, consumer, lines):
#Following dictionary maps GenBank lines to the associated
#consumer methods - the special cases like LOCUS where one
#genbank line triggers several consumer calls have to be
#handled individually.
GENBANK_INDENT = self.HEADER_WIDTH
GENBANK_SPACER = " "*GENBANK_INDENT
consumer_dict = {
'DEFINITION' : 'definition',
'ACCESSION' : 'accession',
'NID' : 'nid',
'PID' : 'pid',
'DBSOURCE' : 'db_source',
'KEYWORDS' : 'keywords',
'SEGMENT' : 'segment',
'SOURCE' : 'source',
'AUTHORS' : 'authors',
'CONSRTM' : 'consrtm',
'PROJECT' : 'project',
'DBLINK' : 'dblink',
'TITLE' : 'title',
'JOURNAL' : 'journal',
'MEDLINE' : 'medline_id',
'PUBMED' : 'pubmed_id',
'REMARK' : 'remark'}
#We have to handle the following specially:
#ORIGIN (locus, size, residue_type, data_file_division and date)
#COMMENT (comment)
#VERSION (version and gi)
#REFERENCE (eference_num and reference_bases)
#ORGANISM (organism and taxonomy)
lines = filter(None,lines)
lines.append("") #helps avoid getting StopIteration all the time
line_iter = iter(lines)
try:
line = line_iter.next()
while True:
if not line : break
line_type = line[:GENBANK_INDENT].strip()
data = line[GENBANK_INDENT:].strip()
if line_type == 'VERSION':
#Need to call consumer.version(), and maybe also consumer.gi() as well.
#e.g.
# VERSION AC007323.5 GI:6587720
while data.find(' ')!=-1:
data = data.replace(' ',' ')
if data.find(' GI:')==-1:
consumer.version(data)
else:
if self.debug : print "Version [" + data.split(' GI:')[0] + "], gi [" + data.split(' GI:')[1] + "]"
consumer.version(data.split(' GI:')[0])
consumer.gi(data.split(' GI:')[1])
#Read in the next line!
line = line_iter.next()
elif line_type == 'REFERENCE':
if self.debug >1 : print "Found reference [" + data + "]"
#Need to call consumer.reference_num() and consumer.reference_bases()
#e.g.
# REFERENCE 1 (bases 1 to 86436)
#
#Note that this can be multiline, see Bug 1968, e.g.
#
# REFERENCE 42 (bases 1517 to 1696; 3932 to 4112; 17880 to 17975; 21142 to
# 28259)
#
#For such cases we will call the consumer once only.
data = data.strip()
#Read in the next line, and see if its more of the reference:
while True:
line = line_iter.next()
if line[:GENBANK_INDENT] == GENBANK_SPACER:
#Add this continuation to the data string
data += " " + line[GENBANK_INDENT:]
if self.debug >1 : print "Extended reference text [" + data + "]"
else:
#End of the reference, leave this text in the variable "line"
break
#We now have all the reference line(s) stored in a string, data,
#which we pass to the consumer
while data.find(' ')!=-1:
data = data.replace(' ',' ')
if data.find(' ')==-1:
if self.debug >2 : print 'Reference number \"' + data + '\"'
consumer.reference_num(data)
else:
if self.debug >2 : print 'Reference number \"' + data[:data.find(' ')] + '\", \"' + data[data.find(' ')+1:] + '\"'
consumer.reference_num(data[:data.find(' ')])
consumer.reference_bases(data[data.find(' ')+1:])
elif line_type == 'ORGANISM':
#Typically the first line is the organism, and subsequent lines
#are the taxonomy lineage. However, given longer and longer
#species names (as more and more strains and sub strains get
#sequenced) the oragnism name can now get wrapped onto multiple
#lines. The NCBI say we have to recognise the lineage line by
#the presense of semi-colon delimited entries. In the long term,
#they are considering adding a new keyword (e.g. LINEAGE).
#See Bug 2591 for details.
organism_data = data
lineage_data = ""
while True:
line = line_iter.next()
if line[0:GENBANK_INDENT] == GENBANK_SPACER:
if lineage_data or ";" in line:
lineage_data += " " + line[GENBANK_INDENT:]
else:
organism_data += " " + line[GENBANK_INDENT:].strip()
else:
#End of organism and taxonomy
break
consumer.organism(organism_data)
if lineage_data.strip() == "" and self.debug > 1:
print "Taxonomy line(s) missing or blank"
consumer.taxonomy(lineage_data.strip())
del organism_data, lineage_data
elif line_type == 'COMMENT':
if self.debug > 1 : print "Found comment"
#This can be multiline, and should call consumer.comment() once
#with a list where each entry is a line.
comment_list=[]
comment_list.append(data)
while True:
line = line_iter.next()
if line[0:GENBANK_INDENT] == GENBANK_SPACER:
data = line[GENBANK_INDENT:]
comment_list.append(data)
if self.debug > 2 : print "Comment continuation [" + data + "]"
else:
#End of the comment
break
consumer.comment(comment_list)
del comment_list
elif line_type in consumer_dict:
#Its a semi-automatic entry!
#Now, this may be a multi line entry...
while True:
line = line_iter.next()
if line[0:GENBANK_INDENT] == GENBANK_SPACER:
data += ' ' + line[GENBANK_INDENT:]
else:
#We now have all the data for this entry:
getattr(consumer, consumer_dict[line_type])(data)
#End of continuation - return to top of loop!
break
else:
if self.debug:
print "Ignoring GenBank header line:\n" % line
#Read in next line
line = line_iter.next()
except StopIteration:
raise ValueError("Problem in header")
def _feed_misc_lines(self, consumer, lines):
#Deals with a few misc lines between the features and the sequence
GENBANK_INDENT = self.HEADER_WIDTH
GENBANK_SPACER = " "*GENBANK_INDENT
lines.append("")
line_iter = iter(lines)
try:
for line in line_iter:
if line.find('BASE COUNT')==0:
line = line[10:].strip()
if line:
if self.debug : print "base_count = " + line
consumer.base_count(line)
if line.find("ORIGIN")==0:
line = line[6:].strip()
if line:
if self.debug : print "origin_name = " + line
consumer.origin_name(line)
if line.find("WGS ")==0 :
line = line[3:].strip()
consumer.wgs(line)
if line.find("WGS_SCAFLD")==0 :
line = line[10:].strip()
consumer.add_wgs_scafld(line)
if line.find("CONTIG")==0:
line = line[6:].strip()
contig_location = line
while True:
line = line_iter.next()
if not line:
break
elif line[:GENBANK_INDENT]==GENBANK_SPACER:
#Don't need to preseve the whitespace here.
contig_location += line[GENBANK_INDENT:].rstrip()
else:
raise ValueError('Expected CONTIG continuation line, got:\n' + line)
consumer.contig_location(contig_location)
return
except StopIteration:
raise ValueError("Problem in misc lines before sequence")
if __name__ == "__main__":
from StringIO import StringIO
gbk_example = \
"""LOCUS SCU49845 5028 bp DNA PLN 21-JUN-1999
DEFINITION Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
(AXL2) and Rev7p (REV7) genes, complete cds.
ACCESSION U49845
VERSION U49845.1 GI:1293613
KEYWORDS .
SOURCE Saccharomyces cerevisiae (baker's yeast)
ORGANISM Saccharomyces cerevisiae
Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
Saccharomycetales; Saccharomycetaceae; Saccharomyces.
REFERENCE 1 (bases 1 to 5028)
AUTHORS Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
TITLE Cloning and sequence of REV7, a gene whose function is required for
DNA damage-induced mutagenesis in Saccharomyces cerevisiae
JOURNAL Yeast 10 (11), 1503-1509 (1994)
PUBMED 7871890
REFERENCE 2 (bases 1 to 5028)
AUTHORS Roemer,T., Madden,K., Chang,J. and Snyder,M.
TITLE Selection of axial growth sites in yeast requires Axl2p, a novel
plasma membrane glycoprotein
JOURNAL Genes Dev. 10 (7), 777-793 (1996)
PUBMED 8846915
REFERENCE 3 (bases 1 to 5028)
AUTHORS Roemer,T.
TITLE Direct Submission
JOURNAL Submitted (22-FEB-1996) Terry Roemer, Biology, Yale University, New
Haven, CT, USA
FEATURES Location/Qualifiers
source 1..5028
/organism="Saccharomyces cerevisiae"
/db_xref="taxon:4932"
/chromosome="IX"
/map="9"
CDS <1..206
/codon_start=3
/product="TCP1-beta"
/protein_id="AAA98665.1"
/db_xref="GI:1293614"
/translation="SSIYNGISTSGLDLNNGTIADMRQLGIVESYKLKRAVVSSASEA
AEVLLRVDNIIRARPRTANRQHM"
gene 687..3158
/gene="AXL2"
CDS 687..3158
/gene="AXL2"
/note="plasma membrane glycoprotein"
/codon_start=1
/function="required for axial budding pattern of S.
cerevisiae"
/product="Axl2p"
/protein_id="AAA98666.1"
/db_xref="GI:1293615"
/translation="MTQLQISLLLTATISLLHLVVATPYEAYPIGKQYPPVARVNESF
TFQISNDTYKSSVDKTAQITYNCFDLPSWLSFDSSSRTFSGEPSSDLLSDANTTLYFN
VILEGTDSADSTSLNNTYQFVVTNRPSISLSSDFNLLALLKNYGYTNGKNALKLDPNE
VFNVTFDRSMFTNEESIVSYYGRSQLYNAPLPNWLFFDSGELKFTGTAPVINSAIAPE
TSYSFVIIATDIEGFSAVEVEFELVIGAHQLTTSIQNSLIINVTDTGNVSYDLPLNYV
YLDDDPISSDKLGSINLLDAPDWVALDNATISGSVPDELLGKNSNPANFSVSIYDTYG
DVIYFNFEVVSTTDLFAISSLPNINATRGEWFSYYFLPSQFTDYVNTNVSLEFTNSSQ
DHDWVKFQSSNLTLAGEVPKNFDKLSLGLKANQGSQSQELYFNIIGMDSKITHSNHSA
NATSTRSSHHSTSTSSYTSSTYTAKISSTSAAATSSAPAALPAANKTSSHNKKAVAIA
CGVAIPLGVILVALICFLIFWRRRRENPDDENLPHAISGPDLNNPANKPNQENATPLN
NPFDDDASSYDDTSIARRLAALNTLKLDNHSATESDISSVDEKRDSLSGMNTYNDQFQ
SQSKEELLAKPPVQPPESPFFDPQNRSSSVYMDSEPAVNKSWRYTGNLSPVSDIVRDS
YGSQKTVDTEKLFDLEAPEKEKRTSRDVTMSSLDPWNSNISPSPVRKSVTPSPYNVTK
HRNRHLQNIQDSQSGKNGITPTTMSTSSSDDFVPVKDGENFCWVHSMEPDRRPSKKRL
VDFSNKSNVNVGQVKDIHGRIPEML"
gene complement(3300..4037)
/gene="REV7"
CDS complement(3300..4037)
/gene="REV7"
/codon_start=1
/product="Rev7p"
/protein_id="AAA98667.1"
/db_xref="GI:1293616"
/translation="MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQ
FVPINRHPALIDYIEELILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVD
KDDQIITETEVFDEFRSSLNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNR
RVDSLEEKAEIERDSNWVKCQEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEK
LISGDDKILNGVYSQYEEGESIFGSLF"
ORIGIN
1 gatcctccat atacaacggt atctccacct caggtttaga tctcaacaac ggaaccattg
61 ccgacatgag acagttaggt atcgtcgaga gttacaagct aaaacgagca gtagtcagct
121 ctgcatctga agccgctgaa gttctactaa gggtggataa catcatccgt gcaagaccaa
181 gaaccgccaa tagacaacat atgtaacata tttaggatat acctcgaaaa taataaaccg
241 ccacactgtc attattataa ttagaaacag aacgcaaaaa ttatccacta tataattcaa
301 agacgcgaaa aaaaaagaac aacgcgtcat agaacttttg gcaattcgcg tcacaaataa
361 attttggcaa cttatgtttc ctcttcgagc agtactcgag ccctgtctca agaatgtaat
421 aatacccatc gtaggtatgg ttaaagatag catctccaca acctcaaagc tccttgccga
481 gagtcgccct cctttgtcga gtaattttca cttttcatat gagaacttat tttcttattc
541 tttactctca catcctgtag tgattgacac tgcaacagcc accatcacta gaagaacaga
601 acaattactt aatagaaaaa ttatatcttc ctcgaaacga tttcctgctt ccaacatcta
661 cgtatatcaa gaagcattca cttaccatga cacagcttca gatttcatta ttgctgacag
721 ctactatatc actactccat ctagtagtgg ccacgcccta tgaggcatat cctatcggaa
781 aacaataccc cccagtggca agagtcaatg aatcgtttac atttcaaatt tccaatgata
841 cctataaatc gtctgtagac aagacagctc aaataacata caattgcttc gacttaccga
901 gctggctttc gtttgactct agttctagaa cgttctcagg tgaaccttct tctgacttac
961 tatctgatgc gaacaccacg ttgtatttca atgtaatact cgagggtacg gactctgccg
1021 acagcacgtc tttgaacaat acataccaat ttgttgttac aaaccgtcca tccatctcgc
1081 tatcgtcaga tttcaatcta ttggcgttgt taaaaaacta tggttatact aacggcaaaa
1141 acgctctgaa actagatcct aatgaagtct tcaacgtgac ttttgaccgt tcaatgttca
1201 ctaacgaaga atccattgtg tcgtattacg gacgttctca gttgtataat gcgccgttac
1261 ccaattggct gttcttcgat tctggcgagt tgaagtttac tgggacggca ccggtgataa
1321 actcggcgat tgctccagaa acaagctaca gttttgtcat catcgctaca gacattgaag
1381 gattttctgc cgttgaggta gaattcgaat tagtcatcgg ggctcaccag ttaactacct
1441 ctattcaaaa tagtttgata atcaacgtta ctgacacagg taacgtttca tatgacttac
1501 ctctaaacta tgtttatctc gatgacgatc ctatttcttc tgataaattg ggttctataa
1561 acttattgga tgctccagac tgggtggcat tagataatgc taccatttcc gggtctgtcc
1621 cagatgaatt actcggtaag aactccaatc ctgccaattt ttctgtgtcc atttatgata
1681 cttatggtga tgtgatttat ttcaacttcg aagttgtctc cacaacggat ttgtttgcca
1741 ttagttctct tcccaatatt aacgctacaa ggggtgaatg gttctcctac tattttttgc
1801 cttctcagtt tacagactac gtgaatacaa acgtttcatt agagtttact aattcaagcc
1861 aagaccatga ctgggtgaaa ttccaatcat ctaatttaac attagctgga gaagtgccca
1921 agaatttcga caagctttca ttaggtttga aagcgaacca aggttcacaa tctcaagagc
1981 tatattttaa catcattggc atggattcaa agataactca ctcaaaccac agtgcgaatg
2041 caacgtccac aagaagttct caccactcca cctcaacaag ttcttacaca tcttctactt
2101 acactgcaaa aatttcttct acctccgctg ctgctacttc ttctgctcca gcagcgctgc
2161 cagcagccaa taaaacttca tctcacaata aaaaagcagt agcaattgcg tgcggtgttg
2221 ctatcccatt aggcgttatc ctagtagctc tcatttgctt cctaatattc tggagacgca
2281 gaagggaaaa tccagacgat gaaaacttac cgcatgctat tagtggacct gatttgaata
2341 atcctgcaaa taaaccaaat caagaaaacg ctacaccttt gaacaacccc tttgatgatg
2401 atgcttcctc gtacgatgat acttcaatag caagaagatt ggctgctttg aacactttga
2461 aattggataa ccactctgcc actgaatctg atatttccag cgtggatgaa aagagagatt
2521 ctctatcagg tatgaataca tacaatgatc agttccaatc ccaaagtaaa gaagaattat
2581 tagcaaaacc cccagtacag cctccagaga gcccgttctt tgacccacag aataggtctt
2641 cttctgtgta tatggatagt gaaccagcag taaataaatc ctggcgatat actggcaacc
2701 tgtcaccagt ctctgatatt gtcagagaca gttacggatc acaaaaaact gttgatacag
2761 aaaaactttt cgatttagaa gcaccagaga aggaaaaacg tacgtcaagg gatgtcacta
2821 tgtcttcact ggacccttgg aacagcaata ttagcccttc tcccgtaaga aaatcagtaa
2881 caccatcacc atataacgta acgaagcatc gtaaccgcca cttacaaaat attcaagact
2941 ctcaaagcgg taaaaacgga atcactccca caacaatgtc aacttcatct tctgacgatt
3001 ttgttccggt taaagatggt gaaaattttt gctgggtcca tagcatggaa ccagacagaa
3061 gaccaagtaa gaaaaggtta gtagattttt caaataagag taatgtcaat gttggtcaag
3121 ttaaggacat tcacggacgc atcccagaaa tgctgtgatt atacgcaacg atattttgct
3181 taattttatt ttcctgtttt attttttatt agtggtttac agatacccta tattttattt
3241 agtttttata cttagagaca tttaatttta attccattct tcaaatttca tttttgcact
3301 taaaacaaag atccaaaaat gctctcgccc tcttcatatt gagaatacac tccattcaaa
3361 attttgtcgt caccgctgat taatttttca ctaaactgat gaataatcaa aggccccacg
3421 tcagaaccga ctaaagaagt gagttttatt ttaggaggtt gaaaaccatt attgtctggt
3481 aaattttcat cttcttgaca tttaacccag tttgaatccc tttcaatttc tgctttttcc
3541 tccaaactat cgaccctcct gtttctgtcc aacttatgtc ctagttccaa ttcgatcgca
3601 ttaataactg cttcaaatgt tattgtgtca tcgttgactt taggtaattt ctccaaatgc
3661 ataatcaaac tatttaagga agatcggaat tcgtcgaaca cttcagtttc cgtaatgatc
3721 tgatcgtctt tatccacatg ttgtaattca ctaaaatcta aaacgtattt ttcaatgcat
3781 aaatcgttct ttttattaat aatgcagatg gaaaatctgt aaacgtgcgt taatttagaa
3841 agaacatcca gtataagttc ttctatatag tcaattaaag caggatgcct attaatggga
3901 acgaactgcg gcaagttgaa tgactggtaa gtagtgtagt cgaatgactg aggtgggtat
3961 acatttctat aaaataaaat caaattaatg tagcatttta agtataccct cagccacttc
4021 tctacccatc tattcataaa gctgacgcaa cgattactat tttttttttc ttcttggatc
4081 tcagtcgtcg caaaaacgta taccttcttt ttccgacctt ttttttagct ttctggaaaa
4141 gtttatatta gttaaacagg gtctagtctt agtgtgaaag ctagtggttt cgattgactg
4201 atattaagaa agtggaaatt aaattagtag tgtagacgta tatgcatatg tatttctcgc
4261 ctgtttatgt ttctacgtac ttttgattta tagcaagggg aaaagaaata catactattt
4321 tttggtaaag gtgaaagcat aatgtaaaag ctagaataaa atggacgaaa taaagagagg
4381 cttagttcat cttttttcca aaaagcaccc aatgataata actaaaatga aaaggatttg
4441 ccatctgtca gcaacatcag ttgtgtgagc aataataaaa tcatcacctc cgttgccttt
4501 agcgcgtttg tcgtttgtat cttccgtaat tttagtctta tcaatgggaa tcataaattt
4561 tccaatgaat tagcaatttc gtccaattct ttttgagctt cttcatattt gctttggaat
4621 tcttcgcact tcttttccca ttcatctctt tcttcttcca aagcaacgat ccttctaccc
4681 atttgctcag agttcaaatc ggcctctttc agtttatcca ttgcttcctt cagtttggct
4741 tcactgtctt ctagctgttg ttctagatcc tggtttttct tggtgtagtt ctcattatta
4801 gatctcaagt tattggagtc ttcagccaat tgctttgtat cagacaattg actctctaac
4861 ttctccactt cactgtcgag ttgctcgttt ttagcggaca aagatttaat ctcgttttct
4921 ttttcagtgt tagattgctc taattctttg agctgttctc tcagctcctc atatttttct
4981 tgccatgact cagattctaa ttttaagcta ttcaatttct ctttgatc
//"""
# GenBank format protein (aka GenPept) file from:
# http://www.molecularevolution.org/resources/fileformats/
gbk_example2 = \
"""LOCUS AAD51968 143 aa linear BCT 21-AUG-2001
DEFINITION transcriptional regulator RovA [Yersinia enterocolitica].
ACCESSION AAD51968
VERSION AAD51968.1 GI:5805369
DBSOURCE locus AF171097 accession AF171097.1
KEYWORDS .
SOURCE Yersinia enterocolitica
ORGANISM Yersinia enterocolitica
Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales;
Enterobacteriaceae; Yersinia.
REFERENCE 1 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE A chromosomally encoded regulator is required for expression of the
Yersinia enterocolitica inv gene and for virulence
JOURNAL Mol. Microbiol. 35 (3), 677-685 (2000)
MEDLINE 20138369
PUBMED 10672189
REFERENCE 2 (residues 1 to 143)
AUTHORS Revell,P.A. and Miller,V.L.
TITLE Direct Submission
JOURNAL Submitted (22-JUL-1999) Molecular Microbiology, Washington
University School of Medicine, Campus Box 8230, 660 South Euclid,
St. Louis, MO 63110, USA
COMMENT Method: conceptual translation.
FEATURES Location/Qualifiers
source 1..143
/organism="Yersinia enterocolitica"
/mol_type="unassigned DNA"
/strain="JB580v"
/serotype="O:8"
/db_xref="taxon:630"
Protein 1..143
/product="transcriptional regulator RovA"
/name="regulates inv expression"
CDS 1..143
/gene="rovA"
/coded_by="AF171097.1:380..811"
/note="regulator of virulence"
/transl_table=11
ORIGIN
1 mestlgsdla rlvrvwrali dhrlkplelt qthwvtlhni nrlppeqsqi qlakaigieq
61 pslvrtldql eekglitrht candrrakri klteqsspii eqvdgvicst rkeilggisp
121 deiellsgli dklerniiql qsk
//
"""
embl_example="""ID X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
XX
AC X56734; S46826;
XX
DT 12-SEP-1991 (Rel. 29, Created)
DT 25-NOV-2005 (Rel. 85, Last updated, Version 11)
XX
DE Trifolium repens mRNA for non-cyanogenic beta-glucosidase
XX
KW beta-glucosidase.
XX
OS Trifolium repens (white clover)
OC Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta;
OC Spermatophyta; Magnoliophyta; eudicotyledons; core eudicotyledons; rosids;
OC eurosids I; Fabales; Fabaceae; Papilionoideae; Trifolieae; Trifolium.
XX
RN [5]
RP 1-1859
RX PUBMED; 1907511.
RA Oxtoby E., Dunn M.A., Pancoro A., Hughes M.A.;
RT "Nucleotide and derived amino acid sequence of the cyanogenic
RT beta-glucosidase (linamarase) from white clover (Trifolium repens L.)";
RL Plant Mol. Biol. 17(2):209-219(1991).
XX
RN [6]
RP 1-1859
RA Hughes M.A.;
RT ;
RL Submitted (19-NOV-1990) to the EMBL/GenBank/DDBJ databases.
RL Hughes M.A., University of Newcastle Upon Tyne, Medical School, Newcastle
RL Upon Tyne, NE2 4HH, UK
XX
FH Key Location/Qualifiers
FH
FT source 1..1859
FT /organism="Trifolium repens"
FT /mol_type="mRNA"
FT /clone_lib="lambda gt10"
FT /clone="TRE361"
FT /tissue_type="leaves"
FT /db_xref="taxon:3899"
FT CDS 14..1495
FT /product="beta-glucosidase"
FT /EC_number="3.2.1.21"
FT /note="non-cyanogenic"
FT /db_xref="GOA:P26204"
FT /db_xref="InterPro:IPR001360"
FT /db_xref="InterPro:IPR013781"
FT /db_xref="UniProtKB/Swiss-Prot:P26204"
FT /protein_id="CAA40058.1"
FT /translation="MDFIVAIFALFVISSFTITSTNAVEASTLLDIGNLSRSSFPRGFI
FT FGAGSSAYQFEGAVNEGGRGPSIWDTFTHKYPEKIRDGSNADITVDQYHRYKEDVGIMK
FT DQNMDSYRFSISWPRILPKGKLSGGINHEGIKYYNNLINELLANGIQPFVTLFHWDLPQ
FT VLEDEYGGFLNSGVINDFRDYTDLCFKEFGDRVRYWSTLNEPWVFSNSGYALGTNAPGR
FT CSASNVAKPGDSGTGPYIVTHNQILAHAEAVHVYKTKYQAYQKGKIGITLVSNWLMPLD
FT DNSIPDIKAAERSLDFQFGLFMEQLTTGDYSKSMRRIVKNRLPKFSKFESSLVNGSFDF
FT IGINYYSSSYISNAPSHGNAKPSYSTNPMTNISFEKHGIPLGPRAASIWIYVYPYMFIQ
FT EDFEIFCYILKINITILQFSITENGMNEFNDATLPVEEALLNTYRIDYYYRHLYYIRSA
FT IRAGSNVKGFYAWSFLDCNEWFAGFTVRFGLNFVD"
FT mRNA 1..1859
FT /experiment="experimental evidence, no additional details
FT recorded"
XX
SQ Sequence 1859 BP; 609 A; 314 C; 355 G; 581 T; 0 other;
aaacaaacca aatatggatt ttattgtagc catatttgct ctgtttgtta ttagctcatt 60
cacaattact tccacaaatg cagttgaagc ttctactctt cttgacatag gtaacctgag 120
tcggagcagt tttcctcgtg gcttcatctt tggtgctgga tcttcagcat accaatttga 180
aggtgcagta aacgaaggcg gtagaggacc aagtatttgg gataccttca cccataaata 240
tccagaaaaa ataagggatg gaagcaatgc agacatcacg gttgaccaat atcaccgcta 300
caaggaagat gttgggatta tgaaggatca aaatatggat tcgtatagat tctcaatctc 360
ttggccaaga atactcccaa agggaaagtt gagcggaggc ataaatcacg aaggaatcaa 420
atattacaac aaccttatca acgaactatt ggctaacggt atacaaccat ttgtaactct 480
ttttcattgg gatcttcccc aagtcttaga agatgagtat ggtggtttct taaactccgg 540
tgtaataaat gattttcgag actatacgga tctttgcttc aaggaatttg gagatagagt 600
gaggtattgg agtactctaa atgagccatg ggtgtttagc aattctggat atgcactagg 660
aacaaatgca ccaggtcgat gttcggcctc caacgtggcc aagcctggtg attctggaac 720
aggaccttat atagttacac acaatcaaat tcttgctcat gcagaagctg tacatgtgta 780
taagactaaa taccaggcat atcaaaaggg aaagataggc ataacgttgg tatctaactg 840
gttaatgcca cttgatgata atagcatacc agatataaag gctgccgaga gatcacttga 900
cttccaattt ggattgttta tggaacaatt aacaacagga gattattcta agagcatgcg 960
gcgtatagtt aaaaaccgat tacctaagtt ctcaaaattc gaatcaagcc tagtgaatgg 1020
ttcatttgat tttattggta taaactatta ctcttctagt tatattagca atgccccttc 1080
acatggcaat gccaaaccca gttactcaac aaatcctatg accaatattt catttgaaaa 1140
acatgggata cccttaggtc caagggctgc ttcaatttgg atatatgttt atccatatat 1200
gtttatccaa gaggacttcg agatcttttg ttacatatta aaaataaata taacaatcct 1260
gcaattttca atcactgaaa atggtatgaa tgaattcaac gatgcaacac ttccagtaga 1320
agaagctctt ttgaatactt acagaattga ttactattac cgtcacttat actacattcg 1380
ttctgcaatc agggctggct caaatgtgaa gggtttttac gcatggtcat ttttggactg 1440
taatgaatgg tttgcaggct ttactgttcg ttttggatta aactttgtag attagaaaga 1500
tggattaaaa aggtacccta agctttctgc ccaatggtac aagaactttc tcaaaagaaa 1560
ctagctagta ttattaaaag aactttgtag tagattacag tacatcgttt gaagttgagt 1620
tggtgcacct aattaaataa aagaggttac tcttaacata tttttaggcc attcgttgtg 1680
aagttgttag gctgttattt ctattatact atgttgtagt aataagtgca ttgttgtacc 1740
agaagctatg atcataacta taggttgatc cttcatgtat cagtttgatg ttgagaatac 1800
tttgaattaa aagtcttttt ttattttttt aaaaaaaaaa aaaaaaaaaa aaaaaaaaa 1859
//
"""
# Self-test: exercise the GenBank/EMBL scanners on the embedded example
# records above.  CDS iteration yields one record per CDS feature.
print "GenBank CDS Iteration"
print "====================="
g = GenBankScanner()
for record in g.parse_cds_features(StringIO(gbk_example)):
    print record
g = GenBankScanner()
# tags2id controls which qualifiers become the record id/name/description.
for record in g.parse_cds_features(StringIO(gbk_example2),
            tags2id=('gene','locus_tag','product')):
    print record
g = GenBankScanner()
# Two concatenated records in one stream.
for record in g.parse_cds_features(StringIO(gbk_example + "\n" + gbk_example2),
            tags2id=('gene','locus_tag','product')):
    print record
print
# Whole-record iteration, with and without feature parsing.
print "GenBank Iteration"
print "================="
g = GenBankScanner()
for record in g.parse_records(StringIO(gbk_example),do_features=False):
    print record.id, record.name, record.description
    print record.seq
g = GenBankScanner()
for record in g.parse_records(StringIO(gbk_example),do_features=True):
    print record.id, record.name, record.description
    print record.seq
g = GenBankScanner()
for record in g.parse_records(StringIO(gbk_example2),do_features=False):
    print record.id, record.name, record.description
    print record.seq
g = GenBankScanner()
for record in g.parse_records(StringIO(gbk_example2),do_features=True):
    print record.id, record.name, record.description
    print record.seq
print
print "EMBL CDS Iteration"
print "=================="
e = EmblScanner()
for record in e.parse_cds_features(StringIO(embl_example)):
    print record
print
print "EMBL Iteration"
print "=============="
e = EmblScanner()
for record in e.parse_records(StringIO(embl_example),do_features=True):
    print record.id, record.name, record.description
    print record.seq
|
LyonsLab/coge
|
bin/last_wrapper/Bio/GenBank/Scanner.py
|
Python
|
bsd-2-clause
| 81,886
|
[
"Biopython"
] |
60f370071cf85ad73243ba8516f71e33acba3beba06af12bc43850bab03de6c2
|
import numpy
from Bio import Phylo
from sys import argv
def to_adjacency_matrix(tree):
    """Build an adjacency matrix (NumPy 2D array) for the branches of *tree*.

    Returns a tuple ``(all_clades, adjacency_matrix)``:

    * ``all_clades`` -- every clade of the tree in level order; the position
      of a clade in this list is its row/column index in the matrix.
    * ``adjacency_matrix`` -- cell (i, j) is 1 when a branch runs from
      ``all_clades[i]`` to ``all_clades[j]``, otherwise 0.  For unrooted
      trees the matrix is made symmetric, since a branch may be traversed
      in either direction.

    Source: http://biopython.org/wiki/Phylo_cookbook#Convert_to_a_NumPy_array_or_matrix
    """
    all_clades = list(tree.find_clades(order='level'))
    # Map each clade object to its row/column index.
    index_of = {clade: pos for pos, clade in enumerate(all_clades)}
    size = len(all_clades)
    adjacency_matrix = numpy.zeros((size, size))
    for parent in tree.find_clades(terminal=False, order='level'):
        for child in parent.clades:
            adjacency_matrix[index_of[parent], index_of[child]] = 1
    if not tree.rooted:
        # Branches can go from "child" to "parent" in unrooted trees,
        # so mirror the parent->child entries.
        adjacency_matrix += adjacency_matrix.transpose()
    return all_clades, adjacency_matrix
# Parse the Newick tree named on the command line and derive its
# adjacency structure.
clades, adjacent = to_adjacency_matrix(Phylo.read(argv[1], 'newick'))
leaves = dict()
internal = dict()
mapping = dict()
adjacency_list = dict()
# A row summing to exactly 1 marks a leaf (single incident branch);
# the 0.5/1.5 window guards against float rounding.
for idx, row in enumerate(adjacent):
    if 0.5 < sum(row) < 1.5:
        leaves[clades[idx].name] = idx
# Renumber nodes: leaves first, in alphabetical order of their names...
i = 0
for name in sorted(leaves.keys()):
    mapping[leaves[name]] = i
    i += 1
# ...then internal nodes (row sum > 1), in matrix order.
for idx, row in enumerate(adjacent):
    if sum(row) > 1.0:
        mapping[idx] = i
        i += 1
# Translate the matrix into an adjacency list keyed by the new numbering.
for idx, row in enumerate(adjacent):
    tmp = list()
    for neighbor, element in enumerate(row):
        if element > 0.5:
            tmp.append(mapping[neighbor])
    adjacency_list[mapping[idx]] = tmp
# Write "<node count>" then one "<degree> <neighbors...>" line per node.
# NOTE(review): argv[1][:-7] assumes a 7-character file extension
# (e.g. ".newick") -- verify against the expected input filenames.
with open(argv[1][:-7] + '.adj', 'wb') as f:
    f.write(bytes(str(len(adjacency_list)), 'utf8') + b"\n")
    for k in sorted(adjacency_list.keys()):
        f.write(bytes(str(len(adjacency_list[k])), 'utf8') + b' ')
        f.write(bytes(' '.join(map(str, adjacency_list[k])), 'utf8'))
        f.write(b'\n')
|
YnkDK/AiBToS-Project2
|
bin/parser3.py
|
Python
|
mit
| 2,222
|
[
"Biopython"
] |
fc2f655ce3c6c24cdf561fa06f555f517e931bc51ab8408871480eb7ff8cfa56
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
import re
import logging
from Products.ZenRRD.CommandParser import CommandParser
log = logging.getLogger("zen.zencommand")
"""
Items to collect:
bladeHealth
bladePowerConsumed
"""
class BladeStatus(CommandParser):

    def processResults(self, cmd, result):
        """
        Parse the results of the "SHOW ENCLOSURE STATUS" command
        to get details about power usage and component status.
        """
        raw = cmd.result.output
        # Look up datapoints by their id so lines can be mapped to them.
        points_by_id = dict((dp.id, dp) for dp in cmd.points)
        lines = raw.split('\n')
        log.debug("BladeStatusParser: I have %d lines to parse" % len(lines))
        pattern = re.compile('^([^:]+):.(.*)$')
        for line in lines:
            match = pattern.match(line.strip())
            if not match:
                continue
            key, value = match.groups()
            if "Health" in key:
                # 0 = healthy ("OK"), 1 = anything else.
                health = float(0) if "OK" in value else float(1)
                result.values.append((points_by_id["bladeHealth"], health))
            if "Current Wattage" in key:
                result.values.append((points_by_id["bladePowerConsumed"], float(value)))
        return result
|
zenoss/ZenPacks.community.HPBladeChassis
|
ZenPacks/community/HPBladeChassis/parsers/HPBladeChassis/BladeStatus.py
|
Python
|
gpl-2.0
| 1,759
|
[
"VisIt"
] |
fd41911c659ad8788a850513c987beba4923d609a9c18fd5b9ab1bc71fa6a4d6
|
#!/usr/bin/env python
import argparse
from Bio import AlignIO
# A simple converter from Nexus to Phylip format using BioPython.
# Matt Gitzendanner
# University of Florida
# Command-line interface: -i input Nexus file, -o output Phylip file.
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input file")
parser.add_argument("-o", help="output file")
args = parser.parse_args()
infile = args.i
outfile = args.o
# NOTE(review): on IOError these blocks only print a message and fall
# through, so AlignIO below will still fail on the unopened handle.
try:
    IN=open(infile, 'r')
except IOError:
    print "Can't open file", infile
# Output is opened in append mode, so repeated runs accumulate alignments.
try:
    OUT=open(outfile, 'a')
except IOError:
    print "Can't open file", outfile
# Read the single Nexus alignment and write it in relaxed Phylip format.
alignment = AlignIO.read(IN, "nexus")
AlignIO.write([alignment], OUT, "phylip-relaxed")
|
Cactusolo/ToolBox
|
nex_to_phy.py
|
Python
|
mit
| 612
|
[
"Biopython"
] |
750e58fcfcef259728000e26ad638941ea07c0653a23205bea6204f60eec496f
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige import *
from xml.dom.minidom import Node
from ige.IObject import IObject
from ige.IDataHolder import IDataHolder
from Const import *
import Rules, Utils, math, random, copy
from ige import log
class ISystem(IObject):
typeID = T_SYSTEM
    def init(self, obj):
        """Initialise default attributes of a newly created system object."""
        IObject.init(self, obj)
        # galaxy-map coordinates
        obj.x = 0.0
        obj.y = 0.0
        # contained objects (OIDs)
        obj.planets = []
        obj.fleets = []
        obj.closeFleets = []
        obj.starClass = u'---' # star classification
        obj.signature = 100
        # rotation (disabled feature)
        #~ obj.dist = 0.0
        #~ obj.dAngle = 0.0
        #~ obj.sAngle = 0.0
        # renaming -- turn of the last name change
        obj.lastNameChng = 0
        # combat -- number of consecutive turns with combat here
        obj.combatCounter = 0
        # system wide data -- per-player scanner power seen this turn
        obj.scannerPwrs = {}
        # mine field -- per-owner deployed mines
        obj.minefield = {}
def update(self, tran, obj):
# check existence of all planets
if 0:
for planetID in obj.planets:
if not tran.db.has_key(planetID):
log.debug("CONSISTENCY - planet %d from system %d does not exists" % (planetID, obj.oid))
elif tran.db[planetID].type != T_PLANET:
log.debug("CONSISTENCY - planet %d from system %d is not a T_PLANET" % (planetID, obj.oid))
if not hasattr(obj,'minefield'):
obj.miefield = {}
# check that all .fleet are in .closeFleets
for fleetID in obj.fleets:
if fleetID not in obj.closeFleets:
log.debug("CONSISTENCY - fleet %d is in .fleet but not in .closeFleets - adding" % fleetID)
obj.closeFleets.append(fleetID)
# check existence of all fleets
for fleetID in obj.closeFleets:
if not tran.db.has_key(fleetID):
log.debug("CONSISTENCY - fleet %d from system %d does not exists" % (fleetID, obj.oid))
elif tran.db[fleetID].type not in (T_FLEET, T_ASTEROID):
log.debug("CONSISTENCY - fleet %d from system %d is not a T_FLEET" % (fleetID, obj.oid))
# delete nonexistent fleets
index = 0
while index < len(obj.closeFleets) and obj.closeFleets:
fleet = tran.db.get(obj.closeFleets[index], None)
if fleet == None:
log.debug("CONSISTENCY - fleet %d does not exists" % obj.closeFleets[index])
fleetID = obj.closeFleets[index]
obj.closeFleets.remove(fleetID)
obj.fleets.remove(fleetID)
else:
index += 1
# check compOf
if not tran.db.has_key(obj.compOf) or tran.db[obj.compOf].type != T_GALAXY:
log.debug("CONSISTENCY invalid compOf for system", obj.oid)
# rebuild closeFleets attribute
old = obj.closeFleets
obj.closeFleets = []
for fleetID in old:
fleet = tran.db.get(fleetID, None)
if fleet and fleet.closeSystem == obj.oid and fleetID not in obj.closeFleets:
obj.closeFleets.append(fleetID)
if old != obj.closeFleets:
log.debug("System close fleets fixed", obj.oid, old, obj.closeFleets)
# try to find starting planets
starting = 0
free = 1
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.plStarting:
starting = planetID
if planet.owner != OID_NONE:
free = 0
if starting and free:
# good starting position
#@log.debug("Found starting position", obj.oid, starting)
# get galaxy
galaxy = tran.db[obj.compOf]
if starting not in galaxy.startingPos:
log.debug("Adding to starting positions of galaxy", galaxy.oid)
galaxy.startingPos.append(starting)
# check if system has planets
hasHabitable = 0
for planetID in obj.planets:
if tran.db[planetID].plSlots > 0:
hasHabitable = 1
break
if (not obj.planets or not hasHabitable) and obj.starClass[0] != "b" and obj.starClass != "wW0":
log.debug("No planet for system", obj.oid, obj.name, obj.starClass)
# delete old planets
for planetID in obj.planets:
del tran.db[planetID]
obj.planets = []
# find matching systems
avail = []
for systemID in tran.db[obj.compOf].systems:
system = tran.db[systemID]
if system.starClass[1] == obj.starClass[1] \
or (obj.starClass[1] == "G" and system.starClass[1] == "F"):
ok = 0
for planetID in system.planets:
planet = tran.db[planetID]
if planet.plStarting:
ok = 0
break
if planet.plSlots > 0:
ok = 1
if ok and system.planets:
avail.append(systemID)
# select random system
import random
log.debug("Can copy", avail)
try:
systemID = random.choice(avail)
# copy it
log.debug("Will copy system", systemID)
nType = Utils.getPlanetNamesType()
orbit = 1
for planetID in tran.db[systemID].planets:
orig = tran.db[planetID]
planet = tran.db[self.createPlanet(tran, obj)]
planet.name = Utils.getPlanetName(obj.name, nType, orbit - 1)
planet.x = obj.x
planet.y = obj.y
planet.plDiameter = orig.plDiameter
planet.plType = orig.plType
planet.plMin = orig.plMin
planet.plBio = orig.plBio
planet.plEn = orig.plEn
planet.plEnv = orig.plEnv
planet.plSlots = orig.plSlots
planet.plMaxSlots = orig.plMaxSlots
planet.plStratRes = 0
planet.plDisease = 0
planet.plStarting = 0
planet.orbit = orbit
planet.storPop = 0
planet.slots = []
orbit += 1
except:
log.debug("Copy failed")
update.public = 0
    def getReferences(self, tran, obj):
        """Return OIDs referenced by this system (its planets)."""
        return obj.planets

    getReferences.public = 0
    def getScanInfos(self, tran, obj, scanPwr, player):
        """Return scan result records for this system as seen by *player*.

        Information is revealed in tiers according to scanPwr; higher tiers
        recursively include scan records for the system's planets and
        fleets (scaled by their share of the system's signature).
        """
        result = IDataHolder()
        results = [result]
        if scanPwr >= Rules.level1InfoScanPwr:
            # tier 1: position, signature, star class
            result._type = T_SCAN
            result.scanPwr = scanPwr
            result.oid = obj.oid
            result.x = obj.x
            result.y = obj.y
            if hasattr(obj, 'destinationOid'):
                result.destinationOid = obj.destinationOid
            # multiply by 1000 to increase accuracy
            #~ result.dist = obj.dist * 1000
            #~ result.dAngle = obj.dAngle * 1000
            #~ result.sAngle = obj.sAngle * 1000
            result.signature = obj.signature
            result.type = obj.type
            result.compOf = obj.compOf
            result.starClass = obj.starClass
        if scanPwr >= Rules.level2InfoScanPwr:
            # tier 2: name and combat status
            result.name = obj.name
            result.combatCounter = obj.combatCounter
        if scanPwr >= Rules.level3InfoScanPwr:
            # tier 3: planets (recursively scanned, own planets excluded)
            result.planets = obj.planets
            result.owner = obj.owner
            for planetID in obj.planets:
                planet = tran.db[planetID]
                # NOTE: compares planet.owner to the player OID; an earlier
                # revision wrongly compared against player.owner
                if planet.owner == player:
                    continue
                newPwr = scanPwr * planet.signature / obj.signature
                results.extend(self.cmd(planet).getScanInfos(tran, planet, newPwr, player))
        if scanPwr >= Rules.level4InfoScanPwr:
            # tier 4: fleets (recursively scanned) and minefield info
            result.fleets = obj.fleets
            for fleetID in obj.fleets:
                fleet = tran.db[fleetID]
                if fleet.owner == player:
                    continue
                newPwr = scanPwr * fleet.signature / obj.signature
                results.extend(self.cmd(fleet).getScanInfos(tran, fleet, newPwr, player))
            result.hasmines = 0 #no
            if len(obj.minefield) > 0:
                result.hasmines = 1 #yes
                result.minefield = self.getMines(obj,player.oid) #only shows mines you own
                if len(obj.minefield) > 1 or (len(obj.minefield) == 1 and len(result.minefield) == 0):
                    result.hasmines = 2 #yes, and some aren't my mines
        return results
def processINITPhase(self, tran, obj, data):
obj.scannerPwrs = {}
processINITPhase.public = 1
processINITPhase.accLevel = AL_ADMIN
    def processPRODPhase(self, tran, obj, data):
        """Production phase: deploy mines for every planet owner that has a
        mine-control structure, then hand processing on to the planets."""
        #mine deployment
        owners = []
        # collect distinct owners of planets in this system
        for planetID in obj.planets:
            planet = tran.db[planetID]
            if planet.owner not in owners:
                owners.append(planet.owner)
        for ownerid in owners:
            tech,structtech = self.getSystemMineLauncher(tran,obj,ownerid)
            if tech==0: #no control structure
                continue
            owner = tran.db[ownerid]
            turn = tran.db[OID_UNIVERSE].turn
            # technology improvement speeds up the launch rate and lowers
            # the number of mines deployed per launch
            minerate = int(tech.minerate * Rules.techImprEff[owner.techs.get(structtech, Rules.techBaseImprovement)])
            minenum = int(tech.minenum / Rules.techImprEff[owner.techs.get(structtech, Rules.techBaseImprovement)])
            if (turn%minerate)==0: #it is the launch turn
                self.addMine(obj,ownerid,tech.mineclass,minenum)
                log.debug('ISystem', 'Mine deployed for owner %d in system %d' % (ownerid, obj.oid))
        return obj.planets

    processPRODPhase.public = 1
    processPRODPhase.accLevel = AL_ADMIN
    def processACTIONPhase(self, tran, obj, data):
        """Action phase: redistribute Bio/En resources among same-owner
        planets in the system, then hand processing on to planets and
        close fleets."""
        # distribute resources
        planets = {}
        # group planets by owner
        for planetID in obj.planets:
            planet = tran.db[planetID]
            if planet.owner != OID_NONE:
                tmp = planets.get(planet.owner, [])
                tmp.append(planet)
                planets[planet.owner] = tmp
        # group planets if owners are allied
        # TODO
        # process each group
        for owner in planets.keys():
            # skip alone planets -- nothing to redistribute
            if len(planets[owner]) < 2:
                continue
            # process each resource
            for resName in ('Bio', 'En'):
                donors = []
                donees = []
                minRes = 'min%s' % resName
                maxRes = 'max%s' % resName
                storRes = 'stor%s' % resName
                donorsSum = 0
                doneesSum = 0
                # put planets into donors (above minimum) / donees (below)
                for planet in planets[owner]:
                    if getattr(planet, storRes) > getattr(planet, minRes):
                        donors.append(planet)
                        donorsSum += getattr(planet, storRes) - getattr(planet, minRes)
                    elif getattr(planet, storRes) < getattr(planet, minRes):
                        donees.append(planet)
                        doneesSum += getattr(planet, minRes) - getattr(planet, storRes)
                #@log.debug('ISystem', obj.oid, 'Donors / donees for %s' % resName, donorsSum, doneesSum)
                # there are requests for donation and there is somebody able to donate
                if doneesSum > 0 and donorsSum > 0:
                    #@log.debug('ISystem', 'Redistributin %s for' % resName, owner)
                    # give: each donee receives in proportion to its deficit
                    balance = 0
                    tmpRatio = min(float(doneesSum) / donorsSum, 1.0)
                    for planet in donees:
                        diff = getattr(planet, minRes) - getattr(planet, storRes)
                        amount = int(float(diff) / doneesSum * donorsSum * tmpRatio)
                        #@log.debug('ISystem', 'Give res', planet.oid, amount)
                        balance -= amount
                        setattr(planet, storRes, getattr(planet, storRes) + amount)
                    # take: each donor contributes in proportion to its surplus
                    assert donorsSum + balance >= 0
                    lastPlanet = None
                    tmpRatio = min(float(donorsSum) / doneesSum, 1.0)
                    for planet in donors:
                        diff = getattr(planet, storRes) - getattr(planet, minRes)
                        amount = int(float(diff) / donorsSum * doneesSum * tmpRatio)
                        balance += amount
                        #@log.debug('ISystem', 'Take res', planet.oid, amount)
                        setattr(planet, storRes, getattr(planet, storRes) - amount)
                        lastPlanet = planet
                    # fix rounding error by charging it to the last donor
                    setattr(lastPlanet, storRes, getattr(lastPlanet, storRes) + balance)
                    #@log.debug('ISystem', 'Rounding error', balance)
                # try to move resources above maximum to the other planets
                for planet in planets[owner]:
                    if getattr(planet, storRes) > getattr(planet, maxRes):
                        excess = getattr(planet, storRes) - getattr(planet, maxRes)
                        #@log.debug('ISystem', 'Trying to move excess rsrcs from', planet.oid, excess)
                        for planet2 in planets[owner]:
                            if planet == planet2:
                                continue
                            if getattr(planet2, storRes) < getattr(planet2, maxRes):
                                space = getattr(planet2, maxRes) - getattr(planet2, storRes)
                                amount = min(space, excess)
                                #@log.debug('ISystem', 'Moved to', planet2.oid, amount)
                                setattr(planet2, storRes, getattr(planet2, storRes) + amount)
                                excess -= amount
                                if excess == 0:
                                    break
                        #@log.debug('ISystem', 'Cannot move excess rsrcs on', planet.oid, excess)
                        setattr(planet, storRes, getattr(planet, maxRes) + excess)
        #~ # rotate system around the galaxy core
        #~ #log.debug("Rotate, old coords", obj.x, obj.y)
        #~ turn = tran.db[OID_UNIVERSE].turn
        #~ galaxy = tran.db[obj.compOf]
        #~ angle = obj.sAngle + (turn / Rules.rotationMod) * obj.dAngle
        #~ obj.x = galaxy.x + obj.dist * math.cos(angle)
        #~ obj.y = galaxy.y + obj.dist * math.sin(angle)
        #~ #log.debug("Rotate, new coords", obj.x, obj.y)
        #~ # change positions of planets and orbitting fleets
        #~ for planetID in obj.planets:
        #~     planet = tran.db[planetID]
        #~     planet.x = obj.x
        #~     planet.y = obj.y
        #~ for fleetID in obj.fleets:
        #~     fleet = tran.db[fleetID]
        #~     fleet.x = obj.x
        #~     fleet.y = obj.y
        # process planets and fleets
        #@log.debug("System close fleets", obj.oid, obj.closeFleets)
        return obj.planets[:] + obj.closeFleets[:]

    processACTIONPhase.public = 1
    processACTIONPhase.accLevel = AL_ADMIN
def getObjectsInSpace(self, tran, obj):
inSpace = obj.closeFleets[:]
for fleetID in obj.fleets:
try:
inSpace.remove(fleetID)
except ValueError:
log.warning(obj.oid, "Cannot remove fleet from closeFleets", fleetID, obj.fleets, obj.closeFleets)
return inSpace
getObjectsInSpace.public = 1
getObjectsInSpace.accLevel = AL_ADMIN
def processBATTLEPhase(self, tran, obj, data):
system = obj
#@log.debug('ISystem', 'BATTLE - system', obj.oid)
# we are processing fleets, planets, ...
objects = obj.planets[:] + obj.fleets[:]
# store owners of objects
# find enemies and allies
attack = {}
allies = {}
owners = {}
ownerIDs = {}
systemAtt = {}
systemDef = {}
hasMine = {}
isOwnedObject = 0
for objID in objects:
attack[objID] = []
allies[objID] = []
owner = tran.db[objID].owner
owners[objID] = owner
ownerIDs[owner] = owner
if owner != OID_NONE:
isOwnedObject = 1
for owner in ownerIDs:
tempAtt, tempDef = self.getSystemCombatBonuses(tran,system,owner)
systemAtt[owner] = tempAtt
systemDef[owner] = tempDef
hasMine[owner] = self.getSystemMineSource(tran,system,owner)
if not isOwnedObject:
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
return
# first - direct ones
index = 1
for obj1ID in objects:
obj1 = tran.db[obj1ID]
if obj1.owner == OID_NONE:
index += 1
continue
commander = tran.db[obj1.owner]
# relationships
#for obj2ID in objects[index:]:
for obj2ID in objects:
obj2 = tran.db[obj2ID]
if obj2.owner == OID_NONE or obj1 is obj2:
continue
if obj1.owner == obj2.owner:
allies[obj1ID].append(obj2ID)
allies[obj2ID].append(obj1ID)
continue
# planet and military object
elif obj1.type == T_PLANET and obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_MILITARY_SHIPS):
#@log.debug("ISystem pl - mil", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# planet and civilian object
elif obj1.type == T_PLANET and not obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_CIVILIAN_SHIPS):
#@log.debug("ISystem pl - civ", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# military and military object
elif obj1.isMilitary and obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_MILITARY_SHIPS):
#@log.debug("ISystem mil - mil", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# military and civilian object
elif obj1.isMilitary and not obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_CIVILIAN_SHIPS):
#@log.debug("ISystem mil - civ", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# planet and fleet
#elif obj1.type == T_PLANET and obj2.type == T_FLEET and \
# self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_MUTUAL_DEFENCE):
# allies[obj1ID].append(obj2ID)
# allies[obj2ID].append(obj1ID)
# fleet and fleet
#elif obj1.type == T_FLEET and obj2.type == T_FLEET and \
# self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_MUTUAL_OFFENCE):
# allies[obj1ID].append(obj2ID)
# allies[obj2ID].append(obj1ID)
# asteroid
if obj2.type == T_ASTEROID:
attack[obj1ID].append(obj2ID)
attack[obj2ID].append(obj1ID)
index += 1
#@log.debug('ISystem', 'Targets:', targets)
#@log.debug('ISystem', 'Allies:', allies)
# find indirect a/e
#for objID in objects:
# iTargets = []
# iAllies = []
# # find indirect a/e
# todo = allies[objID][:]
# while todo:
# id = todo.pop(0)
# iTargets.extend(targets[id])
# for tmpID in allies[id]:
# if tmpID not in iAllies:
# todo.append(tmpID)
# iAllies.append(tmpID)
# # remove allies from targets
# for id in iAllies:
# if id in iTargets:
# iTargets.remove(id)
# # IMPORTATNT preffer NOT to fire at possible allies
# # add my targets
# #for id in targets[objID]:
# # if id not in iTargets:
# # iTargets.append(id)
# # that's all folks
# for id in iTargets:
# if objID not in attack[id]:
# attack[id].append(objID)
# if id not in attack[objID]:
# attack[objID].append(id)
# NOT VALID: objects with action ACTION_ATTACK will attack only their targets
# check, if there are any targets
isCombat = 0
for objID in objects:
if attack[objID]:
isCombat = 1
break #end loop
if not isCombat:
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
for fleetID in system.fleets:
tran.db[fleetID].combatCounter = 0
return
# increase combat counters
system.combatCounter += 1
for fleetID in system.fleets:
tran.db[fleetID].combatCounter += 1
# debug
log.debug('ISystem', 'Final attacks in system %d:' % system.oid, attack)
# mines detonate before battle
shots = {}
targets = {}
firing = {}
damageCaused = {}
damageTaken = {}
shipsLost = {}
isCombat = False
isMineCombat = False
mineKills = 0
for owner in ownerIDs:
if not (owner in hasMine): #no planets
continue
if hasMine[owner] == 0: #no control structure
continue
objID = hasMine[owner]
if len(self.getMines(system,owner))==0:
continue #no mines, something broke
#log.debug('ISystem-Mines', 'Mines Found')
if len(attack[objID])==0:
continue #no targets
fireMine = True
mineTargets = copy.copy(attack[objID])
while fireMine:
while len(mineTargets) > 0:
targetID = random.choice(mineTargets) #select random target
targetobj = tran.db.get(targetID, None)
try:
if targetobj.type == T_FLEET:
break #target found
mineTargets.remove(targetID) #remove an object type that a mine can't hit from the temporary targets list
except:
mineTargets.remove(targetID) #remove a dead fleet from the temporary targets list
if len(mineTargets) == 0:
break #no fleet targets for mines
temp, temp, firing[targetID] = self.cmd(targetobj).getPreCombatData(tran, targetobj) #fix firing for "surrender to" section
damage,att,ignoreshield = self.fireMine(system,owner)
if not damage: #no more mines
fireMine = False
break
log.debug('ISystem', 'Mine Shooting (damage, att, ignore shield):',damage,att,ignoreshield)
isMineCombat = True
#Process Combat
dmg, destroyed = self.cmd(targetobj).applyMine(tran, targetobj, att, damage, ignoreshield)
#log.debug('ISystem-Mines', 'Actual Damage Done:',dmg)
damageTaken[targetID] = damageTaken.get(targetID, 0) + dmg
if destroyed > 0:
shipsLost[targetID] = shipsLost.get(targetID, 0) + destroyed
mineKills += destroyed
if dmg > 0:
damageCaused[objID] = damageCaused.get(objID, 0) + dmg
# now to battle
for objID in objects:
obj = tran.db.get(objID, None)
# get shots from object, should be sorted by weaponClass
# shots = [ shot, ...], shot = (combatAtt, weaponID)
# get target classes and numbers
# (class1, class2, class3, class4)
# cls0 == fighters, cls1 == midships, cls2 == capital ships, cls3 == planet installations
#@log.debug(objID, obj.name, "getting pre combat data")
if obj: # source already destroyed; ignore
shots[objID], targets[objID], firing[objID] = self.cmd(obj).getPreCombatData(tran, obj)
if firing[objID]:
isCombat = True
if not isCombat and not isMineCombat:
# no shots has been fired
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
for fleetID in system.fleets:
tran.db[fleetID].combatCounter = 0
return
#@log.debug("Shots:", shots)
#@log.debug("Targets", targets)
if isCombat:
for shotIdx in (3, 2, 1, 0):
for objID in objects:
# obj CAN be deleted at this point
obj = tran.db.get(objID, None)
if obj == None:
continue # source already destroyed; move to next source
# if object is fleet, then it's signature is max
if obj and obj.type == T_FLEET:
obj.signature = Rules.maxSignature
# target preselection
totalClass = [0, 0, 0, 0]
total = 0
for targetID in attack[objID]:
totalClass[0] += targets[targetID][0]
totalClass[1] += targets[targetID][1]
totalClass[2] += targets[targetID][2]
totalClass[3] += targets[targetID][3]
total = totalClass[0] + totalClass[1] + totalClass[2] + totalClass[3]
# process shots
for combatAtt, weaponID in shots[objID][shotIdx]:
weapon = Rules.techs[weaponID]
weaponClass = weapon.weaponClass
if total == 0:
# there are no targets
break
#@log.debug('ISystem', 'Processing shot', objID, weapon.name, weaponClass)
# process from weaponClass up
# never shoot on smaller ships than weaponClass
applied = 0
for tmpWpnClass in xrange(weaponClass, 4):
#@log.debug('ISystem', 'Trying target class', tmpWpnClass, totalClass[tmpWpnClass])
# select target
if totalClass[tmpWpnClass]:
target = Utils.rand(0, totalClass[tmpWpnClass])
#@log.debug('ISystem', 'Target rnd num', target, totalClass[tmpWpnClass])
for targetID in attack[objID]:
if target < targets[targetID][tmpWpnClass]:
#@log.debug(objID, 'attacks', targetID, tmpWpnClass)
# targetID can be deleted at this point
anObj = tran.db.get(targetID, None)
if anObj:
dmg, destroyed, destroyedClass = self.cmd(anObj).applyShot(tran, anObj, systemDef[owners[targetID]], combatAtt + systemAtt[owners[objID]], weaponID, tmpWpnClass, target)
#@log.debug("ISystem result", dmg, destroyed, destroyedClass, tmpWpnClass)
#@print objID, 'dmg, destroyed', dmg, destroyed
damageTaken[targetID] = damageTaken.get(targetID, 0) + dmg
if destroyed > 0:
shipsLost[targetID] = shipsLost.get(targetID, 0) + destroyed
total -= destroyed
totalClass[destroyedClass] -= destroyed
if dmg > 0 and obj:
obj.combatExp += dmg
damageCaused[objID] = damageCaused.get(objID, 0) + dmg
applied = 1
else:
continue # target already destroyed, move to next target
break
else:
#@log.debug('ISystem', 'Lovering target by', targets[targetID][tmpWpnClass])
target -= targets[targetID][tmpWpnClass]
if applied:
break
# send messages and modify diplomacy relations
# distribute experience pts
for objID in objects:
obj = tran.db.get(objID, None)
if obj:
self.cmd(obj).distributeExp(tran, obj)
if attack[objID]:
source = obj or tran.db[owners[objID]]
# collect players
players = {}
for attackerID in attack[objID]:
players[owners[attackerID]] = None
d1 = damageTaken.get(objID,0)
d2 = damageCaused.get(objID,0)
l = shipsLost.get(objID, 0)
if d1 or d2 or l:
# send only if damage is taken/caused
Utils.sendMessage(tran, source, MSG_COMBAT_RESULTS, system.oid, (d1, d2, l, players.keys()))
if not obj:
# report DESTROYED status
Utils.sendMessage(tran, source, MSG_DESTROYED_FLEET, system.oid, ())
# modify diplomacy relations
objOwner = tran.db[owners[objID]]
for attackerID in attack[objID]:
attOwner = tran.db.get(owners[attackerID], None)
# owner of the fleet
rel = self.cmd(objOwner).getDiplomacyWith(tran, objOwner, attOwner.oid)
rel.relChng = Rules.relLostWhenAttacked
# attacker
rel = self.cmd(attOwner).getDiplomacyWith(tran, attOwner, objOwner.oid)
rel.rechChng = Rules.relLostWhenAttacked
# check if object surrenders
for objID in objects:
# object surrender IFF it and its allies had target and was not able
# to fire at it, planet is not counted as ally in this case
obj = tran.db.get(objID, None)
if firing[objID] and obj:
continue
surrenderTo = []
for attID in attack[objID]:
if firing[attID] and tran.db.has_key(attID):
surrenderTo.append(tran.db[attID].owner)
for allyID in allies[objID]:
if not tran.db.has_key(allyID):
continue
ally = tran.db[allyID]
if firing[allyID] and ally.type != T_PLANET:
surrenderTo = []
break
if surrenderTo:
index = Utils.rand(0, len(surrenderTo))
if obj:
if self.cmd(obj).surrenderTo(tran, obj, surrenderTo[index]):
winner = tran.db[surrenderTo[index]]
source = tran.db.get(owners[objID], None)
log.debug('ISystem', 'BATTLE - surrender', objID, surrenderTo[index], surrenderTo)
if source:
Utils.sendMessage(tran, source, MSG_COMBAT_LOST, system.oid, winner.oid)
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, source.oid)
else:
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, obj.oid)
else:
winner = tran.db[surrenderTo[index]]
source = tran.db[owners[objID]]
log.debug('ISystem', 'BATTLE - surrender', objID, surrenderTo[index], surrenderTo)
Utils.sendMessage(tran, source, MSG_COMBAT_LOST, system.oid, winner.oid)
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, source.oid)
return
processBATTLEPhase.public = 1
processBATTLEPhase.accLevel = AL_ADMIN
def processFINALPhase(self, tran, obj, data):
# TODO find new starting points
# clean up mines if system ownership was lost
owners = []
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner not in owners:
owners.append(planet.owner)
for ownerid in obj.minefield:
if ownerid not in owners:
self.removeMines(obj,ownerid)
return obj.planets[:] + obj.closeFleets[:]
processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
def cmpPlanetByEnergy(self, tran, planetID1, planetID2):
planet1 = tran.db[planetID1]
planet2 = tran.db[planetID2]
return cmp(planet2.plEn, planet1.plEn)
cmpPlanetByEnergy.public = 0
def sortPlanets(self, tran, obj, data):
obj.planets.sort(lambda x, y: self.cmpPlanetByEnergy(tran, x, y))
orbit = 1
for planetID in obj.planets:
planet = tran.db[planetID]
planet.orbit = orbit
orbit += 1
sortPlanets.public = 0
    def rename(self, tran, obj, newName, nType):
        """Rename the system and regenerate its planets' names.

        Preconditions (each failure raises GameException):
        - the calling commander owns at least one planet here,
        - no other commander owns a planet in the system,
        - the stripped name passes Utils.isCorrectName,
        - the name is unique within the galaxy,
        - the last rename happened at least one game day ago.

        Returns the list [new system name, planet name, planet name, ...].
        """
        newName = newName.strip()
        # you have to own all planets
        # TODO: Throw another cmdr exc AFTER you have no planet
        haveOne = 0
        anotherComm = 0
        for planetID in obj.planets:
            planet = tran.db[planetID]
            # planets owned by OID_NONE are unowned and do not block renaming
            if planet.owner != tran.session.cid and planet.owner != OID_NONE:
                anotherComm = 1
            if planet.owner == tran.session.cid:
                haveOne = 1
        if not haveOne:
            raise GameException('You cannot change name of this system - you have no planet in this system.')
        if anotherComm:
            raise GameException('You cannot change name of this system - another commander in system.')
        # check validity of name
        if not Utils.isCorrectName(newName):
            raise GameException('Invalid name. Only characters, digits, space, dot and dash permitted, max. length is 30 characters.')
        # check if there is other system with this name
        galaxy = tran.db[obj.compOf]
        for systemID in galaxy.systems:
            if tran.db[systemID].name == newName and systemID != obj.oid:
                raise GameException('This name is already used.')
        # TODO you have to own this system longer than previous owner
        # one change per 1 day allowed
        turn = tran.db[OID_UNIVERSE].turn
        if obj.lastNameChng + Rules.turnsPerDay <= turn:
            # rename system
            obj.name = newName
            # rename planets -- planet names derive from the system name, the
            # naming scheme nType and the planet's 1-based orbit number
            newNames = [obj.name]
            for planetID in obj.planets:
                planet = tran.db[planetID]
                planet.name = Utils.getPlanetName(obj.name, nType, planet.orbit - 1)
                newNames.append(planet.name)
            obj.lastNameChng = turn
        else:
            raise GameException('You cannot change name of this system - name has been changed recently (try it one day later).')
        return newNames
    rename.public = 1
    rename.accLevel = AL_NONE
def createPlanet(self, tran, obj):
planet = self.new(T_PLANET)
planet.compOf = obj.oid
oid = tran.db.create(planet)
obj.planets.append(oid)
return oid
def addMine(self,obj,ownerid,minetechid,maxnum): #add a mine for an owner
if ownerid in obj.minefield:
if len(obj.minefield[ownerid]) < maxnum:
obj.minefield[ownerid].append(minetechid)
else:
obj.minefield[ownerid]= [minetechid]
addMine.public = 1
addMine.accLevel = AL_ADMIN
def getMines(self,obj,ownerid): #get all mines of an owner
if ownerid in obj.minefield:
return obj.minefield[ownerid]
else:
return []
getMines.public = 1
getMines.accLevel = AL_ADMIN
def removeMines(self,obj,ownerid): #remove all mines of an owner
if ownerid in obj.minefield:
del obj.minefield[ownerid]
removeMines.public = 0
    def fireMine(self,obj,ownerid): #shoot the mine
        """Detonate one random mine from the owner's minefield.

        Returns (damage, attack, ignoreshield) for the detonated mine, or
        (False, False, False) when the owner has no minefield here.  The
        detonated mine is consumed; the owner's entry is removed once the
        field is empty.
        """
        if ownerid in obj.minefield:
            mine = obj.minefield[ownerid].pop(random.randrange(0,len(obj.minefield[ownerid]))) #select a random mine to detonate
            if len(obj.minefield[ownerid]) == 0:
                obj.minefield.pop(ownerid) #delete the owner if no more mines
        else:
            return False,False,False
        tech = Rules.techs[mine]
        # NOTE(review): randrange's upper bound is exclusive, so the roll can
        # never reach weaponDmgMax -- confirm whether randint was intended
        damage = random.randrange(tech.weaponDmgMin,tech.weaponDmgMax)
        attack = tech.weaponAtt
        ignoreshield = tech.weaponIgnoreShield
        return damage,attack,ignoreshield
    fireMine.public = 1
    fireMine.accLevel = AL_ADMIN
def getSystemMineLauncher(self,tran,obj,playerID):
launchtech = 0
mineclass = 0
structure = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
if tech.mineclass > mineclass:
if tech.mineclass > mineclass:
mineclass = tech.mineclass
launchtech = tech
structure = struct[STRUCT_IDX_TECHID]
return launchtech, structure
getSystemMineLauncher.public = 0
def getSystemMineSource(self,tran,obj,playerID):
source = 0
mineclass = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
if tech.mineclass > mineclass:
if tech.mineclass > mineclass:
mineclass = tech.mineclass
source = planetID
return source
getSystemMineSource.public = 0
def getSystemCombatBonuses(self,tran,obj,playerID):
systemAtt = 0;
systemDef = 0;
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], planet.owner)
if tech.systemAtt > 0 or tech.systemDef > 0:
systemAtt = max(systemAtt,tech.systemAtt*techEff)
systemDef = max(systemDef,tech.systemDef*techEff)
return (systemAtt,systemDef)
getSystemCombatBonuses.public = 0
    def loadDOMNode(self, tran, obj, xoff, yoff, node):
        """Initialize this system (and create its planets) from an XML node.

        xoff/yoff translate the coordinates stored in the XML into galaxy
        space.  A <properties> child loads plain attributes onto the system;
        each <planet> child creates a planet on the next 1-based orbit and
        names it from the system name and the galaxy's naming scheme.
        Raises GameException on an unknown element; returns SUCC.
        """
        obj.x = float(node.getAttribute('x')) + xoff
        obj.y = float(node.getAttribute('y')) + yoff
        orbit = 1
        nType = Utils.getPlanetNamesType()
        for elem in node.childNodes:
            if elem.nodeType == Node.ELEMENT_NODE:
                name = elem.tagName
                if name == 'properties':
                    self.loadDOMAttrs(obj, elem)
                elif name == 'planet':
                    # create planet
                    planet = tran.db[self.createPlanet(tran, obj)]
                    self.cmd(planet).loadDOMNode(tran, planet, obj.x, obj.y, orbit, elem)
                    # planet.name = u'%s %s' % (obj.name, '-ABCDEFGHIJKLMNOPQRSTUVWXYZ'[orbit])
                    planet.name = Utils.getPlanetName(obj.name, nType, orbit - 1)
                    orbit += 1
                else:
                    raise GameException('Unknown element %s' % name)
        # the commented-out block below is the retired orbital-rotation math
        #~ # compute rotational constants
        #~ galaxy = tran.db[obj.compOf]
        #~ dx = obj.x - galaxy.x
        #~ dy = obj.y - galaxy.y
        #~ obj.dist = math.sqrt(dx * dx + dy * dy)
        #~ if obj.dist > 0:
        #~ obj.dAngle = math.sqrt(galaxy.centerWeight / obj.dist) / obj.dist
        #~ else:
        #~ obj.dAngle = 0.0
        #~ if dx != 0:
        #~ obj.sAngle = math.atan(dy / dx)
        #~ if dx < 0: obj.sAngle += math.pi
        #~ elif dy > 0:
        #~ obj.sAngle = math.pi / 2
        #~ elif dx < 0:
        #~ obj.sAngle = math.pi * 3 / 2
        #~ # this is a check only
        #~ angle = obj.sAngle + (0 / 384.0) * obj.dAngle
        #~ x = galaxy.x + obj.dist * math.cos(angle)
        #~ y = galaxy.y + obj.dist * math.sin(angle)
        #~ if x != obj.x or y != obj.y:
        #~ log.warning(obj.name, obj.x, obj.y, dx, dy, obj.dist, obj.dAngle, obj.sAngle, x, y)
        return SUCC
|
mozts2005/OuterSpace
|
server/lib/ige/ospace/ISystem.py
|
Python
|
gpl-2.0
| 44,480
|
[
"Galaxy"
] |
f3dbf6c20c85bddec5fd1fc9fb2064c1ec5a186b004033144f546460897df615
|
# Jython Database Specification API 2.0
#
# $Id: zxtest.py 2281 2003-04-10 20:17:43Z bzimmer $
#
# Copyright (c) 2001 brian zimmer <bzimmer@ziclix.com>
from com.ziclix.python.sql import zxJDBC
from java.util import Calendar, Date as JDate
import tempfile, os, time, runner
class zxCoreTestCase(runner.SQLTestCase):
    """Base test case managing one zxJDBC connection per test run."""

    def setUp(self):
        runner.SQLTestCase.setUp(self)
        # each test controls its own commits, so disable autocommit
        self.db = self.connect()
        self.db.autocommit = 0

    def tearDown(self):
        self.db.close()
        runner.SQLTestCase.tearDown(self)

    def connect(self):
        """Open a fresh connection through the configured driver factory."""
        factory = runner.__imp__(self.factory.classname)
        connect = getattr(factory, self.factory.method)
        argValues = [pair[1] for pair in self.factory.arguments]
        return connect(*argValues, **self.factory.keywords)

    def cursor(self, *args, **kws):
        """Create a cursor, wrapping it with the test's datahandler if set."""
        cur = self.db.cursor(*args, **kws)
        if hasattr(self, "datahandler"):
            cur.datahandler = self.datahandler(cur.datahandler)
        return cur
class zxJDBCTestCase(zxCoreTestCase):
    """Test case that surrounds every test with a populated ``zxtesting`` table."""
    def setUp(self):
        zxCoreTestCase.setUp(self)
        c = self.cursor()
        try:
            # drop a possible leftover table from a previous failed run
            c.execute("drop table zxtesting")
            self.db.commit()
        except:
            # table did not exist -- clear the failed transaction
            self.db.rollback()
        try:
            c.execute("create table zxtesting (id int not null, name varchar(32), state varchar(32), primary key (id))")
            self.db.commit()
            # seven well-known rows that the individual tests rely on
            c.execute("insert into zxtesting (id, name, state) values (1, 'test0', 'il')")
            c.execute("insert into zxtesting (id, name, state) values (2, 'test1', 'wi')")
            c.execute("insert into zxtesting (id, name, state) values (3, 'test2', 'tx')")
            c.execute("insert into zxtesting (id, name, state) values (4, 'test3', 'co')")
            c.execute("insert into zxtesting (id, name, state) values (5, 'test4', 'il')")
            c.execute("insert into zxtesting (id, name, state) values (6, 'test5', 'ca')")
            c.execute("insert into zxtesting (id, name, state) values (7, 'test6', 'wi')")
            self.db.commit()
        finally:
            c.close()
    def tearDown(self):
        c = self.cursor()
        try:
            try:
                c.execute("drop table zxtesting")
            except:
                self.db.rollback()
        finally:
            c.close()
        zxCoreTestCase.tearDown(self)
class zxAPITestCase(zxJDBCTestCase):
    def testConnection(self):
        """testing connection"""
        # setUp must have produced a truthy, usable connection object
        assert self.db, "invalid connection"
    def testAutocommit(self):
        """testing autocommit functionality"""
        # engines without transaction support ignore the flag entirely, so
        # only exercise the toggle when the JDBC metadata reports support
        if self.db.__connection__.getMetaData().supportsTransactions():
            self.db.autocommit = 1
            self.assertEquals(1, self.db.__connection__.getAutoCommit())
            self.db.autocommit = 0
            self.assertEquals(0, self.db.__connection__.getAutoCommit())
def testSimpleQuery(self):
"""testing simple queries with cursor.execute(), no parameters"""
c = self.cursor()
try:
c.execute("select count(*) from zxtesting")
f = c.fetchall()
assert len(f) == 1, "expecting one row"
c.execute("select * from zxtesting")
data = c.fetchone()
assert len(f) == 1, "expecting one row"
assert data[0] == 1, "expected [1] rows, got [%d]" % (data[0])
finally:
c.close()
def testNoneQuery(self):
"""testing that executing None doesn't fail"""
c = self.cursor()
try:
c.execute(None)
finally:
c.close()
    def _test_preparedstatement(self, dynamic):
        # shared driver for the static/dynamic prepared-statement tests;
        # 'dynamic' selects the cursor flavour
        c = self.cursor(dynamic)
        try:
            p = c.prepare("select * from zxtesting where id = ?")
            # the same prepared statement must be reusable across executions
            for i in range(1, 8):
                c.execute(p, (i,))
                data = c.fetchall()
                self.assertEquals(1, len(data))
            assert not p.closed
            p.close()
            assert p.closed
            # executing a closed statement must raise ProgrammingError
            self.assertRaises(zxJDBC.ProgrammingError, c.execute, p, (1,))
        finally:
            c.close()
    def testStaticPrepare(self):
        """testing the prepare() functionality for static cursors"""
        # dynamic=0 -> fully-fetched (static) cursor
        self._test_preparedstatement(0)
    def testDynamicPrepare(self):
        """testing the prepare() functionality for dynamic cursors"""
        # dynamic=1 -> row-at-a-time (dynamic) cursor
        self._test_preparedstatement(1)
    def _test_cursorkeywords(self, *args, **kws):
        # helper: create a cursor with the given positional/keyword options
        # and verify it can run a simple query
        c = self.cursor(*args, **kws)
        try:
            c.execute("select * from zxtesting")
            data = c.fetchmany(1)
            assert len(data) == 1, "expecting one row"
        finally:
            c.close()
    def testCursorKeywords(self):
        """testing the creation of a cursor with keywords"""
        # exercise the legal combinations of the dynamic flag, result-set
        # type and concurrency options
        self._test_cursorkeywords(dynamic=1)
        self._test_cursorkeywords(dynamic=1,
            rstype=zxJDBC.TYPE_SCROLL_INSENSITIVE,
            rsconcur=zxJDBC.CONCUR_READ_ONLY
        )
        self._test_cursorkeywords(1,
            rstype=zxJDBC.TYPE_SCROLL_INSENSITIVE,
            rsconcur=zxJDBC.CONCUR_READ_ONLY
        )
        self._test_cursorkeywords(1,zxJDBC.TYPE_SCROLL_INSENSITIVE,zxJDBC.CONCUR_READ_ONLY)
        # a positional rstype without a matching rsconcur must fail
        self.assertRaises(TypeError, self.cursor, 1, zxJDBC.TYPE_SCROLL_INSENSITIVE)
    def testFileLikeCursor(self):
        """testing the cursor as a file-like object"""
        c = self.cursor()
        try:
            # each line printed to the cursor is executed as a SQL statement
            print >> c, "insert into zxtesting (id, name, state) values (100, 'test100', 'wa')"
            print >> c, "insert into zxtesting (id, name, state) values (101, 'test101', 'co')"
            print >> c, "insert into zxtesting (id, name, state) values (102, 'test102', 'or')"
            self.db.commit()
        finally:
            c.close()
        c = self.cursor()
        try:
            # all three rows written through the file interface must exist
            c.execute("select * from zxtesting where id in (100, 101, 102)")
            f = c.fetchall()
            self.assertEquals(3, len(f))
        finally:
            c.close()
    def testIteration(self):
        """testing the iteration protocol"""
        c = self.cursor()
        try:
            # first with a for loop -- each row is a 3-column tuple and the
            # fixture holds exactly seven rows
            cnt = 0
            c.execute("select * from zxtesting")
            for a in c:
                self.assertEquals(3, len(a))
                cnt += 1
            self.assertEquals(7, cnt)
            # then with a while loop driving next() until StopIteration
            cnt = 0
            c.execute("select * from zxtesting")
            while 1:
                try:
                    self.assertEquals(3, len(c.next()))
                except StopIteration:
                    break
                cnt += 1
            self.assertEquals(7, cnt)
        finally:
            c.close()
    def testClosingCursor(self):
        """testing that a closed cursor throws an exception"""
        c = self.cursor()
        try:
            c.execute("select * from zxtesting")
        finally:
            c.close()
        # any use after close() must raise ProgrammingError
        self.assertRaises(zxJDBC.ProgrammingError, c.execute, ("select * from zxtesting",))
    def testClosingConnectionWithOpenCursors(self):
        """testing that a closed connection closes any open cursors"""
        c = self.cursor()
        d = self.cursor()
        e = self.cursor()
        self.db.close()
        # open a new connection so the tearDown can run
        self.db = self.connect()
        # every cursor of the old connection must now be unusable
        self.assertRaises(zxJDBC.ProgrammingError, c.execute, ("select * from zxtesting",))
        self.assertRaises(zxJDBC.ProgrammingError, d.execute, ("select * from zxtesting",))
        self.assertRaises(zxJDBC.ProgrammingError, e.execute, ("select * from zxtesting",))
    def testNativeSQL(self):
        """testing the connection's ability to convert sql"""
        # nativesql() translates JDBC-style SQL into the driver's dialect;
        # it must produce a non-empty string
        sql = self.db.nativesql("select * from zxtesting where id = ?")
        assert sql is not None
        assert len(sql) > 0
def testTables(self):
"""testing cursor.tables()"""
c = self.cursor()
try:
c.tables(None, None, None, None)
# let's look for zxtesting
found = 0
while not found:
try:
found = "zxtesting" == c.next()[2].lower()
except StopIteration:
break
assert found, "expected to find 'zxtesting'"
c.tables(None, None, "zxtesting", None)
self.assertEquals(1, len(c.fetchall()))
c.tables(None, None, "zxtesting", ("TABLE",))
self.assertEquals(1, len(c.fetchall()))
c.tables(None, None, "zxtesting", ("table",))
self.assertEquals(1, len(c.fetchall()))
finally:
c.close()
def testColumns(self):
"""testing cursor.columns()"""
c = self.cursor()
try:
# deliberately copied so as to produce useful line numbers
c.columns(None, None, "zxtesting", None)
f = c.fetchall()
self.assertEquals(3, c.rowcount)
f.sort(lambda x, y: cmp(x[3], y[3]))
self.assertEquals("name", f[1][3].lower())
# if the db engine handles mixed case, then don't ask about a different
# case because it will fail
if not self.db.__connection__.getMetaData().storesMixedCaseIdentifiers():
c.columns(None, None, "ZXTESTING", None)
f = c.fetchall()
self.assertEquals(3, c.rowcount)
f.sort(lambda x, y: cmp(x[3], y[3]))
self.assertEquals("name", f[1][3].lower())
finally:
c.close()
def testBestRow(self):
"""testing bestrow which finds the optimal set of columns that uniquely identify a row"""
c = self.cursor()
try:
# we're really just testing that this doesn't blow up
c.bestrow(None, None, "zxtesting")
f = c.fetchall()
if f: # we might as well see that it worked
self.assertEquals(1, len(f))
finally:
c.close()
def testTypeInfo(self):
"""testing cursor.gettypeinfo()"""
c = self.cursor()
try:
c.gettypeinfo()
f = c.fetchall()
assert f is not None, "expected some type information, got None"
# this worked prior to the Fetch re-write, now the client will have to bear the burden, sorry
#c.gettypeinfo(zxJDBC.INTEGER)
#f = c.fetchall()
#assert f[0][1] == zxJDBC.INTEGER, "expected [%d], got [%d]" % (zxJDBC.INTEGER, f[0][1])
finally:
c.close()
def _test_scrolling(self, dynamic=0):
if self.vendor.scroll:
c = self.cursor(dynamic,
rstype=getattr(zxJDBC, self.vendor.scroll),
rsconcur=zxJDBC.CONCUR_READ_ONLY
)
else:
c = self.cursor(dynamic)
try:
# set everything up
c.execute("select id, name, state from zxtesting order by id")
self.assertEquals(1, c.fetchone()[0])
self.assertEquals(2, c.fetchone()[0])
self.assertEquals(3, c.fetchone()[0])
# move back two and fetch the row again
c.scroll(-2)
self.assertEquals(2, c.fetchone()[0])
# move to the fifth row (0-based indexing)
c.scroll(4, "absolute")
self.assertEquals(5, c.fetchone()[0])
# move back to the start
c.scroll(-5)
self.assertEquals(1, c.fetchone()[0])
# move to the end
c.scroll(6, "absolute")
self.assertEquals(7, c.fetchone()[0])
# make sure we get an IndexError
self.assertRaises(IndexError, c.scroll, 1, "relative")
self.assertRaises(IndexError, c.scroll, -1, "absolute")
self.assertRaises(zxJDBC.ProgrammingError, c.scroll, 1, "somethingelsealltogether")
finally:
c.close()
def testDynamicCursorScrolling(self):
"""testing the ability to scroll a dynamic cursor"""
self._test_scrolling(1)
def testStaticCursorScrolling(self):
"""testing the ability to scroll a static cursor"""
self._test_scrolling(0)
def _test_rownumber(self, dynamic=0):
if self.vendor.scroll:
c = self.cursor(dynamic,
rstype=getattr(zxJDBC, self.vendor.scroll),
rsconcur=zxJDBC.CONCUR_READ_ONLY
)
else:
c = self.cursor(dynamic)
try:
if not dynamic:
# a dynamic cursor doesn't know if any rows really exist
# maybe the 'possibility' of rows should change .rownumber to 0?
c.execute("select * from zxtesting where 1=0")
self.assertEquals(0, c.rownumber)
c.execute("select * from zxtesting")
self.assertEquals(0, c.rownumber)
c.next()
self.assertEquals(1, c.rownumber)
c.next()
self.assertEquals(2, c.rownumber)
c.scroll(-1)
self.assertEquals(1, c.rownumber)
c.scroll(2, "absolute")
self.assertEquals(2, c.rownumber)
c.scroll(6, "absolute")
self.assertEquals(6, c.rownumber)
finally:
c.close()
self.assertEquals(None, c.rownumber)
def testStaticRownumber(self):
"""testing a static cursor's rownumber"""
self._test_rownumber(0)
def testDynamicRownumber(self):
"""testing a dynamic cursor's rownumber"""
self._test_rownumber(1)
def _test_rowcount(self, dynamic=0):
if self.vendor.scroll:
c = self.cursor(dynamic,
rstype=getattr(zxJDBC, self.vendor.scroll),
rsconcur=zxJDBC.CONCUR_READ_ONLY
)
else:
c = self.cursor(dynamic)
try:
c.execute("select * from zxtesting")
c.next()
c.next()
c.next()
if dynamic:
# dynamic cursors only know about the number of rows encountered
self.assertEquals(3, c.rowcount)
else:
self.assertEquals(7, c.rowcount)
c.scroll(-1)
# make sure they don't change just because we scrolled backwards
if dynamic:
# dynamic cursors only know about the number of rows encountered
self.assertEquals(3, c.rowcount)
else:
self.assertEquals(7, c.rowcount)
finally:
c.close()
def testStaticRowcount(self):
"""testing a static cursor's rowcount"""
self._test_rowcount(0)
def testDynamicRowcount(self):
"""testing a dynamic cursor's rowcount"""
self._test_rowcount(1)
def testTableTypeInfo(self):
"""testing cursor.gettabletypeinfo()"""
c = self.cursor()
try:
c.gettabletypeinfo()
c.fetchall()
assert c.rowcount > 0, "expected some table types"
finally:
c.close()
def testTupleParams(self):
"""testing the different ways to pass params to execute()"""
c = self.cursor()
try:
self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting where id = ?", params=4)
c.execute("select * from zxtesting where id = ?", params=[4])
c.execute("select * from zxtesting where id = ?", params=(4,))
finally:
c.close()
def testConnectionAttribute(self):
"""testing the getting and setting of cursor.connection"""
c = self.cursor()
try:
from com.ziclix.python.sql import PyConnection
assert isinstance(c.connection, PyConnection), "expected PyConnection"
self.assertRaises(TypeError, setattr, (c, "connection", None), None)
finally:
c.close()
def testFetchMany(self):
"""testing cursor.fetchmany()"""
c = self.cursor()
try:
c.execute("select * from zxtesting")
data = c.fetchmany(6)
assert len(data) == 6, "expected [6] rows, got [%d]" % (len(data))
c.execute("select * from zxtesting")
data = c.fetchmany(16)
assert len(data) == 7, "expected [7] rows, got [%d]" % (len(data))
finally:
c.close()
def testQueryWithParameter(self):
"""testing query by parameter"""
c = self.cursor()
try:
c.execute("select name from zxtesting where state = ?", [("il",)], {0:zxJDBC.VARCHAR})
data = c.fetchall()
assert len(data) == 2, "expected [2] rows, got [%d]" % (len(data))
c.execute("select name from zxtesting where state = ?", [("co",)], {0:zxJDBC.VARCHAR})
data = c.fetchall()
assert len(data) == 1, "expected [1] row, got [%d]" % (len(data))
finally:
c.close()
def testInsertWithFile(self):
"""testing insert with file"""
assert self.has_table("texttable"), "missing attribute texttable"
fp = open(tempfile.mktemp(), "w")
c = self.cursor()
try:
try:
c.execute(self.table("texttable")[1])
data = fp.name * 300
data = data[:3500]
fp.write(data)
fp.flush()
fp.close()
fp = open(fp.name, "r")
c.execute("insert into %s (a, b) values (?, ?)" % (self.table("texttable")[0]), [(0, fp)], {1:zxJDBC.LONGVARCHAR})
self.db.commit()
c.execute("select b from %s" % (self.table("texttable")[0]))
f = c.fetchall()
assert len(f) == 1, "expected [1] row, got [%d]" % (len(f))
assert len(f[0][0]) == len(data), "expected [%d], got [%d]" % (len(data), len(f[0][0]))
assert data == f[0][0], "failed to retrieve the same text as inserted"
except Exception, e:
raise e
finally:
c.execute("drop table %s" % (self.table("texttable")[0]))
c.close()
self.db.commit()
fp.close()
os.remove(fp.name)
def calendar(self):
c = Calendar.getInstance()
c.setTime(JDate())
return c
def testDate(self):
"""testing creation of Date"""
# Java uses milliseconds and Python uses seconds, so adjust the time accordingly
# seeded with Java
c = self.calendar()
o = zxJDBC.DateFromTicks(c.getTime().getTime() / 1000L)
v = zxJDBC.Date(c.get(Calendar.YEAR), c.get(Calendar.MONTH) + 1, c.get(Calendar.DATE))
assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
# seeded with Python
t = time.time()
l = time.localtime(t)
o = zxJDBC.DateFromTicks(t)
v = zxJDBC.Date(l[0], l[1], l[2])
assert o.equals(v), "incorrect date conversion, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
def testTime(self):
"""testing creation of Time"""
# Java uses milliseconds and Python uses seconds, so adjust the time accordingly
# seeded with Java
c = self.calendar()
o = zxJDBC.TimeFromTicks(c.getTime().getTime() / 1000L)
v = zxJDBC.Time(c.get(Calendar.HOUR), c.get(Calendar.MINUTE), c.get(Calendar.SECOND))
assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
# seeded with Python
#t = time.time()
#l = time.localtime(t)
#o = zxJDBC.TimeFromTicks(t)
#v = zxJDBC.Time(l[3], l[4], l[5])
#assert o.equals(v), "incorrect date conversion using python, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
def testTimestamp(self):
"""testing creation of Timestamp"""
# Java uses milliseconds and Python uses seconds, so adjust the time accordingly
# seeded with Java
c = self.calendar()
o = zxJDBC.TimestampFromTicks(c.getTime().getTime() / 1000L)
v = zxJDBC.Timestamp(c.get(Calendar.YEAR), c.get(Calendar.MONTH) + 1, c.get(Calendar.DATE),
c.get(Calendar.HOUR), c.get(Calendar.MINUTE), c.get(Calendar.SECOND))
assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
# seeded with Python
#t = time.time()
#l = time.localtime(t)
#o = zxJDBC.TimestampFromTicks(t)
#v = zxJDBC.Timestamp(l[0], l[1], l[2], l[3], l[4], l[5])
#assert o.equals(v), "incorrect date conversion using python, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
def _test_precision(self, (tabname, sql), diff, values, attr):
try:
c = self.cursor()
try:
c.execute("drop table %s" % (tabname))
self.db.commit()
except:
self.db.rollback()
finally:
c.close()
try:
c = self.cursor()
c.execute(sql)
c.execute("insert into %s (a, b) values (?, ?)" % (tabname), map(lambda x: (0, x), values))
c.execute("select a, b from %s" % (tabname))
f = c.fetchall()
assert len(values) == len(f), "mismatched result set length"
for i in range(0, len(f)):
v = values[i]
if attr: v = getattr(v, attr)()
msg = "expected [%0.10f], got [%0.10f] for index [%d] of [%d]" % (v, f[i][1], (i+1), len(f))
assert diff(f[i][1], values[i]) < 0.01, msg
self.db.commit()
finally:
c.close()
try:
c = self.cursor()
try:
c.execute("drop table %s" % (tabname))
self.db.commit()
except:
self.db.rollback()
finally:
c.close()
def testFloat(self):
"""testing value of float"""
assert self.has_table("floattable"), "missing attribute floattable"
values = [4.22, 123.44, 292.09, 33.2, 102.00, 445]
self._test_precision(self.table("floattable"), lambda x, y: x-y, values, None)
def testBigDecimal(self):
"""testing value of BigDecimal"""
assert self.has_table("floattable"), "missing attribute floattable"
from java.math import BigDecimal
values = [BigDecimal(x).setScale(2, BigDecimal.ROUND_UP) for x in [4.22, 123.44, 292.09, 33.2, 102.00, 445]]
self._test_precision(self.table("floattable"), lambda x, y, b=BigDecimal: b(x).subtract(y).doubleValue(), values, "doubleValue")
def testBigDecimalConvertedToDouble(self):
"""testing value of BigDecimal when converted to double"""
assert self.has_table("floattable"), "missing attribute floattable"
from java.math import BigDecimal
values = [BigDecimal(x).setScale(2, BigDecimal.ROUND_UP) for x in [4.22, 123.44, 292.09, 33.2, 102.00, 445]]
self._test_precision(self.table("floattable"), lambda x, y: x - y.doubleValue(), values, "doubleValue")
def testNextset(self):
"""testing nextset"""
c = self.cursor()
try:
c.execute("select * from zxtesting where id = ?", [(3,), (4,)])
f = c.fetchall()
assert f, "expected results, got None"
assert len(f) == 1, "expected [1], got [%d]" % (len(f))
assert c.nextset(), "expected next set, got None"
f = c.fetchall()
assert f, "expected results after call to nextset(), got None"
assert len(f) == 1, "expected [1], got [%d]" % (len(f))
finally:
c.close()
def testJavaUtilList(self):
"""testing parameterized values in a java.util.List"""
c = self.cursor()
try:
from java.util import LinkedList
a = LinkedList()
a.add((3,))
c.execute("select * from zxtesting where id = ?", a)
f = c.fetchall()
assert len(f) == 1, "expected [1], got [%d]" % (len(f))
finally:
c.close()
    def testUpdateCount(self):
        """testing update count functionality"""
        c = self.cursor()
        try:
            c.execute("insert into zxtesting values (?, ?, ?)", [(500, 'bz', 'or')])
            assert c.updatecount == 1, "expected [1], got [%d]" % (c.updatecount)
            # a select produces no update count
            c.execute("select * from zxtesting")
            self.assertEquals(None, c.updatecount)
            # there's a *feature* in the mysql engine where it returns 0 for delete if there is no
            # where clause, regardless of the actual value. using a where clause forces it to calculate
            # the appropriate value
            c.execute("delete from zxtesting where 1>0")
            assert c.updatecount == 8, "expected [8], got [%d]" % (c.updatecount)
            # all rows were just deleted, so the update touches nothing
            c.execute("update zxtesting set name = 'nothing'")
            self.assertEquals(0, c.updatecount)
        finally:
            c.close()
    def _test_time(self, (tabname, sql), factory, values, _type, _cmp=cmp, datahandler=None):
        """Round-trip temporal values through a scratch table.

        (tabname, sql) -- table name and its CREATE statement (py2 tuple parameter)
        factory        -- constructor (e.g. zxJDBC.Date) applied to each value tuple
        values         -- sequence of argument tuples for the factory
        _type          -- zxJDBC SQL type used for the parameter binding
        _cmp           -- comparator; must return 0 when two values are equal
        datahandler    -- optional wrapper applied around the cursor's datahandler
        """
        c = self.cursor()
        if datahandler: c.datahandler = datahandler(c.datahandler)
        try:
            c.execute(sql)
            # Build the temporal instances; apply(f, x) == f(*x) in py2.
            dates = map(lambda x, f=factory: apply(f, x), values)
            for a in dates:
                c.execute("insert into %s values (1, ?)" % (tabname), [(a,)], {0:_type})
            self.db.commit()
            # Query back by value and compare with the supplied comparator.
            c.execute("select * from %s where b = ?" % (tabname), [(dates[0],)], {0:_type})
            f = c.fetchall()
            assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
            assert _cmp(f[0][1], dates[0]) == 0, "expected date [%s], got [%s]" % (str(dates[0]), str(f[0][1]))
            # Delete a row by value and confirm exactly one row disappeared.
            c.execute("delete from %s where b = ?" % (tabname), [(dates[1],)], {0:_type})
            self.db.commit()
            c.execute("select * from %s" % (tabname))
            f = c.fetchall()
            self.assertEquals(len(f), len(dates) - 1)
        finally:
            # Always drop the scratch table, even on failure.
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()
def testUpdateSelectByDate(self):
"""testing insert, update, query and delete by java.sql.Date"""
assert self.has_table("datetable"), "missing attribute datetable"
def _cmp_(x, y):
xt = (x.getYear(), x.getMonth(), x.getDay())
yt = (y.getYear(), y.getMonth(), y.getDay())
return not xt == yt
values = [(1996, 6, 22), (2000, 11, 12), (2000, 1, 12), (1999, 9, 24)]
self._test_time(self.table("datetable"), zxJDBC.Date, values, zxJDBC.DATE, _cmp_)
def testUpdateSelectByTime(self):
"""testing insert, update, query and delete by java.sql.Time"""
assert self.has_table("timetable"), "missing attribute timetable"
def _cmp_(x, y):
xt = (x.getHours(), x.getMinutes(), x.getSeconds())
yt = (y.getHours(), y.getMinutes(), y.getSeconds())
return not xt == yt
values = [(10, 11, 12), (3, 1, 12), (22, 9, 24)]
self._test_time(self.table("timetable"), zxJDBC.Time, values, zxJDBC.TIME, _cmp_)
def testUpdateSelectByTimestamp(self):
"""testing insert, update, query and delete by java.sql.Timestamp"""
assert self.has_table("timestamptable"), "missing attribute timestamptable"
def _cmp_(x, y):
xt = (x.getYear(), x.getMonth(), x.getDay(), x.getHours(), x.getMinutes(), x.getSeconds())
yt = (y.getYear(), y.getMonth(), y.getDay(), y.getHours(), y.getMinutes(), y.getSeconds())
return not xt == yt
values = [(1996, 6, 22, 10, 11, 12), (2000, 11, 12, 3, 1, 12), (2001, 1, 12, 4, 9, 24)]
self._test_time(self.table("timestamptable"), zxJDBC.Timestamp, values, zxJDBC.TIMESTAMP, _cmp_)
def testOrderOfArgsMaxRowsOnly(self):
"""testing execute with max rows only"""
c = self.cursor()
try:
# maxrows only (SAPDB doesn't support maxrows as of version 7.2.0)
c.execute("select * from zxtesting", maxrows=3)
f = c.fetchall()
assert len(f) == 3, "expected length [3], got [%d]" % (len(f))
finally:
c.close()
self.db.commit()
def testOrderOfArgs(self):
"""testing execute with different argument orderings"""
c = self.cursor()
try:
# bindings and params flipped
c.execute("select * from zxtesting where id = ?", bindings={0:zxJDBC.INTEGER}, params=[(3,)])
f = c.fetchall()
assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
# bindings and params flipped, empty params
c.execute("select * from zxtesting where id = ?", bindings={}, params=[(3,)])
f = c.fetchall()
assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
# bindings and params flipped, empty params, empty bindings
c.execute("select * from zxtesting where id = 3", bindings={}, params=[])
f = c.fetchall()
assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
finally:
c.close()
self.db.commit()
def testMaxrows(self):
"""testing maxrows"""
c = self.cursor()
try:
c.execute("select * from zxtesting", maxrows=3)
self.assertEquals(3, len(c.fetchall()))
c.execute("select * from zxtesting where id > ?", (1,), maxrows=3)
self.assertEquals(3, len(c.fetchall()))
c.execute("select count(*) from zxtesting")
f = c.fetchall()
num = f[0][0]
c.execute("select * from zxtesting", maxrows=0)
self.assertEquals(num, len(c.fetchall()))
finally:
c.close()
self.db.commit()
def testPrimaryKey(self):
"""testing for primary key information"""
c = self.cursor()
try:
c.primarykeys(None, None, "zxtesting")
f = c.fetchall()
assert len(f) == 1, "expected [1], got [%d]" % (len(f))
assert f[0][3].lower() == "id", "expected [id], got [%s]" % (f[0][3])
finally:
c.close()
self.db.commit()
    def testForeignKey(self):
        """testing for foreign key information"""
        # Intentionally empty: the hook exists but performs no checks here
        # (presumably pending a portable fixture -- TODO confirm).
        pass
def testIndexInfo(self):
"""testing index information"""
c = self.cursor()
try:
c.statistics(None, None, "zxtesting", 0, 0)
f = c.fetchall()
assert f is not None, "expected some values"
# filter out any indicies with name None
f = filter(lambda x: x[5], f)
assert len(f) == 1, "expected [1], got [%d]" % (len(f))
finally:
c.close()
def _test_fetching(self, dynamic=0):
c = self.cursor(dynamic)
try:
# make sure None if the result is an empty result set
c.execute("select * from zxtesting where 1<0")
self.assertEquals(None, c.fetchone())
# make sure an empty sequence if the result is an empty result set
c.execute("select * from zxtesting where 1<0")
self.assertEquals([], c.fetchmany())
# make sure an empty sequence if the result is an empty result set
c.execute("select * from zxtesting where 1<0")
self.assertEquals([], c.fetchall())
# test some arraysize features
c.execute("select * from zxtesting")
f = c.fetchmany()
assert len(f) == c.arraysize, "expecting [%d] rows, got [%d]" % (c.arraysize, len(f))
c.execute("select * from zxtesting")
c.arraysize = 4
f = c.fetchmany()
assert len(f) == 4, "expecting [4] rows, got [%d]" % (len(f))
c.execute("select * from zxtesting")
c.arraysize = -1
f = c.fetchmany()
assert len(f) == 7, "expecting [7] rows, got [%d]" % (len(f))
finally:
c.close()
    def testStaticFetching(self):
        """testing various static fetch methods"""
        # Delegates to the shared fetch exercise with a static cursor.
        self._test_fetching(0)
    def testDynamicFetching(self):
        """testing various dynamic fetch methods"""
        # Delegates to the shared fetch exercise with a dynamic cursor.
        self._test_fetching(1)
def testFetchingBeforeExecute(self):
"""testing fetch methods before execution"""
c = self.cursor()
try:
try:
c.fetchall()
except zxJDBC.Error, e:
pass
else:
self.fail("excepted exception calling fetchall() prior to execute()")
finally:
c.close()
def testBindingsWithNoParams(self):
"""testing bindings with no params"""
c = self.cursor()
try:
self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting", {0:zxJDBC.INTEGER})
# test an inappropriate value for a binding
self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting", {0:{}})
finally:
c.close()
def testDynamicCursor(self):
"""testing dynamic cursor queries"""
c = self.cursor(1)
try:
c.execute("select * from zxtesting")
f = c.fetchmany(4)
assert len(f) == 4, "expected [4] rows, got [%d]" % (len(f))
finally:
c.close()
    def testRowid(self):
        """testing the autoincrement facilities of the different handlers"""
        assert self.has_table("autoincrementtable"), "no autoincrement table"
        c = self.cursor()
        # lastrowid must start out unset on a fresh cursor.
        assert c.lastrowid == None, "expected initial lastrowid to be None"
        try:
            tabname, sql = self.table("autoincrementtable")
            c.execute(sql)
            c.execute("insert into %s (b) values (?)" % (tabname), [(0,)])
            assert c.lastrowid is not None, "lastrowid is None"
            try:
                # Each insert should advance lastrowid by exactly one.
                for idx in range(c.lastrowid + 1, c.lastrowid + 25):
                    c.execute("insert into %s (b) values (?)" % (tabname), [(idx,)])
                    assert c.lastrowid is not None, "lastrowid is None"
                    self.assertEquals(idx, c.lastrowid)
            except:
                self.db.rollback()
        finally:
            # Some configurations provide a follow-up statement to run after
            # the autoincrement table (e.g. dropping a sequence).
            if self.has_table("post_autoincrementtable"):
                try:
                    sequence, sql = self.table("post_autoincrementtable")
                    c.execute(sql)
                    self.db.commit()
                except:
                    self.db.rollback()
            try:
                c.execute("drop table %s" % (tabname))
                self.db.commit()
            except:
                self.db.rollback()
            self.db.commit()
            c.close()
    def _test_fetchapi(self, dynamic=0):
        """Test the public Java API for Fetch"""
        from com.ziclix.python.sql import Fetch, WarningListener
        cur = self.cursor()
        try:
            # Drive the Fetch class directly against a raw JDBC ResultSet.
            c = self.db.__connection__
            stmt = c.prepareStatement("select * from zxtesting where id < ?")
            stmt.setInt(1, 5)
            rs = stmt.executeQuery()
            fetch = Fetch.newFetch(cur.datahandler, dynamic)
            # Listener re-raises any SQL warning so it fails immediately.
            class WL(WarningListener):
                def warning(self, event):
                    raise event.getWarning()
            wl = WL()
            fetch.addWarningListener(wl)
            # the RS is closed by Fetch
            fetch.add(rs)
            if not dynamic:
                # A static fetch knows the row count up front.
                self.assertEquals(4, fetch.getRowCount())
            assert fetch.fetchone()
            assert fetch.fetchmany(2)
            assert fetch.fetchall()
            # Once exhausted, a further fetchall() yields nothing.
            assert not fetch.fetchall()
            self.assertEquals(4, fetch.getRowCount())
            assert fetch.removeWarningListener(wl)
            fetch.close()
            stmt.close()
        finally:
            cur.close()
    def testStaticFetchAPI(self):
        """Test static Java Fetch API"""
        # Delegates to the shared Fetch exercise in static mode.
        self._test_fetchapi(0)
    def testDynamicFetchAPI(self):
        """Test dynamic Java Fetch API"""
        # Delegates to the shared Fetch exercise in dynamic mode.
        self._test_fetchapi(1)
class LOBTestCase(zxJDBCTestCase):
    """Tests for reading and writing BLOB and CLOB columns through zxJDBC."""
    def _test_blob(self, obj=0):
        # Round-trip a java-serialized tuple through a BLOB column.  When
        # obj == 1 the payload is supplied as a Python file object,
        # otherwise as a java.io.FileInputStream.
        assert self.has_table("blobtable"), "no blob table"
        tabname, sql = self.table("blobtable")
        fn = tempfile.mktemp()
        fp = None
        c = self.cursor()
        try:
            hello = ("hello",) * 1024
            c.execute(sql)
            self.db.commit()
            from java.io import FileOutputStream, FileInputStream, ObjectOutputStream, ObjectInputStream, ByteArrayInputStream
            # Serialize the tuple to a temp file ...
            fp = FileOutputStream(fn)
            oos = ObjectOutputStream(fp)
            oos.writeObject(hello)
            fp.close()
            # ... and read it back to prove serialization itself works.
            fp = FileInputStream(fn)
            blob = ObjectInputStream(fp)
            value = blob.readObject()
            fp.close()
            assert hello == value, "unable to serialize properly"
            if obj == 1:
                fp = open(fn, "rb")
            else:
                fp = FileInputStream(fn)
            c.execute("insert into %s (a, b) values (?, ?)" % (tabname), [(0, fp)], {1:zxJDBC.BLOB})
            self.db.commit()
            c.execute("select * from %s" % (tabname))
            f = c.fetchall()
            bytes = f[0][1]
            # Deserialize the fetched bytes and compare with the original.
            blob = ObjectInputStream(ByteArrayInputStream(bytes)).readObject()
            assert hello == blob, "blobs are not equal"
        finally:
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()
            # Close whichever stream is still open before removing the file.
            if os.path.exists(fn):
                if fp:
                    fp.close()
                os.remove(fn)
    def testBLOBAsString(self):
        """testing BLOB as string"""
        self._test_blob()
    def testBLOBAsPyFile(self):
        """testing BLOB as PyFile"""
        self._test_blob(1)
    def _test_clob(self, asfile=0):
        # Round-trip a large string through a CLOB column; when asfile is
        # true the value is supplied as a Python file object.
        assert self.has_table("clobtable"), "no clob table"
        tabname, sql = self.table("clobtable")
        c = self.cursor()
        try:
            hello = "hello" * 1024 * 10
            c.execute(sql)
            self.db.commit()
            if asfile:
                fp = open(tempfile.mktemp(), "w")
                fp.write(hello)
                fp.flush()
                fp.close()
                obj = open(fp.name, "r")
            else:
                obj = hello
            c.execute("insert into %s (a, b) values (?, ?)" % (tabname), [(0, obj)], {1:zxJDBC.CLOB})
            c.execute("select * from %s" % (tabname), maxrows=1)
            f = c.fetchall()
            assert len(f) == 1, "expected [%d], got [%d]" % (1, len(f))
            assert hello == f[0][1], "clobs are not equal"
        finally:
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()
            if asfile:
                obj.close()
                os.remove(obj.name)
    def testCLOBAsString(self):
        """testing CLOB as string"""
        self._test_clob(0)
    def testCLOBAsPyFile(self):
        """testing CLOB as PyFile"""
        self._test_clob(1)
class BCPTestCase(zxJDBCTestCase):
    """Tests for the bulk-copy plumbing: CSV sink, DB-to-DB pipe and BCP."""
    def testCSVPipe(self):
        """testing the CSV pipe"""
        from java.io import PrintWriter, FileWriter
        from com.ziclix.python.sql.pipe import Pipe
        from com.ziclix.python.sql.pipe.db import DBSource
        from com.ziclix.python.sql.pipe.csv import CSVSink
        try:
            src = self.connect()
            fn = tempfile.mktemp(suffix="csv")
            # NOTE(review): if connect() or mktemp() raises, the finally
            # block below references 'writer'/'fn' before assignment.
            writer = PrintWriter(FileWriter(fn))
            csvSink = CSVSink(writer)
            c = self.cursor()
            try:
                # Rows containing commas and quotes exercise CSV escaping.
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1000, 'this,has,a,comma', 'and a " quote')])
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1001, 'this,has,a,comma and a "', 'and a " quote')])
                # ORACLE has a problem calling stmt.setObject(index, null)
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1010, '"this,has,a,comma"', None)], {2:zxJDBC.VARCHAR})
                self.db.commit()
            finally:
                self.db.rollback()
                c.close()
            # Stream the table through the pipe into the CSV writer.
            dbSource = DBSource(src, c.datahandler.__class__, "zxtesting", None, None, None)
            cnt = Pipe().pipe(dbSource, csvSink) - 1 # ignore the header row
        finally:
            writer.close()
            src.close()
            os.remove(fn)
    def testDBPipe(self):
        """testing the DB pipe"""
        from com.ziclix.python.sql.pipe import Pipe
        from com.ziclix.python.sql.pipe.db import DBSource, DBSink
        try:
            src = self.connect()
            dst = self.connect()
            c = self.cursor()
            c.execute("create table zxtestingbcp (id int not null, name varchar(20), state varchar(2), primary key (id))")
            self.db.commit()
            c.execute("select count(*) from zxtesting")
            one = c.fetchone()[0]
            c.close()
            # Copy every row of zxtesting into the scratch table.
            dbSource = DBSource(src, c.datahandler.__class__, "zxtesting", None, None, None)
            dbSink = DBSink(dst, c.datahandler.__class__, "zxtestingbcp", None, None, 1)
            cnt = Pipe().pipe(dbSource, dbSink) - 1 # ignore the header row
            c = self.cursor()
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.execute("delete from zxtestingbcp")
            self.db.commit()
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
            # this tests the internal assert in BCP. we need to handle the case where we exclude
            # all the rows queried (based on the fact no columns exist) but rows were fetched
            # also make sure (eg, Oracle) that the column name case is ignored
            dbSource = DBSource(src, c.datahandler.__class__, "zxtesting", None, ["id"], None)
            dbSink = DBSink(dst, c.datahandler.__class__, "zxtestingbcp", ["id"], None, 1)
            self.assertRaises(zxJDBC.Error, Pipe().pipe, dbSource, dbSink)
            # A filtered copy must match the equivalent filtered count.
            params = [(4,)]
            dbSource = DBSource(src, c.datahandler.__class__, "zxtesting", "id > ?", None, params)
            dbSink = DBSink(dst, c.datahandler.__class__, "zxtestingbcp", None, None, 1)
            cnt = Pipe().pipe(dbSource, dbSink) - 1 # ignore the header row
            c = self.cursor()
            c.execute("select count(*) from zxtesting where id > ?", params)
            one = c.fetchone()[0]
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
        finally:
            # Best-effort cleanup of the scratch table and both connections.
            try:
                c = self.cursor()
                try:
                    c.execute("drop table zxtestingbcp")
                    self.db.commit()
                except:
                    self.db.rollback()
            finally:
                c.close()
            try:
                src.close()
            except:
                src = None
            try:
                dst.close()
            except:
                dst = None
    def testBCP(self):
        """testing bcp parameters and functionality"""
        from com.ziclix.python.sql.util import BCP
        import dbexts
        try:
            src = self.connect()
            dst = self.connect()
            c = self.cursor()
            c.execute("create table zxtestingbcp (id int not null, name varchar(20), state varchar(2), primary key (id))")
            self.db.commit()
            c.execute("select count(*) from zxtesting")
            one = c.fetchone()[0]
            c.close()
            b = BCP(src, dst)
            # Subclasses may pin a specific datahandler for both ends.
            if hasattr(self, "datahandler"):
                b.sourceDataHandler = self.datahandler
                b.destinationDataHandler = self.datahandler
            cnt = b.bcp("zxtesting", toTable="zxtestingbcp")
            c = self.cursor()
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.execute("delete from zxtestingbcp")
            self.db.commit()
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
            # this tests the internal assert in BCP. we need to handle the case where we exclude
            # all the rows queried (based on the fact no columns exist) but rows were fetched
            # also make sure (eg, Oracle) that the column name case is ignored
            self.assertRaises(zxJDBC.Error, b.bcp, "zxtesting", toTable="zxtestingbcp", include=["id"], exclude=["id"])
            # A filtered copy must match the equivalent filtered count.
            params = [(4,)]
            cnt = b.bcp("zxtesting", "id > ?", params, toTable="zxtestingbcp")
            c = self.cursor()
            c.execute("select count(*) from zxtesting where id > ?", params)
            one = c.fetchone()[0]
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
        finally:
            # Best-effort cleanup of the scratch table and both connections.
            try:
                c = self.cursor()
                try:
                    c.execute("drop table zxtestingbcp")
                    self.db.commit()
                except:
                    self.db.rollback()
            finally:
                c.close()
            try:
                src.close()
            except:
                src = None
            try:
                dst.close()
            except:
                dst = None
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/test/zxjdbc/zxtest.py
|
Python
|
gpl-3.0
| 39,389
|
[
"Brian"
] |
f3171f74354b4433cd1e19edaa67065e53aba19d53534929b9b179ab2a39d05e
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# Problem dimensions: the constraint matrix A is m x n, Q is n x n.
m = 1000
n = 2000
# Which solver approaches to exercise (testADMM is set but unused below).
testMehrotra = True
testIPF = True
testADMM = False
# Whether to hand the solver explicit starting points (see manualInit use).
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a semidefinite matrix (currently just the identity)
def Semidefinite(height):
  """Return a distributed identity matrix of the given order."""
  mat = El.DistMatrix()
  El.Identity( mat, height, height )
  return mat
# Make a dense matrix
def RectangDense(height,width):
  """Return a height x width distributed matrix with Gaussian entries."""
  mat = El.DistMatrix()
  El.Gaussian( mat, height, width )
  return mat
# Build the QP data: Q is the identity (see Semidefinite) and A is Gaussian.
Q = Semidefinite(n)
A = RectangDense(m,n)
# Generate a b which implies a primal feasible x
# ==============================================
xGen = El.DistMatrix()
El.Uniform(xGen,n,1,0.5,0.4999)
b = El.DistMatrix()
El.Zeros( b, m, 1 )
# b := A*xGen, so xGen itself is primal feasible.
El.Gemv( El.NORMAL, 1., A, xGen, 0., b )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMatrix()
El.Gaussian(yGen,m,1)
c = El.DistMatrix()
El.Uniform(c,n,1,0.5,0.5)
# c := c - Q*xGen - A^T*yGen
El.Hemv( El.LOWER, -1, Q, xGen, 1., c )
El.Gemv( El.TRANSPOSE, -1., A, yGen, 1., c )
if display:
  El.Display( Q, "Q" )
  El.Display( A, "A" )
  El.Display( b, "b" )
  El.Display( c, "c" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.QPDirectCtrl_d()
xOrig = El.DistMatrix()
yOrig = El.DistMatrix()
zOrig = El.DistMatrix()
if manualInit:
  El.Uniform(xOrig,n,1,0.5,0.4999)
  El.Uniform(yOrig,m,1,0.5,0.4999)
  El.Uniform(zOrig,n,1,0.5,0.4999)
x = El.DistMatrix()
y = El.DistMatrix()
z = El.DistMatrix()
if testMehrotra:
  # Mehrotra-style interior-point approach.
  ctrl.approach = El.QP_MEHROTRA
  ctrl.mehrotraCtrl.primalInit = manualInit
  ctrl.mehrotraCtrl.dualInit = manualInit
  ctrl.mehrotraCtrl.progress = progress
  El.Copy( xOrig, x )
  El.Copy( yOrig, y )
  El.Copy( zOrig, z )
  startMehrotra = El.mpi.Time()
  El.QPDirect(Q,A,b,c,x,y,z,ctrl)
  endMehrotra = El.mpi.Time()
  if worldRank == 0:
    print "Mehrotra time:", endMehrotra-startMehrotra
  if display:
    El.Display( x, "x Mehrotra" )
    El.Display( y, "y Mehrotra" )
    El.Display( z, "z Mehrotra" )
  # Evaluate the objective (1/2) x^T Q x + c^T x at the solution.
  d = El.DistMatrix()
  El.Zeros( d, n, 1 )
  El.Hemv( El.LOWER, 1., Q, x, 0., d )
  obj = El.Dot(x,d)/2 + El.Dot(c,x)
  if worldRank == 0:
    print "Mehrotra (1/2) x^T Q x + c^T x =", obj
if testIPF:
  # IPF interior-point approach with a line search.
  ctrl.approach = El.QP_IPF
  ctrl.ipfCtrl.primalInit = manualInit
  ctrl.ipfCtrl.dualInit = manualInit
  ctrl.ipfCtrl.progress = progress
  ctrl.ipfCtrl.lineSearchCtrl.progress = progress
  El.Copy( xOrig, x )
  El.Copy( yOrig, y )
  El.Copy( zOrig, z )
  startIPF = El.mpi.Time()
  El.QPDirect(Q,A,b,c,x,y,z,ctrl)
  endIPF = El.mpi.Time()
  if worldRank == 0:
    print "IPF time:", endIPF-startIPF
  if display:
    El.Display( x, "x IPF" )
    El.Display( y, "y IPF" )
    El.Display( z, "z IPF" )
  # Evaluate the objective at the IPF solution as well.
  d = El.DistMatrix()
  El.Zeros( d, n, 1 )
  El.Hemv( El.LOWER, 1., Q, x, 0., d )
  obj = El.Dot(x,d)/2 + El.Dot(c,x)
  if worldRank == 0:
    print "IPF c^T x =", obj
# Pause before exiting when run on a single process so the console output
# can be inspected (no figures are actually created in this example).
El.Finalize()
if worldSize == 1:
  raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/QPDirectDense.py
|
Python
|
bsd-3-clause
| 3,319
|
[
"Gaussian"
] |
0c80223de5b062bb9b61bdea9ee60395a7d8e522dc2294eb32c3af8be888aba7
|
from math import sqrt, pi, sin
import random
import numpy
import matplotlib.pyplot as plt
# A tiny one-hidden-layer network of rectified-linear units, trained to
# approximate sin(x) on [-pi, pi] with per-sample updates.
neurons = 10
acceptable_error = .0001 #stops updating if the average squared error is less than this
max_iter = 100000 #stops updating if this number of iterations is reached
updates = 5 #number of times to calculate the average error (and write it to the console)
# NOTE(review): under Python 3 this is a float (true division); the modulo
# check below still works, but integer division may have been intended.
update_iter = max_iter / updates - 1
#the one-neuron function to predict sin(x) is: y = a * max(0, a1 * x + a2) + b
# Per-neuron parameters: output weight a, input weight a1, bias a2, plus
# their pending updates da/da1/da2; b/db are the shared output bias.
a = []
a1 = []
a2 = []
da = []
da1 = []
da2 = []
linear_combo = [] #this is a1 * x + a2
b = 0.0
db = 0.0
for i in range(neurons):
    a.append(random.uniform(-1, 1))
    a1.append(random.uniform(-1,1))
    a2.append(0.0)
    linear_combo.append(0.0)
    da.append(0.0)
    da1.append(0.0)
    da2.append(0.0)
avg_error = 0
current_error = 10
x = 1
#perhaps have the code increase the number of neurons if acceptable error isn't met
#in a future version of the code
while x < max_iter and current_error > acceptable_error:
    #get new input between -pi and pi
    # NOTE(review): `input` shadows the builtin of the same name.
    input = random.uniform(-pi, pi)
    act = sin(input)
    predict = 0
    #update weights and predict sin(input) from the input
    for i in range(neurons):
        # Apply the update computed from the previous sample first.
        a[i] += da[i]
        a1[i] += da1[i]
        a2[i] += da2[i]
        linear_combo[i] = a1[i] * input + a2[i]
        if (linear_combo[i] > 0):
            predict += a[i] * linear_combo[i]
    b += db
    predict += b
    #calculate error, write avg error to console if needed
    error = (act - predict) ** 2
    avg_error += error
    if (x % update_iter == 0):
        avg_error /= update_iter
        print('Iter: {}, Avg squared error: {:.5}'.format(x, avg_error))
        current_error = avg_error
        avg_error = 0
    #calculate weight changes
    # Updates are proportional to |act - predict| and signed toward the
    # target; only neurons whose ReLU was active receive a change.
    step = .001
    sign = numpy.sign(act - predict)
    for i in range(neurons):
        if (linear_combo[i] > 0):
            da[i] = step * linear_combo[i] * sqrt(error) * sign
            da1[i] = step * a[i] * input * sqrt(error) * sign
            da2[i] = step * a[i] * sqrt(error) * sign
    db = step * sqrt(error) * sign
    x += 1
#plot sin(x) against predictions for 100 values of x between -pi and pi
test = []
out = []
sine = []
for y in range (100):
    input = -pi + y * 2 * pi / 99
    test.append(input)
    sine.append(sin(input))
    output = 0
    for i in range(neurons):
        output += a[i] * max(0, a1[i] * input + a2[i])
    output += b
    out.append(output)
plt.plot(test, out, 'gs', test, sine)
plt.axis([-pi, pi, -1.5, 1.5])
plt.show()
|
bbartoldson/examples
|
hacker_ANN/net.py
|
Python
|
mit
| 2,364
|
[
"NEURON"
] |
b52a6f8b38f5cb0d6ee5117d36285bc28a2c45ce81022087fccb2d4d1185885a
|
from compiler import *
ui_strings = [
("music_volume", "Music Volume:"),
("sound_volume", "Sound Volume:"),
("mouse_sensitivity", "Mouse Sensitivity:"),
("invert_mouse_y_axis", "Invert Mouse Y Axis"),
("enabled", "Enabled"),
("disabled", "Disabled"),
("damage_to_player", "Damage to Player:"),
("reduced_to_1_over_4_easiest", "Reduced to 1/4 (Easiest)"),
("reduced_to_1_over_2_easy", "Reduced to 1/2 (Easy)"),
("damage_to_friends", "Damage to Friends:"),
("reduced_to_1_over_2_easiest", "Reduced to 1/2 (Easiest)"),
("reduced_to_3_over_4_easy", "Reduced to 3/4 (Easy)"),
("normal", "Normal"),
("combat_ai", "Combat AI:"),
("combat_speed", "Combat Speed:"),
("good", "Good"),
("average_caps", "Average"),
("poor", "Poor"),
("faster", "Faster"),
("slower", "Slower"),
("control_block_direction", "Control Block Direction:"),
("automatic_recommended", "Automatic"),
("manual_easy", "Manual (Easy)"),
("manual_hard", "Manual (Hard)"),
("by_mouse_movement", "By mouse movement"),
("control_attack_direction", "Control Attack Direction:"),
("lance_control", "Lance Control:"),
("by_relative_enemy_position", "By relative enemy position"),
("by_inverse_mouse_movement", "By inverse mouse movement"),
("battle_size", "Battle Size:"),
("show_attack_direction", "Show Attack Direction"),
("show_targeting_reticule", "Show Targeting Reticle"),
("show_names_of_friendly_troops", "Show Banners on Friendly Troops"),
("report_damage", "Report Damage"),
("report_shot_difficulty", "Report Shot Difficulty"),
("difficulty_rating_percentage", "Difficulty Rating = %d%%"),
("controls", "Controls"),
("video_options", "Video Options"),
("done", "Done"),
("factions", "Factions"),
("item_itemname", "Item - %s"),
("prop_propname", "Prop - %s"),
("unknown_unknownname", "Unknown - %s"),
("entry_point_entrypointname", "Entry Point %d"),
("passage_menu_item_passagename", "Passage (menu item %d)"),
("plant_plantname", "Plant - %s"),
("export_file_for_character_playername_already_exists_overwrite_it", "Export file for character %s already exists. Overwrite it?"),
("yes", "Yes"),
("no", "No"),
("set_save_file_name", "Enter a name for this save-game:"),
("enter_new_name", "Enter a new name:"),
("export_character", "Export Character"),
("import_character", "Import Character"),
("character_playername_exported_successfully", "Character %s exported successfully."),
("character_playername_imported_successfully", "Character %s imported successfully."),
("unable_to_open_import_file", "Unable to open import file."),
("are_you_sure_you_want_to_import_the_character", "Are you sure you want to import the character?"),
("unable_to_find_character_import_file", "Unable to find character import file."),
("mount_and_blade_is_running_in_trial_mode_please_buy_the_game_for_importing_a_character", "Mount&Blade is running in trial mode. Please buy the game for importing a character."),
("change_skin", "Skin"),
("change_hair", "Hair"),
("change_hair_color", "Hair Color"),
("change_beard", "Beard"),
("tutorial", "Tutorial"),
("tutorial_face_generator", "Adjust your character's face using the buttons and the sliders. To rotate the head, click on it and drag the mouse."),
("restore", "Load"),
("cancel", "Cancel"),
("delete", "Delete"),
("confirm_delete_game", "Are you sure you want to delete this game?"),
("error_removing_file", "Error removing file..."),
("day_datedisplay", "Day %d (%d:%d%d)"),
("reset_changes", "Reset Changes"),
("weapon_proficiencies", "Proficiencies"),
("skills", "Skills"),
("attributes", "Attributes"),
("enter_name_here", "*Enter Name Here*"),
("edit_face", "Click to edit face"),
("statistics", "Statistics"),
("next", "Next"),
("prev", "Prev"),
("learn", "Learn"),
("question_saving_policy", "What will the game's saving policy be?"),
("saving_policy_realistic", "Realistic! No quitting without saving!"),
("saving_policy_nonrealistic", "Allow me to quit without saving."),
("tutorial_character_generation", "Now enter your name and distribute your attribute, skill and weapon points. You can click on various elements on the screen to learn how each one will affect your character."),
("str", "STR"),
("agi", "AGI"),
("int", "INT"),
("cha", "CHA"),
("at_learning_limit", "(At learning limit)"),
("not_enough_skill_points_to_learn", "(Not enough skill points to learn)"),
("strength", "strength"),
("agility", "agility"),
("intelligence", "intelligence"),
("charisma", "charisma"),
("not_enough_attributetype_to_learn_this_skill", "(Not enough %s to learn this skill)"),
("explanation_one_handed_weapon", "Covers usage of one handed swords, axes and blunt weapons."),
("explanation_two_handed_weapon", "Covers usage of two handed swords, great axes and mauls."),
("explanation_polearm", "Covers usage of pole weapons like spears, lances, staffs, etc."),
("explanation_archery", "Covers usage of bows."),
("explanation_crossbow", "Covers usage of crossbows."),
("explanation_throwing", "Covers usage of thrown weapons like javelins, darts, stones etc."),
("explanation_firearms", "Covers usage of pistols and muskets."),
("explanation_strength", "Strength: Every point adds +1 to hit points. The following skills can not be developed beyond 1/3 of Strength: ironflesh, Power-strike, Power-throw, Power-draw."),
("explanation_agility", "Agility: Each point gives five weapon points and slightly increases movement speed. The following skills can not be developed beyond 1/3 of Agility: weapon-master, Shield, Athletics, Riding, Horse archery, Looting."),
("explanation_intelligence", "Intelligence: Every point to intelligence immediately gives one extra skill point. The following skills can not be developed beyond 1/3 of Intelligence: Trainer, Tracking, Tactics, Path finding, Spotting, Inventory Management, Wound treatment, Surgery, First-aid, Engineer, Persuasion."),
("explanation_charisma", "Charisma: Each point increases your party size limit by +1. The following skills can not be developed beyond 1/3 of Charisma: Prisoner Management, Leadership, Trade."),
("level", "Level: %d"),
("xp", "Experience: %d"),
("next_level_at", "Next level at: %d"),
("health_player", "Health: %d/%d"),
("health", "Health: %d"),
("attribute_points", "Attribute points: %d"),
("skill_points", "Skill points: %d"),
("weapon_points", "Weapon points: %d"),
("mission_losses_none", " none."),
("mission_losses_wounded", "wounded :"),
("mission_losses_killed", "killed :"),
("party_losses", "%s : %d wounded --- %d killed of %d."),
("casualties_sustained", "Casualties sustained:"),
("advantage_change", "Advantage change = %c%d "),
("overall_battle_casualties", "Overall battle causalties:"),
("advantage_outnumbered", " You are hopelessly outnumbered."),
("advantage_major_disadvantage", " You have a major disadvantage."),
("advantage_slight_disadvantage", " You are slightly disadvantaged."),
("advantage_balanced", " The situation is balanced."),
("advantage_fair_advantage", " You have a fair advantage for winning."),
("advantage_greatly_favored", " The odds of battle favor you greatly."),
("tactical_advantage", "Tactical advantage: %d (%s)"),
("order_group", "Order group:"),
("question_save_changes", "You have made changes to the objects. Do you want to save changes?"),
("yes_save", "Yes, save"),
("no_discard_changes", "No, discard changes"),
("everyone_control", "Everyone!"),
("everyone_around_control", "Nearby Soldiers!"),
("others_control", "Others!"),
("question_give_up_fight", "Give up the fight?"),
("give_up", "Give up"),
("keep_fighting", "Keep fighting"),
("question_leave_area", "Leave Area"),
("cant_retreat_there_are_enemies_nearby", "Can't retreat. There are enemies nearby!"),
("question_retreat_battle", "Retreat battle?"),
("retreated_battle", "%s has been routed."),
("retreated_battle", "%s has fled from the battlefield."),
("retreat", "Retreat"),
("talk", "Talk"),
("duel", "Duel"),
("mount", "Mount"),
("riding_skill_not_adequate_to_mount", "(Riding skill not adequate to mount)"),
("dismount", "Dismount"),
("exit", "Exit"),
("door_to", "Door to "),
("open", "Open"),
("equip", "Equip"),
("baggage", "Baggage"),
("access_inventory", "Access inventory"),
("chest", "Chest"),
("passage", "Passage"),
("go", "Go"),
("retreat_battle", "Retreat Battle"),
("leave_area", "Leave Area"),
("reports", "Reports"),
("camp", "Camp"),
("terrain", "Terrain"),
("quests", "Notes"),
("inventory", "Inventory"),
("character", "Character"),
("party", "Party"),
("paused", "Paused"),
("click_left_button_to_cancel_wait", "Waiting... (Left click to return)"),
("midnight", "Midnight"),
("late_night", "Late night"),
("dawn", "Dawn"),
("early_morning", "Early morning"),
("morning", "Morning"),
("noon", "Noon"),
("afternoon", "Afternoon"),
("late_afternoon", "Late afternoon"),
("dusk", "Dusk"),
("evening", "Evening"),
("midnight", "Midnight"),
("level_limit_reached", "Level Limit Reached!"),
("explanation_level_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to Level 8. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("time_limit_reached", "Time Limit Reached!"),
("explanation_time_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to 30 game days. In oder to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("target_lost", "Target lost"),
("waiting", "Waiting."),
("travelling_to", "Travelling to "),
("following", "Following "),
("accompanying", "Accompanying "),
("running_from", "Running from "),
("patrolling", "Patrolling"),
("patrolling_around", "Patrolling around "),
("holding", "Holding"),
("travelling", "Travelling"),
("fighting_against", "Fighting against "),
("speed_equals", "Speed = %2.1f"),
("defenders", "Garrison:"),
("prisoners", "Prisoners:"),
("1_hour", "1 hour"),
("n_hours", "%d hours"),
("between_hours", "%d - %d hours"),
("combatants", "Combatants: %d"),
("party_size", "Party size: %d"),
("party_size_between", "Party size: %d - %d"),
("merchant", "Merchant"),
("return", "Return"),
("no_cost", "No cost"),
("rename", "Rename"),
("use", "Use"),
("destroy", "Destroy"),
("destructible_target", "Destructible target"),
("tutorial_inventory", "This is the trade screen. Hold down control key while clicking on an item to quickly purchase or sell it."),
("head_armor", "Head Armor: %d"),
("body_armor", "Body Armor: %d"),
("leg_armor", "Leg Armor: %d"),
("encumbrance", "Encumbrance: %2.1f"),
("you_dont_have_value", "You don't have %s."),
("merchant_cant_afford_value", "%s: I can't afford %s. I have only %s."),
("merchant_pay_whatever", "Allright, just pay whatever you can."),
("merchant_think_of_something_else", "Hmm. Let us think of something else."),
("dumping_value_items", "%d items will be permanently lost, are you sure?"),
("dumping_value_item", "One item will be permanently lost, are you sure?"),
("question_slaughter_food_and_eat", "Slaughter this %s and eat it?"),
("money_value", "Money: %s"),
("dump", "Discard"),
("outfit", "Outfit"),
("arms", "Arms"),
("horse", "Horse"),
("food", "Food"),
("reclaim_your_sold_goods", "Reclaim your sold goods before buying that!"),
("return_your_bought_goods", "Return your bought goods before selling that!"),
("polearm_no_shield", "Polearm (No shield)"),
("polearm", "Polearm"),
("two_handed", "Two-handed"),
("two_handed_one_handed", "Two-handed/One-handed"),
("one_handed", "One-handed"),
("return_price", "Return price: %d"),
("sell_price", "Sell price: %d"),
("reclaim_price", "Reclaim price: %d"),
("buying_price", "Buying price: %d"),
("default_item", "Default item"),
("buying_price_free", "Buying price: Free"),
("weight", "Weight: %2.1f"),
("plus_value_to_head_armor", "+%d to head armor"),
("plus_value_to_body_armor", "+%d to body armor"),
("plus_value_to_leg_armor", "+%d to leg armor"),
("swing", "Swing: %d%s"),
("damage", "Damage: %d%s"),
("thrust", "Thrust: %d%s"),
("accuracy", "Accuracy: %d"),
("speed_rating", "Speed rating: %d"),
("value_to_damage", "%c%d to damage"),
("value_to_morale", "+%1.1f to party morale"),
("resistance", "Resistance: %d"),
("size", "Size: %d"),
("weapon_reach", "Weapon reach: %d"),
("armor", "Armor: %d"),
("speed", "Speed: %d"),
("maneuver", "Maneuver: %d"),
("charge", "Charge: %d"),
("hit_points", "Hit Points: %d/%d"),
("requires_value_difficulty", "Requires %s: %d"),
("bonus_against_shields", "Bonus against shields"),
("cant_be_used_to_block", "Can't be used to block"),
("troop_cant_use_item", "%s: I can't use that item!"),
("notification_riding_skill_not_enough", "Your riding skill is not high enough to mount this horse."),
("notification_requirements_not_met", "You don't have the required skills or attributes for this weapon."),
("notification_payment_value", "You must pay %s."),
("notification_payment_receive_value", "You will receive %s."),
("one_handed_weapons", "One Handed Weapons"),
("two_handed_weapons", "Two Handed Weapons"),
("polearms", "Polearms"),
("archery", "Archery"),
("crossbows", "Crossbows"),
("throwing", "Throwing"),
("firearms", "Firearms"),
("reset", "Reset"),
("release_one", "Release one"),
("move_up", "Move Up"),
("move_down", " Move Down "),
("upgrade_one", "Upgrade one"),
("party_skills", "Party Skills"),
("morale", "Morale: %s"),
("terrible", "Terrible"),
("very_low", "Very low"),
("low", "Low"),
("below_average", "Below average"),
("average", "Average"),
("above_average", "Above average"),
("high", "High"),
("very_high", "Very high"),
("excellent", "Excellent"),
("starving", "Starving! %d%%"),
("weekly_cost_value", "Weekly cost: %s"),
("company", "Company: %d / %d"),
("prisoners_equal_value", "Prisoners: %d / %d"),
("choose_prisoners", "Choose Prisoners"),
("choose_companions", "Choose Companions"),
("rescued_prisoners", "Rescued Prisoners"),
("captured_enemies", "Captured Enemies"),
("disband", "Disband"),
("take_prisoner", "Take prisoner"),
("take_back", "Take back"),
("give", "Give"),
("take", "Take"),
("sell", "Sell"),
("hire", "Hire"),
("notification_cant_hire", "(Can't hire: not enough money)"),
("uncapture", "Release"),
("capture", "Capture"),
("party_capcity_reached", "(Party capacity reached)"),
("all", " all"),
("joining_cost_weekly_wage", "Joining cost: %d, Weekly wage: %d"),
("weekly_wage", "Weekly wage: %d denars"),
("price", "Price: %d"),
("number_ready_to_upgrade", "%d ready to be upgraded."),
("upgrade_to_value", " Upgrade to %s (%dd)"),
("notification_no_slot_for_upgrade", "No slot for upgrading to %s!"),
("shield_broken", "Shield broken."),
("shield_cracked", "Shield cracked."),
("shield_deformed", "Shield deformed."),
("you_hit_a_friendly_troop", "You hit a friendly troop!"),
("hit_shield_on_back", "Hit shield on back!"),
("delivered_couched_lance_damage", "Delivered couched lance damage!"),
("received_couched_lance_damage", "Received couched lance damage!"),
("speed_bonus_plus", "Speed bonus: +%d%%"),
("speed_bonus", "Speed bonus: %d%%"),
("cant_reload_this_weapon_on_horseback", "Can't reload this weapon on horseback."),
("no_more_bolts", "No more bolts..."),
("you_are_not_carrying_any_bolts", "You are not carrying any bolts."),
("no_more_arrows", "No more arrows..."),
("you_are_not_carrying_any_arrows", "You are not carrying any arrows."),
("head_shot", "Head shot!"),
("delivered_number_damage", "Delivered %d damage."),
("delivered_number_damage_to_horse", "Delivered %d damage to horse."),
("horse_charged_for_number_damage", "Horse charged for %d damage."),
("received_number_damage", "Received %d damage."),
("horse_received_number_damage", "Horse received %d damage."),
("value_killed_teammate", "%s has killed a teammate!"),
("horse_fell_dead", "Horse fell dead..."),
("horse_crippled", "Horse crippled..."),
("shot_difficulty", "Shot difficulty: %2.1f"),
("you_have_improved_your_proficiency_in_value_to_number", "You have improved your proficiency in %s to %d."),
("your_proficiency_in_value_has_improved_by_number_to_number", "Your proficiency in %s has improved by +%d to %d."),
("value_killed_by_value", "%s killed by %s."),
("value_fell_dead", "%s fell dead."),
("value_knocked_unconscious_by_value", "%s knocked unconscious by %s."),
("value_fell_unconscious", "%s fell unconscious."),
("troop_routed", "%s has been routed."),
("troop_panicked", "%s has panicked."),
("troop_fled", "%s has fled the battle."),
("you_got_number_experience", "You got %d experience."),
("you_have_advanced_to_level_number", "You have advanced to level %d."),
("value_has_advanced_to_level_number", "%s has advanced to level %d."),
("you_got_value", "You got %s."),
("new_quest_taken", "New quest taken: %s."),
("quest_completed_value", "Quest completed: %s."),
("quest_succeeded_value", "Quest succeeded: %s."),
("quest_failed_value", "Quest failed: %s."),
("quest_concluded_value", "Quest concluded: %s."),
("quest_cancelled_value", "Quest cancelled: %s."),
("lost_value", " (Lost: %s)"),
("items_lost", " (Items lost:"),
("party_has_nothing_to_eat", "Party has nothing to eat!"),
("days_training_is_complete", "Day's training is complete..."),
("total_experience_gained_through_training_number", "Total experience gained through training: %d"),
("some_soldiers_are_ready_to_upgrade", "Some soldiers are ready to upgrade."),
("number_of_companions_exceeds_leadership_limit", " Number of companions exceeds leadership limit."),
("number_of_prisoners_exceeds_prisoner_management_limit", " Number of prisoners exceeds prisoner management limit."),
("party_morale_is_low", " Party morale is low!"),
("and_one_space", " and"),
("has_deserted_the_party", " has deserted the party."),
("have_deserted_the_party", " have deserted the party."),
("weekly_report", "Weekly report"),
("shared_number_experience_within_party", "Shared %d experience within party."),
("got_item_value", "Got item: %s."),
("game_saved_successfully", "Game saved successfully."),
("autosaving", "Autosaving..."),
("quick_saving", "Quick-saving..."),
("cant_quick_save", "Can't Quick-save during battle..."),
("screenshot_taken_to_value", "Screenshot is saved to %s"),
("screenshot_failed", "Can't save screenshot."),
("value_joined_your_party", "%s joined your party."),
("value_joined_party_as_prisoner", "%s joined party as prisoner."),
("value_has_joined_party", "%s has joined party."),
("value_has_been_taken_prisoner", "%s has been taken prisoner."),
("value_left_the_party", "%s left the party."),
("number_values_left_the_party", "%d %s(s) left the party."),
("number_value_left_the_party", "%d %s left the party."),
("your_relations_with_value_has_improved_from_number_to_number", "Your relations with %s has improved from %d to %d."),
("your_relations_with_value_has_deteriorated_from_number_to_number", "Your relations with %s has deteriorated from %d to %d."),
("you_lost_value", "You lost %s."),
("lost_item_value", "Lost item: %s."),
("got_number_value", "Got %d %s."),
("lost_number_value", "Lost %d %s."),
("set_default_keys", "Set default keys"),
("undo_changes", "Undo changes"),
("press_a_key", "Press a key"),
("return_to_game", "Return to Game"),
("options", "Options"),
("save_and_exit", "Save & Exit"),
("save", "Save"),
("save_as", "Save As"),
("quit_without_saving", "Quit without Saving"),
("empty_slot", "Empty Slot"),
("game_saved", "Game saved..."),
("confirm_overwrite", "Savegame for %s will be overwritten. Are you sure?"),
("dynamic_lighting", "Dynamic Lighting"),
("character_shadows", "Character Shadows"),
("grass_density", "Grass Density:"),
("environment_shadows", "Environment Shadows"),
("realistic_shadows_on_plants", "Realistic Shadows on Plants:"),
("particle_systems", "Particle Systems"),
("gamma", "Monitor Gamma:"),
("character_detail", "Character Detail:"),
("character_shadow_detail", "Character Shadow Detail:"),
("blood_stains", "Blood Stains:"),
("on", "On"),
("off", "Off"),
("near_player_only", "Near player only"),
("default", "Default"),
("3d_grass", "3D Grass:"),
("number_of_ragdolls", "Number of Rag Dolls:"),
("number_of_corpses", "Number of Corpses:"),
("unlimited", "Unlimited"),
("anisotropic_filtering", "Anisotropic Filtering"),
("fast_water_reflection", "Fast Water Reflections"),
("maximum_framerate", "Max. Frame-rate:"),
("show_framerate", "Show Frame-rate:"),
("estimated_performance", "Estimated Performance: %d%%"),
("change_graphics_settings_explanation", "Some changes you have made will take effect when you enter a new area."),
("start_tutorial", "Play Tutorial"),
("start_a_new_game", "Start a New Game"),
("restore_a_saved_game", "Load Game"),
("exit_to_windows", "Exit"),
("credits", "Credits"),
("version_value", "v%s"),
("active_quests", "Active Quests"),
("finished_quests", "Finished Quests"),
("given_on_date", "Given on: %s"),
("days_since_given", "Days since given: %d"),
("quest_progression_number", "Quest progression: %d%%"),
("too_many_quests", "Too many quests"),
("ok", "OK"),
("move_forward", "Move Forward"),
("move_backward", "Move Backward"),
("move_left", "Move Left"),
("move_right", "Move Right"),
("action", "Action"),
("jump", "Jump"),
("attack", "Attack"),
("parry_then_attack", "Counter Attack"),
("defend", "Defend"),
("kick", "Kick"),
("equip_weapon_1", "Equip Item 1"),
("equip_weapon_2", "Equip Item 2"),
("equip_weapon_3", "Equip Item 3"),
("equip_weapon_4", "Equip Item 4"),
("equip_next_weapon", "Equip Next Weapon"),
("equip_next_shield", "Equip Next Shield"),
("sheath_weapon", "Sheath Weapon"),
("character_window", "Character Window"),
("inventory_window", "Inventory Window"),
("party_window", "Party Window"),
("quests_window", "Quests Window"),
("game_log_window", "Game Log Window"),
("leave_location_retreat", "Leave Location/Retreat"),
("zoom", "Zoom"),
("view_outfit", "View Outfit"),
("toggle_first_person_view", "Toggle First Person View"),
("view_orders", "View Orders"),
("quick_save", "Quick Save"),
("no_key_assigned", "No key assigned"),
("new_enemies_have_arrived", "New enemies have arrived."),
("reinforcements_have_arrived", "Reinforcements have arrived."),
("report_casualties", "Report Casualties"),
("report_experience", "Report Experience"),
("current_level_value", "Current Level: %d"),
("base_attribute_value", "Base Attribute: %s"),
("battle_controls", "Battle Controls"),
("map_controls", "Map Controls"),
("general_controls", "General Controls"),
("zoom_in", "Zoom In"),
("zoom_out", "Zoom Out"),
("wait", "Wait"),
("take_screenshot", "Take Screenshot"),
("randomize", "Randomize"),
("hint", "Hint"),
("press_left_mouse_button_to_continue", "Press left mouse button to continue..."),
("loot", "Loot"),
("chest", "Chest"),
("cut_short", "c"),
("pierce_short", "p"),
("blunt_short", "b"),
("battle", "Battle"),
("siege", "Siege"),
("troops", "Troops:"),
("loading_module_info_file", "Loading Module Info File..."),
("processing_ini_file", "Processing INI File..."),
("loading_music", "Loading Music..."),
("loading_data", "Loading Data..."),
("loading_setting_data", "Loading Setting Data..."),
("loading_textures", "Loading Textures..."),
("finished", "Finished."),
("creating_game", "Creating Game..."),
("loading_savegame_file", "Loading Savegame File..."),
("loading_map_file", "Loading Map File..."),
("initializing_map", "Initializing Map..."),
("launching_game", "Launching Game..."),
("capital_battle", "BATTLE:"),
("capital_versus", "--VERSUS--"),
("tracks", "Tracks"),
("battleground", "Battleground"),
("order_1", "Select Order 1"),
("order_2", "Select Order 2"),
("order_3", "Select Order 3"),
("order_4", "Select Order 4"),
("order_5", "Select Order 5"),
("order_6", "Select Order 6"),
("order_button_hold_this_position", "Hold this position"),
("order_button_follow_me", "Follow me"),
("order_button_charge", "Charge"),
("order_button_stand_ground", "Stand ground"),
("order_button_retreat", "Retreat"),
("order_button_advance", "Advance ten paces"),
("order_button_fall_back", "Fall back ten paces"),
("order_button_spread_out", "Spread out"),
("order_button_stand_closer", "Stand closer"),
("order_button_mount_horses", "Mount horses"),
("order_button_dismount", "Dismount"),
("order_button_hold_fire", "Hold your fire"),
("order_button_fire_at_will", "Fire at will"),
("order_button_use_blunt_weapons", "Use only blunt weapons"),
("order_button_use_any_weapon", "Use weapons at will"),
("order_button_movement_orders", "Movement orders"),
("order_button_formation_orders", "Formation orders"),
("order_button_fire_orders", "Fire orders"),
("follow_me_e_", "%s, follow me!"),
("charge_e_", "%s, charge!!!"),
("stand_ground_e_", "%s, stand ground!"),
("retreat_e_", "%s, retreat!"),
("mount_horses_e_", "%s, mount horses!"),
("dismount_e_", "%s, dismount!"),
("advance_e_", "%s, advance ten paces!"),
("fall_back_e_", "%s, fall back ten paces!"),
("stand_closer_e_", "%s, stand closer!"),
("spread_out_e_", "%s, spread out!"),
("use_blunt_weapons_e_", "%s, use only blunt weapons!"),
("use_any_weapon_e_", "%s, use weapons at will!"),
("hold_fire_e_", "%s, hold your fire!"),
("fire_at_will_e_", "%s, fire at will!"),
("hold_this_position_e_", "%s, hold this position!"),
("infantry", "Infantry"),
("archers", "Archers"),
("cavalry", "Cavalry"),
("companions", "Companions"),
("everyone_hear_me", "Everyone, hear me!"),
("everyone", "Everyone"),
("everyone_around_me", "Nearby Soldiers"),
("str_hear_me", "%s, hear me!"),
("str_and_str", "%s and %s"),
("str_comma_str", "%s, %s"),
("need_to_learn_prisoner_management", "You need to learn Prisoner Management skill in order to take prisoners."),
("game_log", "Game Log"),
("recent_messages", "Recent Messages"),
("custom_battle", "Custom Battle"),
("player", "Player"),
("value_denars", "%d denars"),
("back", "Back"),
("forward", "Forward"),
("display_on_map", "Show On Map"),
("info_pages", "Game Concepts"),
("troops2", "Characters"),
("locations", "Locations"),
("click_button_to_view_note", "Click on a link to view the notes"),
("this_page_contains_no_information", "This page contains no information"),
("other_pages_that_link_here", "Other pages that link here: "),
("report_is_value_days_old", " (Report is %d days old)"),
("report_is_current", " (Report is current)"),
("button_party_member_healthy_total", "%s (%d/%d)"),
("button_party_member_total", "%s (%d)"),
("button_party_member_hero_percentage_wounded", "%s (%d%% - Wounded)"),
("button_party_member_hero_percentage", "%s (%d%%)"),
("percentage_value", "%d%%"),
("full", "Full"),
("quick", "Quick"),
("none", "None"),
("change", "Change"),
("how_to_change", "How to change this?"),
("change_directx_explanation", "You can change the render method between DirectX 7 and DirectX 9 by clicking on the Configure button at the launch menu that comes up when you first start the game."),
("dropping_picking_up", "Dropping %s; picking up %s."),
("dropping", "Dropping %s."),
("picking_up", "Picking up %s."),
("unable_to_take", "Unable to take that."),
("age", "Age"),
("cannot_be_used_on_horseback", "Cannot be used on horseback"),
("enable_vertex_shaders2", "Render Method:"),
("screen_size2", "Screen Resolution:"),
("use_desktop_resolution2", "Use Desktop Resolution"),
("shadow_quality2", "Shadow Quality:"),
("m_low2", "Low"),
("m_high2", "High"),
("m_ultra_high2", "Ultra High"),
("off2", "Off"),
("group_header", "Class of troop"),
("group_rename", "Rename group"),
("group_1", "Infantry"),
("group_2", "Archers"),
("group_3", "Cavalry"),
("group_4", "Unnamed 1"),
("group_5", "Unnamed 2"),
("group_6", "Unnamed 3"),
("group_7", "Unnamed 4"),
("group_8", "Unnamed 5"),
("group_9", "Unnamed 6"),
("group_rename", "Rename Group"),
("group_close", "Close"),
("party_b_group_information", "%s belongs to %s group"),
("thrown_or_s", "Thrown/%s"),
("ranged_damage", "Ranged: %d%s"),
("overall_quality", "Overall Quality"),
("shader_quality", "Shader Quality:"),
("flora_lod_detail", "Tree Detail:"),
("flora_degrade_distance", "Tree Degrade Distance:"),
("antialiasing", "AntiAliasing:"),
("use_depth_effects", "Use Depth Effects"),
("hdr_mode", "HDR Mode:"),
("autoexpore", "Auto-exposure"),
("choose_profile", "Choose Profile"),
("create", "Create"),
("edit", "Edit"),
("join_game", "Join a Game"),
("host_game", "Host a Game"),
("custom", "Custom"),
("medium", "Medium"),
("male", "Male"),
("female", "Female"),
("gender", "Choose Gender:"),
("edit_profile", "Edit Profile"),
("new_profile", "New Profile"),
("enter_username", "Enter Username:"),
("invalid_username", "Usernames may only contain letters, numbers or _ - * [ ] ~ characters."),
("confirmation", "Are you sure?"),
("multiplayer", "Multiplayer"),
("server_name", "Server"),
("module_name", "Module"),
("game_type", "Game Type"),
("map_name", "Map"),
("ping", "Ping"),
("dedicated", "Dedicated"),
("number_of_players", "Players"),
("password_protected", "Password"),
("connect", "Connect"),
("local_area_network", "Local Area Network"),
("internet", "Internet"),
("favorites", "Favorites"),
("source", "Source:"),
("server_password", "Server Password:"),
("refresh", "Refresh"),
("start_search", "Start Search"),
("add_to_favorites", "Add to Favorites"),
("remove_from_favorites", "Remove from Favorites"),
("use_speedtree", "Use Speedtree"),
("use_instancing", "Use Instancing"),
("error", "Error"),
("error_server_full", "Server is full."),
("error_server_full_for_non_private", "Server is full for players without a private member password."),
("error_server_password_incorrect", "Incorrect password."),
("error_incorrect_serial", "Incorrect serial number."),
("error_incorrect_authorization_key", "Incorrect authorization key."),
("error_banned_from_server", "You are banned from this server."),
("error_username_taken", "Your profile name is used by another player."),
("error_authentication_failed", "Authentication failed."),
("unable_to_connect_to_server", "Unable to connect to server."),
("connection_to_server_is_lost", "Connection to server is lost."),
("kicked_from_server", "Kicked from server."),
("switch_to_module_question", "This server is running another module than the one you are currently running. Do you want Mount&Blade to switch to this module?"),
("download_module_question", "This server is running a module that is not installed on your computer. Would you like to visit the download site for this module now?"),
("download_mb_new_version_question", "This server is running a newer version (%d.%d%d%d) of Mount&Blade than the one you are currently running (%d.%d%d%d). Would you like to visit TaleWorlds download site now?"),
("download_mb_old_version_question", "This server is running an older version (%d.%d%d%d) of Mount&Blade and than the one you are currently running (%d.%d%d%d)."),
("download_module_new_version_question", "This server is running a newer version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d). Would you like to visit the download site for this module now?"),
("download_module_old_version_question", "This server is running an older version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d)."),
("authenticating_with_steam", "Authenticating with Steam..."),
("validating_serial_number", "Validating serial number..."),
("scanning_lan", "Scanning local area network..."),
("retrieving_servers", "Retrieving server list..."),
("shield_size2", "Size: %dx%d"),
("click_to_view_notes", "Click to view notes"),
("retrieving_server_infos", "Retrieving information from servers (%d)..."),
("connecting_to_server", "Connecting to server..."),
("requesting_to_join_the_game", "Requesting to join the game..."),
("loading", "Loading..."),
("group_value_control", "Group %d!"),
("drop_weapon", "Drop Weapon"),
("multiplayer_message_all", "Send Message to Everyone"),
("multiplayer_message_team", "Send Message to Team"),
("command_line", "Command Line"),
("use_ranged_weapon_as_melee", "Toggle Weapon Mode"),
("send_message_all", "Send Message to Everyone"),
("send_message_team", "Send Message to Team"),
("select", "Select"),
("context_menu", "Context Menu"),
("round_starts_in_value_seconds", "Round starts in %d seconds..."),
("watching_value", "Following %s"),
("capital_spec", "SPEC"),
("capital_dead", "DEAD"),
("instancing_error1", "Could not lock Instance Buffer (size: %d), Disabled mesh-instancing (Error Code: %d)"),
("instancing_error2", "Could not fit instanced objects, Disabled mesh-instancing"),
("by_keyboard", "By movement keys"),
("combat_speed_slowest", "Slowest"),
("combat_speed_slower", "Slower"),
("combat_speed_normal", "Normal"),
("combat_speed_faster", "Faster"),
("combat_speed_fastest", "Fastest"),
("module_newer_than_application", "The module you have selected requires a newer version of the game."),
("module_older_than_application", "The module you have selected requires an older version of the game."),
("unbalanced", "Unbalanced"),
("can_crush_through_blocks", "Can crush through blocks"),
("turn_camera_with_horse", "Turn Camera with Horse in First Person:"),
("widescreen_mode_on", "Multiple Screen Mode Enabled"),
("widescreen_mode_off", "Multiple Screen Mode Disabled"),
("notification_cant_upgrade", "(Can't upgrade: not enough money)"),
("turn_never", "Never"),
("turn_ranged_only", "Ranged only"),
("turn_melee_only", "Melee only"),
("turn_always", "Always"),
("general_options", "General Options"),
("vac_enabled", "Valve Anti Cheat Enabled"),
("campaign_ai", "Campaign AI:"),
("downloading_map", "Downloading map (%d KB)"),
("download_completed", "Download completed."),
("server_filter", "Server filter"),
("has_players", "Has players"),
("is_not_full", "Not full"),
("is_password_free", "No password"),
("native_only", "Native only"),
("ping_limit", "Ping limit"),
("filter_info", "%d games and %d players filtered"),
("is_version_compatible", "Compatible with module"),
("ttnet_account", "TTNET Oyun account"),
("username", "Username"),
("password", "Password"),
("error_incorrect_username_or_password", "Incorrect username or password"),
("validating_account", "Validating account..."),
("plase_enter_your_serial_key", "Please enter your serial key"),
("texture_detail2", "Texture Detail:"),
("antialiasing2", "Antialiasing:"),
("napoleonic_key_does_not_exist", "This mod requires the Napoleonic Wars DLC to play!"),
("delete_module_workshop", "Are you sure you want to unsubscribe from this module?"),
("delete_module", "Are you sure you want to delete the module?"),
("delete_native_module", "You cannot delete native mods."),
("incompatible_module", "This server is incompatible with your current module. You can use the configuration utility to change module."),
]
|
Sw4T/Warband-Development
|
mb_warband_module_system_1166/Module_system 1.166/DISABLED_module_ui_strings.py
|
Python
|
mit
| 37,731
|
[
"VisIt"
] |
e257908a46c297d917d57639eebaad1ac86faaa3f5001707faa4190f9f770a30
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Ahnentafel Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import math
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.lib import ChildRefType
from gramps.gen.plug.menu import (BooleanOption, NumberOption, PersonOption)
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.plugins.lib.libnarrate import Narrator
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# log2val
#
#------------------------------------------------------------------------
def log2(val):
    """
    Return the floor of the base-2 logarithm of *val*.

    Used to derive the generation number from an Ahnentafel index
    (index 1 is generation 1, indices 2-3 are generation 2, ...).

    For integers this uses int.bit_length(), which is exact.  The old
    form int(math.log(val, 2)) evaluated log(val)/log(2) in floating
    point, which for large powers of two can round to just below the
    exact value and make int() truncate one generation too low.
    math.log2() is kept as a fallback for non-integer input and is
    more accurate than the two-argument math.log().
    """
    if isinstance(val, int):
        # bit_length() - 1 == floor(log2(val)) for any val >= 1, exactly.
        return val.bit_length() - 1
    return int(math.log2(val))
#------------------------------------------------------------------------
#
# AncestorReport
#
#------------------------------------------------------------------------
class AncestorReport(Report):
"""
Ancestor Report class
"""
def __init__(self, database, options, user):
    """
    Create the AncestorReport object that produces the Ahnentafel report.

    The arguments are:

    database        - the GRAMPS database instance
    options         - instance of the Options class for this report
    user            - a gen.user.User() instance

    This report needs the following parameters (class variables)
    that come in the options class.

    maxgen           - Maximum number of generations to include.
    pagebbg          - Whether to include page breaks between generations.
    namebrk          - Whether to start a new line after each name.
    trans            - Translation/locale to produce the report in.
    pid              - Gramps ID of the center person.
    name_format      - Preferred format to display names
    incl_private     - Whether to include private data
    living_people    - How to handle living people
    years_past_death - Consider as living this many years after death

    Raises ReportError if the configured center person is not found
    in the database.
    """
    Report.__init__(self, database, options, user)

    # Ahnentafel index -> person handle; filled in by apply_filter().
    self.map = {}

    menu = options.menu
    lang = menu.get_option_by_name('trans').get_value()
    rlocale = self.set_locale(lang)

    stdoptions.run_private_data_option(self, menu)
    stdoptions.run_living_people_option(self, menu, rlocale)
    # Wrap the (possibly proxied) database in a cache: the report looks
    # up the same people and families repeatedly while recursing.
    self.database = CacheProxyDb(self.database)

    self.max_generations = menu.get_option_by_name('maxgen').get_value()
    self.pgbrk = menu.get_option_by_name('pagebbg').get_value()
    self.opt_namebrk = menu.get_option_by_name('namebrk').get_value()
    pid = menu.get_option_by_name('pid').get_value()
    self.center_person = self.database.get_person_from_gramps_id(pid)
    # 'is None' is the idiomatic (and identity-correct) test here,
    # rather than the original '== None'.
    if self.center_person is None:
        raise ReportError(_("Person %s is not in the Database") % pid)

    stdoptions.run_name_format_option(self, menu)

    self.__narrator = Narrator(self.database, use_fulldate=True,
                               nlocale=rlocale)
def apply_filter(self, person_handle, index, generation=1):
    """
    Recursively walk back through the ancestors of the current person.

    person_handle - handle of the person at this Ahnentafel position,
                    or None when the position is empty
    index         - Ahnentafel number of the person (1 = center person;
                    the father goes to index*2, the mother to index*2+1)
    generation    - current generation depth, starting at 1

    Traversal stops when the handle is None or when max_generations
    is exceeded.  Results are stored in self.map, keyed by index.
    """
    # End of the current recursion branch: empty slot or depth limit.
    if not person_handle or generation > self.max_generations:
        return

    # Record this person under the Ahnentafel index passed in.
    self.map[index] = person_handle

    # Retrieve the Person and search their parent families for the
    # natural (birth) parents.  Since this report is for natural
    # parents, a person's parent families may or may not supply them.
    person = self.database.get_person_from_handle(person_handle)
    if person is None:
        return
    father_handle = None
    mother_handle = None
    for family_handle in person.get_parent_family_handle_list():
        family = self.database.get_family_from_handle(family_handle)
        # Robustness: skip dangling family references instead of
        # crashing (mirrors the None-person guard above).
        if family is None:
            continue
        # Filter the child_ref_list to find the reference that matches
        # the passed person.  There should be exactly one, but nothing
        # prevents the same child appearing in the list multiple times.
        ref = [c for c in family.get_child_ref_list()
               if c.get_reference_handle() == person_handle]
        if ref:
            # If the father_handle is not yet set and the relationship
            # is BIRTH, we have found the birth father; likewise for
            # the mother.  If multiple families declare birth parents,
            # the first one found (list priority) wins.
            if not father_handle and \
               ref[0].get_father_relation() == ChildRefType.BIRTH:
                father_handle = family.get_father_handle()
            if not mother_handle and \
               ref[0].get_mother_relation() == ChildRefType.BIRTH:
                mother_handle = family.get_mother_handle()

    # Recurse on both sides.  A handle of None is fine: the guard at
    # the top of this routine ends that branch immediately.
    self.apply_filter(father_handle, index * 2, generation + 1)
    self.apply_filter(mother_handle, (index * 2) + 1, generation + 1)
def write_report(self):
    """
    The routine the actually creates the report. At this point, the document
    is opened and ready for writing.
    """
    # Call apply_filter to build the self.map array of people in the
    # database that match the ancestry.
    self.apply_filter(self.center_person.get_handle(), 1)
    # Write the title line. Set in INDEX marker so that this section will be
    # identified as a major category if this is included in a Book report.
    name = self._name_display.display_formal(self.center_person)
    # feature request 2356: avoid genitive form
    title = self._("Ahnentafel Report for %s") % name
    mark = IndexMark(title, INDEX_TYPE_TOC, 1)
    self.doc.start_paragraph("AHN-Title")
    self.doc.write_text(title, mark)
    self.doc.end_paragraph()
    # get the entries out of the map, and sort them.
    generation = 0
    for key in sorted(self.map):
        # check the index number to see if we need to start a new generation
        # (Ahnentafel keys that are exact powers of two -- 1, 2, 4, 8, ... --
        # begin a new generation; log2 is presumably an integer/floor log,
        # defined elsewhere in this module -- confirm)
        if generation == log2(key):
            # generate a page break if requested
            if self.pgbrk and generation > 0:
                self.doc.page_break()
            generation += 1
            # Create the Generation title, set an index marker
            gen_text = self._("Generation %d") % generation
            mark = None  # don't need any with no page breaks
            if self.pgbrk:
                mark = IndexMark(gen_text, INDEX_TYPE_TOC, 2)
            self.doc.start_paragraph("AHN-Generation")
            self.doc.write_text(gen_text, mark)
            self.doc.end_paragraph()
        # Build the entry
        self.doc.start_paragraph("AHN-Entry", "%d." % key)
        person = self.database.get_person_from_handle(self.map[key])
        # NOTE(review): continuing here leaves the AHN-Entry paragraph
        # started above without a matching end_paragraph() -- confirm
        # the document backend tolerates an unclosed paragraph.
        if person is None:
            continue
        name = self._name_display.display(person)
        mark = utils.get_person_mark(self.database, person)
        # write the name in bold
        self.doc.start_bold()
        self.doc.write_text(name.strip(), mark)
        self.doc.end_bold()
        # Terminate the name with a period: write only a space when the
        # name already ends in '.' (e.g. a suffix like 'Jr.'), otherwise
        # add '. ' ourselves.
        if name[-1:] == '.':
            self.doc.write_text(" ")
        else:
            self.doc.write_text(". ")
        # Add a line break if requested (not implemented yet)
        if self.opt_namebrk:
            self.doc.write_text('\n')
        # Narrate the person's life events (birth, baptism, christening,
        # death, burial) as prose sentences.
        self.__narrator.set_subject(person)
        self.doc.write_text(self.__narrator.get_born_string())
        self.doc.write_text(self.__narrator.get_baptised_string())
        self.doc.write_text(self.__narrator.get_christened_string())
        self.doc.write_text(self.__narrator.get_died_string())
        self.doc.write_text(self.__narrator.get_buried_string())
        self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# AncestorOptions
#
#------------------------------------------------------------------------
class AncestorOptions(MenuReportOptions):
    """
    Defines options and provides handling interface.
    """
    def __init__(self, name, dbase):
        # Keep a database reference so get_subject() can resolve the
        # selected center person from the stored gramps ID.
        self.__db = dbase
        # PersonOption holding the center person; populated by
        # add_menu_options().
        self.__pid = None
        MenuReportOptions.__init__(self, name, dbase)

    def get_subject(self):
        """ Return a string that describes the subject of the report. """
        gid = self.__pid.get_value()
        person = self.__db.get_person_from_gramps_id(gid)
        return _nd.display(person)

    def add_menu_options(self, menu):
        """
        Add options to the menu for the ancestor report.

        :param menu: the menu to which the report options are added
        """
        category_name = _("Report Options")

        # Person whose ancestors the report covers.
        self.__pid = PersonOption(_("Center Person"))
        self.__pid.set_help(_("The center person for the report"))
        menu.add_option(category_name, "pid", self.__pid)

        # Standard options shared by several gramps reports.
        stdoptions.add_name_format_option(menu, category_name)
        stdoptions.add_private_data_option(menu, category_name)
        stdoptions.add_living_people_option(menu, category_name)

        # Traversal depth: default 10 generations, between 1 and 100.
        maxgen = NumberOption(_("Generations"), 10, 1, 100)
        maxgen.set_help(_("The number of generations to include in the report"))
        menu.add_option(category_name, "maxgen", maxgen)

        pagebbg = BooleanOption(_("Page break between generations"), False)
        pagebbg.set_help(
            _("Whether to start a new page after each generation."))
        menu.add_option(category_name, "pagebbg", pagebbg)

        namebrk = BooleanOption(_("Add linebreak after each name"), False)
        namebrk.set_help(_("Indicates if a line break should follow the name."))
        menu.add_option(category_name, "namebrk", namebrk)

        stdoptions.add_localization_option(menu, category_name)

    def make_default_style(self, default_style):
        """
        Make the default output style for the Ahnentafel report.

        There are 3 paragraph styles for this report.

        AHN_Title - The title for the report. The options are:

            Font      : Sans Serif
                        Bold
                        16pt
            Paragraph : First level header
                        0.25cm top and bottom margin
                        Centered

        AHN-Generation - Used for the generation header

            Font      : Sans Serif
                        Italic
                        14pt
            Paragraph : Second level header
                        0.125cm top and bottom margins

        AHN - Normal text display for each entry

            Font      : default
            Paragraph : 1cm margin, with first indent of -1cm
                        0.125cm top and bottom margins
        """
        #
        # AHN-Title
        #
        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=16, bold=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_header_level(1)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_description(_('The style used for the title of the page.'))
        default_style.add_paragraph_style("AHN-Title", para)

        #
        # AHN-Generation
        #
        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=14, italic=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_header_level(2)
        para.set_top_margin(0.125)
        para.set_bottom_margin(0.125)
        para.set_description(_('The style used for the generation header.'))
        default_style.add_paragraph_style("AHN-Generation", para)

        #
        # AHN-Entry
        #
        # Negative first indent gives a hanging-indent layout so the
        # Ahnentafel number stands out to the left of the entry text.
        para = ParagraphStyle()
        para.set(first_indent=-1.0, lmargin=1.0)
        para.set_top_margin(0.125)
        para.set_bottom_margin(0.125)
        para.set_description(_('The basic style used for the text display.'))
        default_style.add_paragraph_style("AHN-Entry", para)
|
beernarrd/gramps
|
gramps/plugins/textreport/ancestorreport.py
|
Python
|
gpl-2.0
| 14,185
|
[
"Brian"
] |
6fa8f657406eeedcac70a08807d28e3660271cf80701e8818755a77a70648337
|
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
#
import os
import GEOM
import SALOMEDS
import hexablock
print "test make elements by transforming elements..."

# All Hexablock model operations are attached to a document.
doc = hexablock.addDocument()

# Number of hexahedra along each axis of the Cartesian grid.
size_x = 1
size_y = 1
size_z = 2

# Grid origin vertex and direction vector (presumably the grid's
# orientation axis -- see makeCartesian1 documentation to confirm).
orig = doc.addVertex(0, 0, 0)
dirVr = doc.addVector(1, 1, 1)
grid = doc.makeCartesian1(orig, dirVr, size_x, size_y, size_z, 0, 0,
                          0)
orig.setScalar(2)

file_name = os.path.join(os.environ['TMP'], 'transfo0.vtk')
#### doc.saveVtk(file_name)

# Copy the grid translated along the vector (5, 0, 0).
devant = doc.addVector(5, 0, 0)
grid2 = doc.makeTranslation(grid, devant)

file_name = os.path.join(os.environ['TMP'], 'transfo_translation.vtk')
#### doc.saveVtk(file_name)

# Copy the translated grid rotated 45 degrees about the axis defined
# by orig and dirVr.
grid4 = doc.makeRotation(grid2, orig, dirVr, 45)

file_name = os.path.join(os.environ['TMP'], 'transfo_rotation.vtk')
#### doc.saveVtk(file_name)

print "...test make elements by transforming elements OK"
|
FedoraScientific/salome-hexablock
|
doc/pyplots/test_make_elmts_transform.py
|
Python
|
lgpl-2.1
| 1,689
|
[
"VTK"
] |
ff366e630e9d32bd5e8c6f685c9c185e2183d09598c7969fd0aa7bf70907f8c2
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# simple.py
# Purpose: mimic operation of examples/COUPLE/simple/simple.cpp via Python
# Serial syntax: simple.py in.lammps
# in.lammps = LAMMPS input script
# Parallel syntax: mpirun -np 4 simple.py in.lammps
# in.lammps = LAMMPS input script
# also need to uncomment either Pypar or mpi4py sections below
from __future__ import print_function
import sys
import numpy as np
import ctypes
# parse command line
argv = sys.argv
if len(argv) != 2:
    print("Syntax: simple.py in.lammps")
    sys.exit()

infile = argv[1]

me = 0

# uncomment this if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()

from lammps import lammps
lmp = lammps()

# run infile one line at a time
# (use a context manager so the file handle is closed promptly)
with open(infile, 'r') as f:
    for line in f:
        lmp.command(line)

# run 10 more steps
# get coords from LAMMPS
# change coords of 1st atom
# put coords back into LAMMPS
# run a single step with changed coords

lmp.command("run 10")
x = lmp.gather_atoms("x", 1, 3)
v = lmp.gather_atoms("v", 1, 3)

epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x", 1, 3, x)

lmp.command("run 1")

# extract force on single atom two different ways

f = lmp.extract_atom("f", 3)
print("Force on 1 atom via extract_atom: ", f[0][0])

fx = lmp.extract_variable("fx", "all", 1)
print("Force on 1 atom via extract_variable:", fx[0])

# use commands_string() and commands_list() to invoke more commands

strtwo = "run 10\nrun 20"
lmp.commands_string(strtwo)

cmds = ["run 10", "run 20"]
lmp.commands_list(cmds)

# delete all atoms
# create_atoms() to create new ones with old coords, vels
# initial thermo should be same as step 20

natoms = lmp.get_natoms()
# all atoms are type 1; renamed from 'type' to avoid shadowing the builtin
atom_types = natoms * [1]

lmp.command("delete_atoms group all")
lmp.create_atoms(natoms, None, atom_types, x, v)
lmp.command("run 10")

# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
# uncomment if running in parallel via mpi4py
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
|
aurix/lammps-induced-dipole-polarization-pair-style
|
python/examples/simple.py
|
Python
|
gpl-2.0
| 2,238
|
[
"LAMMPS"
] |
ad4085a65866936331e6e7d3c6642443946ab809f4d8003f7c4b83797fffdcf0
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
    """Danish (da_DK) person name provider for faker."""

    # Name templates; the plain "{first} {last}" patterns are repeated
    # to weight them more heavily than the hyphenated/prefixed forms.
    formats = (
        '{{first_name_male}} {{last_name}}',
        '{{first_name_male}} {{last_name}}',
        '{{first_name_male}} {{last_name}}',
        '{{first_name_male}} {{last_name}}',
        '{{first_name_male}} {{last_name}}-{{last_name}}',
        '{{first_name_female}} {{last_name}}',
        '{{first_name_female}} {{last_name}}',
        '{{first_name_female}} {{last_name}}',
        '{{first_name_female}} {{last_name}}',
        '{{first_name_female}} {{last_name}}-{{last_name}}',
        '{{prefix_male}} {{first_name_male}} {{last_name}}',
        '{{prefix_female}} {{first_name_female}} {{last_name}}',
        '{{prefix_male}} {{first_name_male}} {{last_name}}',
        '{{prefix_female}} {{first_name_female}} {{last_name}}'
    )

    # NOTE(review): 'ugust' below looks like a truncated 'August', and a
    # few entries carry trailing spaces ('Emil ', 'Flemming ', 'Georg ')
    # -- confirm against the upstream faker data before changing.
    first_names_male = (
        'Adam', 'Albert', 'Aksel', 'Alex', 'Alexander', 'Alf', 'Allan',
        'Alvin', 'Anders', 'André', 'Andreas', 'Anton', 'Arne', 'Asger',
        'ugust', 'Benjamin', 'Benny', 'Bent', 'Bertil', 'Bertram', 'Birger',
        'Bjarne', 'Bo', 'Bob', 'Bobby', 'Boe', 'Boris', 'Borris',
        'Brian', 'Bruno', 'Bøje', 'Børge', 'Carl', 'Carlo', 'Carsten',
        'Casper', 'Christian', 'Christoffer', 'Christopher', 'Claus', 'Clavs', 'Curt',
        'Dan', 'Daniel', 'Danny', 'David', 'Dennis', 'Ebbe', 'Einar',
        'Einer', 'Elias', 'Emil ', 'Eric', 'Erik', 'Erling', 'Ernst',
        'Esben', 'Finn', 'Flemming ', 'Frank', 'Frans', 'Freddy', 'Frede',
        'Frederik', 'Frode', 'Georg ', 'George', 'Gert', 'Gorm', 'Gunnar',
        'Gunner', 'Gustav', 'Hans', 'Helge', 'Henrik', 'Henry', 'Herbert',
        'Herman', 'Hjalte', 'Holger', 'Hugo', 'Ib', 'Ivan', 'Iver',
        'Jack', 'Jacob', 'Jakob', 'James', 'Jan', 'Jano', 'Jarl',
        'Jean', 'Jens', 'Jeppe', 'Jesper', 'Jim', 'Jimmy', 'Joachim',
        'Joakim', 'Johan', 'Johannes', 'John', 'Johnnie', 'Johnny', 'Jon',
        'Jonas', 'Jonathan', 'Julius', 'Jørgen', 'Karl', 'Karlo', 'Karsten',
        'Kaspar', 'Kasper', 'Keld', 'Ken', 'Kenn', 'Kenneth', 'Kenny',
        'Kent', 'Kim', 'Kjeld', 'Klaus', 'Klavs', 'Kristian', 'Kurt',
        'Kåre', 'Lars', 'Lasse', 'Laurits', 'Laus', 'Laust', 'Leif',
        'Lennarth', 'Lucas', 'Ludvig', 'Mads', 'Magnus', 'Malthe', 'Marcus',
        'Marius', 'Mark', 'Martin', 'Mathias', 'Matthias', 'Michael', 'Mik',
        'Mikael', 'Mike', 'Mikkel', 'Mogens', 'Morten', 'Nick', 'Nicklas',
        'Nicolai', 'Nicolaj', 'Niels', 'Nikolai', 'Nikolaj', 'Nils', 'Noah',
        'Ole', 'Olfert', 'Oliver', 'Oscar', 'Oskar', 'Osvald', 'Otto',
        'Ove', 'Palle', 'Patrick', 'Paw', 'Peder', 'Per', 'Pete',
        'Peter', 'Paul', 'Philip', 'Poul', 'Preben', 'Ragnar', 'Ragner',
        'Rasmus', 'René', 'Richard', 'Richardt', 'Robert', 'Robin', 'Rolf',
        'Ron', 'Ronni', 'Ronnie', 'Ronny', 'Ruben', 'Rune', 'Sam',
        'Sebastian', 'Silas', 'Simon', 'Simon', 'Sonny', 'Steen', 'Stefan',
        'Sten', 'Stephan', 'Steve', 'Steven', 'Stig', 'Svenning', 'Søren',
        'Tage', 'Tejs', 'Thomas', 'Tim', 'Timmy', 'Tobias', 'Tom',
        'Tommy', 'Tonny', 'Torben', 'Troels', 'Uffe', 'Ulf', 'Ulrik',
        'Vagn', 'Valdemar', 'Verner', 'Victor', 'Villads', 'Werner', 'William',
        'Yan', 'Yannick', 'Yngve', 'Zacharias', 'Ziggy', 'Øivind', 'Øjvind',
        'Ørni', 'Øvli', 'Øystein', 'Øyvind', 'Åbjørn', 'Aage', 'Åge',
    )

    first_names_female = (
        'Abelone', 'Agnes', 'Agnete', 'Alberte', 'Alma', 'Amalie', 'Amanda',
        'Andrea', 'Ane', 'Anette', 'Anna', 'Anne', 'Annemette', 'Annette',
        'Asta', 'Astrid', 'Benedicte', 'Benedikte', 'Bente', 'Benthe', 'Berit',
        'Berta', 'Beth', 'Bettina', 'Birgit', 'Birgitte', 'Birte', 'Birthe',
        'Bitten', 'Bodil', 'Britt', 'Britta', 'Camilla', 'Carina', 'Carla',
        'Caroline', 'Cathrine', 'Catrine', 'Cecilie', 'Charlotte', 'Christina', 'Christine',
        'Cirkeline', 'Clara', 'Connie', 'Conny', 'Dagmar', 'Dagny', 'Daniella',
        'Dina', 'Ditte', 'Doris', 'Dorte', 'Dorthe', 'Edith', 'Elin',
        'Elisabeth', 'Ella', 'Ellen', 'Elna', 'Else', 'Elsebeth', 'Emilie',
        'Emily', 'Emma', 'Erna', 'Esmarelda', 'Ester', 'Filippa', 'Frederikke',
        'Freja', 'Frida', 'Gerda', 'Gertrud', 'Gitte', 'Grete', 'Grethe',
        'Gundhild', 'Gunhild', 'Gurli', 'Gyda', 'Hannah', 'Hanne', 'Heidi',
        'Helen', 'Helle', 'Henriette', 'Herdis', 'Iben', 'Ida', 'Inga',
        'Inge', 'Ingelise', 'Inger', 'Ingrid', 'Irma', 'Isabella', 'Jacobine',
        'Jacqueline', 'Janne', 'Janni', 'Jannie', 'Jasmin', 'Jean', 'Jenny',
        'Joan', 'Johanne', 'Jonna', 'Josefine', 'Josephine ', 'Julie', 'Justina',
        'Jytte', 'Karen', 'Karin', 'Karina', 'Karla', 'Karoline', 'Katcha',
        'Katja', 'Katrine', 'Kirsten', 'Kirstin', 'Kirstine', 'Klara', 'Kristina',
        'Kristine', 'Laura', 'Lea', 'Lena', 'Lene', 'Leonora', 'Line',
        'Liva', 'Lona', 'Lone', 'Lotte', 'Louise', 'Lærke', 'Maiken',
        'Maja', 'Majken', 'Malene', 'Malou', 'Maren', 'Margit', 'Margrethe',
        'Maria', 'Marianne', 'Marie', 'Marlene', 'Mathilde', 'Maya', 'Merete',
        'Merethe', 'Mette ', 'Mia', 'Michala', 'Michelle', 'Mie', 'Mille',
        'Mimi', 'Minna', 'Nadia', 'Naja', 'Nana', 'Nanna', 'Nanni',
        'Natasha', 'Natasja', 'Nete', 'Nicoline', 'Nina', 'Nora', 'Oda',
        'Odeline', 'Odette', 'Ofelia', 'Olga', 'Olivia', 'Patricia', 'Paula',
        'Paulina', 'Pernille', 'Pia', 'Ragna', 'Ragnhild', 'Randi', 'Rebecca',
        'Regitse', 'Regitze', 'Rikke', 'Rita', 'Ritt', 'Ronja', 'Rosa',
        'Ruth', 'Sabine', 'Sandra', 'Sanne', 'Sara', 'Sarah', 'Selma',
        'Signe', 'Sigrid', 'Silje', 'Sille', 'Simone', 'Sine', 'Sofia',
        'Sofie', 'Solveig', 'Solvej', 'Sonja', 'Sophie', 'Stina', 'Stine',
        'Susanne', 'Sussanne', 'Sussie', 'Sys', 'Sørine', 'Søs', 'Tammy',
        'Tanja', 'Thea', 'Tilde', 'Tina', 'Tine', 'Tove', 'Trine',
        'Ulla', 'Ulrike', 'Ursula', 'Vera', 'Victoria', 'Viola', 'Vivian',
        'Weena', 'Winni', 'Winnie', 'Xenia', 'Yasmin', 'Yda', 'Yrsa',
        'Yvonne', 'Zahra', 'Zara', 'Zehnia', 'Zelma', 'Zenia', 'Åse',
    )

    # Combined pool used by the gender-neutral first_name() generator.
    first_names = first_names_male + first_names_female

    last_names = (
        'Jensen', 'Nielsen', 'Hansen', 'Pedersen', 'Andersen', 'Christensen', 'Larsen',
        'Sørensen', 'Rasmussen', 'Petersen', 'Jørgensen', 'Madsen', 'Kristensen', 'Olsen',
        'Christiansen', 'Thomsen', 'Poulsen', 'Johansen', 'Knudsen', 'Mortensen', 'Møller',
        'Jacobsen', 'Jakobsen', 'Olesen', 'Frederiksen', 'Mikkelsen', 'Henriksen', 'Laursen',
        'Lund', 'Schmidt', 'Eriksen', 'Holm', 'Kristiansen', 'Clausen', 'Simonsen',
        'Svendsen', 'Andreasen', 'Iversen', 'Jeppesen', 'Mogensen', 'Jespersen', 'Nissen',
        'Lauridsen', 'Frandsen', 'Østergaard', 'Jepsen', 'Kjær', 'Carlsen', 'Vestergaard',
        'Jessen', 'Nørgaard', 'Dahl', 'Christoffersen', 'Skov', 'Søndergaard', 'Bertelsen',
        'Bruun', 'Lassen', 'Bach', 'Gregersen', 'Friis', 'Johnsen', 'Steffensen',
        'Kjeldsen', 'Bech', 'Krogh', 'Lauritsen', 'Danielsen', 'Mathiesen', 'Andresen',
        'Brandt', 'Winther', 'Toft', 'Ravn', 'Mathiasen', 'Dam', 'Holst',
        'Nilsson', 'Lind', 'Berg', 'Schou', 'Overgaard', 'Kristoffersen', 'Schultz',
        'Klausen', 'Karlsen', 'Paulsen', 'Hermansen', 'Thorsen', 'Koch', 'Thygesen',
    )

    # Honorifics: 'Hr' = Mr, 'Fru' = Mrs; academic titles are shared.
    prefixes_male = (
        'Hr', 'Dr.', 'Prof.', 'Univ.Prof.'
    )

    prefixes_female = (
        'Fru', 'Dr.', 'Prof.', 'Univ.Prof.'
    )
|
Nebucatnetzer/tamagotchi
|
pygame/lib/python3.4/site-packages/faker/providers/person/dk_DK/__init__.py
|
Python
|
gpl-2.0
| 7,622
|
[
"Brian"
] |
4f824e04d88b16be46a926c03aacd7571b692f068de2449100b9a191acdfbdd9
|
################################################################################
# Copyright (c) 2014, Lee-Ping Wang and the Authors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Python interface to Q-Chem, very useful for automating Q-Chem calculations.
Contains a QChem class representing a Q-Chem calculation, where calling
methods like sp() and make_stable() produces Q-Chem results wrapped up in
a Molecule object.
Also contains a number of functions to wrap around TS/IRC calculations and
make the results easier to use.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import sys
import shutil
import glob
import traceback
import time
from collections import defaultdict, OrderedDict
from copy import deepcopy
import six
import numpy as np
from .molecule import Molecule, Elements
from .utils import _exec
# Default Q-Chem input file to be used when QChem class is initialized
# from a .xyz file. Written mainly for HF and DFT calculations; note
# the relatively conservative settings.
# The {chg}/{mult}/{method}/{basis} fields are filled by str.format().
qcrem_default = """
$molecule
{chg} {mult}
$end
$rem
method {method}
basis {basis}
symmetry off
incdft false
incfock 0
sym_ignore true
unrestricted true
scf_convergence 8
thresh 14
$end
"""

# Header written at the top of vibrational-mode output files; describes
# the file format expected by downstream tools.
vib_top = """#==========================================#
#| File containing vibrational modes from |#
#| Q-Chem calculation |#
#| |#
#| Octothorpes are comments |#
#| This file should be formatted like so: |#
#| (Full XYZ file for the molecule) |#
#| Number of atoms |#
#| Comment line |#
#| a1 x1 y1 z1 (xyz for atom 1) |#
#| a2 x2 y2 z2 (xyz for atom 2) |#
#| |#
#| These coords will be actually used |#
#| |#
#| (Followed by vibrational modes) |#
#| Do not use mass-weighted coordinates |#
#| ... |#
#| v (Eigenvalue in wavenumbers) |#
#| dx1 dy1 dz1 (Eigenvector for atom 1) |#
#| dx2 dy2 dz2 (Eigenvector for atom 2) |#
#| ... |#
#| (Empty line is optional) |#
#| v (Eigenvalue) |#
#| dx1 dy1 dz1 (Eigenvector for atom 1) |#
#| dx2 dy2 dz2 (Eigenvector for atom 2) |#
#| ... |#
#| and so on |#
#| |#
#| Please list freqs in increasing order |#
#==========================================#
"""

# Handle known errors.
# Reaction path errors culled from Q-Chem source.
# Spelling errors are probably not mine.
# Maps a job type ('rpath', 'opt', ...) to error strings that are
# considered "OK" (i.e. recoverable / expected) for that job type.
erroks = defaultdict(list)
erroks['rpath'] = ['Bad Hessian -- imaginary mode too soft', 'Bad Hessian -- no negative eigenvalue',
                   'Bad initial gradient', 'Failed line search', 'First_IRC_step: Illegal value of coordinates',
                   'First_IRC_step: Internal programming error.', 'IRC backup failure', 'IRC failed bisector line search',
                   'IRC failed final bisector step', 'IRC --- Failed line search', 'IRC internal programming error',
                   'Maxium number of steps reached.', 'NAtom.GT.NAtoms', 'RPATH_ITER_MAX reached.',
                   'rpath_new: EWCs not yet implemented', 'rpath_new: Unimplemented coordinates',
                   'RPath_new: unimplemented coordinates.', 'rpath: no hessian at the start.',
                   'rpath: Starting Geometry Does NOT Correspond to TS']
erroks['opt'] = ['OPTIMIZE fatal error']
def tarexit(exitstat=0):
    """
    Archive files and quit. It's helpful for remote scripts to call this.
    Note that tarexit.tarfnm and tarexit.include need to be set.

    Fields (basically globals)
    ------
    tarfnm : str
        Name of the archive to create.
    include : list or str
        Files to be archived; each entry is expanded using glob.
    exclude : list or str
        Exclude files from being archived; these are expanded using glob.
    save : list or str
        Do not remove these files even when remove_files is set to True
    archive_dirs : bool
        If set to True, directories will be archived as well.
    remove_files : bool
        If set to True, everything that is archived will be removed using --remove-files.
    remove_dirs : bool
        If set to True, all subdirectories ending in '.d' (generated by Q-Chem) will be removed.

    Parameters
    ----------
    exitstat : int
        Use this exit status. Note that exit status > 0 indicates error.
    """
    # Type checking, make everything into a list.
    # NOTE(review): tarexit.save is globbed below but is never coerced
    # from str to list like include/exclude -- confirm callers always
    # pass a list (a bare string would be globbed per-character? No --
    # iterated per-character -- so a str here would misbehave).
    if isinstance(tarexit.include, str):
        tarexit.include = [tarexit.include]
    if isinstance(tarexit.exclude, str):
        tarexit.exclude = [tarexit.exclude]
    # Remove .btr files created by OpenMPI (I think?) as well as existing .tar file.
    for f in glob.glob('*.btr'):
        os.remove(f)
    # Expand each term in "exclude" and remove from the list of files.
    excludes = sum([glob.glob(g) for g in tarexit.exclude], [])
    # If the tar file exists, then extract / delete it so the new archive
    # also contains whatever was archived previously.
    if os.path.exists(tarexit.tarfnm):
        _exec("tar xjf %s --skip-old-files" % tarexit.tarfnm, print_command=True)
        os.remove(tarexit.tarfnm)
    # Expand each term in "include" and add them to the list of files.
    include_files = []
    for g in tarexit.include:
        for f in glob.glob(g):
            # Conditions for including paths in archive list:
            # 1) Path isn't added already
            # 2) Path isn't in the list of exclusions
            # 3) Either archive_dirs is set or path is not a folder
            if f not in include_files and f not in excludes and (tarexit.archive_dirs or (not os.path.isdir(f))):
                include_files.append(f)
    # Files matching tarexit.save (default: *.log) are stashed in a
    # temporary "saved" folder so that tar's --remove-files does not
    # delete them; they are copied back after archiving.
    if tarexit.remove_files:
        saved = [f for f in sum([glob.glob(g) for g in tarexit.save], []) if f in include_files]
        if not os.path.exists('saved'):
            os.makedirs('saved')
        for f in saved:
            shutil.copy2(f, 'saved/%s' % f)
    # Actually execute the tar command.
    _exec("tar cjf %s %s%s" % (tarexit.tarfnm, ' '.join(include_files), ' --remove-files' if tarexit.remove_files else ''), print_command=True)
    # Touch the file to ensure that something is created (even zero bytes).
    _exec("touch %s" % tarexit.tarfnm)
    # Restore the stashed "save" files and clean up the stash folder.
    if tarexit.remove_files:
        for f in saved:
            shutil.copy2('saved/%s' % f, f)
        shutil.rmtree('saved')
    # Delete directories that end in .d if desired.
    for f in os.listdir('.'):
        if tarexit.remove_dirs and os.path.isdir(f) and ('.d' in f):
            shutil.rmtree(f)
    sys.exit(exitstat)

# Defaults for tarexit's "fields" (function attributes used as
# lightweight globals; see the docstring above).
tarexit.tarfnm = 'default.tar.bz2'
tarexit.include = []
tarexit.exclude = []
tarexit.save = ['*.log']
tarexit.archive_dirs = False
tarexit.remove_files = True
tarexit.remove_dirs = True
# Basis set combinations which may be provided as an argument to "basis".
# Provides rudimentary basis set mixing functionality. You may define
# a mapping from element to basis set here.
# Each "<pople>_lanl2dz" entry maps elements with Z <= 10 (H-Ne) to the
# named Pople basis and heavier elements (Z > 10, up to Z = 93) to LANL2DZ.
basdict = OrderedDict([('%s_lanl2dz' % bas, OrderedDict([(Elements[i], 'lanl2dz' if i > 10 else bas) for i in range(1, 94)])) for
                       bas in ['3-21g', '3-21+g', '3-21g*',
                               '6-31g', '6-31g*', '6-31g(d)', '6-31g**', '6-31g(d,p)',
                               '6-31+g', '6-31+g*', '6-31+g(d)', '6-31+g**', '6-31+g(d,p)',
                               '6-31++g', '6-31++g*', '6-31++g(d)', '6-31++g**', '6-31++g(d,p)',
                               '6-311g', '6-311g*', '6-311g(d)', '6-311g**', '6-311g(d,p)',
                               '6-311+g', '6-311+g*', '6-311+g(d)', '6-311+g**', '6-311+g(d,p)',
                               '6-311++g', '6-311++g*', '6-311++g(d)', '6-311++g**', '6-311++g(d,p)']])

# In most cases, the ECP can be determined from the basis.
# Every mixed basis above pairs with the lanl2dz ECP.
ecpdict = OrderedDict([('lanl2dz', 'lanl2dz')] +
                      [('%s_lanl2dz' % bas, 'lanl2dz') for
                       bas in ['3-21g', '3-21+g', '3-21g*',
                               '6-31g', '6-31g*', '6-31g(d)', '6-31g**', '6-31g(d,p)',
                               '6-31+g', '6-31+g*', '6-31+g(d)', '6-31+g**', '6-31+g(d,p)',
                               '6-31++g', '6-31++g*', '6-31++g(d)', '6-31++g**', '6-31++g(d,p)',
                               '6-311g', '6-311g*', '6-311g(d)', '6-311g**', '6-311g(d,p)',
                               '6-311+g', '6-311+g*', '6-311+g(d)', '6-311+g**', '6-311+g(d,p)',
                               '6-311++g', '6-311++g*', '6-311++g(d)', '6-311++g**', '6-311++g(d,p)']])
def get_basis(basis, molecule=None):
    """
    Get the basis / ECP name and section data for a Q-Chem input file.

    Parameters
    ----------
    basis : str
        Name of a Q-Chem gaussian basis set or a custom basis as defined above
    molecule : Molecule object
        Required in case of a general basis (for looking up elements in the basis set dictionary)

    Returns
    -------
    basisname : str
        Either the original basis name or "gen" for a general basis
    basissect : None or list
        A list of strings for the $basis section in case of a general basis
    ecpname : None or str
        None (for no ECP), the original ECP name, or "gen" for a general ECP
    ecpsect : None or list
        A list of strings for the $ecp section in case of a general ECP
    """
    # Look up the basis set; the value is either a plain basis-set name
    # (string) or an element -> basis mapping (dict) for a general basis.
    basisval = basdict.get(basis.lower(), basis)
    # Look up the ECP (string, dictionary or None).
    ecp = ecpdict.get(basis.lower(), None)
    elems = None
    if molecule is not None:
        # Convert each unique element to the "other" representation
        # (atomic number -> symbol, symbol -> atomic number) and sort.
        # NOTE(review): when molecule.elem holds symbols this yields
        # atomic numbers, which are not valid keys of the basis/ECP
        # dictionaries above -- confirm callers pass atomic numbers.
        elems = []
        for e in sorted(list(set(molecule.elem))):
            if isinstance(e, int):
                ee = Elements[e]
            elif isinstance(e, six.string_types):
                ee = Elements.index(e)
            else:
                raise ValueError(e)
            elems.append(ee)
        elemsort = np.argsort(np.array(elems))
        elems = [elems[i] for i in elemsort]
    elif isinstance(basisval, dict) or isinstance(ecp, dict):
        # A general basis or general ECP needs the element list; fail
        # loudly here instead of hitting a NameError on 'elems' below.
        raise RuntimeError('Please pass a molecule object if using a general basis set')
    basissect = None
    if isinstance(basisval, dict):
        basisname = 'gen'
        # One "element / basis / ****" group per element.
        basissect = sum([[e, basisval[e], '****'] for e in elems], [])
    else:
        basisname = basisval.lower()
    ecpname = None
    ecpsect = None
    if isinstance(ecp, dict):
        ecpname = 'gen'
        ecpsect = sum([[e, ecp[e], '****'] for e in elems], [])
    elif isinstance(ecp, str):
        ecpname = ecp.lower()
    return basisname, basissect, ecpname, ecpsect
def prepare_template(docstring, fout, chg, mult, method, basis, molecule=None):
    """
    Prepare a Q-Chem template file.

    Parameters
    ----------
    docstring : str
        A Python docstring with the fields {chg}, {mult}, {method} and {basis} defined.
        NOTE(review): this argument is currently unused; the module-level
        qcrem_default template is formatted instead -- confirm intent.
    fout : str
        Name of the output file.
    chg : int
        Charge to print to the template file.
    mult : int
        Spin multiplicity.
    method : str
        Electronic structure method.
    basis : str
        Gaussian basis set.
    molecule : Molecule object
        molecule.elem provides the elements printed to file in the case of a general basis
    """
    basisname, basissect, ecpname, ecpsect = get_basis(basis, molecule)
    # The "basis" template field also carries the ECP line when present.
    basisfield = basisname
    if ecpname is not None:
        basisfield += '\necp %s' % ecpname
    # Write the main Q-Chem template file.
    with open(fout, 'w') as handle:
        print(qcrem_default.format(chg=chg, mult=mult, method=method,
                                   basis=basisfield), file=handle)
    # Append the general basis and ECP sections when applicable,
    # in that order (basis first, then ECP).
    for tag, name, section in (('$basis', basisname, basissect),
                               ('$ecp', ecpname, ecpsect)):
        if name == 'gen':
            with open(fout, 'a') as handle:
                print(file=handle)
                print(tag, file=handle)
                print('\n'.join(section), file=handle)
                print('$end', file=handle)
class QChem(object):
"""
Class for facilitating Q-Chem calculations. I wrote this
because it was helpful to execute a chain of calculations on a
single geometry.
"""
def __init__(self, fin, ftype=None, charge=None, mult=None, method=None,
             basis=None, qcin=None, qcout=None, qcdir=None, readsave=None,
             readguess=True, clean=False, qcsave=True):
    """
    Create a QChem object.
    Parameters
    ----------
    fin : str
        Name of input .xyz or .in file (other coordinate files in
        molecule.py supported as well). The .in file will contain
        settings for the Q-Chem calculation whereas the .xyz file
        does not; in this case, default fields are filled in, but
        the user must provide charge / mult / method / basis.
    ftype : str, optional
        Force the Molecule class to read the input file as this format.
    charge : int, optional
        Net charge. Required if xyz coordinates are provided. If
        Q-Chem input and charge are both provided, this will override.
    mult : int, optional
        Spin multiplicity. Required if xyz coordinates are provided.
        If Q-Chem input and mult are both provided, this will override.
    method : str, optional
        Electronic structure method (e.g. b3lyp). This is written
        to the "method" field in the Q-Chem input file - supported
        by Q-Chem 4.2 and later. Required if xyz coordinates are
        provided. If Q-Chem input and method are both provided, this
        will override.
    basis : str, optional
        Gaussian basis set (e.g. 6-31g*). This is written to the
        "basis" field in the Q-Chem input file. Required if xyz
        coordinates are provided. If Q-Chem input and basis
        are both provided, this will override.
    qcin : str, optional
        Base name of Q-Chem input files to be written. If not provided,
        will use "fin" (extension will be changed to ".qcin" if necessary
        to avoid overwriting input file.)
    qcout : str, optional
        Base name of Q-Chem output files. If not provided, will use "qcin"
        base name and ".out" extension.
    qcdir : str, optional
        Base name of Q-Chem temporary folder. If not provided, will use "qcin"
        base name and ".d" extension. NOTE: This folder will be removed prior
        to calculation start! Also, will create a ".dsav" folder used to recover
        from failed calculations, think of it as a "save game file".
    readsave : str or bool, optional
        If set to True, the "qcdsav" (i.e. qcdir+"sav") folder will automatically
        be used to initialize this calculation.
        If string, this is a folder containing Q-Chem files used to initialize
        this calculation. This folder will be copied to "self.qcdsav" and self.qcdsav
        will NOT be removed prior to calculation start.
        If not provided, self.qcdsav will be removed prior to calculation start.
    readguess : bool, optional
        Write "scf_guess read" to Q-Chem input file. If readsave is provided,
        then the very first calculation will read the SCF guess as well.
    clean : bool, optional
        When set, this calculation never updates qcdsav. However, if readsave is
        provided it will still be used to initialize each calculation. Use this
        if you never want the calculation result to depend on the previous state.
        Note that this is relatively uncommon (e.g. if we want to run a series
        of calculations without reading the SCF guess from the previous one.)
    qcsave : bool, optional
        Append the "-save" argument to the system call to Q-Chem. This results
        in more files being saved to qcdir.
    """
    # Name of the input file.
    self.fin = fin
    # Molecule object from loading the input file.
    self.M = Molecule(fin, ftype)
    if 'elem' not in self.M.Data:
        raise RuntimeError('Input file contains no atoms')
    # Q-Chem input file that will be written for each Q-Chem execution.
    # If the original input file happens to also be a Q-Chem input file,
    # then use the suffix 'qcin' add an underscore so we
    # don't accidentally overwrite our original file.
    if qcin is None:
        qcin = os.path.splitext(fin)[0]+'.in'
    if qcin == fin and fin.endswith('.in'):
        self.qcin = os.path.splitext(fin)[0]+'.qcin'
    elif qcin == fin:
        raise RuntimeError('Please do not provide a file with extension .qcin')
    else:
        self.qcin = qcin
    # Keep track of the number of calculations done.
    self.ncalc = 0
    # Whether a Hessian calculation has been done.
    self.haveH = 0
    # Set Q-Chem calculation options ($rem variables).
    if 'qcrems' not in self.M.Data:
        if method is None or basis is None or charge is None or mult is None:
            raise RuntimeError('Must provide charge/mult/method/basis!')
        # Print a Q-Chem template file.
        prepare_template(qcrem_default, '.qtemp.in', charge, mult, method, basis, molecule=self.M)
        self.M.add_quantum('.qtemp.in')
    else:
        if charge is not None:
            self.M.charge = charge
        if mult is not None:
            self.M.mult = mult
        if method is not None:
            self.M.edit_qcrems({'method' : method})
        # Treat custom basis and ECP.
        ecpname = None
        ecpsect = None
        if basis is not None:
            basisname, basissect, ecpname, ecpsect = get_basis(basis, self.M)
            self.M.edit_qcrems({'basis' : basisname})
            if basisname == 'gen':
                self.M.qctemplate['basis'] = basissect
        if ecpname is not None:
            self.M.edit_qcrems({'ecp' : ecpname})
            if ecpname == 'gen':
                self.M.qctemplate['ecp'] = ecpsect
    # The current job type, which we can set using
    # different methods for job types.
    self.jobtype = 'sp'
    # Rem dictionary for SCF convergence.
    self.remscf = OrderedDict()
    # Extra rem variables for a given job type.
    self.remextra = OrderedDict()
    # Default name of Q-Chem output file
    self.qcout = os.path.splitext(self.qcin)[0]+".out" if qcout is None else qcout
    self.qcerr = os.path.splitext(self.qcin)[0]+".err"
    # Saved Q-Chem calculations if there is more than one.
    self.qcins = []
    self.qcouts = []
    self.qcerrs = []
    # Specify whether to tack "-save" onto the end of each Q-Chem call.
    self.qcsave = qcsave
    # Q-Chem scratch directory
    self.qcdir = os.path.splitext(self.qcin)[0]+".d" if qcdir is None else qcdir
    # Flag to read SCF guess at the first calculation
    self.readguess = readguess
    # Without guess to read from, use "scf_guess core"
    # and "scf_guess_mix 5" which allows us to find broken
    # symmetry states.
    self.coreguess = True
    # Error message if the calculation failed for a known reason
    self.errmsg = ''
    # qcdsav is "known-good qcdir for this object",
    # used to restore from failed calcs (e.g. SCF failure)
    self.qcdsav = self.qcdir+'sav'
    #--------
    # The clean option makes sure nothing on the disk influences this calculation.
    # This can be a bit confusing. There are two modes of usage:
    # 1) Clean OFF. Calculation uses whatever is in qcdir and backs it up to qcdsav on successful calcs.
    # 2) Clean ON. qcdir is always cleared, and copied over from qcdsav (if exist) prior to calling Q-Chem.
    # This allows us to save the state of a good calculation without worrying about outside interference.
    # - Use case 1: AnalyzeReaction.py does not like to read SCF guesses from previous calculations so we use clean = True.
    # - Use case 2: Growing string does like to read SCF guesses so we use clean = False.
    # - Use case 3: IRC calculation requires Hessian from a previous calculation, so again we use clean = False.
    self.clean = clean
    # If readsave is set, then copy it to self.qcdsav and it will be used
    # to initialize this calculation. Otherwise self.qcdsav will be removed.
    self.readsave = readsave
    if isinstance(self.readsave, str):
        if not os.path.isdir(self.readsave):
            raise RuntimeError('Tried to initialize Q-Chem reading from a save folder but does not exist')
        if self.readsave == self.qcdsav:
            pass
        elif os.path.exists(self.qcdsav):
            shutil.rmtree(self.qcdsav)
        shutil.copytree(self.readsave, self.qcdsav)
    elif isinstance(self.readsave, int) and self.readsave:
        # Truthy non-string readsave (e.g. True): keep the existing qcdsav.
        pass
    elif os.path.exists(self.qcdsav):
        shutil.rmtree(self.qcdsav)
    # Remove self.qcdir; it will be restored from self.qcdsav right before calling Q-Chem.
    if os.path.exists(self.qcdir):
        shutil.rmtree(self.qcdir)
def write(self, *wargs, **wkwargs):
    """Dump the stored geometry to disk by delegating to Molecule.write()."""
    self.M.write(*wargs, **wkwargs)
def write_qcin(self):
    """
    Write the Q-Chem input file (self.qcin) for the next calculation.

    Starts from the stored Molecule/template, then layers on (in order):
    the job type, a default SCF convergence criterion, the SCF guess
    settings, the SCF-convergence rem variables (self.remscf), and the
    job-specific rem variables (self.remextra).
    """
    rems = OrderedDict([('jobtype', self.jobtype)])
    rems['scf_convergence'] = 8
    # If a known-good scratch folder exists, read the SCF guess from it.
    if self.readguess and os.path.exists(self.qcdsav):
        rems['scf_guess'] = 'read'
        rems['scf_guess_mix'] = None
    elif self.coreguess:
        # No guess to read: start from the core Hamiltonian and mix the
        # orbitals so broken-symmetry solutions can be found.
        rems['scf_guess'] = 'core'
        rems['scf_guess_mix'] = 5
    # Add SCF convergence rem variables.
    rems.update(self.remscf)
    # Add job-related rem variables.
    rems.update(self.remextra)
    # If doing stability analysis, loosen SCF convergence tolerance by 2
    # orders of magnitude.  This is a bootleg solution to our workflow
    # hanging indefinitely when Q-Chem crashes.
    if 'stability_analysis' in rems:
        rems['scf_convergence'] -= 2
    # Create copy of stored Molecule object, update
    # Q-Chem rem variables and write Q-Chem input file.
    M1 = deepcopy(self.M)
    M1.edit_qcrems(rems)
    M1.write(self.qcin, ftype="qcin")
def DIE(self, errmsg):
    """Abort the workflow with a RuntimeError describing the Q-Chem failure."""
    message = "Error: Q-Chem calculation failed! (%s)" % errmsg
    raise RuntimeError(message)
def load_qcout(self):
    """
    Parse the current Q-Chem output file into a Molecule object.

    SCF convergence failures and reaching the maximum number of
    optimization cycles are tolerated by the parser; any other
    parsing error archives everything and terminates the workflow.
    """
    tolerated = erroks[self.jobtype.lower()] + \
        ['SCF failed to converge', 'Maximum optimization cycles reached']
    try:
        return Molecule(self.qcout, errok=tolerated)
    except RuntimeError:
        tarexit.include = ['*']
        tarexit(1)
def call_qchem(self, debug=False):
    """
    Call Q-Chem. There are several functions that wrap
    around this innermost call. Assumes that Q-Chem input
    file has been written.
    Determine whether to run in serial, OpenMP-parallel or
    MPI-parallel mode. Restore qcdir from qcdsav. Execute
    Q-Chem executable but don't copy qcdir back to qcdsav
    (outer wrapper functions should do this).

    Parameters
    ----------
    debug : bool, optional
        If True, echo the job type and the full input file to stdout
        before running Q-Chem.
    """
    if debug:
        print("Calling Q-Chem with jobtype", self.jobtype)
        for line in open(self.qcin).readlines():
            print(line, end=' ')
    # Figure out whether to use OpenMP or MPI.
    # Stability analysis and frequency jobs go through MPI because they
    # are not OpenMP-parallel in this Q-Chem version.
    mode = "openmp"
    M1 = Molecule(self.qcin)
    for qcrem in M1.qcrems:
        for key in qcrem.keys():
            if key == 'stability_analysis' and qcrem[key].lower() == 'true':
                mode = "mpi"
            if key == 'jobtype' and qcrem[key].lower() == 'freq':
                mode = "mpi"
    # Set commands to run Q-Chem.
    # The OMP_NUM_THREADS environment variable shall be used to determine
    # the number of processors. The environment variable is then unset.
    # If not set, default to one.
    if 'OMP_NUM_THREADS' in os.environ:
        cores=int(os.environ['OMP_NUM_THREADS'])
        del os.environ['OMP_NUM_THREADS']
    else:
        cores=1
    # Q-Chem parallel (OpenMP), serial, and parallel (MPI) commands.
    # The MPI command is useful for jobs that aren't OpenMP-parallel,
    # such as stability analysis (which uses TDDFT/CIS).
    if 'QCCMD' in os.environ:
        qccmd = os.environ['QCCMD']
    else:
        qccmd = "qchem42 -nt %i" % cores
    # Command for serial jobs
    qc1cmd = qccmd.split()[0]
    # Command for MPI jobs
    qcmpi = qccmd.replace('-nt', '-np')
    # I believe this saves more scratch files from Q-Chem.
    if self.qcsave:
        qccmd += ' -save'
        qc1cmd += ' -save'
        qcmpi += ' -save'
    # Frequency calculations with less atoms than the # of cores should be serial.
    if M1.na < cores and self.jobtype.lower() == 'freq': mode = "serial"
    # I don't remember why this is here. Something about "Recomputing EXC"?
    # if 'scf_algorithm' in self.remscf and self.remscf['scf_algorithm'] == 'rca_diis': mode = "serial"
    #----
    # Note that on some clusters I was running into random
    # crashes, which led to this code becoming more complicated.
    # The code is now cleaned up because I haven't seen the errors
    # in a while .. but if they come back, make sure to look back
    # in the commit history.
    #----
    # When "clean mode" is on, we always start from a clean slate
    # (restore qcdir from qcdsav if exist; otherwise delete)
    if (self.clean or os.path.exists(self.qcdsav)) and os.path.exists(self.qcdir):
        shutil.rmtree(self.qcdir)
    # If qcdsav exists, we restore from it
    if os.path.exists(self.qcdsav):
        _exec("rsync -a --delete %s/ %s/" % (self.qcdsav, self.qcdir), print_command=False)
    # Execute Q-Chem.
    if mode == "openmp":
        qccmd_ = qccmd
    elif mode == "mpi":
        qccmd_ = qcmpi
        # Force BW compute node to use single processor instead of MPI.
        # (Blue Waters node hostnames look like 'nidNNNNN'.)
        if 'nid' in os.environ.get('HOSTNAME', 'None'):
            qccmd_ = qc1cmd
    elif mode == "serial":
        qccmd_ = qc1cmd
    try:
        _exec('%s %s %s %s &> %s' % (qccmd_, self.qcin, self.qcout, self.qcdir, self.qcerr), print_command=False)
    except:
        # Archive everything for post-mortem inspection and bail out.
        tarexit.include=['*']
        tarexit(1)
    # Catch known Q-Chem crashes. :(
    # I've run into a lot of TCP socket errors and OpenMP segfaults on Blue Waters.
    for line in open(self.qcerr):
        if 'Unable to open a TCP socket for out-of-band communications' in line:
            with open(self.qcerr, 'a') as f: print('TCP socket failure :(', file=f)
            tarexit.include=['*']
            tarexit(1)
    # Note that we do NOT copy qcdir to qcdsav here, because we don't know whether the calculation is good.
    # Delete the strange .btr files that show up on some clusters.
    _exec('rm -rf *.btr', print_command=False)
    # Reset the OMP_NUM_THREADS environment variable.
    os.environ['OMP_NUM_THREADS'] = str(cores)
def scf_tactic(self, attempt=1):
    """
    Set the SCF convergence strategy for a given attempt number.

    First attempt uses 100 SCF iterations; all subsequent attempts
    use 300 SCF iterations.
    Attempt 1: DIIS with core / read guess.
    Attempt 2: RCA with SAD guess.
    Attempt 3: GDM with core / read guess.
    Attempts 4-6: the same three strategies with a looser SCF criterion.
    Note that readguess and coreguess are not explicitly set in
    self.remscf because their activation depends on the existence
    of self.qcdsav.
    """
    self.remscf = OrderedDict()
    # Choose the SCF algorithm and initial-guess policy.
    if attempt in (1, 4):
        self.readguess = True
        self.coreguess = True
        self.remscf['scf_algorithm'] = 'diis'
    elif attempt in (2, 5):
        print("RCA..", end=' ')
        self.readguess = False
        self.coreguess = False
        self.remscf['scf_algorithm'] = 'rca_diis'
        self.remscf['thresh_rca_switch'] = 4
    elif attempt in (3, 6):
        print("GDM..", end=' ')
        self.readguess = True
        self.coreguess = True
        self.remscf['scf_algorithm'] = 'diis_gdm'
    # Tight criterion for the first three attempts, sleazy afterwards.
    if attempt > 3:
        if attempt == 4:
            print("Relax convergence criterion..", end=' ')
        self.remscf['scf_convergence'] = 6
    else:
        self.remscf['scf_convergence'] = 8
    # Allow many more SCF cycles on retries.
    self.remscf['max_scf_cycles'] = 300 if attempt > 1 else 100
def converge(self, attempt=1):
    """
    Attempt to converge the SCF.

    Cycles through the strategies provided by scf_tactic(), rewriting the
    input file and re-running Q-Chem until the output file no longer
    reports an SCF convergence failure.  Aborts the whole workflow via
    DIE() once all six strategies have been exhausted.

    Parameters
    ----------
    attempt : int, optional
        Strategy number (1-6) to start from.

    Returns
    -------
    int
        The attempt number that finally converged (used by converge_opt()
        so it can keep using the successful strategy).
    """
    while True:
        self.scf_tactic(attempt)
        self.write_qcin()
        # Note to self: Within this approach, each SCF algorithm
        # starts from either (1) the initial guess or (2) the MOs
        # from qcdsav. That is to say, the subsequent attempts
        # do NOT read partially converged solutions from the previous
        # attempts.
        self.call_qchem()
        # Success = no failure markers anywhere in the output file.
        if all(["failed to converge" not in line and \
                "Convergence failure" not in line \
                for line in open(self.qcout)]): break
        attempt += 1
        if attempt > 6:
            self.DIE("SCF convergence failure")
    # Reset the SCF tactic back to 1 after convergence.
    # Note: This is a bit controversial. :)
    self.scf_tactic(1)
    # If not running in "clean mode", the qcdsav folder is updated.
    if not self.clean:
        _exec("rsync -a --delete %s/ %s/" % (self.qcdir, self.qcdsav), print_command=False)
    return attempt
def converge_opt(self):
    """
    SCF convergence forcing for geometry optimization jobs.
    This function exists because SCF convergence may fail for a geometry optimization.
    When this happens, we run SP calculations with different SCF algorithms, and then
    we may either do the geometry optimization with this new SCF algorithm or revert
    back to the first one.

    Side effects: runs Q-Chem one or more times, stores partial outputs in
    hidden .optN.out files, and finally concatenates them into self.qcout.
    """
    optouts = []
    thisopt = 1
    # SCF tactic for the optimization itself.
    # If any point in the optimization requires an alternate
    # algorithm, we continue the optimization using that.
    attempt = 1
    while True:
        self.scf_tactic(attempt)
        self.write_qcin()
        self.call_qchem()
        M1 = self.load_qcout()
        # For any optimization with at least one step,
        # we copy it to a temporary file to be joined at the end.
        # (When SCF failed, the last frame is the unconverged one, so we
        # require at least two frames in that case -- presumably so that
        # chunks with no completed step are dropped.)
        if len(M1.qm_energies) >= (2 if (M1.qcerr == 'SCF failed to converge') else 1):
            optouts.append('.opt%i.out' % thisopt)
            _exec('cp %s .opt%i.out' % (self.qcout, thisopt), print_command=False)
            thisopt += 1
        if M1.qcerr in ['SCF failed to converge', 'killed']:
            # If SCF fails to converge, try different algorithms to enforce convergence.
            # Restart from the last geometry reached by the optimizer.
            self.M.xyzs = [M1.xyzs[-1]]
            jobtype0 = self.jobtype
            self.jobtype = 'sp'
            attempt = self.converge(attempt)
            # If we were running an IRC calculation, we must revert
            # to geometry optimization because not at the TS anymore.
            if jobtype0 == 'rpath':
                self.jobtype = 'opt'
            else:
                self.jobtype = jobtype0
        else:
            # Optimization is finished; concatenate output files.
            _exec('cat %s > %s' % (' '.join(optouts), self.qcout), print_command=False)
            break
def calculate(self, converge=True):
    """
    Top-level driver for one Q-Chem job.

    Dispatches to the SCF-forcing wrappers (or to a raw call_qchem() when
    converge is False), refreshes the known-good scratch folder, and
    archives the input/output/error files of this job under a sequence
    number and a job-type suffix.

    Parameters
    ----------
    converge : bool, optional
        If True (default), enforce SCF convergence via converge() /
        converge_opt(); if False, call Q-Chem exactly once.
    """
    if not converge:
        self.call_qchem()
    elif self.jobtype in ['opt', 'ts', 'rpath']:
        self.converge_opt()
    else:
        self.converge()
    # Update qcdsav (if not using clean option).
    if not self.clean:
        _exec("rsync -a --delete %s/ %s/" % (self.qcdir, self.qcdsav), print_command=False)
    # Archive this job's files; a custom suffix (self.jobsuf) wins over
    # the plain job type when one has been set.
    suffix = getattr(self, 'jobsuf', self.jobtype)
    base_in = os.path.splitext(self.qcin)[0]
    base_out = os.path.splitext(self.qcout)[0]
    tagged_in = base_in + '.%02i.%s.in' % (self.ncalc, suffix)
    tagged_out = base_out + '.%02i.%s.out' % (self.ncalc, suffix)
    tagged_err = base_out + '.%02i.%s.err' % (self.ncalc, suffix)
    for src, dst in [(self.qcin, tagged_in), (self.qcout, tagged_out), (self.qcerr, tagged_err)]:
        _exec("cp %s %s" % (src, dst), print_command=False)
    self.qcins.append(tagged_in)
    self.qcouts.append(tagged_out)
    self.qcerrs.append(tagged_err)
    self.ncalc += 1
def sp(self):
    """Run a Q-Chem single-point energy calculation."""
    self.jobtype = 'sp'
    # A plain SP needs no extra $rem variables.
    self.remextra = OrderedDict()
    self.calculate()
def stab(self):
    """
    Run a Q-Chem wavefunction stability analysis (an SP job with the
    stability_analysis flag); archived under the 'stb' suffix.
    """
    self.jobtype = 'sp'
    self.jobsuf = 'stb'
    extras = OrderedDict()
    extras['stability_analysis'] = 'true'
    extras['max_cis_cycles'] = '100'
    extras['cis_n_roots'] = '4'
    self.remextra = extras
    self.calculate()
    # Drop the suffix so the next job is archived under its own type.
    del self.jobsuf
def make_stable(self, maxstab=3):
    """
    Repeat stability analysis calculation until stable.

    Alternates single-point (sp) and stability-analysis (stab) jobs,
    re-reading the orbitals each round, until the output reports that
    the UHF/UKS solution is internally stable or maxstab rounds are
    used up.

    Parameters
    ----------
    maxstab : int, optional
        Maximum number of sp+stability rounds before giving up (only a
        warning is printed in that case; no exception is raised).
    """
    self.nstab = 1
    self.stable = False
    while not self.stable:
        # Re-read orbitals so each round starts from the previous solution.
        self.readguess = True
        self.sp()
        self.readguess = True
        self.stab()
        # Parse Q-Chem output file for stability.
        stab2 = 0
        for line in open(self.qcout):
            if "UHF-> UHF stable" in line:
                stab2 = 1
            if "UKS-> UKS stable" in line:
                stab2 = 1
        if stab2:
            self.stable = True
            if self.nstab > 1:
                print("HF/KS stable %s" % (("at attempt %i" % self.nstab) if self.nstab > 1 else ""))
            break
        else:
            self.nstab += 1
            if self.nstab > maxstab:
                print("Warning: Stability analysis could not find HF/KS stable state")
                break
def force(self):
    """Run a Q-Chem nuclear-gradient ('force') calculation."""
    self.jobtype = 'force'
    # No job-specific $rem variables are needed.
    self.remextra = OrderedDict()
    self.calculate()
def freq(self):
    """Run a Q-Chem harmonic frequency / Hessian calculation."""
    self.jobtype = 'freq'
    self.remextra = OrderedDict()
    self.calculate()
    # Record that a Hessian is now available (ts() can then read it).
    self.haveH = 1
def write_vdata(self, fout):
    """
    Write vibrational data to an easy-to-use text file: the vib_top
    header, atom count, a provenance line, the geometry, then one
    frequency + displacement block per normal mode.
    """
    data = self.load_qcout()
    with open(fout, 'w') as handle:
        handle.write(vib_top + '\n')
        handle.write(str(data.na) + '\n')
        handle.write("Coordinates and vibrations calculated from %s\n" % self.qcout)
        for elem, xyz in zip(data.elem, data.xyzs[0]):
            handle.write("%2s % 8.3f % 8.3f % 8.3f\n" % (elem, xyz[0], xyz[1], xyz[2]))
        for frequency, displacement in zip(data.freqs, data.modes):
            handle.write('\n')
            handle.write("%.4f\n" % frequency)
            for row in displacement:
                handle.write("% 8.3f % 8.3f % 8.3f\n" % (row[0], row[1], row[2]))
def opt(self):
    """
    Run a Q-Chem geometry optimization and keep the final geometry,
    so that subsequent calculations start from the optimized structure.
    """
    self.jobtype = 'opt'
    self.remextra = OrderedDict([('geom_opt_max_cycles', '300')])
    self.calculate()
    final = self.load_qcout()
    # Carry only the last frame forward.
    self.M.comms = [final.comms[-1]]
    self.M.xyzs = [final.xyzs[-1]]
def ts(self):
    """
    Run a Q-Chem transition state optimization and keep the final
    geometry, so that subsequent calculations start from the TS.
    """
    self.jobtype = 'ts'
    extras = OrderedDict()
    extras['geom_opt_max_cycles'] = '500'
    extras['geom_opt_dmax'] = '100'
    extras['geom_opt_tol_gradient'] = '10'
    # Reuse an exact Hessian if a frequency job already produced one.
    if self.haveH:
        extras['geom_opt_hessian'] = 'read'
    self.remextra = extras
    self.calculate()
    final = self.load_qcout()
    self.M.comms = [final.comms[-1]]
    self.M.xyzs = [final.xyzs[-1]]
def fsm(self, nnode=21):
    """
    Run a Q-Chem freezing string (FSM) calculation and keep the last
    frame of the output, so that subsequent calculations start from
    the transition state guess geometry.

    Parameters
    ----------
    nnode : int, optional
        Number of nodes on the string (fsm_nnode), default 21.
    """
    self.jobtype = 'fsm'
    settings = OrderedDict()
    settings['fsm_nnode'] = nnode
    settings['fsm_ngrad'] = 3
    settings['fsm_mode'] = 2
    settings['fsm_opt_mode'] = 2
    self.remextra = settings
    self.calculate()
    final = self.load_qcout()
    self.M.comms = [final.comms[-1]]
    self.M.xyzs = [final.xyzs[-1]]
|
rmcgibbo/qchem-utils
|
qchem_utils/qchem.py
|
Python
|
gpl-2.0
| 40,007
|
[
"Gaussian",
"Q-Chem"
] |
393c1de26b51fc8c0288e3d487cd6de87cd032b8e5e9fde1124c519abb9f14ed
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import itk
from possum import pos_itk_core
from possum import pos_itk_transforms
from possum.pos_common import r
"""
.. note::
Some of the non-cruical, optional functions in this module require vtk
module to be installed. If it is not available the VTK support will be
disabled.
"""
def calculate_labels_midpoints(itk_image):
    """
    This function introduces a workflow for calculating the middle midpoints of
    the labelled images. The term 'middle midpoints' is used on purpose. You might
    think that we're calculating centroids here, but not. I use the term
    'middle midpoints' as it is not the centroids what is calculated here.
    Anyway, this function calculates middle midpoints of labels in the provided image.
    The midpoints are calculated in the following way:
    Iterate over all available labels except the background label which
    has been removed. The overall idea of this loop is to:
    1) Extract given label from the segmentation
    2) Extract the largest patch of the segmentation as there
    might be multiple disjoint regions colored with given label
    3) Apply the distance transform to the largest patch with
    given segmentation
    4) Pick the maximum of the distance transform for given segmentation
    and by this define the 'middle point' of given label.
    .. note :: Please have in mind that this procedure returns position of the
    first (index-wise) voxel with the maximum value. This means that if there is
    more than one pixel with the maximum value of the distance transform,
    location of the first one is returned. One could think that probably a
    centre of mass of the max voxels should be returned, but no. It is unknown
    if such centre would be located in the actual structure or outside the
    structure. Therefore some of the results may look weird but they are
    actually ok.
    :param itk_image: Labelled image, the image is expected to be a labelled
                      image in which individual discrete values correspond
                      to individual structures. Formally this means that
                      the image has to be of `uchar` or `ushort` type,
                      to have a single component and to have
                      a dimensionality of two or three. Images having
                      different properties will not be processed.
    :type itk_image: `itk.Image`
    :return: Middle midpoints of the labels in the image.
    :rtype: {int: ((float, float, float), (float, float, float)), ...}
    And now it is time to do some unit testing. Please also consider this set
    of unittests as an example how to use this function.
    >>> import base64
    >>> from possum import pos_itk_transforms
    >>> example_two_dimensions='H4sIAAAAAAAAA4thZCACFDEwMWgAISMcogImBg44u8EegdHBBmdUPosTNtvCizJLSlLzFJIqFQIq/TzTQjwVylKLijPz8xQM9IwMDA0MzAzM9QyJcfiAgTxtdPcxwgETHDDDwag6+qjjggNuOOCBA144GFVHH3UicCAKB2JwIA4Ho+roo04ODuThQAEOFOFgVB191AEAXtGveKAHAAA='
    >>> input_filename="/tmp/pos_itk_centroids_example_two_dimensions.nii.gz"
    >>> open(input_filename, "w").write(base64.decodestring(example_two_dimensions))
    >>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
    >>> midpoints = calculate_labels_midpoints(itk_image)
    >>> sorted(midpoints.keys()) == [1, 2, 3, 10, 11, 12, 13, 20, 21, 22, 23, 30, 31, 32, 33]
    True
    >>> map(int, midpoints[1][0]) == [14, 0, 0]
    True
    >>> map(int, midpoints[21][0]) == [14, 24, 0]
    True
    >>> midpoints[30] == ((0.0, 39.0, 0), (0, 39, 0))
    True
    >>> type(midpoints[30][1][1]) == type(1)
    True
    >>> type(midpoints[30][0][1]) == type(1)
    False
    >>> type(midpoints[30][0][1]) == type(1.0)
    True
    >>> os.remove(input_filename)
    Now we will try to process a 3D image
    >>> example_three_dimensions="H4sIAAAAAAAAA+3PPUtCURzH8XN1iUposNzqNAty1MqlchCqu4Rp0NIUXsPlWjcJby3Rw1tI23rYeoCGrLfnF4nEWvxPDfd8DhfOF+5wfvuOGkOg4mpmcJzvMyqmJn7uF8Xh99t7abQLpb//KLUXNFotz9cHoS6H225919WnXnDSaPraZHIma8yKKWSy4zz83/jp4fscxBCHtCcxhWkkIO0kZjGHFKQ9jwVoLELadr/dH+X9OeSxhGVIexVrWEcR0t7AJrbgQtpl7KCCKqRt99v9Ud5fg4c6DiFtH00c4RjSbiPEGc4h7Utc4Ro3kLbdb/dHef8tOujiDtK+xwMe8QRpP+MFr3iDtD/Qwye+IG273+6P8v4+5Jgfs2ARAAA="
    >>> input_filename="/tmp/pos_itk_centroids_example_three_dimensions.nii.gz"
    >>> open(input_filename, "w").write(base64.decodestring(example_three_dimensions))
    >>> itk_image = pos_itk_transforms.read_itk_image(input_filename)
    >>> midpoints = calculate_labels_midpoints(itk_image)
    >>> os.remove(input_filename)
    >>> str(type(midpoints)) == "<type 'dict'>"
    True
    >>> len(midpoints.keys()) == 63
    True
    >>> str(midpoints.get(0,None)) == "None"
    True
    >>> midpoints[1] == ((5.0, 0.0, 0.0), (5, 0, 0))
    True
    >>> type(midpoints[30][0][1]) == type(1)
    False
    >>> type(midpoints[30][0][1]) == type(1)
    False
    >>> type(midpoints[30][0][1]) == type(1.0)
    True
    >>> midpoints[183] == ((15.0, 15.0, 15.0), (15, 15, 15))
    True
    >>> midpoints[111] == ((5.0, 5.0, 9.0), (5, 5, 9))
    True
    >>> midpoints[53] == ((13.0, 0.0, 5.0), (13, 0, 5))
    True
    """
    C_BACKGROUND_LABEL_IDX = 0
    # Define the dimensionality, data type and number of components
    # of the label image
    label_type = \
        pos_itk_core.io_image_type_to_component_string_name[
            itk_image.__class__]
    # Extract the details of the image provided and check if they are
    # ok to use in the routine.
    n_dim = len(itk_image.GetLargestPossibleRegion().GetSize())
    number_of_components = itk_image.GetNumberOfComponentsPerPixel()
    data_type = label_type[1]
    assert n_dim in [2, 3], \
        "Incorrect dimensionality."
    assert number_of_components == 1, \
        "Only single component images are allowed."
    assert data_type in ["unsigned_char", "unsigned_short"], \
        r("Incorrect data type for a labelled image only unsigned_char\
        and unsigned_short are accepted.")
    # t_label_img is the ITK image type class to be used in filters
    # templates.
    t_label_img = itk_image.__class__
    # We'll be also using another image type. This one is identical
    # in terms of size and dimensionality as the labelled image.
    # The difference is in data type: this one has to be float to handle
    # the distance transform well.
    float_type = list(label_type)
    float_type[1] = "float"
    t_float_img = \
        pos_itk_core.io_component_string_name_to_image_type[tuple(float_type)]
    # The purpose of the filter below is to define the unique labels
    # given segmentation contains.
    unique_labels = \
        itk.LabelGeometryImageFilter[(t_label_img, t_label_img)].New()
    unique_labels.SetInput(itk_image)
    unique_labels.CalculatePixelIndicesOff()
    unique_labels.Update()
    # This is where we'll collect the results: label index mapped to a
    # (physical point, voxel index) pair.
    middle_points = {}
    # We have to map the available labels returned by itk
    # as sometimes strange things happen and they are returned as longints
    # which are apparently incompatible with python in type.
    # Consider it a safety precaution
    available_labels = map(int, unique_labels.GetLabels())
    # Remove the background label if it is actually present; a missing
    # background label simply raises ValueError which is safe to ignore.
    try:
        available_labels.remove(C_BACKGROUND_LABEL_IDX)
    except ValueError:
        pass
    # Now iterate over all available labels except the background label which
    # has been removed. The overall idea of this loop is to:
    # 1) Extract given label from the segmentation
    # 2) Extract the largest patch of the segmentation as there
    #    might be multiple disjoint regions colored with given label
    # 3) Apply the distance transform to the largest patch with
    #    given segmentation
    # 4) Pick the maximum of the distance transform for given segmentation
    #    and by this define the 'middle point' of given label
    # I call the midpoints 'middle midpoints' not centroids as centroids
    # are something different and they are calculated in a different
    # way. Our center midpoints cannot be called centroids.
    for label_idx in available_labels:
        # 1) Binary mask of the current label.
        extract_label = \
            itk.BinaryThresholdImageFilter[
                (t_label_img, t_label_img)].New()
        extract_label.SetInput(itk_image)
        extract_label.SetUpperThreshold(label_idx)
        extract_label.SetLowerThreshold(label_idx)
        extract_label.SetOutsideValue(0)
        extract_label.SetInsideValue(1)
        extract_label.Update()
        # 2) Split into connected components ...
        patches = \
            itk.ConnectedComponentImageFilter[
                (t_label_img, t_label_img)].New()
        patches.SetInput(extract_label.GetOutput())
        patches.Update()
        # ... and keep only the largest one.  Attribute code 100 appears to
        # be ITK's NUMBER_OF_PIXELS shape attribute (i.e. largest by pixel
        # count) -- TODO confirm against the ITK documentation.
        largest_patch = \
            itk.LabelShapeKeepNObjectsImageFilter[t_label_img].New()
        largest_patch.SetInput(patches.GetOutput())
        largest_patch.SetBackgroundValue(0)
        largest_patch.SetNumberOfObjects(1)
        largest_patch.SetAttribute(100)
        largest_patch.Update()
        # 3) Signed distance transform, positive inside the patch.
        distance_transform = \
            itk.SignedMaurerDistanceMapImageFilter[
                (t_label_img, t_float_img)].New()
        distance_transform.SetInput(largest_patch.GetOutput())
        distance_transform.InsideIsPositiveOn()
        distance_transform.Update()
        # 4) The first voxel carrying the maximal distance value defines
        #    the midpoint of this label.
        centroid = itk.MinimumMaximumImageCalculator[t_float_img].New()
        centroid.SetImage(distance_transform.GetOutput())
        centroid.Compute()
        index = centroid.GetIndexOfMaximum()
        point = itk_image.TransformIndexToPhysicalPoint(index)
        # We need to slightly refine the results returned by itk.
        # The results have to be processed in a slightly different way for
        # two dimensional images than for 3D ones.
        # Again, we do a lot of explicit casting to assure type
        # compatibility. The 2D midpoints are converted into 3D midpoints since
        # it is easier to use them in vtk if they're 3D midpoints.
        if n_dim == 2:
            point = map(float, point) + [0]
            index = map(int, index) + [0]
        if n_dim == 3:
            point = map(float, point)
            index = map(int, index)
        middle_points[label_idx] = (tuple(point), tuple(index))
    return middle_points
def points_to_vtk_points(points_list):
    """
    The function converts the location of the middle points into a vtkPolyData
    structure and assigns appropriate label IDs to the individual points of the
    vtk points structure. Basically, you can use the resulting vtkPolyData() and
    know where the centre of a particular structure is.
    ... note ::
        This function returns None if the vtk module cannot be imported.
    :param points_list: List of points to turn into vtk points
    :type points_list: {int: ((float, float, float), (float, float, float)), ...}
    :return: Midpoints of the individual structures expressed as
             vtk.vtkPolyData()
    :rtype: `vtk.vtkPolyData`
    """
    # Import vtk lazily so that the rest of this module works without it.
    # (The previous version referenced a vtk name that was never imported
    # and therefore always returned None via the swallowed NameError;
    # importing it here preserves the graceful degradation while actually
    # enabling the VTK code path.)
    try:
        import vtk
        vtk.vtkVersion()
    except Exception:
        return None
    n_points = len(points_list.keys())
    points = vtk.vtkPoints()
    vertices = vtk.vtkCellArray()
    # NOTE(review): an unsigned char array caps label IDs at 255 -- confirm
    # labels never exceed this before relying on Label_ID downstream.
    id_array = vtk.vtkUnsignedCharArray()
    id_array.SetName("Label_ID")
    id_array.SetNumberOfComponents(1)
    id_array.SetNumberOfTuples(n_points)
    # Each label becomes a single vertex cell carrying its label ID.
    for (i, (pt, idx)) in points_list.items():
        id_ = points.InsertNextPoint(pt)
        vertices.InsertNextCell(1)
        vertices.InsertCellPoint(id_)
        id_array.SetTuple1(id_, i)
    point = vtk.vtkPolyData()
    point.SetPoints(points)
    point.SetVerts(vertices)
    point.GetPointData().AddArray(id_array)
    return point
if __name__ == '__main__':
    # Run all doctests embedded in this module (Python 2 print statement).
    import doctest
    print doctest.testmod(verbose=True)
|
pmajka/poSSum
|
possum/utils/pos_util_midpoints.py
|
Python
|
mit
| 12,120
|
[
"VTK"
] |
a86a98520d4606232da945d1addd9283964184ad390161ba12fad74d5682df2b
|
# Regression test (Python 2): non-self-consistent hybrid-functional
# (PBE0 / HSE03 / HSE06) eigenvalue corrections for rocksalt MgO with a
# GPAW plane-wave ground state.
import numpy as np
from ase.lattice import bulk
from ase.dft.kpoints import monkhorst_pack
from ase.parallel import paropen
from gpaw import GPAW, PW
from gpaw.mpi import size, rank, world, serial_comm
from gpaw.xc.tools import vxc
from gpaw.xc.hybridg import HybridXC
# Rocksalt MgO primitive cell; a is the lattice constant in Angstrom.
mgo = bulk('MgO', 'rocksalt', a=4.189)
# Split the world communicator: ranks 0-2 share one communicator and run the
# real calculation, while every other rank gets a private communicator and
# writes a throwaway file so that all ranks stay in step.
if rank < 3:
    comm = world.new_communicator(np.arange(min(3, size)))
else:
    comm = world.new_communicator(np.array((rank,)))
# 'if 1:' presumably kept as a hand-toggle for skipping the expensive SCF
# block when the 'mgo' restart file already exists -- TODO confirm.
if 1:
    # Plane-wave cutoff 500 eV; shifted 2x2x2 Monkhorst-Pack k-point grid.
    mgo.calc = GPAW(mode=PW(500),
                    parallel=dict(band=1),
                    idiotproof=False,
                    communicator=comm,
                    setups={'Mg': '2'},
                    convergence={'eigenstates': 5.e-9},
                    kpts=monkhorst_pack((2, 2, 2)) + 0.25)
    mgo.get_potential_energy()
    if rank < 3:
        mgo.calc.write('mgo', 'all')
    else:
        mgo.calc.write('dummy_%d' % rank, 'all')
world.barrier()
for name in ['PBE0', 'HSE03', 'HSE06']:
    # Re-load the converged ground state serially and evaluate the
    # hybrid-minus-LDA xc eigenvalue corrections non-self-consistently.
    calc = GPAW('mgo', setups={'Mg': '2'},
                txt=None, communicator=serial_comm)
    hyb_calc = HybridXC(name, alpha=5.0, bandstructure=True, world=comm)
    de_skn = vxc(calc, hyb_calc) - vxc(calc, 'LDA')
    # Reference values: three groups of band shifts plus the EXX energy.
    if name == 'PBE0':
        de_skn_test = np.array([-1.3700, -1.3643, -1.3777, -24.184590])
    if name == 'HSE03':
        de_skn_test = np.array([-2.4565, -2.4326, -2.4583, -24.405001])
    if name == 'HSE06':
        de_skn_test = np.array([-2.0311, -2.0151, -2.0367, -24.324485])
    if rank == 0:
        print de_skn[0, 0, 1:4], abs(de_skn[0, 0, 1:4] - de_skn_test[0]).max()
        print de_skn[0, 1, 2:4], abs(de_skn[0, 1, 2:4] - de_skn_test[1]).max()
        print de_skn[0, 2, 2:4], abs(de_skn[0, 2, 2:4] - de_skn_test[2]).max()
        print hyb_calc.exx, abs(hyb_calc.exx - de_skn_test[3])
    # The computed corrections must match the stored references.
    assert abs(de_skn[0, 0, 1:4] - de_skn_test[0]).max() < 0.02
    assert abs(de_skn[0, 1, 2:4] - de_skn_test[1]).max() < 0.008
    assert abs(de_skn[0, 2, 2:4] - de_skn_test[2]).max() < 0.004
    assert abs(hyb_calc.exx - de_skn_test[3]) < 2e-4
|
robwarm/gpaw-symm
|
gpaw/test/pw/mgo_hybrids.py
|
Python
|
gpl-3.0
| 2,057
|
[
"ASE",
"GPAW"
] |
7e2910e3519115f6443f2ba8eff9071d74f11fe4523bd236d44973e0de760781
|
"""Tests for the DIRAC.Core.Utilities.Extensions module"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import six
import DIRAC
from DIRAC.Core.Utilities.Extensions import (
findSystems,
findAgents,
findExecutors,
findServices,
findDatabases,
extensionsByPriority,
getExtensionMetadata,
)
def test_findSystems():
    """Every discovered system name carries the 'System' suffix."""
    discovered = findSystems([DIRAC])
    assert len(discovered) > 5
    assert all(name.endswith("System") for name in discovered)
def test_findAgents():
    """DIRAC ships more than a handful of agent modules."""
    assert len(findAgents([DIRAC])) > 5
def test_findExecutors():
    """At least a couple of executor modules are discoverable."""
    assert len(findExecutors([DIRAC])) > 1
def test_findServices():
services = findServices([DIRAC])
assert len(services) > 5
def test_findDatabases():
databases = findDatabases([DIRAC])
assert len(databases) > 5
assert all(str(fn).endswith(".sql") for system, fn in databases)
def test_extensionsByPriority():
assert "DIRAC" in extensionsByPriority()
@pytest.mark.skipif(six.PY2, reason="Requires Python3")
def test_getExtensionMetadata():
metadata = getExtensionMetadata("DIRAC")
assert metadata["priority"] == 0
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_Extensions.py
|
Python
|
gpl-3.0
| 1,244
|
[
"DIRAC"
] |
a7c6db839cc4da3f32e9d9d4bb537c2af503c092754f1102b80953e002743b7d
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sky.skysubtractor Contains the SkySubtractor class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import io
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.interpolate import CloughTocher2DInterpolator as intp
from scipy.interpolate import SmoothBivariateSpline
# Test
# from sklearn.preprocessing import PolynomialFeatures
# from sklearn.linear_model import LinearRegression
# from sklearn.pipeline import Pipeline
# Import astronomical modules
from photutils.background import Background
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter
from astropy import stats
# Import the relevant PTS classes and modules
from ..core.frame import Frame
from ..basics.mask import Mask
from ..core.source import Source
from ..basics.geometry import Coordinate, Circle, Composite
from ..basics.region import Region
from ..basics.skyregion import SkyRegion
from ..tools import plotting, statistics, fitting, plotting
from ...core.basics.configurable import OldConfigurable
from ...core.tools.logging import log
from ...core.basics.distribution import Distribution
# -----------------------------------------------------------------
class SkySubtractor(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(SkySubtractor, self).__init__(config, "magic")
# -- Attributes --
# The image frame
self.frame = None
# The mask of sources
self.sources_mask = None
# The extra mask
self.extra_mask = None
# The principal shape
self.principal_shape = None
# The region of saturated stars
self.saturation_region = None
# The animation
self.animation = None
# The sky region
self.region = None
# The output mask (combined input + bad mask + galaxy annulus mask + expanded saturation mask + sigma-clipping mask)
self.mask = None
# The estimated sky (a single floating point value or a Frame, depending on the estimation method)
self.sky = None
# The estimated sky noise
self.noise = None
# Relevant for when estimation method is 'photutils'
self.phot_sky = None
self.phot_rms = None
# Relevant for when estimation method is 'pts'
self.apertures_frame = None
self.apertures_mean_frame = None
self.apertures_noise_frame = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new SkySubtractor instance
if arguments.config is not None: subtractor = cls(arguments.config)
elif arguments.settings is not None: subtractor = cls(arguments.settings)
else: subtractor = cls()
# Return the new instance
return subtractor
# -----------------------------------------------------------------
def run(self, frame, principal_shape, sources_mask, extra_mask=None, saturation_region=None, animation=None):
"""
This function ...
:param frame:
:param principal_shape:
:param sources_mask:
:param extra_mask:
:param saturation_region:
:param animation:
:return:
"""
# 1. Call the setup function
self.setup(frame, principal_shape, sources_mask, extra_mask, saturation_region, animation)
# 2. Create the sky region
self.create_region()
# 3. Create mask
self.create_mask()
# 4. Do an extra sigma-clipping step on the data
if self.config.sigma_clip_mask: self.sigma_clip()
# 5. Estimate the sky (and sky noise)
self.estimate()
# 6. Subtract the sky
self.subtract()
# 7. Set the frame to zero outside of the principal galaxy
if self.config.set_zero_outside: self.set_zero_outside()
# 8. Eliminate negative values from the frame, set them to zero
if self.config.eliminate_negatives: self.eliminate_negatives()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the sky subtractor ...")
# Set default values for all attributes
self.frame = None
self.sources_mask = None
self.extra_mask = None
self.principal_shape = None
self.saturation_region = None
self.animation = None
self.mask = None
self.sky = None
self.noise = None
self.phot_sky = None
self.phot_rms = None
self.apertures_frame = None
self.apertures_mean_frame = None
self.apertures_noise_frame = None
# -----------------------------------------------------------------
def setup(self, frame, principal_shape, sources_mask, extra_mask=None, saturation_region=None, animation=None):
"""
This function ...
:param frame:
:param principal_shape:
:param sources_mask:
:param extra_mask:
:param saturation_region:
:param animation:
:return:
"""
# Call the setup function of the base class
super(SkySubtractor, self).setup()
# Make a local reference to the image frame
self.frame = frame
# Make a reference to the principal shape
self.principal_shape = principal_shape
# Set the masks
self.sources_mask = sources_mask
self.extra_mask = extra_mask
# Set the saturation_region
self.saturation_region = saturation_region
# Make a reference to the animation
self.animation = animation
# -----------------------------------------------------------------
def create_region(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the sky region ...")
# If the sky region has to be loaded from file
if self.config.sky_region is not None:
sky_region = SkyRegion.from_file(self.config.sky_region)
self.region = sky_region.to_pixel(self.frame.wcs)
# If no region file is given by the user, create an annulus from the principal ellipse
else:
# Create the sky annulus
annulus_outer_factor = self.config.mask.annulus_outer_factor
annulus_inner_factor = self.config.mask.annulus_inner_factor
inner_shape = self.principal_shape * annulus_inner_factor
outer_shape = self.principal_shape * annulus_outer_factor
# Create the annulus
annulus = Composite(outer_shape, inner_shape)
# Create the sky region consisting of only the annulus
self.region = Region()
self.region.append(annulus)
# -----------------------------------------------------------------
def create_mask(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the sky mask ...")
# Create a mask from the pixels outside of the sky region
outside_mask = self.region.to_mask(self.frame.xsize, self.frame.ysize).inverse()
# Create a mask from the principal shape
principal_mask = self.principal_shape.to_mask(self.frame.xsize, self.frame.ysize)
#plotting.plot_mask(outside_mask, title="outside mask")
#plotting.plot_mask(principal_mask, title="principal mask")
#plotting.plot_mask(self.sources_mask, title="sources mask")
# Set the mask, make a copy of the input mask initially
self.mask = self.sources_mask + outside_mask + principal_mask
# Add the extra mask (if specified)
if self.extra_mask is not None: self.mask += self.extra_mask
# Check whether saturation contours are defined
if self.saturation_region is not None:
# Expand all contours
expanded_region = self.saturation_region * 1.5
# Create the saturation mask
saturation_mask = expanded_region.to_mask(self.frame.xsize, self.frame.ysize)
self.mask += saturation_mask
# -----------------------------------------------------------------
def sigma_clip(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Performing sigma-clipping on the pixel values ...")
### TEMPORARY: WRITE OUT MASK BEFORE CLIPPING
# Create a frame where the objects are masked
#frame = copy.deepcopy(self.frame)
#frame[self.mask] = float(self.config.writing.mask_value)
# Save the masked frame
#frame.save("masked_sky_frame_notclipped.fits")
###
# Create the sigma-clipped mask
self.mask = statistics.sigma_clip_mask(self.frame, self.config.sigma_clipping.sigma_level, self.mask)
# -----------------------------------------------------------------
def estimate(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky ...")
# Estimate the sky by taking the mean value of all pixels that are not masked
if self.config.estimation.method == "mean": self.estimate_sky_mean()
# Estimate the sky by taking the median value of all pixels that are not masked
elif self.config.estimation.method == "median": self.estimate_sky_median()
# The sky should be estimated by fitting a polynomial function to the pixels
elif self.config.estimation.method == "polynomial": self.estimate_sky_polynomial()
# Use photutils to estimate the sky and sky noise
elif self.config.estimation.method == "photutils": self.estimate_sky_photutils()
# Use our own method to estimate the sky and sky noise
elif self.config.estimation.method == "pts": self.estimate_sky_pts()
# Unkown sky estimation method
else: raise ValueError("Unknown sky estimation method")
# -----------------------------------------------------------------
def estimate_sky_mean(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by calculating the mean value of all non-masked pixels ...")
# Create a frame filled with the mean value
self.sky = self.mean
# -----------------------------------------------------------------
def estimate_sky_median(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by calculating the median value of all non-masked pixels ...")
# Create a frame filled with the median value
self.sky = self.median
# -----------------------------------------------------------------
def estimate_sky_polynomial(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky by fitting a polynomial function to all non-masked pixels ...")
polynomial = fitting.fit_polynomial(self.frame, 3, mask=self.mask)
# Evaluate the polynomial
data = fitting.evaluate_model(polynomial, 0, self.frame.xsize, 0, self.frame.ysize)
#plotting.plot_box(data, title="estimated sky")
# Create sky map
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.sky = Frame(data,
wcs=self.frame.wcs,
name="sky",
description="estimated sky",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# -----------------------------------------------------------------
def estimate_sky_photutils(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky and sky noise by using photutils ...")
bkg = Background(self.frame, (50, 50), filter_shape=(3, 3), filter_threshold=None, mask=self.mask,
method="sextractor", backfunc=None, interp_order=3, sigclip_sigma=3.0, sigclip_iters=10)
# Masked background
masked_background = np.ma.masked_array(bkg.background, mask=self.mask)
#plotting.plot_box(masked_background, title="masked background")
mean_sky = np.ma.mean(masked_background)
median_sky = np.median(masked_background.compressed())
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.phot_sky = Frame(bkg.background,
wcs=self.frame.wcs,
name="phot_sky",
description="photutils background",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# data, wcs=None, name=None, description=None, unit=None, zero_point=None, filter=None, sky_subtracted=False, fwhm=None
self.phot_rms = Frame(bkg.background_rms,
wcs=self.frame.wcs,
name="phot_rms",
description="photutils rms",
unit=self.frame.unit,
zero_point=self.frame.zero_point,
filter=self.frame.filter,
sky_subtracted=False,
fwhm=self.frame.fwhm)
# Set sky level
self.sky = median_sky
# -----------------------------------------------------------------
def estimate_sky_pts(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Estimating the sky and sky noise by using or own procedures ...")
# Check whether the FWHM is defined for the frame
if self.frame.fwhm is None: raise RuntimeError("The FWHM of the frame is not defined: sky apertures cannot be generated")
# Determine the aperture radius
aperture_radius = self.determine_aperture_radius()
# Determine the number of apertures to use
napertures = self.determine_number_of_apertures(aperture_radius)
# Generate the apertures
aperture_centers, aperture_means, aperture_stddevs = self.generate_apertures(aperture_radius, napertures)
# Remove outliers
aperture_centers, aperture_means, aperture_stddevs = self.remove_aperture_outliers(aperture_centers, aperture_means, aperture_stddevs)
# Calculate the large-scale variation level
large_scale_variations_error = aperture_means.std()
# Calculate the mean pixel-by-pixel noise over all apertures
pixel_to_pixel_noise = np.mean(aperture_stddevs)
# Determine the median sky level
self.sky = np.median(aperture_means)
# Determine the noise by quadratically adding the large scale variation and the mean pixel-by-pixel noise
self.noise = np.sqrt(large_scale_variations_error**2 + pixel_to_pixel_noise**2)
# Debugging
log.debug("The estimated sky level is " + str(self.sky))
log.debug("The estimated sky noise level is " + str(self.noise))
# Create aperture frames
self.create_aperture_frames(aperture_centers, aperture_means, aperture_stddevs, aperture_radius)
# Finishing step
if self.config.estimation.finishing_step is None: pass
elif self.config.estimation.finishing_step == "polynomial": self.fit_polynomial_to_apertures(aperture_centers, aperture_means)
elif self.config.estimation.finishing_step == "interpolation": self.interpolate_apertures(aperture_centers, aperture_means)
else: raise ValueError("Invalid finishing step")
#self.plot_interpolated(aperture_centers, aperture_means)
#self.try_to_interpolate_smart(aperture_centers, aperture_means)
# -----------------------------------------------------------------
def determine_aperture_radius(self):
"""
This function ...
:return:
"""
# Determine the radius for the sky apertures
fwhm_pix = self.frame.fwhm_pix
radius = 4.0 * fwhm_pix
# Debugging
log.debug("Using sky apertures with a radius of " + str(radius) + " pixels")
# Return the aperture radius
return radius
# -----------------------------------------------------------------
def determine_number_of_apertures(self, radius):
"""
This function ...
:param radius:
:return:
"""
npixels = np.sum(self.mask.inverse())
# Assuming optimal hexagonal packing, get an estimate of the maximum number of circles of given radius
# can fit in the area covered by the pixels that are not masked. This is obviously a significant overestimation
# especially in the case where the radius becomes of the same order of magnitude as the radius of the
# galaxy annulus (the hexagonal packing assumes a rectangular area or at least rectangular-like edges)
# With perfect hexagonal packing, the area of the rectangle that will be covered by the circles is π/(2√3),
# which is approximately equal to 0.907
# See: https://www.quora.com/How-many-3-75-inch-circles-will-fit-inside-a-17-inch-square
coverable_area = 0.907 * npixels
circle_area = np.pi * radius ** 2
optimal_number_of_apertures = coverable_area / circle_area
# Debugging
log.debug("The upper limit to the number of apertures that fit in the part of the frame that is not masked "
"(assuming hexagonal packing) is " + str(optimal_number_of_apertures))
# Determine the number of apertures that are going to be used, take a third of the upper limit
napertures = int(optimal_number_of_apertures / 3.)
# Debugging
log.debug("A total of " + str(napertures) + " apertures are going to be used to estimate the sky ...")
# Return the number of apertures
return napertures
# -----------------------------------------------------------------
def generate_apertures(self, radius, napertures):
"""
This function ...
:param radius:
:param napertures:
:return:
"""
circle_area = np.pi * radius ** 2
# Get arrays of the coordinates of all pixels that are not masked
pixels_y, pixels_x = np.where(self.mask.inverse())
# Get the number of pixels that are not masked (also the area of the frame not masked)
npixels = pixels_x.size
# Create a mask that tags all pixels that have been covered by one of the apertures
apertures_mask = Mask.empty_like(self.frame)
# Counter to keep track of the number of 'succesful' apertures that have been used
current_napertures = 0
# Initialize lists to contain the mean sky levels and noise levels in each of the apertures
aperture_centers = []
aperture_means = []
aperture_stddevs = []
# Draw 100 random coordinates
while True:
# Draw a random pixel index
index = np.random.randint(npixels)
# Get the x and y coordinate of the pixel
x = pixels_x[index]
y = pixels_y[index]
# Create a coordinate for the center of the aperture
center = Coordinate(x, y)
# Create a circular aperture
circle = Circle(center, radius)
# Create a Source from the frame
source = Source.from_shape(self.frame, circle, 1.3)
# Get a mask of the pixels that overlap with the sky mask
sky_mask_cutout = self.mask[source.y_slice, source.x_slice]
overlapping = sky_mask_cutout * source.mask
# Calculate the overlap fraction with the sky mask
number_of_overlapping_pixels = np.sum(overlapping)
overlap_fraction = number_of_overlapping_pixels / circle_area
# If the overlap fraction is larger than 50% for this aperture, skip it
if overlap_fraction >= 0.5:
log.debug(
"For this aperture, an overlap fraction of more than 50% was found with the sky mask, skipping ...")
continue
# Get a mask of the pixels that overlap with the apertures mask
apertures_mask_cutout = apertures_mask[source.y_slice, source.x_slice]
overlapping = apertures_mask_cutout * source.mask
# Calculate the overlap fraction with the apertures mask
number_of_overlapping_pixels = np.sum(overlapping)
overlap_fraction = number_of_overlapping_pixels / circle_area
# If the overlap fraction is larger than 10% for this aperture, skip it
if overlap_fraction >= 0.1:
log.debug(
"For this aperture, an overlap fraction of more than 10% was found with other apertures, skipping ...")
# Add the aperture area to the mask
apertures_mask[source.y_slice, source.x_slice] += source.mask
# Debugging
log.debug("Placed aperture " + str(current_napertures+1) + " of " + str(napertures) + " ({0:.2f}%)".format((current_napertures+1)/napertures*100.))
if self.animation is not None:
plt.figure()
plt.imshow(apertures_mask, origin="lower")
plt.title("Aperture mask")
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
im = imageio.imread(buf)
buf.close()
self.animation.add_frame(im)
# Calculate the mean sky value in this aperture
masked_array_cutout = np.ma.MaskedArray(source.cutout, mask=sky_mask_cutout + source.background_mask)
# plotting.plot_box(masked_array_cutout)
aperture_mean = np.ma.mean(masked_array_cutout)
#aperture_median = np.ma.median(masked_array_cutout)
# aperture_median2 = np.median(masked_array_cutout.compressed()) # same result, but unnecessary compressed step
aperture_stddev = np.std(masked_array_cutout)
# print("aperture mean:", aperture_mean)
# print("aperture median:", aperture_median, aperture_median2)
# print("aperture stddev:", aperture_std)
aperture_centers.append(center)
aperture_means.append(aperture_mean)
aperture_stddevs.append(aperture_stddev)
# Another succesful aperture
current_napertures += 1
# Stop when we have reached the desired number of apertures
if current_napertures == napertures: break
# Create Numpy arrays from the aperture means and standard deviations
aperture_means = np.array(aperture_means)
aperture_stddevs = np.array(aperture_stddevs)
# Return the aperture properties
return aperture_centers, aperture_means, aperture_stddevs
# -----------------------------------------------------------------
def remove_aperture_outliers(self, aperture_centers, aperture_means, aperture_stddevs):
"""
This function ...
:return:
"""
means_distribution = Distribution.from_values(aperture_means, bins=50)
stddevs_distribution = Distribution.from_values(aperture_stddevs, bins=50)
#means_distribution.plot("Aperture means before sigma-clipping")
#stddevs_distribution.plot("Aperture stddevs before sigma-clipping")
clip_mask = stats.sigma_clip(aperture_stddevs, sigma=3.0, iters=None, copy=False).mask
clipped_aperture_centers = []
for i in range(len(clip_mask)):
if clip_mask[i]: continue
else: clipped_aperture_centers.append(aperture_centers[i])
aperture_centers = clipped_aperture_centers
aperture_means = np.ma.MaskedArray(aperture_means, clip_mask).compressed()
aperture_stddevs = np.ma.MaskedArray(aperture_stddevs, clip_mask).compressed()
means_distribution = Distribution.from_values(aperture_means, bins=50)
stddevs_distribution = Distribution.from_values(aperture_stddevs, bins=50)
#means_distribution.plot("Aperture means after sigma-clipping")
#stddevs_distribution.plot("Aperture stddevs after sigma-clipping")
# Return the sigma-clipped aperture properties
return aperture_centers, aperture_means, aperture_stddevs
# -----------------------------------------------------------------
def create_aperture_frames(self, aperture_centers, aperture_means, aperture_stddevs, aperture_radius):
"""
This function ...
:param aperture_centers:
:param aperture_means:
:param aperture_stddevs:
:param aperture_radius:
:return:
"""
self.apertures_frame = Frame.nans_like(self.frame)
self.apertures_mean_frame = Frame.nans_like(self.frame)
self.apertures_noise_frame = Frame.nans_like(self.frame)
for i in range(len(aperture_centers)):
center = aperture_centers[i]
circle = Circle(center, aperture_radius)
mask = Mask.from_shape(circle, self.frame.xsize, self.frame.ysize)
self.apertures_frame[mask] = self.frame[mask]
self.apertures_mean_frame[mask] = aperture_means[i]
self.apertures_noise_frame[mask] = aperture_stddevs[i]
# -----------------------------------------------------------------
def fit_polynomial_to_apertures(self, aperture_centers, aperture_means):
"""
This function ...
:return:
"""
x_values = [center.x for center in aperture_centers]
y_values = [center.y for center in aperture_centers]
# -- Fit polynomial --
# Fit polynomial to aperture means
degree = 4
poly_init = models.Polynomial2D(degree=degree)
fit_model = LevMarLSQFitter()
polynomial = fit_model(poly_init, x_values, y_values, aperture_means)
# Create x and y meshgrid for evaluating
y_grid, x_grid = np.mgrid[:self.frame.ysize, :self.frame.xsize]
# Evaluate the model
data = polynomial(x_grid, y_grid)
self.sky = Frame(data)
# plotting.plot_box(data)
# -- Fit spline --
#f = interpolate.interp2d(x_values, y_values, aperture_means, kind='cubic')
#x_grid = np.array(range(self.frame.xsize))
#y_grid = np.array(range(self.frame.ysize))
#data = f(x_grid, y_grid)
# Set new sky frame
#self.sky = Frame(data)
# -----------------------------------------------------------------
def interpolate_apertures(self, aperture_centers, aperture_means):
"""
This function ...
:param aperture_centers:
:param aperture_means:
:return:
"""
# Inform the user
log.info("Interpolating between the mean values of each aperture to fill the sky frame ...")
x_values = np.array([center.x for center in aperture_centers])
y_values = np.array([center.y for center in aperture_centers])
x_ticks = np.arange(0, self.frame.xsize, 1)
y_ticks = np.arange(0, self.frame.ysize, 1)
z_grid = mlab.griddata(x_values, y_values, aperture_means, x_ticks, y_ticks)
# Set the sky frame
self.sky = Frame(z_grid)
# -----------------------------------------------------------------
def interpolate_apertures_try(self, aperture_centers, aperture_means):
"""
This function ...
:return:
"""
return
x_values = np.array([center.x for center in aperture_centers])
y_values = np.array([center.y for center in aperture_centers])
#X, Y = np.meshgrid(x_values, y_values)
X = x_values
Y = y_values
Z = aperture_means
#print(X, Y, Z)
#print(len(X), len(Y), len(Z))
#C = intp((X, Y), Z)
x_space = np.linspace(0, self.frame.xsize, 1)
y_space = np.linspace(0, self.frame.ysize, 1)
xi, yi = np.meshgrid(x_space, y_space)
#zi = C(xi, yi)
#self.sky = Frame(zi)
from scipy.interpolate import LSQBivariateSpline
spline = SmoothBivariateSpline(X, Y, Z, kx=1, ky=1)
#spline = LSQBivariateSpline(X, Y, Z, X, Y)
#zi = spline(xi, yi)
#self.sky = Frame(zi)
from scipy.interpolate import griddata
#x_space = np.linspace(0.3*self.frame.xsize, 0.7*self.frame.xsize)
#y_space = np.linspace(0.3*self.frame.ysize, 0.7*self.frame.ysize)
x_space = np.array(range(int(0.3*self.frame.xsize), int(0.7*self.frame.xsize)))
y_space = np.array(range(int(0.3*self.frame.ysize), int(0.7*self.frame.ysize)))
znew = griddata((X, Y), Z, (x_space[None,:], y_space[:,None]), method='cubic')
plt.figure()
levels = np.linspace(min(Z), max(Z), 15)
plt.ylabel('Y', size=15)
plt.xlabel('X', size=15)
cmap = plt.cm.jet_r
cs = plt.contourf(x_space, y_space, znew, levels=levels, cmap=cmap)
cbar = plt.colorbar(cs)
cbar.set_label('Z', rotation=90, fontsize=15) # gas fraction
plt.show()
self.sky = Frame.zeros_like(self.frame)
self.sky[int(0.3*self.frame.ysize):int(0.3*self.frame.ysize)+len(y_space), int(0.3*self.frame.xsize):int(0.3*self.frame.xsize)+len(x_space)] = znew
#self.sky = Frame(znew)
# -----------------------------------------------------------------
def plot_interpolated(self, aperture_centers, aperture_means):
"""
This function ...
:param aperture_centers:
:param aperture_means:
:return:
"""
x_values = np.array([center.x for center in aperture_centers])
y_values = np.array([center.y for center in aperture_centers])
x_ticks = np.arange(0, self.frame.xsize, 1)
y_ticks = np.arange(0, self.frame.ysize, 1)
z_grid = mlab.griddata(x_values, y_values, aperture_means, x_ticks, y_ticks)
self.sky = Frame(z_grid)
from matplotlib.backends import backend_agg as agg
from matplotlib import cm
# plot
#fig = Figure() # create the figure
fig = plt.figure()
agg.FigureCanvasAgg(fig) # attach the rasterizer
ax = fig.add_subplot(1, 1, 1) # make axes to plot on
ax.set_title("Interpolated Contour Plot of Experimental Data")
ax.set_xlabel("X")
ax.set_ylabel("Y")
cmap = cm.get_cmap("hot") # get the "hot" color map
contourset = ax.contourf(x_ticks, y_ticks, z_grid, 10, cmap=cmap)
cbar = fig.colorbar(contourset)
cbar.set_ticks([0, 100])
fig.axes[-1].set_ylabel("Z") # last axes instance is the colorbar
plt.show()
# -----------------------------------------------------------------
def try_to_interpolate_smart(self, aperture_centers, aperture_means):
"""
This function ...
:param aperture_centers:
:param aperture_means:
:return:
"""
model = Pipeline([('poly', PolynomialFeatures(degree=3)), ('linear', LinearRegression(fit_intercept=False))])
# fit to an order-3 polynomial data
x = np.arange(5)
y = 3 - 2 * x + x ** 2 - x ** 3
model = model.fit(x[:, np.newaxis], y)
# -----------------------------------------------------------------
def subtract(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Subtracting the sky from the frame ...")
# Subtract the estimated sky from the image frame
self.frame -= self.sky
# -----------------------------------------------------------------
def set_zero_outside(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the frame zero outside of the principal galaxy ...")
# Create a mask from the principal galaxy region
factor = self.config.zero_outside.factor
mask = Mask.from_shape(self.principal_ellipse * factor, self.frame.xsize, self.frame.ysize).inverse()
# Set the primary frame zero outside the principal ellipse
self.frame[mask] = 0.0
# -----------------------------------------------------------------
def eliminate_negatives(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting pixels with negative value to zero ...")
# Set all negative pixels to zero
self.frame[self.frame <= 0.] = 0.0
# -----------------------------------------------------------------
def write_histogram(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing sky histogram to " + self.config.writing.histogram_path + " ...")
# Create a masked array
masked = np.ma.masked_array(self.image.frames.primary, mask=self.mask)
masked_clipped = np.ma.masked_array(self.image.frames.primary, mask=self.clipped_mask)
# Create a figure
fig = plt.figure()
min = self.mean - 4.0 * self.stddev
max = self.mean + 4.0 * self.stddev
# Plot the histograms
#b: blue, g: green, r: red, c: cyan, m: magenta, y: yellow, k: black, w: white
plt.subplot(211)
plt.hist(masked.compressed(), 200, range=(min,max), alpha=0.5, normed=1, facecolor='g', histtype='stepfilled', label='not clipped')
if self.config.histogram.log_scale: plt.semilogy()
plt.subplot(212)
plt.hist(masked_clipped.compressed(), 200, range=(min,max), alpha=0.5, normed=1, facecolor='g', histtype='stepfilled', label='clipped')
if self.config.histogram.log_scale: plt.semilogy()
# Save the figure
plt.savefig(self.config.writing.histogram_path, bbox_inches='tight', pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
@property
def sky_frame(self):
"""
This function ...
:return:
"""
if isinstance(self.sky, Frame): return self.sky
else: return Frame(np.full(self.frame.shape, self.sky))
# -----------------------------------------------------------------
@property
def noise_frame(self):
"""
This function ...
:return:
"""
if isinstance(self.noise, Frame): return self.noise
else: return Frame(np.full(self.frame.shape, self.noise))
# -----------------------------------------------------------------
@property
def mean(self):
"""
This function ...
:return:
"""
# Return the sigma-clipped mean
return np.ma.mean(np.ma.masked_array(self.frame, mask=self.mask))
# -----------------------------------------------------------------
@property
def median(self):
"""
This function ...
:return:
"""
# Return the sigma-clipped median
return np.median(np.ma.masked_array(self.frame, mask=self.mask).compressed())
# -----------------------------------------------------------------
@property
def stddev(self):
"""
This function ...
:return:
"""
# Return the standard deviation of the sigma-clipped frame
return np.ma.masked_array(self.frame, mask=self.mask).std()
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/sky/skysubtractor.py
|
Python
|
mit
| 37,310
|
[
"Galaxy"
] |
3cbdb3fc286026b61bc7ee725319800023e4dfe17627ef3b85c5c412d77ee68e
|
from collections import defaultdict
import networkx as nx
__all__ = ["check_planarity", "PlanarEmbedding"]
def check_planarity(G, counterexample=False):
    """Check if a graph is planar and return a counterexample or an embedding.

    A graph is planar iff it can be drawn in a plane without
    any edge intersections.

    Parameters
    ----------
    G : NetworkX graph
    counterexample : bool
        A Kuratowski subgraph (to proof non planarity) is only returned if set
        to true.

    Returns
    -------
    (is_planar, certificate) : (bool, NetworkX graph) tuple
        is_planar is true if the graph is planar.
        If the graph is planar `certificate` is a PlanarEmbedding
        otherwise it is a Kuratowski subgraph.

    Notes
    -----
    A (combinatorial) embedding consists of cyclic orderings of the incident
    edges at each vertex; the check and the embedding extraction are based on
    the Left-Right Planarity Test [1].

    A counterexample is only generated on request because its computation is
    more expensive than the planarity test itself.

    References
    ----------
    .. [1] Ulrik Brandes:
        The Left-Right Planarity Test
        2009
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
    """
    embedding = LRPlanarity(G).lr_planarity()
    if embedding is not None:
        # graph is planar; hand back the combinatorial embedding
        return True, embedding
    # graph is not planar; extract a Kuratowski subgraph only if asked to
    return False, get_counterexample(G) if counterexample else None
def check_planarity_recursive(G, counterexample=False):
    """Recursive version of :meth:`check_planarity`."""
    embedding = LRPlanarity(G).lr_planarity_recursive()
    if embedding is not None:
        # graph is planar
        return True, embedding
    # graph is not planar; extract a Kuratowski subgraph only if asked to
    return False, get_counterexample_recursive(G) if counterexample else None
def get_counterexample(G):
    """Obtains a Kuratowski subgraph.

    Raises nx.NetworkXException if G is planar.

    Edges are removed greedily as long as the graph stays non-planar; an edge
    whose removal would make the graph planar is essential and is kept. What
    remains at the end is a Kuratowski subgraph.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    subgraph : NetworkX graph
        A Kuratowski subgraph that proves that G is not planar.
    """
    # work on a copy; the caller's graph is left untouched
    G = nx.Graph(G)
    if check_planarity(G)[0]:
        raise nx.NetworkXException("G is planar - no counter example.")
    subgraph = nx.Graph()
    for u in G:
        for v in list(G[u]):
            G.remove_edge(u, v)
            if check_planarity(G)[0]:
                # removing (u, v) made G planar: the edge is essential
                G.add_edge(u, v)
                subgraph.add_edge(u, v)
    return subgraph
def get_counterexample_recursive(G):
    """Recursive version of :meth:`get_counterexample`.
    """
    # work on a copy; the caller's graph is left untouched
    G = nx.Graph(G)
    if check_planarity_recursive(G)[0]:
        raise nx.NetworkXException("G is planar - no counter example.")
    subgraph = nx.Graph()
    for u in G:
        for v in list(G[u]):
            G.remove_edge(u, v)
            if check_planarity_recursive(G)[0]:
                # removing (u, v) made G planar: the edge is essential
                G.add_edge(u, v)
                subgraph.add_edge(u, v)
    return subgraph
class Interval(object):
    """Represents a set of return edges.

    All return edges in an interval induce a same constraint on the contained
    edges, which means that all edges must either have a left orientation or
    all edges must have a right orientation.
    """
    def __init__(self, low=None, high=None):
        # low/high are the bounding return edges of this interval
        self.low = low
        self.high = high

    def empty(self):
        """Check if the interval is empty"""
        return self.low is None and self.high is None

    def copy(self):
        """Returns a copy of this interval"""
        return Interval(self.low, self.high)

    def conflicting(self, b, planarity_state):
        """Returns True if interval I conflicts with edge b"""
        return (not self.empty() and
                planarity_state.lowpt[self.high] > planarity_state.lowpt[b])


class ConflictPair(object):
    """Represents a different constraint between two intervals.

    The edges in the left interval must have a different orientation than
    the one in the right interval.
    """
    def __init__(self, left=None, right=None):
        # BUGFIX: the previous signature used ``left=Interval(),
        # right=Interval()`` as defaults. Default argument values are
        # evaluated once at function definition, so every ConflictPair
        # constructed without explicit intervals shared the SAME mutable
        # Interval objects (classic mutable-default-argument pitfall).
        # Fresh Interval instances are now created per instance instead.
        self.left = left if left is not None else Interval()
        self.right = right if right is not None else Interval()

    def swap(self):
        """Swap left and right intervals"""
        self.left, self.right = self.right, self.left

    def lowest(self, planarity_state):
        """Returns the lowest lowpoint of a conflict pair"""
        if self.left.empty():
            return planarity_state.lowpt[self.right.low]
        if self.right.empty():
            return planarity_state.lowpt[self.left.low]
        return min(planarity_state.lowpt[self.left.low],
                   planarity_state.lowpt[self.right.low])
def top_of_stack(l):
    """Returns the element on top of the stack (None for an empty stack)."""
    return l[-1] if l else None
class LRPlanarity(object):
    """A class to maintain the state during planarity check."""
    # NOTE: several attributes are deliberately reset to None once a phase
    # of the algorithm no longer needs them, to reduce peak memory usage.
    __slots__ = [
        'G', 'roots', 'height', 'lowpt', 'lowpt2', 'nesting_depth',
        'parent_edge', 'DG', 'adjs', 'ordered_adjs', 'ref', 'side', 'S',
        'stack_bottom', 'lowpt_edge', 'left_ref', 'right_ref', 'embedding'
    ]
    def __init__(self, G):
        """Set up all bookkeeping structures for the test on graph G."""
        # copy G without adding self-loops
        self.G = nx.Graph()
        self.G.add_nodes_from(G.nodes)
        for e in G.edges:
            if e[0] != e[1]:
                self.G.add_edge(e[0], e[1])
        self.roots = []
        # distance from tree root
        self.height = defaultdict(lambda: None)
        self.lowpt = {} # height of lowest return point of an edge
        self.lowpt2 = {} # height of second lowest return point
        self.nesting_depth = {} # for nesting order
        # None -> missing edge
        self.parent_edge = defaultdict(lambda: None)
        # oriented DFS graph
        self.DG = nx.DiGraph()
        self.DG.add_nodes_from(G.nodes)
        self.adjs = {}
        self.ordered_adjs = {}
        self.ref = defaultdict(lambda: None)
        self.side = defaultdict(lambda: 1)
        # stack of conflict pairs
        self.S = []
        self.stack_bottom = {}
        self.lowpt_edge = {}
        self.left_ref = {}
        self.right_ref = {}
        self.embedding = PlanarEmbedding()
    def lr_planarity(self):
        """Execute the LR planarity test.
        Returns
        -------
        embedding : dict
            If the graph is planar an embedding is returned. Otherwise None.
        """
        # quick rejection by Euler's bound: a planar simple graph has at
        # most 3n - 6 edges
        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
            # graph is not planar
            return None
        # make adjacency lists for dfs
        for v in self.G:
            self.adjs[v] = list(self.G[v])
        # orientation of the graph by depth first search traversal
        for v in self.G:
            if self.height[v] is None:
                self.height[v] = 0
                self.roots.append(v)
                self.dfs_orientation(v)
        # Free no longer used variables
        self.G = None
        self.lowpt2 = None
        self.adjs = None
        # testing
        for v in self.DG: # sort the adjacency lists by nesting depth
            # note: this sorting leads to non linear time
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
        for v in self.roots:
            if not self.dfs_testing(v):
                return None
        # Free no longer used variables
        self.height = None
        self.lowpt = None
        self.S = None
        self.stack_bottom = None
        self.lowpt_edge = None
        for e in self.DG.edges:
            self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e]
        self.embedding.add_nodes_from(self.DG.nodes)
        for v in self.DG:
            # sort the adjacency lists again
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
            # initialize the embedding
            previous_node = None
            for w in self.ordered_adjs[v]:
                self.embedding.add_half_edge_cw(v, w, previous_node)
                previous_node = w
        # Free no longer used variables
        self.DG = None
        self.nesting_depth = None
        self.ref = None
        # compute the complete embedding
        for v in self.roots:
            self.dfs_embedding(v)
        # Free no longer used variables
        self.roots = None
        self.parent_edge = None
        self.ordered_adjs = None
        self.left_ref = None
        self.right_ref = None
        self.side = None
        return self.embedding
    def lr_planarity_recursive(self):
        """Recursive version of :meth:`lr_planarity`."""
        if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
            # graph is not planar
            return None
        # orientation of the graph by depth first search traversal
        for v in self.G:
            if self.height[v] is None:
                self.height[v] = 0
                self.roots.append(v)
                self.dfs_orientation_recursive(v)
        # Free no longer used variable
        self.G = None
        # testing
        for v in self.DG: # sort the adjacency lists by nesting depth
            # note: this sorting leads to non linear time
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
        for v in self.roots:
            if not self.dfs_testing_recursive(v):
                return None
        for e in self.DG.edges:
            self.nesting_depth[e] = (self.sign_recursive(e) *
                                     self.nesting_depth[e])
        self.embedding.add_nodes_from(self.DG.nodes)
        for v in self.DG:
            # sort the adjacency lists again
            self.ordered_adjs[v] = sorted(
                self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
            # initialize the embedding
            previous_node = None
            for w in self.ordered_adjs[v]:
                self.embedding.add_half_edge_cw(v, w, previous_node)
                previous_node = w
        # compute the complete embedding
        for v in self.roots:
            self.dfs_embedding_recursive(v)
        return self.embedding
    def dfs_orientation(self, v):
        """Orient the graph by DFS, compute lowpoints and nesting order.
        """
        # Iterative emulation of dfs_orientation_recursive: ``ind`` remembers
        # how far each adjacency list has been processed and ``skip_init``
        # marks edges whose pre-recursion work was already done.
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)
        # boolean to indicate whether to skip the initial work for an edge
        skip_init = defaultdict(lambda: False)
        while dfs_stack:
            v = dfs_stack.pop()
            e = self.parent_edge[v]
            for w in self.adjs[v][ind[v]:]:
                vw = (v, w)
                if not skip_init[vw]:
                    if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
                        ind[v] += 1
                        continue # the edge was already oriented
                    self.DG.add_edge(v, w) # orient the edge
                    self.lowpt[vw] = self.height[v]
                    self.lowpt2[vw] = self.height[v]
                    if self.height[w] is None: # (v, w) is a tree edge
                        self.parent_edge[w] = vw
                        self.height[w] = self.height[v] + 1
                        dfs_stack.append(v) # revisit v after finishing w
                        dfs_stack.append(w) # visit w next
                        skip_init[vw] = True # don't redo this block
                        break # handle next node in dfs_stack (i.e. w)
                    else: # (v, w) is a back edge
                        self.lowpt[vw] = self.height[w]
                # determine nesting graph
                self.nesting_depth[vw] = 2 * self.lowpt[vw]
                if self.lowpt2[vw] < self.height[v]: # chordal
                    self.nesting_depth[vw] += 1
                # update lowpoints of parent edge e
                if e is not None:
                    if self.lowpt[vw] < self.lowpt[e]:
                        self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
                        self.lowpt[e] = self.lowpt[vw]
                    elif self.lowpt[vw] > self.lowpt[e]:
                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
                    else:
                        self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])
                ind[v] += 1
    def dfs_orientation_recursive(self, v):
        """Recursive version of :meth:`dfs_orientation`."""
        e = self.parent_edge[v]
        for w in self.G[v]:
            if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
                continue # the edge was already oriented
            vw = (v, w)
            self.DG.add_edge(v, w) # orient the edge
            self.lowpt[vw] = self.height[v]
            self.lowpt2[vw] = self.height[v]
            if self.height[w] is None: # (v, w) is a tree edge
                self.parent_edge[w] = vw
                self.height[w] = self.height[v] + 1
                self.dfs_orientation_recursive(w)
            else: # (v, w) is a back edge
                self.lowpt[vw] = self.height[w]
            # determine nesting graph
            self.nesting_depth[vw] = 2 * self.lowpt[vw]
            if self.lowpt2[vw] < self.height[v]: # chordal
                self.nesting_depth[vw] += 1
            # update lowpoints of parent edge e
            if e is not None:
                if self.lowpt[vw] < self.lowpt[e]:
                    self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
                    self.lowpt[e] = self.lowpt[vw]
                elif self.lowpt[vw] > self.lowpt[e]:
                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
                else:
                    self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])
    def dfs_testing(self, v):
        """Test for LR partition."""
        # Iterative emulation of dfs_testing_recursive; see dfs_orientation
        # for the role of ``ind`` and ``skip_init``.
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)
        # boolean to indicate whether to skip the initial work for an edge
        skip_init = defaultdict(lambda: False)
        while dfs_stack:
            v = dfs_stack.pop()
            e = self.parent_edge[v]
            # to indicate whether to skip the final block after the for loop
            skip_final = False
            for w in self.ordered_adjs[v][ind[v]:]:
                ei = (v, w)
                if not skip_init[ei]:
                    self.stack_bottom[ei] = top_of_stack(self.S)
                    if ei == self.parent_edge[w]: # tree edge
                        dfs_stack.append(v) # revisit v after finishing w
                        dfs_stack.append(w) # visit w next
                        skip_init[ei] = True # don't redo this block
                        skip_final = True # skip final work after breaking
                        break # handle next node in dfs_stack (i.e. w)
                    else: # back edge
                        self.lowpt_edge[ei] = ei
                        self.S.append(ConflictPair(right=Interval(ei, ei)))
                # integrate new return edges
                if self.lowpt[ei] < self.height[v]:
                    if w == self.ordered_adjs[v][0]: # e_i has return edge
                        self.lowpt_edge[e] = self.lowpt_edge[ei]
                    else: # add constraints of e_i
                        if not self.add_constraints(ei, e):
                            # graph is not planar
                            return False
                ind[v] += 1
            if not skip_final:
                # remove back edges returning to parent
                if e is not None: # v isn't root
                    self.remove_back_edges(e)
        return True
    def dfs_testing_recursive(self, v):
        """Recursive version of :meth:`dfs_testing`."""
        e = self.parent_edge[v]
        for w in self.ordered_adjs[v]:
            ei = (v, w)
            self.stack_bottom[ei] = top_of_stack(self.S)
            if ei == self.parent_edge[w]: # tree edge
                if not self.dfs_testing_recursive(w):
                    return False
            else: # back edge
                self.lowpt_edge[ei] = ei
                self.S.append(ConflictPair(right=Interval(ei, ei)))
            # integrate new return edges
            if self.lowpt[ei] < self.height[v]:
                if w == self.ordered_adjs[v][0]: # e_i has return edge
                    self.lowpt_edge[e] = self.lowpt_edge[ei]
                else: # add constraints of e_i
                    if not self.add_constraints(ei, e):
                        # graph is not planar
                        return False
        # remove back edges returning to parent
        if e is not None: # v isn't root
            self.remove_back_edges(e)
        return True
    def add_constraints(self, ei, e):
        """Integrate the return-edge constraints of edge ``ei`` into its
        parent edge ``e``; returns False iff a conflict proves non-planarity.
        """
        P = ConflictPair()
        # merge return edges of e_i into P.right
        while True:
            Q = self.S.pop()
            if not Q.left.empty():
                Q.swap()
            if not Q.left.empty(): # not planar
                return False
            if self.lowpt[Q.right.low] > self.lowpt[e]:
                # merge intervals
                if P.right.empty(): # topmost interval
                    P.right = Q.right.copy()
                else:
                    self.ref[P.right.low] = Q.right.high
                P.right.low = Q.right.low
            else: # align
                self.ref[Q.right.low] = self.lowpt_edge[e]
            if top_of_stack(self.S) == self.stack_bottom[ei]:
                break
        # merge conflicting return edges of e_1,...,e_i-1 into P.L
        while (top_of_stack(self.S).left.conflicting(ei, self) or
               top_of_stack(self.S).right.conflicting(ei, self)):
            Q = self.S.pop()
            if Q.right.conflicting(ei, self):
                Q.swap()
            if Q.right.conflicting(ei, self): # not planar
                return False
            # merge interval below lowpt(e_i) into P.R
            self.ref[P.right.low] = Q.right.high
            if Q.right.low is not None:
                P.right.low = Q.right.low
            if P.left.empty(): # topmost interval
                P.left = Q.left.copy()
            else:
                self.ref[P.left.low] = Q.left.high
            P.left.low = Q.left.low
        if not (P.left.empty() and P.right.empty()):
            self.S.append(P)
        return True
    def remove_back_edges(self, e):
        """Trim back edges ending at the parent of ``e`` and record the side
        of ``e`` based on its highest remaining return edge.
        """
        u = e[0]
        # trim back edges ending at parent u
        # drop entire conflict pairs
        while self.S and top_of_stack(self.S).lowest(self) == self.height[u]:
            P = self.S.pop()
            if P.left.low is not None:
                self.side[P.left.low] = -1
        if self.S: # one more conflict pair to consider
            P = self.S.pop()
            # trim left interval
            while P.left.high is not None and P.left.high[1] == u:
                P.left.high = self.ref[P.left.high]
            if P.left.high is None and P.left.low is not None:
                # just emptied
                self.ref[P.left.low] = P.right.low
                self.side[P.left.low] = -1
                P.left.low = None
            # trim right interval
            while P.right.high is not None and P.right.high[1] == u:
                P.right.high = self.ref[P.right.high]
            if P.right.high is None and P.right.low is not None:
                # just emptied
                self.ref[P.right.low] = P.left.low
                self.side[P.right.low] = -1
                P.right.low = None
            self.S.append(P)
        # side of e is side of a highest return edge
        if self.lowpt[e] < self.height[u]: # e has return edge
            hl = top_of_stack(self.S).left.high
            hr = top_of_stack(self.S).right.high
            if hl is not None and (
                    hr is None or self.lowpt[hl] > self.lowpt[hr]):
                self.ref[e] = hl
            else:
                self.ref[e] = hr
    def dfs_embedding(self, v):
        """Completes the embedding."""
        # Iterative emulation of dfs_embedding_recursive.
        # the recursion stack
        dfs_stack = [v]
        # index of next edge to handle in adjacency list of each node
        ind = defaultdict(lambda: 0)
        while dfs_stack:
            v = dfs_stack.pop()
            for w in self.ordered_adjs[v][ind[v]:]:
                ind[v] += 1
                ei = (v, w)
                if ei == self.parent_edge[w]: # tree edge
                    self.embedding.add_half_edge_first(w, v)
                    self.left_ref[v] = w
                    self.right_ref[v] = w
                    dfs_stack.append(v) # revisit v after finishing w
                    dfs_stack.append(w) # visit w next
                    break # handle next node in dfs_stack (i.e. w)
                else: # back edge
                    if self.side[ei] == 1:
                        # place v directly after right_ref[w] in w's list
                        self.embedding.add_half_edge_cw(w, v,
                                                        self.right_ref[w])
                    else:
                        # place v directly before left_ref[w] in w's list
                        self.embedding.add_half_edge_ccw(w, v,
                                                         self.left_ref[w])
                        self.left_ref[w] = v
    def dfs_embedding_recursive(self, v):
        """Recursive version of :meth:`dfs_embedding`."""
        for w in self.ordered_adjs[v]:
            ei = (v, w)
            if ei == self.parent_edge[w]: # tree edge
                self.embedding.add_half_edge_first(w, v)
                self.left_ref[v] = w
                self.right_ref[v] = w
                self.dfs_embedding_recursive(w)
            else: # back edge
                if self.side[ei] == 1:
                    # place v directly after right_ref[w] in embed. list of w
                    self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
                else:
                    # place v directly before left_ref[w] in embed. list of w
                    self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
                    self.left_ref[w] = v
    def sign(self, e):
        """Resolve the relative side of an edge to the absolute side."""
        # Iterative emulation of sign_recursive; ``old_ref`` remembers the
        # reference edge of e so the multiplication can happen on revisit.
        # the recursion stack
        dfs_stack = [e]
        # dict to remember reference edges
        old_ref = defaultdict(lambda: None)
        while dfs_stack:
            e = dfs_stack.pop()
            if self.ref[e] is not None:
                dfs_stack.append(e) # revisit e after finishing self.ref[e]
                dfs_stack.append(self.ref[e]) # visit self.ref[e] next
                old_ref[e] = self.ref[e] # remember value of self.ref[e]
                self.ref[e] = None
            else:
                self.side[e] *= self.side[old_ref[e]]
        return self.side[e]
    def sign_recursive(self, e):
        """Recursive version of :meth:`sign`."""
        if self.ref[e] is not None:
            self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
            self.ref[e] = None
        return self.side[e]
class PlanarEmbedding(nx.DiGraph):
    """Represents a planar graph with its planar embedding.
    The planar embedding is given by a `combinatorial embedding
    <https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
    **Neighbor ordering:**
    In comparison to a usual graph structure, the embedding also stores the
    order of all neighbors for every vertex.
    The order of the neighbors can be given in clockwise (cw) direction or
    counterclockwise (ccw) direction. This order is stored as edge attributes
    in the underlying directed graph. For the edge (u, v) the edge attribute
    'cw' is set to the neighbor of u that follows immediately after v in
    clockwise direction.
    In order for a PlanarEmbedding to be valid it must fulfill multiple
    conditions. It is possible to check if these conditions are fulfilled with
    the method :meth:`check_structure`.
    The conditions are:
    * Edges must go in both directions (because the edge attributes differ)
    * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a
      correct planar embedding.
    * A node with non zero degree must have a node attribute 'first_nbr'.
    As long as a PlanarEmbedding is invalid only the following methods should
    be called:
    * :meth:`add_half_edge_ccw`
    * :meth:`add_half_edge_cw`
    * :meth:`connect_components`
    * :meth:`add_half_edge_first`
    Even though the graph is a subclass of nx.DiGraph, it can still be used
    for algorithms that require undirected graphs, because the method
    :meth:`is_directed` is overridden. This is possible, because a valid
    PlanarGraph must have edges in both directions.
    **Half edges:**
    In methods like `add_half_edge_ccw` the term "half-edge" is used, which is
    a term that is used in `doubly connected edge lists
    <https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used
    to emphasize that the edge is only in one direction and there exists
    another half-edge in the opposite direction.
    While conventional edges always have two faces (including outer face) next
    to them, it is possible to assign each half-edge *exactly one* face.
    For a half-edge (u, v) that is orientated such that u is below v then the
    face that belongs to (u, v) is to the right of this half-edge.
    Examples
    --------
    Create an embedding of a star graph (compare `nx.star_graph(3)`):
    >>> G = nx.PlanarEmbedding()
    >>> G.add_half_edge_cw(0, 1, None)
    >>> G.add_half_edge_cw(0, 2, 1)
    >>> G.add_half_edge_cw(0, 3, 2)
    >>> G.add_half_edge_cw(1, 0, None)
    >>> G.add_half_edge_cw(2, 0, None)
    >>> G.add_half_edge_cw(3, 0, None)
    Alternatively the same embedding can also be defined in counterclockwise
    orientation. The following results in exactly the same PlanarEmbedding:
    >>> G = nx.PlanarEmbedding()
    >>> G.add_half_edge_ccw(0, 1, None)
    >>> G.add_half_edge_ccw(0, 3, 1)
    >>> G.add_half_edge_ccw(0, 2, 3)
    >>> G.add_half_edge_ccw(1, 0, None)
    >>> G.add_half_edge_ccw(2, 0, None)
    >>> G.add_half_edge_ccw(3, 0, None)
    After creating a graph, it is possible to validate that the PlanarEmbedding
    object is correct:
    >>> G.check_structure()
    """
    def get_data(self):
        """Converts the adjacency structure into a better readable structure.
        Returns
        -------
        embedding : dict
            A dict mapping all nodes to a list of neighbors sorted in
            clockwise order.
        See Also
        --------
        set_data
        """
        embedding = dict()
        for v in self:
            embedding[v] = list(self.neighbors_cw_order(v))
        return embedding
    def set_data(self, data):
        """Inserts edges according to given sorted neighbor list.
        The input format is the same as the output format of get_data().
        Parameters
        ----------
        data : dict
            A dict mapping all nodes to a list of neighbors sorted in
            clockwise order.
        See Also
        --------
        get_data
        """
        for v in data:
            # inserting at the first position in reverse order reproduces
            # the given clockwise order
            for w in reversed(data[v]):
                self.add_half_edge_first(v, w)
    def neighbors_cw_order(self, v):
        """Generator for the neighbors of v in clockwise order.
        Parameters
        ----------
        v : node
        Yields
        ------
        node
        """
        if len(self[v]) == 0:
            # v has no neighbors
            return
        start_node = self.nodes[v]['first_nbr']
        yield start_node
        # follow the 'cw' pointers until the cycle closes at start_node
        current_node = self[v][start_node]['cw']
        while start_node != current_node:
            yield current_node
            current_node = self[v][current_node]['cw']
    def check_structure(self):
        """Runs without exceptions if this object is valid.
        Checks that the following properties are fulfilled:
        * Edges go in both directions (because the edge attributes differ).
        * Every edge has a 'cw' and 'ccw' attribute which corresponds to a
          correct planar embedding.
        * A node with a degree larger than 0 has a node attribute 'first_nbr'.
        Running this method verifies that the underlying Graph must be planar.
        Raises
        ------
        nx.NetworkXException
            This exception is raised with a short explanation if the
            PlanarEmbedding is invalid.
        """
        # Check fundamental structure
        for v in self:
            try:
                sorted_nbrs = set(self.neighbors_cw_order(v))
            except KeyError:
                msg = "Bad embedding. " \
                      "Missing orientation for a neighbor of {}".format(v)
                raise nx.NetworkXException(msg)
            unsorted_nbrs = set(self[v])
            if sorted_nbrs != unsorted_nbrs:
                msg = "Bad embedding. Edge orientations not set correctly."
                raise nx.NetworkXException(msg)
            for w in self[v]:
                # Check if opposite half-edge exists
                if not self.has_edge(w, v):
                    msg = "Bad embedding. Opposite half-edge is missing."
                    raise nx.NetworkXException(msg)
        # Check planarity
        counted_half_edges = set()
        for component in nx.connected_components(self):
            if len(component) == 1:
                # Don't need to check single node component
                continue
            num_nodes = len(component)
            num_half_edges = 0
            num_faces = 0
            for v in component:
                for w in self.neighbors_cw_order(v):
                    num_half_edges += 1
                    if (v, w) not in counted_half_edges:
                        # We encountered a new face
                        num_faces += 1
                        # Mark all half-edges belonging to this face
                        self.traverse_face(v, w, counted_half_edges)
            num_edges = num_half_edges // 2 # num_half_edges is even
            if num_nodes - num_edges + num_faces != 2:
                # The result does not match Euler's formula
                msg = "Bad embedding. The graph does not match Euler's formula"
                raise nx.NetworkXException(msg)
    def add_half_edge_ccw(self, start_node, end_node, reference_neighbor):
        """Adds a half-edge from start_node to end_node.
        The half-edge is added counter clockwise next to the existing half-edge
        (start_node, reference_neighbor).
        Parameters
        ----------
        start_node : node
            Start node of inserted edge.
        end_node : node
            End node of inserted edge.
        reference_neighbor: node
            End node of reference edge.
        Raises
        ------
        nx.NetworkXException
            If the reference_neighbor does not exist.
        See Also
        --------
        add_half_edge_cw
        connect_components
        add_half_edge_first
        """
        if reference_neighbor is None:
            # The start node has no neighbors
            self.add_edge(start_node, end_node) # Add edge to graph
            # a single neighbor is its own cw and ccw successor
            self[start_node][end_node]['cw'] = end_node
            self[start_node][end_node]['ccw'] = end_node
            self.nodes[start_node]['first_nbr'] = end_node
        else:
            # inserting ccw after the reference is the same as inserting cw
            # after the reference's ccw predecessor
            ccw_reference = self[start_node][reference_neighbor]['ccw']
            self.add_half_edge_cw(start_node, end_node, ccw_reference)
            if reference_neighbor == self.nodes[start_node].get('first_nbr',
                                                                None):
                # Update first neighbor
                self.nodes[start_node]['first_nbr'] = end_node
    def add_half_edge_cw(self, start_node, end_node, reference_neighbor):
        """Adds a half-edge from start_node to end_node.
        The half-edge is added clockwise next to the existing half-edge
        (start_node, reference_neighbor).
        Parameters
        ----------
        start_node : node
            Start node of inserted edge.
        end_node : node
            End node of inserted edge.
        reference_neighbor: node
            End node of reference edge.
        Raises
        ------
        nx.NetworkXException
            If the reference_neighbor does not exist.
        See Also
        --------
        add_half_edge_ccw
        connect_components
        add_half_edge_first
        """
        self.add_edge(start_node, end_node) # Add edge to graph
        if reference_neighbor is None:
            # The start node has no neighbors
            self[start_node][end_node]['cw'] = end_node
            self[start_node][end_node]['ccw'] = end_node
            self.nodes[start_node]['first_nbr'] = end_node
            return
        if reference_neighbor not in self[start_node]:
            raise nx.NetworkXException(
                "Cannot add edge. Reference neighbor does not exist")
        # Get half-edge at the other side
        cw_reference = self[start_node][reference_neighbor]['cw']
        # Alter half-edge data structures
        # (splice end_node between reference_neighbor and cw_reference)
        self[start_node][reference_neighbor]['cw'] = end_node
        self[start_node][end_node]['cw'] = cw_reference
        self[start_node][cw_reference]['ccw'] = end_node
        self[start_node][end_node]['ccw'] = reference_neighbor
    def connect_components(self, v, w):
        """Adds half-edges for (v, w) and (w, v) at some position.
        This method should only be called if v and w are in different
        components, or it might break the embedding.
        This especially means that if `connect_components(v, w)`
        is called it is not allowed to call `connect_components(w, v)`
        afterwards. The neighbor orientations in both directions are
        all set correctly after the first call.
        Parameters
        ----------
        v : node
        w : node
        See Also
        --------
        add_half_edge_ccw
        add_half_edge_cw
        add_half_edge_first
        """
        self.add_half_edge_first(v, w)
        self.add_half_edge_first(w, v)
    def add_half_edge_first(self, start_node, end_node):
        """The added half-edge is inserted at the first position in the order.
        Parameters
        ----------
        start_node : node
        end_node : node
        See Also
        --------
        add_half_edge_ccw
        add_half_edge_cw
        connect_components
        """
        if start_node in self and 'first_nbr' in self.nodes[start_node]:
            reference = self.nodes[start_node]['first_nbr']
        else:
            reference = None
        self.add_half_edge_ccw(start_node, end_node, reference)
    def next_face_half_edge(self, v, w):
        """Returns the following half-edge left of a face.
        Parameters
        ----------
        v : node
        w : node
        Returns
        -------
        half-edge : tuple
        """
        # the face to the right of (v, w) continues with w's ccw neighbor
        # relative to v
        new_node = self[w][v]['ccw']
        return w, new_node
    def traverse_face(self, v, w, mark_half_edges=None):
        """Returns nodes on the face that belong to the half-edge (v, w).
        The face that is traversed lies to the right of the half-edge (in an
        orientation where v is below w).
        Optionally it is possible to pass a set to which all encountered half
        edges are added. Before calling this method, this set must not include
        any half-edges that belong to the face.
        Parameters
        ----------
        v : node
            Start node of half-edge.
        w : node
            End node of half-edge.
        mark_half_edges: set, optional
            Set to which all encountered half-edges are added.
        Returns
        -------
        face : list
            A list of nodes that lie on this face.
        """
        if mark_half_edges is None:
            mark_half_edges = set()
        face_nodes = [v]
        mark_half_edges.add((v, w))
        prev_node = v
        cur_node = w
        # Last half-edge is (incoming_node, v)
        incoming_node = self[v][w]['cw']
        while cur_node != v or prev_node != incoming_node:
            face_nodes.append(cur_node)
            prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node)
            if (prev_node, cur_node) in mark_half_edges:
                # revisiting a half-edge means the pointers are inconsistent
                raise nx.NetworkXException(
                    "Bad planar embedding. Impossible face.")
            mark_half_edges.add((prev_node, cur_node))
        return face_nodes
    def is_directed(self):
        """A valid PlanarEmbedding is undirected.
        All reverse edges are contained, i.e. for every existing
        half-edge (v, w) the half-edge in the opposite direction (w, v) is also
        contained.
        """
        return False
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/planarity.py
|
Python
|
mit
| 38,152
|
[
"VisIt"
] |
f48659ce11ea276adfe718af0e5652672bc4e561e435e67bbb41f50a6c508aff
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import throw, _
from frappe.utils import cstr
from erpnext.accounts.party import validate_party_accounts
from frappe.contacts.address_and_contact import load_address_and_contact, delete_contact_and_address
from frappe.desk.reportview import build_match_conditions, get_filters_cond
class HealthcarePractitioner(Document):
	"""Master record for a medical practitioner.

	Links the practitioner to a unique, enabled ERPNext User (optional) and
	keeps the corresponding user permission in sync so the linked user only
	accesses his own practitioner record.
	"""
	def onload(self):
		# Make linked address/contact records available in the form view
		load_address_and_contact(self)
	def autoname(self):
		# practitioner first_name and last_name
		self.name = " ".join(filter(None,
			[cstr(self.get(f)).strip() for f in ["first_name","middle_name","last_name"]]))
	def validate(self):
		validate_party_accounts(self)
		# charge items must be configured as non-stock service items
		if self.inpatient_visit_charge_item:
			validate_service_item(self.inpatient_visit_charge_item, "Configure a service Item for Inpatient Visit Charge Item")
		if self.op_consulting_charge_item:
			validate_service_item(self.op_consulting_charge_item, "Configure a service Item for Out Patient Consulting Charge Item")
		if self.user_id:
			self.validate_for_enabled_user_id()
			self.validate_duplicate_user_id()
			existing_user_id = frappe.db.get_value("Healthcare Practitioner", self.name, "user_id")
			if self.user_id != existing_user_id:
				# linked user changed: revoke the permission of the old user
				frappe.permissions.remove_user_permission(
					"Healthcare Practitioner", self.name, existing_user_id)
		else:
			# user link cleared: revoke the permission of the previous user
			existing_user_id = frappe.db.get_value("Healthcare Practitioner", self.name, "user_id")
			if existing_user_id:
				frappe.permissions.remove_user_permission(
					"Healthcare Practitioner", self.name, existing_user_id)
	def on_update(self):
		# (re)grant the linked user permission to this practitioner record
		if self.user_id:
			frappe.permissions.add_user_permission("Healthcare Practitioner", self.name, self.user_id)
	def validate_for_enabled_user_id(self):
		# the linked User must exist and be enabled
		enabled = frappe.db.get_value("User", self.user_id, "enabled")
		if enabled is None:
			frappe.throw(_("User {0} does not exist").format(self.user_id))
		if enabled == 0:
			frappe.throw(_("User {0} is disabled").format(self.user_id))
	def validate_duplicate_user_id(self):
		# a User may be linked to at most one Healthcare Practitioner
		practitioner = frappe.db.sql_list("""select name from `tabHealthcare Practitioner` where
			user_id=%s and name!=%s""", (self.user_id, self.name))
		if practitioner:
			throw(_("User {0} is already assigned to Healthcare Practitioner {1}").format(
				self.user_id, practitioner[0]), frappe.DuplicateEntryError)
	def on_trash(self):
		# clean up linked address/contact records along with this document
		delete_contact_and_address('Healthcare Practitioner', self.name)
def validate_service_item(item, msg):
	"""Throw *msg* if *item* is a stock item; healthcare charge items must
	be non-stock (service) items."""
	is_stock = frappe.db.get_value("Item", item, "is_stock_item")
	if is_stock == 1:
		frappe.throw(_(msg))
def get_practitioner_list(doctype, txt, searchfield, start, page_len, filters=None):
	"""Link-field search query for Healthcare Practitioner.

	Returns active, non-cancelled practitioners whose *searchfield* or
	first_name matches *txt*, restricted by user-permission match
	conditions and any extra *filters*, with exact-prefix matches
	ordered first. Paginated via *start*/*page_len*.
	"""
	fields = ["name", "first_name", "mobile_phone"]
	match_conditions = build_match_conditions("Healthcare Practitioner")
	match_conditions = "and {}".format(match_conditions) if match_conditions else ""

	if filters:
		filter_conditions = get_filters_cond(doctype, filters, [])
		match_conditions += "{}".format(filter_conditions)

	# Escape the search text once; the same LIKE pattern is bound four
	# times below (original code re-escaped it for every placeholder).
	search_pattern = "%%%s%%" % frappe.db.escape(txt)

	# NOTE(review): *searchfield* is interpolated into the SQL string
	# (escaped) rather than parameterized — legacy frappe pattern; the
	# value comes from the framework, not the end user, but worth auditing.
	return frappe.db.sql("""select %s from `tabHealthcare Practitioner` where docstatus < 2
and (%s like %s or first_name like %s)
and active = 1
{match_conditions}
order by
case when name like %s then 0 else 1 end,
case when first_name like %s then 0 else 1 end,
name, first_name limit %s, %s""".format(
		match_conditions=match_conditions) %
		(
			", ".join(fields),
			frappe.db.escape(searchfield),
			"%s", "%s", "%s", "%s", "%s", "%s"
		),
		(
			search_pattern,
			search_pattern,
			search_pattern,
			search_pattern,
			start,
			page_len
		)
	)
|
ovresko/erpnext
|
erpnext/healthcare/doctype/healthcare_practitioner/healthcare_practitioner.py
|
Python
|
gpl-3.0
| 3,781
|
[
"VisIt"
] |
c34b03d81696b001c293f316365718b0449fb0386d315452eb03c76fa0633cb7
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import six
import testtools
from neutron.agent.common import config as a_cfg
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
# Canned CIDR prefixes and addresses assigned to the fake test port.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
# Security-group ids used by the rule fixtures in these tests.
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
# Short module-local aliases for the ethertype constants.
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
# Canned `iptables-save` raw-table output used to stub
# IptablesManager.get_rules_for_table() in setUp(). Conntrack zones
# 1, 2 and 9 are already in use here, so tests expect newly assigned
# CT zones to start at 10 (see _test_remove_conntrack_entries).
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
    """Shared fixture: an IptablesFirewallDriver with mocked-out
    iptables manager and command execution, so no real iptables or
    root helper is ever invoked."""

    def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        security_config.register_securitygroups_opts()
        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        # Stub out process execution; asserted on in conntrack tests.
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        iptables_cls = self.iptables_cls_p.start()
        self.iptables_inst = mock.Mock()
        self.v4filter_inst = mock.Mock()
        self.v6filter_inst = mock.Mock()
        # Both 'filter' and 'raw' tables share one mock per IP version,
        # so a single mock records all chain/rule calls for that version.
        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
                                   'raw': self.v4filter_inst
                                   }
        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
                                   'raw': self.v6filter_inst
                                   }
        iptables_cls.return_value = self.iptables_inst
        # Feed the canned raw-table dump to the driver (CT zone bookkeeping).
        self.iptables_inst.get_rules_for_table.return_value = (
            RAW_TABLE_OUTPUT.splitlines())
        self.firewall = iptables_firewall.IptablesFirewallDriver()
        self.firewall.iptables = self.iptables_inst
        # don't mess with sysctl knobs in unit tests
        self.firewall._enabled_netfilter_for_bridges = True
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
    """Return a minimal port dict (device, MAC, network, dual-stack IPs)
    for exercising the firewall driver."""
    port = {}
    port['device'] = 'tapfake_dev'
    port['mac_address'] = 'ff:ff:ff:ff:ff:ff'
    port['network_id'] = 'fake_net'
    port['fixed_ips'] = [FAKE_IP['IPv4'], FAKE_IP['IPv6']]
    return port
def test_prepare_port_filter_with_no_sg(self):
    """A port with no security-group rules still gets the default
    wiring: fallback DROP chain, per-device ingress/egress chains
    jumped from FORWARD/INPUT via sg-chain, conntrack state rules,
    DHCP handling and the MAC/IP pair chain (sfake_dev)."""
    port = self._fake_port()
    self.firewall.prepare_port_filter(port)
    # Expected IPv4 filter-table calls, in exact order.
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.remove_chain('sg-chain'),
             mock.call.add_chain('sg-chain'),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ifake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-j $sg-fallback', comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             # Only traffic sourced from the port's own IP/MAC pair may pass.
             mock.call.add_rule(
                 'sfake_dev',
                 '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                 '-j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             # DHCP discovery (0.0.0.0 -> broadcast) is allowed out...
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 comment=None),
             # ...but the port may not act as a DHCP server.
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-j $sg-fallback',
                 comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
    """An unqualified IPv4 ingress rule becomes a plain RETURN jump."""
    sg_rule = {'direction': 'ingress', 'ethertype': 'IPv4'}
    expected_ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
                                          comment=None)
    self._test_prepare_port_filter(sg_rule, expected_ingress, None)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
    """An unqualified IPv4 egress rule becomes a plain RETURN jump."""
    sg_rule = {'direction': 'egress', 'ethertype': 'IPv4'}
    expected_egress = mock.call.add_rule('ofake_dev', '-j RETURN',
                                         comment=None)
    self._test_prepare_port_filter(sg_rule, None, expected_egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
    """Regression helper: port_range_min == 0 must be honoured as a real
    port bound (0:100), not treated as falsy/unset."""
    rule = {'ethertype': ethertype,
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': 0,
            'port_range_max': 100}
    ingress = mock.call.add_rule(
        'ifake_dev',
        '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
        comment=None)
    egress = None
    self._test_prepare_port_filter(rule, ingress, egress)

def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
    self._test_filter_ingress_tcp_min_port_0('IPv4')

def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
    self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_prepare_port_filter(self,
rule,
ingress_expected_call=None,
egress_expected_call=None):
port = self._fake_port()
ethertype = rule['ethertype']
prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
filter_inst = self.v4filter_inst
dhcp_rule = [mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None)]
if ethertype == 'IPv6':
filter_inst = self.v6filter_inst
dhcp_rule = [mock.call.add_rule('ofake_dev',
'-s ::/128 -d ff02::/16 '
'-p ipv6-icmp -m icmp6 '
'--icmpv6-type %s -j RETURN' %
icmp6_type,
comment=None) for icmp6_type
in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
sg = [rule]
port['security_group_rules'] = sg
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback',
'-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG)
]
if ethertype == 'IPv6':
for icmp6_type in firewall.ICMPV6_ALLOWED_TYPES:
calls.append(
mock.call.add_rule('ifake_dev',
'-p ipv6-icmp -m icmp6 --icmpv6-type '
'%s -j RETURN' %
icmp6_type, comment=None))
calls += [
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None
)
]
if ingress_expected_call:
calls.append(ingress_expected_call)
calls += [mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
% prefix,
comment=ic.PAIR_ALLOW)]
if ethertype == 'IPv6':
calls.append(mock.call.add_rule('sfake_dev',
'-s fe80::fdff:ffff:feff:ffff/128 -m mac '
'--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
comment=ic.PAIR_ALLOW))
calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
comment=ic.PAIR_DROP))
calls += dhcp_rule
calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None))
if ethertype == 'IPv4':
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None))
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None))
if ethertype == 'IPv6':
calls.append(mock.call.add_rule('ofake_dev',
'-p ipv6-icmp -m icmp6 '
'--icmpv6-type %s -j DROP' %
constants.ICMPV6_TYPE_RA,
comment=None))
calls.append(mock.call.add_rule('ofake_dev',
'-p ipv6-icmp -j RETURN',
comment=None))
calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
'--sport 546 -m udp --dport 547 '
'-j RETURN', comment=None))
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 547 -m udp --dport 546 -j DROP',
comment=None))
calls += [
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
]
if egress_expected_call:
calls.append(egress_expected_call)
calls += [mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
comb = zip(calls, filter_inst.mock_calls)
for (l, r) in comb:
self.assertEqual(l, r)
filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol,
                                       direction):
        """Deleting a SG rule must flush the matching conntrack entries.

        Installs a single rule for (ethertype, protocol, direction),
        removes it inside a defer-apply window, then checks that the
        firewall executed ``conntrack -D`` with the expected filters.
        """
        port = self._fake_port()
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        self.firewall.filter_defer_apply_on()
        # Dropping the rule between defer on/off is what should trigger
        # the conntrack cleanup when the deferral is released.
        self.firewall.sg_rules['fake_sg_id'] = []
        self.firewall.filter_defer_apply_off()
        # Build the conntrack delete command we expect to be executed.
        cmd = ['conntrack', '-D']
        if protocol:
            cmd.extend(['-p', protocol])
        if ethertype == 'IPv4':
            cmd.extend(['-f', 'ipv4'])
            if direction == 'ingress':
                cmd.extend(['-d', '10.0.0.1'])
            else:
                cmd.extend(['-s', '10.0.0.1'])
        else:
            cmd.extend(['-f', 'ipv6'])
            if direction == 'ingress':
                cmd.extend(['-d', 'fe80::1'])
            else:
                cmd.extend(['-s', 'fe80::1'])
        # initial data has 1, 2, and 9 in use, CT zone will start at 10.
        cmd.extend(['-w', 10])
        calls = [
            mock.call(cmd, run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction)
    def test_remove_conntrack_entries_for_port_sec_group_change(self):
        """Changing a port's security groups flushes all its conntrack state.

        Both source and destination entries are deleted, for IPv4 and
        IPv6, in the port's conntrack zone.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        self.firewall.filter_defer_apply_on()
        # Re-register the same device with a different security group.
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        self.firewall.filtered_ports[port['device']] = new_port
        self.firewall.filter_defer_apply_off()
        calls = [
            # initial data has 1, 2, and 9 in use, CT zone will start at 10.
            mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv4', '-s', '10.0.0.1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv6', '-s', 'fe80::1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self):
for direction in ['ingress', 'egress']:
for protocol in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv4', protocol, direction)
def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self):
for direction in ['ingress', 'egress']:
for protocol in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries_sg_member_changed(
'IPv6', protocol, direction)
    def _test_remove_conntrack_entries_sg_member_changed(self, ethertype,
                                                         protocol, direction):
        """A remote-group member leaving flushes per-pair conntrack entries.

        10.0.0.2 / fe80::2 disappears from the remote group between the
        pre- and post-defer snapshots; the firewall must delete conntrack
        entries pairing the port's own address with the departed member.

        NOTE(review): *protocol* is accepted for signature parity with
        the other conntrack helpers but is not used in this scenario.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.sg_rules.setdefault('fake_sg_id', [])
        self.firewall.sg_rules['fake_sg_id'].append(
            {'direction': direction, 'remote_group_id': 'fake_sg_id2',
             'ethertype': ethertype})
        self.firewall.filter_defer_apply_on()
        self.firewall.devices_with_updated_sg_members['fake_sg_id2'] = [port]
        if ethertype == "IPv4":
            self.firewall.pre_sg_members = {'fake_sg_id2': {
                'IPv4': ['10.0.0.2', '10.0.0.3']}}
            self.firewall.sg_members = {'fake_sg_id2': {
                'IPv4': ['10.0.0.3']}}
            # Rebound to the lower-case family string used by conntrack.
            ethertype = "ipv4"
        else:
            self.firewall.pre_sg_members = {'fake_sg_id2': {
                'IPv6': ['fe80::2', 'fe80::3']}}
            self.firewall.sg_members = {'fake_sg_id2': {
                'IPv6': ['fe80::3']}}
            ethertype = "ipv6"
        self.firewall.filter_defer_apply_off()
        # Rebind direction to the conntrack flag for the port's own IP;
        # the removed remote member goes on the opposite flag.
        direction = '-d' if direction == 'ingress' else '-s'
        remote_ip_direction = '-s' if direction == '-d' else '-d'
        ips = {"ipv4": ['10.0.0.1', '10.0.0.2'],
               "ipv6": ['fe80::1', 'fe80::2']}
        calls = [
            # initial data has 1, 2, and 9 in use, CT zone will start
            # at 10.
            mock.call(['conntrack', '-D', '-f', ethertype, direction,
                       ips[ethertype][0], '-w', 10,
                       remote_ip_direction, ips[ethertype][1]],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}] * 2
self.firewall.prepare_port_filter(port)
rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
self.assertEqual(len(set(rules)), len(rules))
    def test_update_delete_port_filter(self):
        """Exercise prepare -> update -> remove of a port's filters.

        The port starts with one ingress rule, is updated to a single
        egress rule, and is finally removed; updates/removals for an
        unknown device must be silent no-ops.  The expected mock call
        list below covers the whole sequence of iptables chain
        manipulations, in order.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        # Unknown devices are ignored by both update and remove.
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # update_port_filter: per-port chains are rebuilt.
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # remove_port_filter: everything is torn down.
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def _mock_chain_applies(self):
class CopyingMock(mock.MagicMock):
"""Copies arguments so mutable arguments can be asserted on.
Copied verbatim from unittest.mock documentation.
"""
def __call__(self, *args, **kwargs):
args = copy.deepcopy(args)
kwargs = copy.deepcopy(kwargs)
return super(CopyingMock, self).__call__(*args, **kwargs)
# Need to use CopyingMock because _{setup,remove}_chains_apply are
# usually called with that's modified between calls (i.e.,
# self.firewall.filtered_ports).
chain_applies = CopyingMock()
self.firewall._setup_chains_apply = chain_applies.setup
self.firewall._remove_chains_apply = chain_applies.remove
return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
port_update = {'device': 'd1', 'mac_address': 'update'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
    def test_ip_spoofing_filter_with_multiple_ips(self):
        """Anti-spoofing chain allows each of the port's fixed IPv4 addrs.

        The sfake_dev chain must contain one RETURN per IPv4 address
        (10.0.0.1 and 10.0.0.2) before the final DROP.
        """
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
    def test_ip_spoofing_no_fixed_ips(self):
        """Without fixed IPs, anti-spoofing matches on the MAC alone."""
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': []}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ifake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    """Tests for the iptables firewall driver with ipset support enabled."""
    def setUp(self):
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        self.firewall.ipset = mock.Mock()
        # Use the real name-derivation logic so set names look realistic.
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_name_exists.return_value = True
    def _fake_port(self, sg_id=FAKE_SGID):
        """Return a minimal port dict belonging to *sg_id*."""
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'],
                              FAKE_IP['IPv6']],
                'security_groups': [sg_id],
                'security_group_source_groups': [sg_id]}
    def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
        """Return a single ingress rule referencing *remote_group*."""
        return {'direction': 'ingress', 'remote_group_id': remote_group,
                'ethertype': ethertype}
    def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
        """Build an sg_rules mapping with one rule per (version, remote sg)."""
        remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
                                          _IPv6: [FAKE_SGID]}
        rules = []
        for ip_version, remote_group_list in six.iteritems(remote_groups):
            for remote_group in remote_group_list:
                rules.append(self._fake_sg_rule_for_ethertype(ip_version,
                                                              remote_group))
        return {sg_id: rules}
    def _fake_sg_members(self, sg_ids=None):
        """Map each sg id to a fresh copy of the fake member addresses."""
        return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
    def test_update_security_group_members(self):
        """Member updates are forwarded to ipset per address family."""
        sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.2']),
            mock.call.set_members('fake_sgid', 'IPv6',
                                  ['fe80::1'])
        ]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
    def _setup_fake_firewall_members_and_rules(self, firewall):
        """Install matching pre/post rules and members on *firewall*."""
        firewall.sg_rules = self._fake_sg_rules()
        firewall.pre_sg_rules = self._fake_sg_rules()
        firewall.sg_members = self._fake_sg_members()
        firewall.pre_sg_members = firewall.sg_members
    def _prepare_rules_and_members_for_removal(self):
        """Make OTHER_SGID appear only in the pre-defer member snapshot."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.pre_sg_members[OTHER_SGID] = (
            self.firewall.pre_sg_members[FAKE_SGID])
    def test_determine_remote_sgs_to_remove(self):
        """Remote SGs no longer referenced are flagged for both families."""
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))
    def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
        """A remote SG can be removable for IPv6 while still used for IPv4."""
        self._prepare_rules_and_members_for_removal()
        ports = [self._fake_port()]
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
                           _IPv6: [FAKE_SGID]})
        self.assertEqual(
            {_IPv4: set(), _IPv6: set([OTHER_SGID])},
            self.firewall._determine_remote_sgs_to_remove(ports))
    def test_get_remote_sg_ids_by_ipversion(self):
        """Remote SG ids are grouped by the rule's address family."""
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
        ports = [self._fake_port()]
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
    def test_get_remote_sg_ids(self):
        """Duplicate remote SG references collapse into sets."""
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
                           _IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
        port = self._fake_port()
        self.assertEqual(
            {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
            self.firewall._get_remote_sg_ids(port))
    def test_determine_sg_rules_to_remove(self):
        """Pre-defer-only SGs are scheduled for rule removal."""
        self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
        ports = [self._fake_port()]
        self.assertEqual(set([OTHER_SGID]),
                         self.firewall._determine_sg_rules_to_remove(ports))
    def test_get_sg_ids_set_for_ports(self):
        """All SG ids referenced by the given ports are collected."""
        sg_ids = set([FAKE_SGID, OTHER_SGID])
        ports = [self._fake_port(sg_id) for sg_id in sg_ids]
        self.assertEqual(sg_ids,
                         self.firewall._get_sg_ids_set_for_ports(ports))
    def test_remove_sg_members(self):
        """An SG is dropped only once removed for every address family."""
        self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
                                                          OTHER_SGID])
        remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
                                _IPv6: set([FAKE_SGID, OTHER_SGID])}
        self.firewall._remove_sg_members(remote_sgs_to_remove)
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
    def test_remove_unused_security_group_info_clears_unused_rules(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.prepare_port_filter(self._fake_port())
        # create another SG which won't be referenced by any filtered port
        fake_sg_rules = self.firewall.sg_rules['fake_sgid']
        self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
        self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
        # call the cleanup function, and check the unused sg_rules are out
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
    def test_remove_unused_security_group_info(self):
        """Members of SGs referenced by no rule are purged."""
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_members)
    def test_not_remove_used_security_group_info(self):
        """Members of SGs still referenced by a rule survive cleanup."""
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._remove_unused_security_group_info()
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
    def test_remove_all_unused_info(self):
        """With no filtered ports, all rules and members are cleared."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.filtered_ports = {}
        self.firewall._remove_unused_security_group_info()
        self.assertFalse(self.firewall.sg_members)
        self.assertFalse(self.firewall.sg_rules)
    def test_single_fallback_accept_rule(self):
        """sg-chain gets exactly one ACCEPT per family, however many ports."""
        p1, p2 = self._fake_port(), self._fake_port()
        self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {})
        v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
        v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
        sg_chain_v4_accept = [call for call in v4_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        sg_chain_v6_accept = [call for call in v6_adds
                              if call == mock.call('sg-chain', '-j ACCEPT')]
        self.assertEqual(1, len(sg_chain_v4_accept))
        self.assertEqual(1, len(sg_chain_v6_accept))
    def test_remove_port_filter_with_destroy_ipset_chain(self):
        """Removing the last port of an SG destroys its ipsets."""
        self.firewall.sg_rules = self._fake_sg_rules()
        port = self._fake_port()
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}
        self.firewall.update_security_group_members('fake_sgid', sg_members)
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'],
            'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_name_exists('NIPv4fake_sgid'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_name_exists('NIPv6fake_sgid'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        """Dropping the last IPv4 rule destroys only the IPv4 ipset."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        self.firewall.ipset.assert_has_calls(calls, True)
    def test_sg_rule_expansion_with_remote_ips(self):
        """A remote-group rule expands once per member, minus the port's IP."""
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}
        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        self.assertEqual(list(rules),
                         [dict(list(rule.items()) +
                               [('source_ip_prefix', '%s/32' % ip)])
                          for ip in other_ips])
    def test_build_ipv4v6_mac_ip_list(self):
        """MACs are normalised and the derived IPv6 LLA is added only once."""
        mac_oth = 'ffff-ff0f-ffff'
        mac_unix = 'FF:FF:FF:0F:FF:FF'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        fake_ipv4_pair = []
        fake_ipv4_pair.append((mac_unix, ipv4))
        fake_ipv6_pair = []
        fake_ipv6_pair.append((mac_unix, ipv6))
        fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff'))
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
        # ensure that LLA is not added again for another v6 addr
        ipv62 = 'fe81::1'
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv62,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        fake_ipv6_pair.append((mac_unix, ipv62))
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    """Tests for conntrack-zone allocation in the OVS hybrid driver."""
    def setUp(self):
        super(OVSHybridIptablesFirewallTestCase, self).setUp()
        self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
        self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
                              '95c24827-02': 2, 'e804433b-61': 1}
    def test__populate_initial_zone_map(self):
        """Driver parses the pre-existing CT zones on construction."""
        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
    def test__generate_device_zone(self):
        """Zones fill upward, reuse gaps, and recover after exhaustion."""
        # initial data has 1, 2, and 9 in use.
        # we fill from top up first.
        self.assertEqual(10, self.firewall._generate_device_zone('test'))
        # once it's maxed out, it scans for gaps
        self.firewall._device_zone_map['someport'] = (
            iptables_firewall.MAX_CONNTRACK_ZONES)
        for i in range(3, 9):
            self.assertEqual(i, self.firewall._generate_device_zone(i))
        # 9 and 10 are taken so next should be 11
        self.assertEqual(11, self.firewall._generate_device_zone('p11'))
        # take out zone 1 and make sure it's selected
        self.firewall._device_zone_map.pop('e804433b-61')
        self.assertEqual(1, self.firewall._generate_device_zone('p1'))
        # fill it up and then make sure an extra throws an error
        for i in range(1, 65536):
            self.firewall._device_zone_map['dev-%s' % i] = i
        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
            self.firewall._find_open_zone()
        # with it full, try again, this should trigger a cleanup and return 1
        self.assertEqual(1, self.firewall._generate_device_zone('p12'))
        self.assertEqual({'p12': 1}, self.firewall._device_zone_map)
    def test_get_device_zone(self):
        """Device ids are truncated to 11 chars before zone assignment."""
        # initial data has 1, 2, and 9 in use.
        self.assertEqual(10,
                         self.firewall.get_device_zone('12345678901234567'))
        # should have been truncated to 11 chars
        self._dev_zone_map.update({'12345678901': 10})
        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
|
sebrandon1/neutron
|
neutron/tests/unit/agent/linux/test_iptables_firewall.py
|
Python
|
apache-2.0
| 86,686
|
[
"FEFF"
] |
40202d53f024e4de5c8950fbee0bb11efcfa7117f87482da4bbf1fee4d40ed7a
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Calculate radius of gyration of neurites.'''
import neurom as nm
from neurom import morphmath as mm
from neurom.core.dataformat import COLS
import numpy as np
def segment_centre_of_mass(seg):
    '''Calculate and return the centre of mass of a segment.

    The segment is modelled as a conical frustum with end radii r0 and
    r1; its centre of mass lies on the axis at the dimensionless
    fraction

        (r0^2 + 2*r0*r1 + 3*r1^2) / (4 * (r0^2 + r0*r1 + r1^2))

    of the way from the first point towards the second (0.5 for a
    cylinder, i.e. r0 == r1).
    '''
    r0 = seg[0][COLS.R]
    r1 = seg[1][COLS.R]
    num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1
    denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1)
    # num / denom is already the *fraction* of the axial length at which
    # the centre of mass sits; the previous code divided it by the
    # segment length once more, which mis-scaled the offset for any
    # segment whose length differs from 1.
    axial_fraction = num / denom
    return seg[0][COLS.XYZ] + axial_fraction * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
def neurite_centre_of_mass(neurite):
    '''Calculate and return the centre of mass of a neurite.

    The result is the volume-weighted average of the centres of mass of
    the neurite's segments.
    '''
    segments = list(nm.iter_segments(neurite))
    # List comprehensions are required here: under Python 3 ``map``
    # returns an iterator and ``np.array(<iterator>)`` would produce a
    # useless 0-d object array instead of a numeric array.
    seg_vol = np.array([mm.segment_volume(s) for s in segments])
    seg_centre_of_mass = np.array([segment_centre_of_mass(s)
                                   for s in segments])
    # multiply array of scalars with array of arrays
    # http://stackoverflow.com/questions/5795700/multiply-numpy-array-of-scalars-by-array-of-vectors
    weighted = seg_centre_of_mass * seg_vol[:, np.newaxis]
    return np.sum(weighted, axis=0) / np.sum(seg_vol)
def distance_sqr(point, seg):
    '''Calculate and return square Euclidian distance from given point to
    centre of mass of given segment.'''
    centre = segment_centre_of_mass(seg)
    delta = np.subtract(point, centre)
    return sum(delta ** 2)
def radius_of_gyration(neurite):
    '''Calculate and return radius of gyration of a given neurite.'''
    # Root-mean-square distance of the segment centres of mass from the
    # neurite's overall centre of mass.
    centre = neurite_centre_of_mass(neurite)
    sqr_distances = [distance_sqr(centre, seg)
                     for seg in nm.iter_segments(neurite)]
    return np.sqrt(np.sum(sqr_distances) / len(sqr_distances))
def mean_rad_of_gyration(neurites):
    '''Calculate mean radius of gyration for set of neurites.'''
    radii = [radius_of_gyration(neurite) for neurite in neurites]
    return np.mean(radii)
if __name__ == '__main__':
    # load a neuron from an SWC file
    filename = 'test_data/swc/Neuron.swc'
    nrn = nm.load_neuron(filename)
    # for every neurite, print (number of segments, radius of gyration, neurite type)
    # (a section with k points contains k-1 segments, hence len(s.points) - 1)
    print([(sum(len(s.points) - 1 for s in nrte.iter_sections()),
            radius_of_gyration(nrte), nrte.type) for nrte in nrn.neurites])
    # print mean radius of gyration per neurite type
    print('Mean radius of gyration for axons: ',
          mean_rad_of_gyration(n for n in nrn.neurites if n.type == nm.AXON))
    print('Mean radius of gyration for basal dendrites: ',
          mean_rad_of_gyration(n for n in nrn.neurites if n.type == nm.BASAL_DENDRITE))
    print('Mean radius of gyration for apical dendrites: ',
          mean_rad_of_gyration(n for n in nrn.neurites
                               if n.type == nm.APICAL_DENDRITE))
|
mgeplf/NeuroM
|
examples/radius_of_gyration.py
|
Python
|
bsd-3-clause
| 4,691
|
[
"NEURON"
] |
46f277521bf187a3ad3e73493edc95d7e3112c75677e507e3c57e4191f56dbc8
|
from ase.atoms import Atoms
def write_ascii(fileobj, images):
    """Write one or more Atoms objects to *fileobj* (path or open handle)."""
    # NOTE(review): when a path string is passed, the handle opened here is
    # never explicitly closed; callers must rely on GC or wrap in `with`.
    if isinstance(fileobj, str):
        fileobj = open(fileobj, 'w')
    if not isinstance(images, (list, tuple)):
        images = [images]
        fileobj.write('atoms = ')
    else:
        fileobj.write('images = [')
    # NOTE(review): the 'atoms = ' / 'images = [' headers look like remnants
    # of a python-source writer; they prepend stray text to the xyz-style
    # records below, and the '[' opened above is never closed.  Confirm
    # against the intended output format.
    symbols = images[0].get_chemical_symbols()
    natoms = len(symbols)
    for atoms in images:
        # One record per image: atom count, blank line, then one
        # "symbol x y z" line per atom.
        fileobj.write('%d\n\n' % natoms)
        for s, (x, y, z) in zip(symbols, atoms.get_positions()):
            fileobj.write('%-2s %22.15f %22.15f %22.15f\n' % (s, x, y, z))
|
grhawk/ASE
|
tools/ase/io/ascii.py
|
Python
|
gpl-2.0
| 567
|
[
"ASE"
] |
bd6627fc0121907f350fc6661ee34536c28133bec70c07b1247f5787dcfa681f
|
# coding: utf-8
from __future__ import division, unicode_literals
import unittest
from pymatgen.analysis.hhi.hhi import HHIModel
class HHIModelTest(unittest.TestCase):
    """Checks for the Herfindahl-Hirschman index model."""

    def test_hhi(self):
        model = HHIModel()
        # Single element: production/reserve pair and the individual getters.
        self.assertEqual(model.get_hhi("He"), (3200, 3900))
        self.assertEqual(model.get_hhi_production("He"), 3200)
        self.assertEqual(model.get_hhi_reserve("He"), 3900)
        # Compound values are composition-weighted.
        self.assertAlmostEqual(model.get_hhi_production("Li2O"), 1614.96, 1)
        self.assertAlmostEqual(model.get_hhi_reserve("Li2O"), 2218.90, 1)
        # Designation thresholds: low / medium / high, and None passthrough.
        expected = ((1400, "low"), (1800, "medium"), (3000, "high"),
                    (None, None))
        for value, designation in expected:
            self.assertEqual(model.get_hhi_designation(value), designation)
if __name__ == "__main__":
    unittest.main()
|
rousseab/pymatgen
|
pymatgen/analysis/hhi/tests/test_hhi.py
|
Python
|
mit
| 849
|
[
"pymatgen"
] |
1cbcd0a8ec3ed5136450f2c43e5f9cc6d524352fd0a8bef4a90507bb5150e610
|
import numpy as np
from gpaw.response.bse import BSE
# Frequency grid (eV) on which the dielectric function is evaluated.
w_grid = np.linspace(0, 15, 1001)
# It stores the four-points kernel used for building the two-particles
# Hamiltonian in LiF_W_qGG.
bse = BSE('LiF_fulldiag.gpw',
          w=w_grid,
          q=np.array([0.0001, 0., 0.]),  # small but finite q: optical limit
          optical_limit=True,
          ecut=30,
          nbands=60,
          eta=0.1,  # broadening (eV)
          kernel_file='LiF_W_qGG',
          txt='LiF_BSE_out.txt')
# Calculate the dielectric function calculated at the BSE level:
df_BSE = bse.get_dielectric_function()
|
robwarm/gpaw-symm
|
doc/exercises/bse/LiF_BSE.py
|
Python
|
gpl-3.0
| 540
|
[
"GPAW"
] |
a48d04332a52141dac6c6a69ab5aa1899fd31675604d82d7dc50dcd1e6479456
|
from textwrap import dedent
import _pytest._code
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
# Module-scoped tmpdir tree with nested conftest.py files.  The "inpackage"
# variant adds __init__.py files so adir/adir.b form a package.
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request, tmpdir_factory):
    from _pytest.tmpdir import tmpdir
    tmpdir = tmpdir(request, tmpdir_factory)
    tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
    if request.param == "inpackage":
        tmpdir.ensure("adir/__init__.py")
        tmpdir.ensure("adir/b/__init__.py")
    return tmpdir
def ConftestWithSetinitial(path):
    # Build a plugin manager whose initial conftests come from *path*.
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [path])
    return conftest
def conftest_setinitial(conftest, args, confcutdir=None):
    # Minimal stand-in for the parsed CLI namespace expected by
    # _set_initial_conftests.
    class Namespace:
        def __init__(self):
            self.file_or_dir = args
            # NOTE(review): str(None) yields the literal string "None" when
            # no confcutdir is given — confirm _set_initial_conftests
            # tolerates that.
            self.confcutdir = str(confcutdir)
            self.noconftest = False
    conftest._set_initial_conftests(Namespace())
class TestConftestValueAccessGlobal:
    """Value lookup (_rget_with_confmod) through nested conftest modules."""
    def test_basic_init(self, basedir):
        conftest = PytestPluginManager()
        p = basedir.join("adir")
        assert conftest._rget_with_confmod("a", p)[1] == 1
    def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
        conftest = PytestPluginManager()
        len(conftest._path2confmods)
        conftest._getconftestmodules(basedir)
        snap1 = len(conftest._path2confmods)
        #assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('adir'))
        assert len(conftest._path2confmods) == snap1 + 1
        # NOTE(review): 'b' lives under 'adir'; basedir.join('b') names a
        # non-existent sibling — confirm this path is intentional.
        conftest._getconftestmodules(basedir.join('b'))
        assert len(conftest._path2confmods) == snap1 + 2
    def test_value_access_not_existing(self, basedir):
        conftest = ConftestWithSetinitial(basedir)
        with pytest.raises(KeyError):
            conftest._rget_with_confmod('a', basedir)
    def test_value_access_by_path(self, basedir):
        # The nested conftest shadows 'a' (1 at adir, 1.5 at adir/b).
        conftest = ConftestWithSetinitial(basedir)
        adir = basedir.join("adir")
        assert conftest._rget_with_confmod("a", adir)[1] == 1
        assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
    def test_value_access_with_confmod(self, basedir):
        startdir = basedir.join("adir", "b")
        startdir.ensure("xx", dir=True)
        conftest = ConftestWithSetinitial(startdir)
        mod, value = conftest._rget_with_confmod("a", startdir)
        assert value == 1.5
        path = py.path.local(mod.__file__)
        assert path.dirpath() == basedir.join("adir", "b")
        assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
    # Directory name with a dot ("adir-1.0") is not importable as a package
    # even though __init__.py files are present; loading must still work.
    tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
    tmpdir.ensure("adir-1.0/b/__init__.py")
    tmpdir.ensure("adir-1.0/__init__.py")
    ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_considered(testdir):
    # A directory whose name starts with "--" must not be parsed as an option.
    conf = testdir.mkdir("--option")
    conf.join("conftest.py").ensure()
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [conf.basename, conf.basename])
    l = conftest._getconftestmodules(conf)
    assert len(l) == 1
def test_issue151_load_all_conftests(testdir):
    # One conftest module is registered per given directory argument.
    names = "code proj src".split()
    for name in names:
        p = testdir.mkdir(name)
        p.ensure("conftest.py")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, names)
    d = list(conftest._conftestpath2mod.values())
    assert len(d) == len(names)
def test_conftest_global_import(testdir):
    # Importing a conftest makes it available as the module 'conftest';
    # importing a second one from a subdir rebinds that name.
    testdir.makeconftest("x=3")
    p = testdir.makepyfile("""
        import py, pytest
        from _pytest.config import PytestPluginManager
        conf = PytestPluginManager()
        mod = conf._importconftest(py.path.local("conftest.py"))
        assert mod.x == 3
        import conftest
        assert conftest is mod, (conftest, mod)
        subconf = py.path.local().ensure("sub", "conftest.py")
        subconf.write("y=4")
        mod2 = conf._importconftest(subconf)
        assert mod != mod2
        assert mod2.y == 4
        import conftest
        assert conftest is mod2, (conftest, mod)
    """)
    res = testdir.runpython(p)
    assert res.ret == 0
def test_conftestcutdir(testdir):
    # Conftests above --confcutdir are not auto-collected...
    conf = testdir.makeconftest("")
    p = testdir.mkdir("x")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
    l = conftest._getconftestmodules(p)
    assert len(l) == 0
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 0
    assert conf not in conftest._conftestpath2mod
    # but we can still import a conftest directly
    conftest._importconftest(conf)
    l = conftest._getconftestmodules(conf.dirpath())
    assert l[0].__file__.startswith(str(conf))
    # and all sub paths get updated properly
    l = conftest._getconftestmodules(p)
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
    # A conftest living exactly at the cutdir is still collected.
    conf = testdir.makeconftest("")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
    # Only conventional test dirs ('test', 'tests') are scanned for
    # conftests at startup; other names and dot-dirs are skipped.
    sub = testdir.mkdir(name)
    subconftest = sub.ensure("conftest.py")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
    if name not in ('whatever', '.dotdir'):
        assert subconftest in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 1
    else:
        assert subconftest not in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 0
def test_conftest_confcutdir(testdir):
    # The failing root conftest ("assert 0") is never imported because
    # --confcutdir limits collection to x/; x's own option must still appear.
    testdir.makeconftest("assert 0")
    x = testdir.mkdir("x")
    x.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
    result.stdout.fnmatch_lines(["*--xyz*"])
    assert 'warning: could not load initial' not in result.stdout.str()
def test_no_conftest(testdir):
    # --noconftest skips the broken conftest; a plain run hits it and
    # exits with a usage error.
    testdir.makeconftest("assert 0")
    result = testdir.runpytest("--noconftest")
    assert result.ret == EXIT_NOTESTSCOLLECTED
    result = testdir.runpytest()
    assert result.ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
    # A pre-existing result.log must not confuse conftest discovery.
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".log", result="")  # Writes result.log
    result = testdir.runpytest("-h", "--resultlog", "result.log")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
    # Same as above but for a pre-existing junit.xml.
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(_pytest._code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".xml", junit="")  # Writes junit.xml
    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
    # Conftests are imported rootmost-first; stub out the importer so the
    # returned list is just the paths in import order.
    ct1 = testdir.makeconftest("")
    sub = testdir.mkdir("sub")
    ct2 = sub.join("conftest.py")
    ct2.write("")
    def impct(p):
        return p
    conftest = PytestPluginManager()
    conftest._confcutdir = testdir.tmpdir
    monkeypatch.setattr(conftest, '_importconftest', impct)
    assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
    # A test-local fixture ('bar' in subsub) must shadow the conftest one
    # without triggering the conftest fixtures it would have depended on.
    # NOTE(review): ct1 is immediately rebound from the conftest path to the
    # __init__.py path — the conftest handle is intentionally discarded.
    ct1 = testdir.makeconftest("")
    ct1 = testdir.makepyfile("__init__.py")
    ct1.write("")
    sub = testdir.mkdir("sub")
    sub.join("__init__.py").write("")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def not_needed():
            assert False, "Should not be called!"
        @pytest.fixture
        def foo():
            assert False, "Should not be called!"
        @pytest.fixture
        def bar(foo):
            return 'bar'
    """))
    subsub = sub.mkdir("subsub")
    subsub.join("__init__.py").write("")
    subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def bar():
            return 'sub bar'
        def test_event_fixture(bar):
            assert bar == 'sub bar'
    """))
    result = testdir.runpytest("sub")
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
    # Running a single test id plus "-h" must still discover the conftest
    # that defines --hello-world.
    sub = testdir.mkdir("sub")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        def pytest_addoption(parser):
            parser.addoption("--hello-world", action="store_true")
    """))
    p = sub.join("test_hello.py")
    p.write(py.std.textwrap.dedent("""
        import pytest
        def test_hello(found):
            assert found == 1
    """))
    result = testdir.runpytest(str(p) + "::test_hello", "-h")
    result.stdout.fnmatch_lines("""
        *--hello-world*
    """)
class TestConftestVisibility:
    """Fixture visibility must depend on the target path, not the cwd
    pytest is launched from (issue #616)."""
    def _setup_tree(self, testdir):  # for issue616
        # example mostly taken from:
        # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
        runner = testdir.mkdir("empty")
        package = testdir.mkdir("package")
        package.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-package"
        """))
        package.join("test_pkgroot.py").write(dedent("""\
            def test_pkgroot(fxtr):
                assert fxtr == "from-package"
        """))
        swc = package.mkdir("swc")
        swc.join("__init__.py").ensure()
        swc.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-swc"
        """))
        swc.join("test_with_conftest.py").write(dedent("""\
            def test_with_conftest(fxtr):
                assert fxtr == "from-swc"
        """))
        snc = package.mkdir("snc")
        snc.join("__init__.py").ensure()
        snc.join("test_no_conftest.py").write(dedent("""\
            def test_no_conftest(fxtr):
                assert fxtr == "from-package"   # No local conftest.py, so should
                                                # use value from parent dir's
        """))
        print ("created directory structure:")
        for x in testdir.tmpdir.visit():
            print ("   " + x.relto(testdir.tmpdir))
        return {
            "runner": runner,
            "package": package,
            "swc": swc,
            "snc": snc}
    # N.B.: "swc" stands for "subdir with conftest.py"
    #       "snc" stands for "subdir no [i.e. without] conftest.py"
    @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
        # Effective target: package/..
        ("runner", "..", 3),
        ("package", "..", 3),
        ("swc", "../..", 3),
        ("snc", "../..", 3),
        # Effective target: package
        ("runner", "../package", 3),
        ("package", ".", 3),
        ("swc", "..", 3),
        ("snc", "..", 3),
        # Effective target: package/swc
        ("runner", "../package/swc", 1),
        ("package", "./swc", 1),
        ("swc", ".", 1),
        ("snc", "../swc", 1),
        # Effective target: package/snc
        ("runner", "../package/snc", 1),
        ("package", "./snc", 1),
        ("swc", "../snc", 1),
        ("snc", ".", 1),
    ])
    @pytest.mark.issue616
    def test_parsefactories_relative_node_ids(
            self, testdir, chdir,testarg, expect_ntests_passed):
        # Run the same relative target from different cwds; the pass count
        # must depend only on the resolved target directory.
        dirs = self._setup_tree(testdir)
        print("pytest run in cwd: %s" %(
              dirs[chdir].relto(testdir.tmpdir)))
        print("pytestarg        : %s" %(testarg))
        print("expected pass    : %s" %(expect_ntests_passed))
        with dirs[chdir].as_cwd():
            reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
            reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize('confcutdir,passed,error', [
    ('.', 2, 0),
    ('src', 1, 1),
    (None, 1, 1),
])
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
    """Test that conftest files are detected only up to a ini file, unless
    an explicit --confcutdir option is given.
    """
    root = testdir.tmpdir
    src = root.join('src').ensure(dir=1)
    src.join('pytest.ini').write('[pytest]')
    src.join('conftest.py').write(_pytest._code.Source("""
        import pytest
        @pytest.fixture
        def fix1(): pass
    """))
    src.join('test_foo.py').write(_pytest._code.Source("""
        def test_1(fix1):
            pass
        def test_2(out_of_reach):
            pass
    """))
    # 'out_of_reach' lives above the ini file; it is only visible when the
    # cutdir is explicitly widened to the repo root ('.').
    root.join('conftest.py').write(_pytest._code.Source("""
        import pytest
        @pytest.fixture
        def out_of_reach(): pass
    """))
    args = [str(src)]
    if confcutdir:
        args = ['--confcutdir=%s' % root.join(confcutdir)]
    result = testdir.runpytest(*args)
    match = ''
    if passed:
        match += '*%d passed*' % passed
    if error:
        match += '*%d error*' % error
    result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
    # Objects with exploding __getattr__ in a conftest must not break
    # collection (pytest must not introspect them eagerly).
    testdir.makeconftest("""
        class DontTouchMe:
            def __getattr__(self, x):
                raise Exception('cant touch me')
        x = DontTouchMe()
    """)
    testdir.makepyfile("""
        def test_some():
            pass
    """)
    res = testdir.runpytest()
    assert res.ret == 0
def test_conftest_exception_handling(testdir):
    # An exception raised while importing a conftest aborts the run with
    # exit code 4 (usage error) and surfaces the offending line.
    testdir.makeconftest('''
        raise ValueError()
    ''')
    testdir.makepyfile("""
        def test_some():
            pass
    """)
    res = testdir.runpytest()
    assert res.ret == 4
    assert 'raise ValueError()' in [line.strip() for line in res.errlines]
|
hpk42/pytest
|
testing/test_conftest.py
|
Python
|
mit
| 14,608
|
[
"VisIt"
] |
cda1e6a54150a4e4388d8a6256d5edf962ca83584df70e8b2a155ca73129512c
|
"""Make or load test datasets."""
import numpy as np
from functools import partial
from numpy.random import random_integers
from sklearn.preprocessing import scale
import simfMRI.noise as noisefns
from simfMRI.expclass import Exp
from simBehave.trials import event_random
from simfMRI.misc import process_prng
class _NCond(Exp):
    """
    Simulate N conditions using one then the other as the BOLD signal.
    Parameters
    ----------
    n - total trial count
    n_cond - the number of conditions
    TR - The sampling time
    durations - a range of durations [start, stop] (e.g. [2,6] give a
            duration range from [2,6] randomly sampled from a uniform
            distribution) Default = None ( = 1).
    noise - The noise models to use (default = white, options are
            ar1, physio, white, lowfreqdrift)
    """
    def __init__(self, n, n_cond=2, TR=2, durations=None,):
        # Exp.__init__ may not exist on all base versions; tolerate that.
        try:
            Exp.__init__(self, TR=TR, ISI=2, prng=None)
        except AttributeError:
            pass
        # Random event sequence of n trials over n_cond conditions.
        self.trials, self.prng = event_random(n_cond, n, 1, self.prng)
        self.trials = np.array(self.trials)
        if durations != None:  # NOTE(review): prefer `is not None`
            start, stop = durations[0], durations[1]
            # NOTE(review): random_integers is deprecated/removed in
            # modern numpy; randint(start, stop + 1) is the replacement.
            self.durations = [random_integers(start, stop) for
                    _ in self.trials]
        else:
            self.durations = [1, ] * len(self.trials)
class _Ar1(_NCond):
    """_NCond with an AR(1) noise model.
    Parameters
    ----------
    alpha - the degree of AR(1) autocorrelation (0-1).
    """
    def __init__(self, n, n_cond=2, TR=2, durations=None, alpha=0.5):
        # Init _NCond.
        _NCond.__init__(self, n=n, n_cond=n_cond, TR=TR, durations=durations)
        # Then override the noise_f
        self.noise_f = partial(getattr(noisefns, "ar1"), alpha=alpha)
class _Physio(_NCond):
    """_NCond with an 'physiological' noise model.
    Parameters
    ----------
    See 'physio' in simfMRI.noise for the parametrization.
    """
    def __init__(self, n, n_cond=2, TR=2, durations=None, sigma=1,
            freq_heart=1.17, freq_resp=0.2):
        # Init _NCond.
        _NCond.__init__(self, n=n, n_cond=n_cond, TR=TR, durations=durations)
        # Then override the noise_f
        self.noise_f = partial(getattr(noisefns, "physio"),
                TR=TR, sigma=sigma, freq_heart=freq_heart, freq_resp=freq_resp)
def _selectExp(noise):
    """Select a BOLD sim experimental class based on the noise type."""
    # Dispatch table mapping each noise-model name to a ready-to-call
    # experiment class, with noise parameters pre-bound via partial().
    noise_classes = {
        "white": _NCond,
        "ar1": partial(_Ar1, alpha=0.5),
        "physio": partial(_Physio, sigma=1, freq_heart=1.17, freq_resp=0.2),
    }
    try:
        return noise_classes[noise]
    except KeyError:
        raise ValueError("noise was not understood.")
def _univariate_features(n, boldsim):
    """Create n univariate features."""
    Xuni = np.zeros((boldsim.dm.shape[0], n))
    # To make the bold, combine dm cols: sum all but the first col,
    # which is the baseline, then rescale.
    arr = scale(boldsim.dm[:, 0:].sum(1), with_mean=False)  # Rescale
    for jj in range(n):
        # Generate a (new) BOLD model (i.e. new noise) per feature.
        boldsim.create_bold(arr, convolve=False)
        Xuni[:, jj] = boldsim.bold
    return Xuni
def _noise_features(n, boldsim):
    """Create n noise-only features."""
    Xnoise = np.zeros((boldsim.dm.shape[0], n))
    for jj in range(n):
        # By passing an array of 0's, we get only noise out.
        boldsim.create_bold(np.zeros(boldsim.dm.shape[0]), convolve=False)
        Xnoise[:, jj] = boldsim.bold
    return Xnoise
def _accumulator_features(n, boldsim, drift_noise=False, step_noise=False):
    """Create n accumulator-like features.  Also return the accumulators
    array. """
    # One shared accumulator trace; each feature gets fresh BOLD noise.
    accumulators = _make_accumulator_array(boldsim.trials, boldsim.durations,
            drift_noise=drift_noise, step_noise=step_noise)
    Xacc = np.zeros((accumulators.shape[0], n))
    for jj in range(n):
        boldsim.create_bold(accumulators, convolve=True)
        Xacc[:, jj] = boldsim.bold
    return Xacc, accumulators
def _decision_features(n, boldsim):
    """Create n decision-like (impulse at TR) features.  Also return the
    decision array."""
    decisions = _make_decision_array(boldsim.trials, boldsim.durations)
    Xdec = np.zeros((decisions.shape[0], n))
    for jj in range(n):
        boldsim.create_bold(decisions, convolve=True)
        Xdec[:, jj] = boldsim.bold
    return Xdec, decisions
def _repeated_features(n, n_informative, X):
"""Randomly select and copy n features from X, from the col
range [0 ... n_informative].
"""
Xrep = np.zeros((X.shape[0], n))
for jj in range(n):
rand_info_col = np.random.random_integers(0, n_informative - 1)
Xrep[:, jj] = X[:, rand_info_col]
return Xrep
def _make_decision_array(trials, durations):
"""Treat trials as reaction times, and use these rts to create
mock decision signals.
"""
decision = []
for t, d in zip(trials, durations):
if t == 0:
decision.extend([0, ] * d) # Add empty trial if t == 0
else:
trial = np.zeros(d) # Init this trial's data
trial[t - 1] = 1.0
decision.extend(trial.tolist())
return np.array(decision)
def _make_accumulator_array(trials, durations, drift_noise=False,
        step_noise=False):
    """Treat trials as reaction times, and use these rts to create
    mock accumulator signals whose drift rates and step sizes
    could be Gaussian processes.
    Note:
    -----
    Noise processes for both drift and step normal Gaussians with
    the following params:
        drift : M = 0, SD = 0.5
        step : M = 0, SD = .2
    The SD in both cases is arbitrary.  I played with the SDs until the
    noised curves looked plausible - not to much noise, not to little.
    As I'm primarily worried about the impact these noise processes
    have on BOLD frequency not magnitude this seems a justifiable, at
    least in the short term.
    """
    accumlator = []
    for t, d in zip(trials, durations):
        if t == 0:
            accumlator.extend([0, ] * d)
                # Add empty trial if t == 0
        else:
            trial = np.zeros(d)
                # Init this trial's data
            # Drift rate is noisy?
            drift = 1  # Does nothing
            if drift_noise:
                # abs() keeps the ramp rising; half-normal drift rate.
                drift = np.abs(np.random.normal(loc=0, scale=0.5))
                # Drift params: Mean 0, SD = 0.1
            # Steps are noisy?
            stepn = np.zeros(t)  # Does nothing
            if step_noise:
                stepn = np.random.normal(loc=0, scale=0.2, size=t)
            # Create the ramping (i.e. accumlator) signal for
            # this trial: linear rise from 1/t to 1, scaled by drift.
            ramp = drift * (np.arange(1, t + 1) / np.float(t))
            # Update trial with ramp and add to the accumlator trace
            trial[:len(ramp)] = ramp + stepn
            accumlator.extend(trial.tolist())
    return np.array(accumlator)
def _generate_labels(boldsim):
"""Use the boldsim data to create and return reaction time
and trial-level labels.
"""
# Now use trials and durations to generate labals (i.e., y).
y = []
y_trialcount = []
indext = range(len(boldsim.trials))
for t, d, i in zip(boldsim.trials, boldsim.durations, indext):
t_in_d = [t, ] * d
y.extend(t_in_d)
t_count = [i, ] * d
y_trialcount.extend(t_count)
y = np.array(y) # In metacculate the standard is for
# labels to be in in arrays not lists
y_trialcount = np.array(y_trialcount)
return y, y_trialcount
def make_bold(n_cond, n_trials_per_cond, TR=2, durations=None,
        noise="white", n_features=10, n_univariate=None,
        n_accumulator=None, n_decision=None, n_noise=None, n_repeated=None,
        drift_noise=False, step_noise=False):
    """Make a simple BOLD dataset.

    Parameters
    ----------
    n_cond : number of conditions.
    n_trials_per_cond : trials per condition.
    TR : sampling time.
    durations : [start, stop] trial-duration range (default [8, 16]).
    noise : noise model name ('white', 'ar1', 'physio').
    n_features : total number of feature columns.
    n_univariate, n_accumulator, n_decision, n_noise, n_repeated :
        per-kind column counts; None means 0, except n_univariate which
        defaults to whatever remains of n_features.
    drift_noise, step_noise : add noise to accumulator drift/steps.

    Returns
    -------
    (X, y, y_trialcount) : features, per-TR labels, per-TR trial indices.

    Note:
    ----
    Defaults to creating univariate features.
    """
    # Avoid a mutable default argument; [8, 16] was the historical default.
    if durations is None:
        durations = [8, 16]
    # ----
    # Process args for feature composition (identity checks instead of
    # the previous `== None` comparisons; behavior is unchanged).
    n_noise = 0 if n_noise is None else n_noise
    n_repeated = 0 if n_repeated is None else n_repeated
    n_accumulator = 0 if n_accumulator is None else n_accumulator
    n_decision = 0 if n_decision is None else n_decision
    if n_univariate is None:
        n_univariate = (n_features - n_noise - n_repeated - n_accumulator
                - n_decision)
    if (n_features - n_univariate - n_accumulator - n_noise
            - n_repeated - n_decision) != 0:
        raise ValueError("The number of features don't add up.")
    # ----
    # Select the BOLD sim class based on noise type
    ExpClass = _selectExp(noise)
    # ----
    # Instantiate a BOLD generator.  Pass the caller's TR through: the
    # previous code hard-coded TR=2 here, silently ignoring the parameter.
    boldsim = ExpClass(n_trials_per_cond, n_cond, TR=TR, durations=durations)
    numactive = 8
    # Simulate a trial structure where the neural impulse spans from
    # TR 0 to numactive.  See dtime() in simfMRI.timing for an
    # explanation of drop's structure.
    drop = [0, ] * numactive + [1, ] * (max(durations) - numactive)
    boldsim.create_dm(convolve=True, drop=drop)
    # Init X, the features
    n_sample_feature = boldsim.dm.shape[0]
    X = np.zeros((n_sample_feature, n_features))
    # And build up X, column group by column group:
    # 1. univariate features
    start = 0
    stop = n_univariate
    X[:, start:stop] = _univariate_features(n_univariate, boldsim)
    # 2. accumulator features
    start, stop = stop, stop + n_accumulator
    X[:, start:stop], acc = _accumulator_features(n_accumulator, boldsim,
            drift_noise=drift_noise, step_noise=step_noise)
    # 3. decision features
    start, stop = stop, stop + n_decision
    X[:, start:stop], dec = _decision_features(n_decision, boldsim)
    # 4. noise features
    start, stop = stop, stop + n_noise
    X[:, start:stop] = _noise_features(n_noise, boldsim)
    # 5. feature repeats (drawn from the informative columns only)
    start, stop = stop, stop + n_repeated
    X[:, start:stop] = _repeated_features(n_repeated,
            (n_univariate + n_accumulator), X)
    # 6. Create labels
    y, y_trialcount = _generate_labels(boldsim)
    return X, y, y_trialcount
if __name__ == "__main__":
    """For later testing and analysis, save a variety of simulated BOLD
    timecourses as csv files."""
    # ----
    # GLOBALS
    n_c = 7         # number of conditions
    n_t_c = 10      # trials per condition
    ds = [8, 16]    # trial duration range (TRs)
    n_f = 3         # features per dataset
    n_iter = 3      # replications per configuration
    # Each configuration is (file prefix, make_bold keyword overrides).
    # The original script repeated the same call ten times by hand; these
    # loops produce exactly the same files, with the same parameters, in
    # the same order (uni, acc, drift, step, dec; physio before ar1).
    configs = [
        ("uni", {}),
        ("acc", {"n_accumulator": n_f}),
        ("drift", {"n_accumulator": n_f, "drift_noise": True}),
        ("step", {"n_accumulator": n_f, "step_noise": True}),
        ("dec", {"n_decision": n_f}),
    ]
    for prefix, overrides in configs:
        for noise in ("physio", "ar1"):
            for ii in range(n_iter):
                X, y, tcs = make_bold(
                        n_c, n_t_c,
                        TR=2,
                        durations=ds,
                        noise=noise,
                        n_features=n_f,
                        n_univariate=None,
                        n_accumulator=overrides.get("n_accumulator"),
                        n_decision=overrides.get("n_decision"),
                        n_noise=None,
                        n_repeated=None,
                        drift_noise=overrides.get("drift_noise", False),
                        step_noise=overrides.get("step_noise", False))
                np.savetxt(prefix + '_' + noise + '_' + str(ii) + '.csv', X,
                        fmt='%1.6f', delimiter=",")
|
parenthetical-e/wheelerdata
|
load/simulated.py
|
Python
|
bsd-2-clause
| 16,089
|
[
"Gaussian"
] |
6e7cbd8854e187e18c0c31be11535b57dd301d25ebd382065e86740ae49460b2
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005, 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Client editor slaves implementation"""
from kiwi.datatypes import ValidationError
from stoqlib.api import api
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.dialogs.creditdialog import CreditInfoListDialog
from stoqlib.gui.editors.baseeditor import BaseEditorSlave
from stoqlib.gui.search.clientsalaryhistorysearch import ClientSalaryHistorySearch
from stoqlib.gui.utils.printing import print_report
from stoqlib.domain.person import Client, ClientCategory, ClientSalaryHistory
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.parameters import sysparam
from stoqlib.reporting.clientcredit import ClientCreditReport
_ = stoqlib_gettext
class ClientStatusSlave(BaseEditorSlave):
    """Editor slave for a client's status and category."""
    model_type = Client
    gladefile = 'ClientStatusSlave'
    proxy_widgets = ('statuses_combo', 'category_combo')
    #
    # BaseEditorSlave hooks
    #
    def setup_proxies(self):
        # Fill the category combo from all ClientCategory rows, with an
        # empty first entry so the category can be left unset.
        categories = self.store.find(ClientCategory)
        self.category_combo.prefill(api.for_combo(categories, empty=''))
        table = self.model_type
        # (label, constant) pairs for every client status.
        items = [(value, constant)
                 for constant, value in table.statuses.items()]
        self.statuses_combo.prefill(items)
        self.proxy = self.add_proxy(self.model,
                                    ClientStatusSlave.proxy_widgets)
class ClientCreditSlave(BaseEditorSlave):
    """Editor slave for a client's salary and store-credit data."""
    model_type = Client
    gladefile = 'ClientCreditSlave'
    proxy_widgets = ('salary', 'credit_limit', 'remaining_store_credit',
                     'credit_account_balance')
    def __init__(self, store, model, visual_mode=False, edit_mode=False):
        BaseEditorSlave.__init__(self, store, model, visual_mode, edit_mode)
        # Remember the salary at edit start so on_confirm can record a
        # salary-history entry only when it actually changed.
        self._original_salary = self.model.salary
    #
    # BaseEditorSlave hooks
    #
    def setup_proxies(self):
        self.proxy = self.add_proxy(self.model,
                                    self.proxy_widgets)
        self._setup_widgets()
    def _setup_widgets(self):
        # When the credit limit is derived from a salary percentage, it is
        # computed automatically and must not be edited directly.
        salary_percentage = sysparam.get_decimal('CREDIT_LIMIT_SALARY_PERCENT')
        if salary_percentage > 0:
            self.credit_limit.set_sensitive(False)
        # Only management can add credit to clients (in other words, users with
        # access to the Admin app).
        user = api.get_current_user(self.store)
        if not user.profile.check_app_permission(u'admin'):
            self.credit_transactions_button.hide()
    def on_confirm(self):
        if self.model.salary != self._original_salary:
            ClientSalaryHistory.add(self.store, self._original_salary,
                                    self.model, api.get_current_user(self.store))
    #
    # Kiwi Callbacks
    #
    def on_credit_limit__validate(self, entry, value):
        if value < 0:
            return ValidationError(
                _("Credit limit must be greater than or equal to 0"))
    def after_salary__changed(self, entry):
        # Salary changes can affect both derived fields.
        self.proxy.update('remaining_store_credit')
        self.proxy.update('credit_limit')
    def on_salary__validate(self, widget, value):
        if value < 0:
            return ValidationError(
                _("Salary can't be lower than 0."))
    def after_credit_limit__changed(self, entry):
        self.proxy.update('remaining_store_credit')
    def on_salary_history_button__clicked(self, button):
        run_dialog(ClientSalaryHistorySearch,
                   self.get_toplevel().get_toplevel(), self.store,
                   client=self.model)
    def on_credit_transactions_button__clicked(self, button):
        # If we are not in edit mode, we are creating a new object, and thus we
        # should reuse the transaction
        reuse_store = not self.edit_mode
        run_dialog(CreditInfoListDialog, self.get_toplevel().get_toplevel(),
                   self.store, self.model, reuse_store=reuse_store)
        self.proxy.update('credit_account_balance')
    def on_print_credit_letter__clicked(self, button):
        print_report(ClientCreditReport, self.model)
|
andrebellafronte/stoq
|
stoqlib/gui/slaves/clientslave.py
|
Python
|
gpl-2.0
| 4,949
|
[
"VisIt"
] |
62a1c1a656532d645204df5e2fa814e8147c40741586bdba4f63a3c5149b590a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_ddlgenerator
----------------------------------
Tests for `ddlgenerator` module.
"""
import glob
import unittest
import pymongo
import os.path
from collections import namedtuple, OrderedDict
try:
from ddlgenerator.ddlgenerator import Table
except ImportError:
from ddlgenerator import Table
def here(filename):
    """Return *filename* resolved relative to this module's directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, filename)
class TestMongo(unittest.TestCase):
    """DDL generation from a live MongoDB collection.

    Requires a local MongoDB server.  NOTE(review): ``pymongo.Connection``
    was removed in pymongo 3.x (replaced by ``MongoClient``) — this suite
    presumably targets pymongo 2.x; confirm before upgrading.
    """
    def setUp(self):
        # Two Nobel-prize records; lists force child-table generation.
        data = [{'year': 2013,
                 'physics': ['François Englert', 'Peter W. Higgs'],
                 'chemistry': ['Martin Karplus', 'Michael Levitt', 'Arieh Warshel'],
                 'peace': ['Organisation for the Prohibition of Chemical Weapons (OPCW)',],
                 },
                {'year': 2011,
                 'physics': ['Saul Perlmutter', 'Brian P. Schmidt', 'Adam G. Riess'],
                 'chemistry': ['Dan Shechtman',],
                 'peace': ['Ellen Johnson Sirleaf', 'Leymah Gbowee', 'Tawakkol Karman'],
                 },
                ]
        self.data = data
        self.conn = pymongo.Connection()
        self.db = self.conn.ddlgenerator_test_db
        self.tbl = self.db.prize_winners
        self.tbl.insert(self.data)
    def tearDown(self):
        # Drop the scratch database so runs stay independent.
        self.conn.drop_database(self.db)
    def testData(self):
        """Child tables must reference the chosen primary key column."""
        winners = Table(self.tbl, pk_name='year')
        generated = winners.sql('postgresql', inserts=True)
        self.assertIn('REFERENCES prize_winners (year)', generated)
class TestFromRawPythonData(unittest.TestCase):
    """DDL generation straight from in-memory Python data structures."""
    # Flat records via namedtuple.
    prov_type = namedtuple('province', ['name', 'capital', 'pop'])
    canada = [prov_type('Quebec', 'Quebec City', '7903001'),
              prov_type('Ontario', 'Toronto', '12851821'), ]
    # Nested dicts exercise column-name flattening (e.g. reign -> reign_to).
    merovingians = [
        OrderedDict([('name', {'name_id': 1, 'name_txt': 'Clovis I'}),
                     ('reign', {'from': 486, 'to': 511}),
                     ]),
        OrderedDict([('name', {'name_id': 1, 'name_txt': 'Childebert I'}),
                     ('reign', {'from': 511, 'to': 558}),
                     ]),
        ]
    def test_pydata_named_tuples(self):
        """Column sizing and INSERT rendering from namedtuples."""
        tbl = Table(self.canada)
        generated = tbl.sql('postgresql', inserts=True).strip()
        self.assertIn('capital VARCHAR(11) NOT NULL,', generated)
        self.assertIn('(name, capital, pop) VALUES (\'Quebec\', \'Quebec City\', 7903001)', generated)
    def test_nested(self):
        """Nested dict keys are flattened into prefixed column names."""
        tbl = Table(self.merovingians)
        generated = tbl.sql('postgresql', inserts=True).strip()
        self.assertIn("reign_to", generated)
    def test_sqlalchemy(self):
        """SQLAlchemy model emission for both data shapes."""
        tbl = Table(self.merovingians)
        generated = tbl.sqlalchemy()
        self.assertIn("Column('reign_from'", generated)
        self.assertIn("Integer()", generated)
        tbl = Table(self.canada)
        generated = tbl.sqlalchemy()
        self.assertIn("Column('capital', Unicode", generated)
    def test_django(self):
        """Django model emission.

        NOTE(review): all assertions are commented out, so this is only a
        smoke test that ``django_models()`` does not raise.
        """
        tbl = Table(self.merovingians)
        generated = tbl.django_models()
        #print("generated")
        #print(generated)
        #self.assertIn("(models.Model):", generated)
        #self.assertIn("name_name_id =", generated)
        tbl = Table(self.canada)
        generated = tbl.django_models()
        #self.assertIn("name =", generated)
    def test_cushion(self):
        """data_size_cushion widens generated VARCHAR columns."""
        tbl = Table(self.merovingians, data_size_cushion=0)
        generated = tbl.sql('postgresql').strip()
        self.assertIn('VARCHAR(12)', generated)
        tbl = Table(self.merovingians, data_size_cushion=1)
        generated = tbl.sql('postgresql').strip()
        self.assertIn('VARCHAR(14)', generated)
class TestFiles(unittest.TestCase):
    """Golden-file tests: each fixture file must render to its .sql sibling."""
    def test_use_open_file(self):
        """Table() must accept an already-open file object."""
        with open(here('knights.yaml')) as infile:
            knights = Table(infile)
        generated = knights.sql('postgresql', inserts=True)
        self.assertIn('Lancelot', generated)
    def test_files(self):
        """Every <name>.* fixture must generate exactly <name>.sql."""
        for sql_fname in glob.glob(here('*.sql')):
            with open(sql_fname) as infile:
                expected = infile.read().strip()
            (fname, ext) = os.path.splitext(sql_fname)
            # NOTE(review): fname is already a full path here, then passed
            # through here() again — this only works when __file__ yields an
            # absolute path (os.path.join returns the absolute part as-is);
            # verify on platforms where __file__ may be relative.
            for source_fname in glob.glob(here('%s.*' % fname)):
                (fname, ext) = os.path.splitext(source_fname)
                if ext != '.sql':
                    tbl = Table(source_fname, uniques=True)
                    generated = tbl.sql('postgresql', inserts=True, drops=True).strip()
                    self.assertEqual(generated, expected)
# Allow running this test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
|
catherinedevlin/ddl-generator
|
tests/test_ddlgenerator.py
|
Python
|
mit
| 4,872
|
[
"Brian"
] |
fdeb4b737b9afb29c20c935abf6ab3500f390d183a5070182edc76c4275cc40e
|
"""
ex20170307_Model_REG2.py
Build Model REG2. Second Version of Model REG from Godley & Lavoie (Chapter 6)
Uses the model builder from the sfc_models.gl_book sub-package.
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sfc_models
from sfc_models.gl_book.chapter6 import REG2
# Quick2DPlot is only used by the commented-out example plots at the bottom.
from sfc_models.examples.Quick2DPlot import Quick2DPlot
from sfc_models import Parameters

# Write equation/iteration logs next to this script.
sfc_models.register_standard_logs('output', __file__)

# Build Model REG2 using the book's exogenous settings (G&L Chapter 6).
builder_REG = REG2(country_code='C1', use_book_exogenous=True)
model = builder_REG.build_model()

# Solver configuration.  Fix: the original assigned Parameters.TraceStep = 1
# twice (before and after building the model) with the same value; a single
# assignment before solving suffices.
Parameters.InitialEquilibriumStepError = 1e-5
Parameters.SolveInitialEquilibrium = False
Parameters.TraceStep = 1

model.main()

# Truncate the reported series and fetch the time axis.
model.TimeSeriesCutoff = 40
time = model.GetTimeSeries('t')
# Example plots (uncomment to inspect individual series):
# Y_PC = model.GetTimeSeries('GOOD_SUP_GOOD')
# r = model.GetTimeSeries('DEP_r')
# Y_d = model.GetTimeSeries('HH_AfterTax')
# FB = model.GetTimeSeries('TRE_FISCBAL')
# PB = model.GetTimeSeries('TRE_PRIM_BAL')
#
# Quick2DPlot(time, r, 'Interest Rate - Model PC')
# Quick2DPlot(time, Y_PC, 'Output (Y) - Model PC')
# Quick2DPlot(time, Y_d, 'Household Disposable Income - Model PC')
# Quick2DPlot(time, FB, 'Fiscal Balance - Model PC')
# Quick2DPlot(time, PB, 'Primary Fiscal Balance')
|
brianr747/SFC_models
|
sfc_models/examples/scripts/ex20170307_Model_REG2.py
|
Python
|
apache-2.0
| 1,828
|
[
"Brian"
] |
1723e3904db2c16b81ad3011ec5b3bff29ccb2dbeb73a10e750af19e1de76b56
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Loss functions."""
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
    """Cast every argument to tf.float32.

    A single tuple argument is unpacked first; a single value is returned
    bare rather than as a 1-tuple.
    """
    if len(values) == 1 and isinstance(values[0], tuple):
        values = values[0]
    casted = tuple(tf.cast(v, tf.float32) for v in values)
    return casted if len(casted) >= 2 else casted[0]
#----------------------------------------------------------------------------
# WGAN & WGAN-GP loss functions.
def G_wgan(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
    """WGAN generator loss: the negated critic score on generated images."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    lbls = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, lbls, is_training=True)
    critic_fake = fp32(D.get_output_for(fakes, lbls, is_training=True))
    return -critic_fake
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_epsilon = 0.001): # Weight for the epsilon term, \epsilon_{drift}.
    """WGAN critic loss: fake score minus real score, plus an epsilon-drift
    penalty (real_score**2 * wgan_epsilon) that keeps scores near zero."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    # autosummary records the values for TensorBoard without changing them.
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
        loss += epsilon_penalty * wgan_epsilon
    return loss
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.
    """WGAN-GP critic loss: Wasserstein term + gradient penalty on
    real/fake interpolates + epsilon drift penalty."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    with tf.name_scope('GradientPenalty'):
        # Score random per-sample interpolations between reals and fakes.
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        # apply/undo loss scaling brackets the gradient computation so mixed-
        # precision gradients do not underflow.
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        # Penalize deviation of the gradient norm from wgan_target.
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon
    return loss
#----------------------------------------------------------------------------
# Hinge loss functions. (Use G_wgan with these)
def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
    """Hinge discriminator loss: max(0, 1+fake) + max(0, 1-real)."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    score_real = fp32(D.get_output_for(reals, labels, is_training=True))
    score_fake = fp32(D.get_output_for(fakes, labels, is_training=True))
    # Record scores for TensorBoard (identity pass-through).
    score_real = autosummary('Loss/scores/real', score_real)
    score_fake = autosummary('Loss/scores/fake', score_fake)
    return tf.maximum(0., 1.+score_fake) + tf.maximum(0., 1.-score_real)
def D_hinge_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.
    """Hinge discriminator loss plus a WGAN-GP-style gradient penalty on
    random real/fake interpolates."""
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out)
    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        # Loss scaling brackets guard mixed-precision gradients from underflow.
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    return loss
#----------------------------------------------------------------------------
# Loss functions advocated by the paper
# "Which Training Methods for GANs do actually Converge?"
def G_logistic_saturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
    """Saturating logistic generator loss: log(1 - sigmoid(D(G(z))))."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    lbls = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(z, lbls, is_training=True)
    score_fake = fp32(D.get_output_for(fakes, lbls, is_training=True))
    # -softplus(x) == log(1 - sigmoid(x))
    return -tf.nn.softplus(score_fake)
#---------------------------------------------------------------
# Modified by Deng et al.
def G_logistic_nonsaturating(G, D, latents, opt, training_set, minibatch_size, randomize_noise = True): # pylint: disable=unused-argument
    """Non-saturating logistic generator loss.

    Unlike the upstream StyleGAN version, this variant takes *latents* as an
    explicit argument and also returns the generated images so the caller can
    attach further losses to them.
    """
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out = G.get_output_for(latents, labels, is_training=True, randomize_noise=randomize_noise)
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    loss = tf.nn.softplus(-fake_scores_out) # -log(logistic(fake_scores_out))
    return loss,fake_images_out
#---------------------------------------------------------------
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
    """Logistic discriminator loss in numerically stable softplus form."""
    z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fakes = G.get_output_for(z, labels, is_training=True)
    score_real = fp32(D.get_output_for(reals, labels, is_training=True))
    score_fake = fp32(D.get_output_for(fakes, labels, is_training=True))
    score_real = autosummary('Loss/scores/real', score_real)
    score_fake = autosummary('Loss/scores/fake', score_fake)
    # softplus(fake) = -log(1 - sigmoid(fake)); softplus(-real) = -log(sigmoid(real))
    return tf.nn.softplus(score_fake) + tf.nn.softplus(-score_real)
#---------------------------------------------------------------
# Modified by Deng et al.
def D_logistic_simplegp(G, D,latents, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0,randomize_noise = True): # pylint: disable=unused-argument
    """Logistic discriminator loss with optional R1/R2 simple gradient
    penalties (gradient norm on reals / fakes respectively).

    Modified from upstream StyleGAN: *latents* is an explicit argument and
    *randomize_noise* is forwarded to the generator.
    """
    fake_images_out = G.get_output_for(latents, labels, is_training=True,randomize_noise=randomize_noise)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type
    if r1_gamma != 0.0:
        with tf.name_scope('R1Penalty'):
            # Squared gradient norm of D's output w.r.t. the real images,
            # bracketed by loss scaling for mixed-precision safety.
            real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out))
            real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0]))
            r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3])
            r1_penalty = autosummary('Loss/r1_penalty', r1_penalty)
        loss += r1_penalty * (r1_gamma * 0.5)
    if r2_gamma != 0.0:
        with tf.name_scope('R2Penalty'):
            # Same penalty computed on the generated images (disabled by default).
            fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out))
            fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0]))
            r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3])
            r2_penalty = autosummary('Loss/r2_penalty', r2_penalty)
        loss += r2_penalty * (r2_gamma * 0.5)
    return loss
#---------------------------------------------------------------
|
microsoft/DiscoFaceGAN
|
training/loss.py
|
Python
|
mit
| 10,648
|
[
"VisIt"
] |
21e917a0ea0572a5e39780431444dd4ce9bb03327a147707bf23b5391905a175
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
class UbiquitiDeviceMap(SnmpPlugin):
    """Map mib elements from Ubiquiti mib to get hw and os products.
    """
    maptype = "UbiquitiDeviceMap"

    snmpGetMap = GetMap({
        #'' : 'manufacturer',
        #'.1.3.6.1.4.1.14823.2.2.1.1.1.2.0' : 'setHWProductKey',
        #'.1.3.6.1.4.1.14823.2.2.1.2.1.11.0' : 'setHWSerialNumber',
        '.1.2.840.10036.3.1.2.1.4': 'setOSProductKey',
        })

    def process(self, device, results, log):
        """Collect SNMP information from this device; return an object map
        or None when the device returned no usable data."""
        log.info('processing %s for device %s', self.name(), device.id)
        getdata, tabledata = results
        # Bug fix: the original indexed getdata['setHWProductKey'], but that
        # alias is commented out of snmpGetMap above, so the lookup raised
        # KeyError instead of skipping the device.  Guard on the alias that
        # is actually collected, using .get() so a missing key bails out too.
        if getdata.get('setOSProductKey') is None:
            return None
        om = self.objectMap(getdata)
        return om
|
anksp21/Community-Zenpacks
|
ZenPacks.AndreaConsadori.UbiquitiNanostation/ZenPacks/AndreaConsadori/UbiquitiNanostation/modeler/plugins/UbiquitiDeviceMap.py
|
Python
|
gpl-2.0
| 1,334
|
[
"VisIt"
] |
ac089ae0407d680b174e84762eb7204ea478b42b9da15a95dfd0fe8bfc6b32c7
|
from copy import deepcopy
import logging
from PyQt5.QtCore import (QCoreApplication, QPoint, QRectF, Qt)
from PyQt5.QtGui import (QColor, QTransform)
from PyQt5.QtWidgets import (QAbstractItemView, QAction, QApplication, QGraphicsScene, QGraphicsView, QHBoxLayout,
QMessageBox, QPlainTextEdit, QSizePolicy, QSplitter, QTableWidget, QTableWidgetItem,
QToolBar, QToolButton, QVBoxLayout, QWidget)
import os
from time import time
import sys
from pyteltools.conf import settings
from .MultiNode import Box, MultiLink
from . import multi_func as worker
from .multi_nodes import *
from .util import logger
# Registry of the available workflow nodes, keyed first by toolbar category
# and then by the display name stored in saved project files (used by
# MultiScene.load to re-instantiate nodes).
NODES = {'Input/Output': {'Load Serafin 2D': MultiLoadSerafin2DNode, 'Load Serafin 3D': MultiLoadSerafin3DNode,
                          'Load Reference Serafin': MultiLoadReferenceSerafinNode,
                          'Load 2D Points': MultiLoadPoint2DNode, 'Load 2D Open Polylines': MultiLoadOpenPolyline2DNode,
                          'Load 2D Polygons': MultiLoadPolygon2DNode,
                          'Write CSV': MultiWriteCsvNode, 'Write LandXML': MultiWriteLandXMLNode,
                          'Write shp': MultiWriteShpNode,
                          'Write vtk': MultiWriteVtkNode, 'Write Serafin': MultiWriteSerafinNode},
         'Basic operations': {'Select Variables': MultiSelectVariablesNode, 'Add Rouse Numbers': MultiAddRouseNode,
                              'Convert to Single Precision': MultiConvertToSinglePrecisionNode,
                              'Select Time': MultiSelectTimeNode,
                              'Select Single Frame': MultiSelectSingleFrameNode,
                              'Select First Frame': MultiSelectFirstFrameNode,
                              'Select Last Frame': MultiSelectLastFrameNode,
                              'Select Single Layer': MultiSelectSingleLayerNode,
                              'Vertical Aggregation': MultiVerticalAggregationNode,
                              'Add Transformation': MultiAddTransformationNode},
         'Operators': {'Max': MultiComputeMaxNode, 'Min': MultiComputeMinNode, 'Mean': MultiComputeMeanNode,
                       'Project B on A': MultiProjectMeshNode, 'A Minus B': MultiMinusNode,
                       'B Minus A': MultiReverseMinusNode, 'Max(A,B)': MultiMaxBetweenNode,
                       'Min(A,B)': MultiMinBetweenNode, 'SynchMax': MultiSynchMaxNode},
         'Calculations': {'Compute Arrival Duration': MultiArrivalDurationNode,
                          'Compute Volume': MultiComputeVolumeNode, 'Compute Flux': MultiComputeFluxNode,
                          'Interpolate on Points': MultiInterpolateOnPointsNode,
                          'Interpolate along Lines': MultiInterpolateAlongLinesNode,
                          'Project Lines': MultiProjectLinesNode}}
def topological_ordering(graph):
    """!
    topological ordering of a DAG (adjacency list)
    """
    # Work on a deep copy so the caller's adjacency sets are untouched.
    remaining = deepcopy(graph)
    ordered = []
    # Seed the candidate list with every node that is never a target of an
    # edge, i.e. in-degree zero (kept in the dict's key order).
    candidates = [node for node in graph
                  if not any(node in graph[parent] for parent in graph)]
    # Kahn's algorithm: repeatedly emit a source, delete its outgoing edges,
    # and promote successors whose last incoming edge just disappeared.
    while candidates:
        node = candidates.pop()
        ordered.append(node)
        if node in remaining:
            for succ in list(remaining[node]):
                remaining[node].remove(succ)
                if not any(succ in targets for targets in remaining.values()):
                    candidates.append(succ)
    return ordered
def visit(graph, from_node):
    """!
    generates all reachable nodes in DFS pre-ordering
    from a given node in a graph (adjacency list including orphan nodes)
    """
    pending = [from_node]
    # Dict keyed on every graph node, as in a classic colouring scheme.
    seen = dict.fromkeys(graph, False)
    while pending:
        node = pending.pop()
        if seen[node]:
            continue
        seen[node] = True
        yield node
        # Push successors in adjacency order; the stack pops them in reverse.
        pending.extend(graph[node])
class MultiScene(QGraphicsScene):
    """Graphics scene holding the multi-run workflow graph.

    Nodes live in ``self.nodes`` (index -> node item) and edges in
    ``self.adj_list`` (index -> set of successor indices).  Input nodes are
    configured with folders of input files via ``self.inputs``
    (index -> [paths, filename, job_ids]); ``self.table`` mirrors the graph
    as a job/status grid.
    """
    def __init__(self, table):
        super().__init__()
        self.table = table
        self.language = settings.LANG
        self.csv_separator = settings.CSV_SEPARATOR
        self.fmt_float = settings.FMT_FLOAT
        self.setSceneRect(QRectF(0, 0, settings.SCENE_SIZE[0], settings.SCENE_SIZE[1]))
        self.transform = QTransform()
        # Start every scene with a single 2D input node at index 0.
        self.nodes = {0: MultiLoadSerafin2DNode(0)}
        self.nodes[0].moveBy(50, 50)
        self.has_input = False
        self.inputs = {0: []}
        self.ordered_input_indices = [0]
        self.adj_list = {0: set()}
        for node in self.nodes.values():
            self.addItem(node)
        # Indices of point/polyline/polygon/reference loaders.
        self.auxiliary_input_nodes = []
    def reinit(self):
        """Reset the scene to its initial single-input-node state."""
        self.clear()
        self.nodes = {0: MultiLoadSerafin2DNode(0)}
        self.nodes[0].moveBy(50, 50)
        self.auxiliary_input_nodes = []
        self.has_input = False
        self.inputs = {0: []}
        self.ordered_input_indices = [0]
        self.adj_list = {0: set()}
        self.update()
    def add_node(self, node, x, y):
        """Register *node* at scene position (x, y) and classify input nodes."""
        self.addItem(node)
        self.nodes[node.index()] = node
        node.moveBy(x, y)
        self.adj_list[node.index()] = set()
        if node.category == 'Input/Output':
            if node.name() in ('Load 2D Polygons', 'Load 2D Open Polylines',
                               'Load 2D Points', 'Load Reference Serafin'):
                self.auxiliary_input_nodes.append(node.index())
            elif node.name() in ('Load Serafin 2D', 'Load Serafin 3D'):
                self.inputs[node.index()] = []
                self.ordered_input_indices.append(node.index())
    def mouseDoubleClickEvent(self, event):
        """Double-clicking a Serafin input node opens its configuration."""
        super().mouseDoubleClickEvent(event)
        target_item = self.itemAt(event.scenePos(), self.transform)
        if isinstance(target_item, Box):
            node = target_item.parentItem()
            if node.category == 'Input/Output' and node.name() in ('Load Serafin 2D', 'Load Serafin 3D'):
                self._handle_add_input(node)
    def save(self):
        """Yield the '|'-separated input-configuration lines of the project file."""
        if not self.has_input:
            return
        yield str(len(self.ordered_input_indices))
        for node_index in self.ordered_input_indices:
            paths, name, job_ids = self.inputs[node_index]
            nb_files = str(len(paths))
            line = [str(node_index), nb_files, name, '|'.join(paths), '|'.join(job_ids)]
            yield '|'.join(line)
    def load(self, filename):
        """Rebuild the scene from a project file.

        Returns True when the file was parsed (even if input files are now
        missing, in which case inputs are cleared), False on a parse error.
        """
        logger.debug('Loading project in MULTI: %s' % filename)
        self.clear()
        self.has_input = False
        self.inputs = {}
        self.nodes = {}
        self.adj_list = {}
        self.ordered_input_indices = []
        self.auxiliary_input_nodes = []
        try:
            with open(filename, 'r') as f:
                self.language, self.csv_separator = f.readline().rstrip().split('.')
                nb_nodes, nb_links = map(int, f.readline().split())
                # load nodes
                for _ in range(nb_nodes):
                    line = f.readline().rstrip().split('|')
                    category, name, index, x, y = line[:5]
                    if category == 'Visualization':  # ignore all visualization nodes
                        continue
                    index = int(index)
                    node = NODES[category][name](index)
                    node.load(line[5:])
                    self.add_node(node, float(x), float(y))
                # load edges
                for i in range(nb_links):
                    from_node_index, from_port_index, \
                        to_node_index, to_port_index = map(int, f.readline().rstrip().split('|'))
                    if to_node_index not in self.nodes:  # visualization nodes
                        continue
                    from_node = self.nodes[from_node_index]
                    to_node = self.nodes[to_node_index]
                    from_port = from_node.ports[from_port_index]
                    to_port = to_node.ports[to_port_index]
                    from_port.connect(to_port)
                    to_port.connect(from_port)
                    link = MultiLink(from_port, to_port)
                    link.setZValue(-1)
                    self.addItem(link)
                    self.adj_list[from_node_index].add(to_node_index)
                # mark nodes with input
                for input_index in self.inputs:
                    downstream_nodes = visit(self.adj_list, input_index)
                    for u in downstream_nodes:
                        self.nodes[u].mark(input_index)
                # remove orphan auxiliary nodes
                to_remove = []
                for u in self.auxiliary_input_nodes:
                    if not self.adj_list[u]:
                        to_remove.append(u)
                        del self.adj_list[u]
                        self.removeItem(self.nodes[u])
                        del self.nodes[u]
                self.auxiliary_input_nodes = [u for u in self.auxiliary_input_nodes if u not in to_remove]
                self.update()
                # update status table
                ordered_nodes = topological_ordering(self.adj_list)
                self.table.update_rows(self.nodes, [u for u in ordered_nodes if u not in self.auxiliary_input_nodes])
                QApplication.processEvents()
                # load input information
                next_line = f.readline()
                if next_line:
                    nb_inputs = int(next_line)
                    for i in range(nb_inputs):
                        line = f.readline()
                        split_line = line.rstrip().split('|')
                        node_index = int(split_line[0])
                        nb_files = int(split_line[1])
                        slf_name = split_line[2]
                        paths = split_line[3:3+nb_files]
                        job_ids = split_line[3+nb_files:]
                        # check if file exists:
                        for path in paths:
                            if not os.path.exists(os.path.join(path, slf_name)):
                                # A recorded input file is gone: clear every
                                # input but still report a successful parse.
                                for node_index in self.inputs:
                                    self.inputs[node_index] = []
                                self.has_input = False
                                self.update()
                                QApplication.processEvents()
                                return True
                        self.inputs[node_index] = [paths, slf_name, job_ids]
                        self._handle_load_input(node_index)
                        self.update()
                self.has_input = True
                QApplication.processEvents()
                return True
        except (IndexError, ValueError, KeyError) as e:
            logger.exception(e)
            logger.error("An exception occured while loading project in MULTI.")
            self.reinit()
            self.table.reinit()
            return False
    def _bifurcate(self, nodes):
        """Classify how the DFS path *nodes* meets a two-in-one-out operator.

        Returns (type, operator_index, nodes_to_ignore) where type is:
        0  path enters through the operator's second input,
        1  path enters through the operator's first input,
        2  both operator inputs come from the same parent,
        -2 no relevant operator found (or its first parent is a reference).
        """
        for i in range(len(nodes)-1):
            u_parent, u = nodes[i], nodes[i+1]
            # catch the first two-in-one-out operator node u
            if not self.nodes[u].two_in_one_out:
                continue
            # do no bifurcate if the first parent is reference
            if self.nodes[u].first_in_port.mother.parentItem().index() in self.auxiliary_input_nodes:
                return -2, -1, []
            # do not bifurcate if the two parents are the same
            if len(self.nodes[u].input_index) == 1:
                return 2, u, []
            # only bifurcate if the parent is the second-input of u
            if self.nodes[u].first_in_port.mother.parentItem().index() == u_parent:
                return 1, u, []
            # visit from u
            downstream_nodes = [v for v in visit(self.adj_list, u)]
            return 0, u, downstream_nodes
        return -2, -1, []
    def _handle_add_input(self, node):
        """Run the input node's configuration dialog and propagate the result."""
        success, options = node.configure(self.inputs[node.index()])
        if not success:
            return
        self.has_input = False
        old_options = self.inputs[node.index()]
        self.inputs[node.index()] = options
        job_ids = options[2]
        downstream_nodes = [u for u in visit(self.adj_list, node.index())]
        # if the current input node is second-input to a two-in-one-out operator node
        # all downstream nodes from that operator node do not receive input
        bifurcation_type, bifurcation_point, nodes_to_ignore = self._bifurcate(downstream_nodes)
        if bifurcation_type == 0:
            # Second input of the operator: the first input must already be
            # configured and the file counts must match.
            if self.nodes[bifurcation_point].expected_input[0] == 0:
                QMessageBox.critical(None, 'Error', 'Configure the first input node first!', QMessageBox.Ok)
                self.inputs[node.index()] = old_options
                node.state = MultiNode.NOT_CONFIGURED
                node.update()
                return
            if self.nodes[bifurcation_point].expected_input[0] != len(job_ids):
                QMessageBox.critical(None, 'Error', 'The numbers of input files are not equal!', QMessageBox.Ok)
                self.inputs[node.index()] = old_options
                node.state = MultiNode.NOT_CONFIGURED
                node.update()
                return
            # the actual downstream nodes are the one-in-one-out node between the input and the operator
            downstream_nodes = [u for u in downstream_nodes if u not in nodes_to_ignore]
            if node.index() not in self.table.input_columns:
                self.table.add_files(node.index(), job_ids, downstream_nodes)
            else:
                self.table.update_files(node.index(), job_ids)
            QApplication.processEvents()
            self.nodes[bifurcation_point].second_ids = self.table.input_columns[node.index()]
        elif bifurcation_type == 1:
            # First input of the operator: check count against the second
            # input if that one is already configured.
            if self.nodes[bifurcation_point].expected_input[1] != 0:
                if len(job_ids) != self.nodes[bifurcation_point].expected_input[1]:
                    QMessageBox.critical(None, 'Error', 'The numbers of input files are not equal!', QMessageBox.Ok)
                    self.inputs[node.index()] = old_options
                    node.state = MultiNode.NOT_CONFIGURED
                    node.update()
                    return
            if node.index() not in self.table.input_columns:
                self.table.add_files(node.index(), job_ids, downstream_nodes)
            else:
                self.table.update_files(node.index(), job_ids)
            QApplication.processEvents()
            self.nodes[bifurcation_point].first_ids = self.table.input_columns[node.index()]
        elif bifurcation_type == 2:
            # Same parent feeds both operator inputs; second id set is offset
            # by 1000 to keep the two streams distinct.
            u = self.nodes[bifurcation_point]
            self.nodes[u.second_in_port.mother.parentItem().index()].second_parent = True
            if node.index() not in self.table.input_columns:
                self.table.add_files(node.index(), job_ids, downstream_nodes)
            else:
                self.table.update_files(node.index(), job_ids)
            QApplication.processEvents()
            u.first_ids = self.table.input_columns[node.index()]
            u.second_ids = list(map(lambda x: x+1000, self.table.input_columns[node.index()]))
        else:
            if node.index() not in self.table.input_columns:
                self.table.add_files(node.index(), job_ids, downstream_nodes)
            else:
                self.table.update_files(node.index(), job_ids)
            QApplication.processEvents()
        for u in downstream_nodes:
            self.nodes[u].update_input(len(job_ids))
        if all(self.inputs.values()):
            self.has_input = True
            self.prepare_to_run()
    def _handle_load_input(self, node_index):
        """Propagate an input configuration restored by load() (no dialogs)."""
        options = self.inputs[node_index]
        job_ids = options[2]
        downstream_nodes = [u for u in visit(self.adj_list, node_index)]
        bifurcation_type, bifurcation_point, nodes_to_ignore = self._bifurcate(downstream_nodes)
        if bifurcation_type == 0:
            downstream_nodes = [u for u in downstream_nodes if u not in nodes_to_ignore]
            self.table.add_files(node_index, job_ids, downstream_nodes)
            self.nodes[bifurcation_point].second_ids = self.table.input_columns[node_index]
        elif bifurcation_type == 1:
            self.table.add_files(node_index, job_ids, downstream_nodes)
            self.nodes[bifurcation_point].first_ids = self.table.input_columns[node_index]
        elif bifurcation_type == 2:
            u = self.nodes[bifurcation_point]
            self.nodes[u.second_in_port.mother.parentItem().index()].second_parent = True
            self.table.add_files(node_index, job_ids, downstream_nodes)
            u.first_ids = self.table.input_columns[node_index]
            u.second_ids = list(map(lambda x: x+1000, self.table.input_columns[node_index]))
        else:
            self.table.add_files(node_index, job_ids, downstream_nodes)
        QApplication.processEvents()
        for u in downstream_nodes:
            self.nodes[u].update_input(len(job_ids))
        self.nodes[node_index].state = MultiNode.READY
    def all_configured(self):
        """Return True when every node in the scene has been configured."""
        for node in self.nodes.values():
            if node.state == MultiNode.NOT_CONFIGURED:
                return False
        return True
    def prepare_to_run(self):
        """Reset run-time counters on all nodes and the status table."""
        for node in self.nodes.values():
            node.state = MultiNode.READY
            node.nb_success = 0
            node.nb_fail = 0
            if node.two_in_one_out:
                node.pending_data = {}
        self.table.reset()
        self.update()
class MultiView(QGraphicsView):
    """Scrollable view onto a MultiScene, anchored at the top-left."""
    def __init__(self, parent, table):
        super().__init__(MultiScene(table))
        self.parent = parent
        self.setAlignment(Qt.AlignTop | Qt.AlignLeft)
        self.setAcceptDrops(True)
        self.current_node = None
        # Start roughly centred on the default node area.
        self.centerOn(QPoint(400, 300))
class MultiTable(QTableWidget):
    """Status table: one row per workflow node, one column per input file.

    Cell colours: yellow = pending (node is downstream of that input),
    grey = not concerned, green = success, red = failure.
    """
    def __init__(self):
        super().__init__()
        # status colours
        self.yellow = QColor(245, 255, 207, 255)
        self.green = QColor(180, 250, 165, 255)
        self.grey = QColor(211, 211, 211, 255)
        self.red = QColor(255, 160, 160, 255)
        self.setRowCount(1)
        self.setColumnCount(0)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setFocusPolicy(Qt.NoFocus)
        self.setSelectionMode(QAbstractItemView.NoSelection)
        self.setVerticalHeaderLabels(['Load Serafin 2D'])
        # row index <-> scene node index mappings
        self.row_to_node = {}
        self.node_to_row = {}
        # input node index -> list of column numbers holding its files
        self.input_columns = {}
        # input node index -> downstream node indices (cells shown in yellow)
        self.yellow_nodes = {}

    def reinit(self):
        """Reset the table to its initial single-row, zero-column state."""
        self.setRowCount(1)
        self.setVerticalHeaderLabels(['Load Serafin 2D'])
        self.setColumnCount(0)
        self.input_columns = {}
        self.yellow_nodes = {}

    def reset(self):
        """Repaint every cell back to pending (yellow) / not-concerned (grey)."""
        for node_index, columns in self.input_columns.items():
            yellow_nodes = self.yellow_nodes[node_index]
            for j in columns:
                for i in range(self.rowCount()):
                    if self.row_to_node[i] in yellow_nodes:
                        self.item(i, j).setBackground(self.yellow)
                    else:
                        self.item(i, j).setBackground(self.grey)
        QApplication.processEvents()

    def update_rows(self, nodes, ordered_nodes):
        """Rebuild the rows from the topologically ordered node list."""
        self.row_to_node = {i: ordered_nodes[i] for i in range(len(ordered_nodes))}
        self.node_to_row = {ordered_nodes[i]: i for i in range(len(ordered_nodes))}
        self.setRowCount(len(ordered_nodes))
        self.setColumnCount(0)
        self.setVerticalHeaderLabels([nodes[u].name() for u in ordered_nodes])
        self.input_columns = {}
        self.yellow_nodes = {}

    def add_files(self, node_index, new_ids, downstream_nodes):
        """Append one column per file of an input node; colour its cells."""
        self.input_columns[node_index] = []
        self.yellow_nodes[node_index] = downstream_nodes
        offset = self.columnCount()
        self.setColumnCount(offset + len(new_ids))
        new_labels = []
        for j in range(offset):
            new_labels.append(self.horizontalHeaderItem(j).text())
        new_labels.extend(new_ids)
        self.setHorizontalHeaderLabels(new_labels)
        for j in range(len(new_ids)):
            self.input_columns[node_index].append(offset+j)
            for i in range(self.rowCount()):
                item = QTableWidgetItem()
                self.setItem(i, offset+j, item)
                if self.row_to_node[i] in downstream_nodes:
                    self.item(i, offset+j).setBackground(self.yellow)
                else:
                    self.item(i, offset+j).setBackground(self.grey)

    def update_files(self, node_index, new_ids):
        """Rebuild all columns after one input node's file list changed."""
        new_labels = []
        old_input_nodes = [u for u in self.input_columns.keys() if u != node_index]
        old_input_nb = {}
        for input_node in old_input_nodes:
            old_input_nb[input_node] = len(self.input_columns[input_node])
            for j in self.input_columns[input_node]:
                new_labels.append(self.horizontalHeaderItem(j).text())
        new_labels.extend(new_ids)  # modified input nodes always at end of the table
        self.input_columns = {}  # all columns could be shuffled
        self.setColumnCount(len(new_labels))
        self.setHorizontalHeaderLabels(new_labels)
        # rebuild the whole table
        offset = 0
        for input_node in old_input_nodes:
            self.input_columns[input_node] = []
            for j in range(old_input_nb[input_node]):
                self.input_columns[input_node].append(offset+j)
                for i in range(self.rowCount()):
                    item = QTableWidgetItem()
                    self.setItem(i, offset+j, item)
                    # NOTE(review): other methods test 'self.row_to_node[i] in
                    # yellow_nodes'; comparing the raw row index 'i' here looks
                    # inconsistent -- confirm against intended behaviour.
                    if i in self.yellow_nodes[input_node]:
                        self.item(i, offset+j).setBackground(self.yellow)
                    else:
                        self.item(i, offset+j).setBackground(self.grey)
            offset += old_input_nb[input_node]
        self.input_columns[node_index] = []
        for j in range(len(new_ids)):
            self.input_columns[node_index].append(offset+j)
            for i in range(self.rowCount()):
                item = QTableWidgetItem()
                self.setItem(i, offset+j, item)
                if self.row_to_node[i] in self.yellow_nodes[node_index]:
                    self.item(i, offset+j).setBackground(self.yellow)
                else:
                    self.item(i, offset+j).setBackground(self.grey)

    def receive_result(self, success, node_id, fid):
        """Colour cell (node row, file column) green on success, red on failure."""
        if success:
            self.item(self.node_to_row[node_id], fid).setBackground(self.green)
        else:
            self.item(self.node_to_row[node_id], fid).setBackground(self.red)
class CmdMessage(QPlainTextEdit):
    """Message box that mirrors every appended line to the module logger
    (used when the widget is driven from the command line)."""
    def appendPlainText(self, message):
        super().appendPlainText(message)
        logger.info(message)
class MultiWidget(QWidget):
    """Top-level widget running a workflow over multiple input files.

    Layout: workflow scene on the left; status table and message log on the
    right. Tasks run in a pool of worker processes (worker.Workers); results
    are consumed one at a time by _listen(), which queues the children tasks.
    """
    def __init__(self, parent=None, project_path=None, ncsize=settings.NCSIZE):
        super().__init__()
        self.parent = parent
        self.table = MultiTable()
        self.view = MultiView(self, self.table)
        self.scene = self.view.scene()
        self.toolbar = QToolBar()
        self.save_act = QAction('Save\n(Ctrl+S)', self, triggered=self.save, shortcut='Ctrl+S')
        self.run_act = QAction('Run\n(F5)', self, triggered=self.run, shortcut='F5')
        self.init_toolbar()
        # command-line mode (project_path given): mirror messages to the logger
        if project_path is not None:
            self.message_box = CmdMessage()
        else:
            self.message_box = QPlainTextEdit()
        self.message_box.setReadOnly(True)
        # right panel with table and message_box
        right_panel = QSplitter(Qt.Vertical)
        right_panel.addWidget(self.table)
        right_panel.addWidget(self.message_box)
        right_panel.setHandleWidth(10)
        right_panel.setCollapsible(0, False)
        right_panel.setCollapsible(1, False)
        right_panel.setSizes([200, 200])
        # left panel
        left_panel = QWidget()
        layout = QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.view)
        layout.setContentsMargins(0, 0, 0, 0)
        left_panel.setLayout(layout)
        splitter = QSplitter(Qt.Horizontal)
        splitter.addWidget(left_panel)
        splitter.addWidget(right_panel)
        splitter.setHandleWidth(10)
        splitter.setCollapsible(0, False)
        splitter.setCollapsible(1, False)
        splitter.setSizes([500, 300])
        mainLayout = QHBoxLayout()
        mainLayout.addWidget(splitter)
        self.setLayout(mainLayout)
        self.ncsize = ncsize
        self.worker = worker.Workers(self.ncsize)
        # command-line mode: load the project and run it immediately
        if project_path is not None:
            self.scene.load(project_path)
            self.run()

    def init_toolbar(self):
        """Populate the toolbar with fixed-size buttons for Save and Run."""
        for act in [self.save_act, self.run_act]:
            button = QToolButton(self)
            button.setFixedWidth(100)
            button.setMinimumHeight(30)
            button.setDefaultAction(act)
            button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
            self.toolbar.addWidget(button)
            self.toolbar.addSeparator()

    def save(self):
        """Save the project (delegated to the parent window) after checking inputs."""
        if not self.scene.has_input:
            QMessageBox.critical(None, 'Error', 'Configure all input nodes before saving.', QMessageBox.Ok)
            return
        if self.parent: self.parent.save()
        QMessageBox.information(None, 'Success', 'Project saved.', QMessageBox.Ok)

    def run(self):
        """Run the workflow: auxiliary tasks first, then one task per input
        file, then keep listening for results until no task is outstanding."""
        logger.debug('Start running project')
        start_time = time()
        if not self.scene.all_configured():
            QMessageBox.critical(None, 'Error', 'Configure all nodes first!', QMessageBox.Ok)
            return
        self.scene.prepare_to_run()
        if self.parent: self.parent.save()
        self.setEnabled(False)
        csv_separator = self.scene.csv_separator
        fmt_float = settings.FMT_FLOAT
        # first get auxiliary tasks done
        success = self._prepare_auxiliary_tasks()
        if not success:
            self.worker.stop()
            self.message_box.appendPlainText('Done!')
            self.setEnabled(True)
            # a stopped pool cannot be restarted: build a fresh one for next run
            self.worker = worker.Workers(self.ncsize)
            return
        # prepare slf input tasks
        nb_tasks = self._prepare_input_tasks()
        while not self.worker.stopped:
            nb_tasks = self._listen(nb_tasks, csv_separator, fmt_float)
            if nb_tasks == 0:
                self.worker.stop()
        self.message_box.appendPlainText('Done!')
        self.setEnabled(True)
        self.worker = worker.Workers(self.ncsize)
        logger.debug('Execution time %f s' % (time() - start_time))

    def _prepare_auxiliary_tasks(self):
        """Run the auxiliary inputs synchronously; return True if all succeeded."""
        # auxiliary input tasks for N-1 type of double input nodes
        aux_tasks = []
        for node_id in self.scene.auxiliary_input_nodes:
            fun = worker.FUNCTIONS[self.scene.nodes[node_id].name()]
            if self.scene.nodes[node_id].name() == 'Load Reference Serafin':
                aux_tasks.append((fun, (node_id, self.scene.nodes[node_id].options[0], self.scene.language)))
            else:
                aux_tasks.append((fun, (node_id, self.scene.nodes[node_id].options[0])))
        all_success = True
        if aux_tasks:
            self.worker.add_tasks(aux_tasks)
            self.worker.start()
            for i in range(len(aux_tasks)):
                success, node_id, data, message = self.worker.get_result()
                self.message_box.appendPlainText(message)
                if not success:
                    self.scene.nodes[node_id].state = MultiNode.FAIL
                    all_success = False
                    continue
                self.scene.nodes[node_id].state = MultiNode.SUCCESS
                # using the fact that auxiliary input nodes are always directly connected to double input nodes
                next_nodes = self.scene.adj_list[node_id]
                for next_node_id in next_nodes:
                    next_node = self.scene.nodes[next_node_id]
                    next_node.set_auxiliary_data(data)
        return all_success

    def _prepare_input_tasks(self):
        """Queue one load task per (input node, file); return the number queued."""
        slf_tasks = []
        for node_id in self.scene.ordered_input_indices:
            fun = worker.FUNCTIONS[self.scene.nodes[node_id].name()]
            paths, name, job_ids = self.view.scene().inputs[node_id]
            for path, job_id, fid in zip(paths, job_ids, self.table.input_columns[node_id]):
                slf_tasks.append((fun, (node_id, fid, os.path.join(path, name),
                                        self.scene.language, job_id)))
        self.worker.add_tasks(slf_tasks)
        if not self.worker.started:
            self.worker.start()
        return len(slf_tasks)

    def _get_double_input_task(self, fun, node, node_id, fid, data):
        """Pair the two inputs of a two-in-one-out node.

        Returns True when both halves are available and a task was queued;
        otherwise buffers 'data' in node.pending_data and returns False.
        """
        if node.has_auxiliary:
            self.worker.add_task((fun, (node_id, fid, node.auxiliary_data, data, True)))
            return True
        if fid in node.first_ids:
            pair_index = node.first_ids.index(fid)
            second_id = node.second_ids[pair_index]
            if second_id in node.pending_data:
                self.worker.add_task((fun, (node_id, fid,
                                            data, node.pending_data[second_id], False)))
                return True
            else:
                node.pending_data[fid] = data
                return False
        else:
            pair_index = node.second_ids.index(fid)
            first_id = node.first_ids[pair_index]
            if first_id in node.pending_data:
                self.worker.add_task((fun, (node_id, first_id,
                                            node.pending_data[first_id], data, False)))
                return True
            else:
                node.pending_data[fid] = data
                return False

    def _listen(self, nb_tasks, csv_separator, fmt_float):
        """Consume one finished task, queue its children tasks, update the UI.

        Returns the updated number of outstanding tasks.
        """
        # get one task result
        success, node_id, fid, data, message = self.worker.get_result()
        nb_tasks -= 1
        self.message_box.appendPlainText(message)
        current_node = self.scene.nodes[node_id]
        self.table.receive_result(success, node_id, fid)
        # enqueue tasks from child nodes
        if success:
            current_node.nb_success += 1
            next_nodes = self.scene.adj_list[node_id]
            for next_node_id in next_nodes:
                next_node = self.scene.nodes[next_node_id]
                fun = worker.FUNCTIONS[next_node.name()]
                if next_node.double_input:
                    self.worker.add_task((fun, (next_node_id, fid, data, next_node.auxiliary_data,
                                                next_node.options, csv_separator, fmt_float)))
                    nb_tasks += 1
                elif next_node.two_in_one_out:
                    # files coming from the second parent use the +1000 id offset
                    if current_node.second_parent:
                        new_task_available = self._get_double_input_task(fun, next_node, next_node_id, 1000+fid, data)
                    else:
                        new_task_available = self._get_double_input_task(fun, next_node, next_node_id, fid, data)
                    if new_task_available:
                        nb_tasks += 1
                else:
                    self.worker.add_task((fun, (next_node_id, fid, data, next_node.options)))
                    nb_tasks += 1
        else:
            current_node.nb_fail += 1
        # change box color
        if current_node.nb_success + current_node.nb_fail == current_node.nb_files():
            if current_node.nb_fail == 0:
                current_node.state = MultiNode.SUCCESS
            elif current_node.nb_success == 0:
                current_node.state = MultiNode.FAIL
            else:
                current_node.state = MultiNode.PARTIAL_FAIL
        current_node.update()
        QApplication.processEvents()
        return nb_tasks
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--workspace', help='workflow project file')
    parser.add_argument('-v', '--verbose', help='output verbosity', action='store_true')
    parser.add_argument('--ncsize', help='number of processors (overwrites default configuration)', type=int,
                        default=settings.NCSIZE)
    # bug fix: argv was parsed twice (first result discarded) -- parse once
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    # bug fix: the existing-instance check was immediately overwritten by an
    # unconditional QApplication(...); only create one if none is running yet
    QApp = QCoreApplication.instance()
    if QApp is None:
        QApp = QApplication(sys.argv)

    cmd = MultiWidget(project_path=args.workspace, ncsize=args.ncsize)
|
CNR-Engineering/PyTelTools
|
pyteltools/workflow/multi_gui.py
|
Python
|
gpl-3.0
| 32,948
|
[
"VTK",
"VisIt"
] |
d03b8389e665ba129cb73fe7fa100cc95f9c79fdf2fcb9e6d7ad8eccee5fab32
|
from ase.calculators.interface import Calculator
import os, sys
import shutil, shlex
from subprocess import Popen, PIPE
from tempfile import mkdtemp, NamedTemporaryFile
import numpy as np
import unitconversion
from bonds import Bonds
from ffdata import FFData, SequenceType, ImproperType
from multiasecalc.lammps.io.lammps import read_lammps_dump
import warnings
try:
    from itertools import combinations, permutations
# NOTE(review): bare 'except:' is overly broad; 'except ImportError:' is meant
except:
    # itertools.combinations and itertools.permutations are not in python 2.4
    # (pure-Python fallbacks equivalent to the itertools documentation recipes)
    def permutations(iterable, r=None):
        # permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
        # permutations(range(3)) --> 012 021 102 120 201 210
        pool = tuple(iterable)
        n = len(pool)
        if r is None: r = n
        if r > n:
            return
        indices = range(n)
        cycles = range(n, n-r, -1)
        yield tuple(pool[i] for i in indices[:r])
        while n:
            for i in reversed(range(r)):
                cycles[i] -= 1
                if cycles[i] == 0:
                    # rotate the tail and reset this cycle counter
                    indices[i:] = indices[i+1:] + indices[i:i+1]
                    cycles[i] = n - i
                else:
                    j = cycles[i]
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return

    def combinations(iterable, r):
        # combinations are the sorted subsequences of the permutations
        pool = tuple(iterable)
        n = len(pool)
        for indices in permutations(range(n), r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)
# NOTE(review): '__ALL__' is probably a typo for '__all__'; as written it has
# no effect on 'from module import *' -- confirm before renaming (callers may
# reference it).
__ALL__ = ['LAMMPSBase']

# "End mark" used to indicate that the calculation is done
CALCULATION_END_MARK = '__end_of_ase_invoked_calculation__'
class LAMMPSParameters:
    """Bag of LAMMPS run parameters, populated from keyword arguments.

    Unspecified style/settings options default to None (meaning "omit from
    the input script"); list-valued options default to fresh empty lists.
    """

    def __init__(self, **pars):
        get = pars.get
        self.atom_style = get('atom_style', 'full')
        # options whose absence means "do not emit the command"
        for option in ('units', 'neighbor', 'newton', 'pair_style',
                       'bond_style', 'angle_style', 'dihedral_style',
                       'improper_style', 'kspace_style', 'special_bonds',
                       'pair_modify'):
            setattr(self, option, get(option))
        # Pair coeffs to be specified in the input file
        self.pair_coeffs = get('pair_coeffs', [])
        self.extra_cmds = get('extra_cmds', [])
        # Atoms groups: tuples (group_id, list_of_indices)
        self.groups = get('groups', [])
class LAMMPSData:
    """Holds the tables and topology objects written to a LAMMPS data file.

    All attributes start as (and are reset to) None by clear().
    """

    # attribute names managed by clear()
    _FIELDS = ('tables', 'atom_types', 'bonds', 'angles', 'dihedrals',
               'impropers', 'atom_typeorder', 'bond_typeorder',
               'angle_typeorder', 'dihedral_typeorder', 'improper_typeorder')

    def __init__(self):
        self.clear()

    def clear(self):
        """Reset every managed attribute to None."""
        for field in self._FIELDS:
            setattr(self, field, None)
class LAMMPSBase(Calculator):
    """Base class for ASE calculators that drive an external LAMMPS process.

    Subclasses supply the force-field specifics (atom_types, set_charges,
    ff_data, prepare_calculation); this class handles data-file generation,
    process control, trajectory readback and unit conversion.
    Note: written for Python 2 (print statements, octal literal 0755).
    """

    def __init__(self, label='lammps', tmp_dir=None, parameters={},
                 update_charges=False, lammps_command=None, force_triclinic=False,
                 keep_alive=False, debug=False,
                 output_hack=False):
        """The LAMMPS calculators object """
        # NOTE(review): 'parameters={}' is a mutable default; it is only read
        # (unpacked into LAMMPSParameters), so it is harmless as written.
        self.label = label
        self.parameters = LAMMPSParameters(**parameters)
        self.data = LAMMPSData()
        self.ff_data = None
        self.forces = None
        self.atoms = None
        self.atoms_after_last_calc = None
        self.update_charges = update_charges
        self.force_triclinic = force_triclinic
        self.keep_alive = keep_alive
        self.debug = debug
        self.lammps_process = LammpsProcess(log=debug, lammps_command=lammps_command, output_hack=output_hack)
        #self.lammps_process = LammpsLibrary(log=debug)
        self.calls = 0
        # thermo columns requested from LAMMPS
        self._custom_thermo_args = [
            'step', 'temp', 'press', 'cpu',
            'pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz',
            'ke', 'pe', 'etotal',
            'vol', 'lx', 'ly', 'lz', 'atoms']
        # per-atom fields written to the dump file
        self._dump_fields = [
            'id', 'type', 'x', 'y', 'z', 'vx',
            'vy', 'vz', 'fx', 'fy', 'fz', 'q']
        if tmp_dir is None:
            self.tmp_dir = mkdtemp(prefix='LAMMPS-')
        else:
            # If tmp_dir is pointing somewhere, don't remove stuff!
            self.debug = True
            self.tmp_dir=os.path.realpath(tmp_dir)
            if not os.path.isdir(self.tmp_dir):
                os.mkdir(self.tmp_dir, 0755)
        if debug:
            print 'LAMMPS (label: %s) running at %s' % (self.label, self.tmp_dir)

    def __del__(self):
        # temporary files are kept for inspection in debug mode
        if not self.debug:
            shutil.rmtree(self.tmp_dir)

    def atom_types(self, atoms):
        """ Implement this method in subclasses"""
        raise NotImplementedError()

    def set_charges(self, atoms, atom_types):
        """ Implement this method in subclasses if needed"""
        pass

    def prepare_calculation(self, atoms, data):
        """ Implement this method in subclasses if needed"""
        pass

    def get_potential_energy(self, atoms):
        """Return the potential energy in ASE units (recomputes if needed)."""
        self.update(atoms)
        energy = self.lammps_process.get_thermo('pe')
        return self.to_ase_units(energy, 'energy')

    def get_forces(self, atoms):
        """Return forces in ASE units (recomputes if needed)."""
        self.update(atoms)
        return self.forces

    def get_stress(self, atoms):
        """Return the stress components in the order xx, yy, zz, yz, xz, xy."""
        self.update(atoms)
        thermo = self.lammps_process.get_thermo
        stress = [thermo(key) for key in ('pxx','pyy','pzz', 'pyz','pxz','pxy')]
        return self.to_ase_units(np.array(stress), 'stress')

    def calculation_required(self, atoms, quantities=None):
        """True when atoms or their charges differ from the last calculation."""
        return atoms != self.atoms_after_last_calc or \
            (atoms.get_charges() != self.atoms_after_last_calc.get_charges()).any()

    def update(self, atoms):
        """Run a single-point calculation unless cached results are valid."""
        if not self.calculation_required(atoms): return
        self.setup_calculation(atoms)
        self.evaluate_forces()
        self.close_calculation()

    def minimize(self, atoms, etol=0, ftol=0, maxeval=100000, maxiter=100000000, min_style=None, relax_cell=False):
        """Run a LAMMPS energy minimization, optionally relaxing the cell.

        At least one of etol/ftol must be non-zero; ftol is given in ASE
        force units and converted here. Atom positions are updated in place.
        """
        if etol == 0 and ftol == 0:
            raise RuntimeError('Specify at least one tolerance value!')
        ftol = self.from_ase_units(ftol, 'force')
        minimize_params = '%g %g %i %i' % (etol, ftol, maxeval, maxiter)
        self.setup_calculation(atoms)
        self.evaluate_forces()
        # only dump the final configuration
        self.set_dumpfreq(999999)
        f = self.lammps_process
        if relax_cell:
            f.write('fix relax_cell all box/relax tri 0.0 nreset 20\n')
        if min_style:
            f.write('min_style %s\n' % min_style)
        f.write('minimize %s\n' % minimize_params)
        if relax_cell:
            f.write('unfix relax_cell\n')
        f.write('print "%s"\n' % CALCULATION_END_MARK)
        f.flush()
        self.lammps_process.read_lammps_output()
        self.read_lammps_trj(update_positions=True)
        self.close_calculation()

    def molecular_dynamics(self, atoms, timestep, fix, step_iterator, update_cell, total_steps, constraints):
        """Run MD: one LAMMPS 'run' per chunk yielded by step_iterator.

        timestep is given in ASE time units; 'fix' is the LAMMPS fix command
        arguments driving the dynamics.
        """
        fix = fix  # NOTE(review): no-op self-assignment, probably leftover
        timestep = str(self.from_ase_units(timestep, 'time'))
        self.setup_calculation(atoms)
        self.evaluate_forces()
        cur_step = 0
        f = self.lammps_process
        f.write('fix mdfix %s\n' % fix)
        f.write('timestep %s\n' % timestep)
        for c in constraints:
            for cmd in c.get_commands(self.atoms):
                f.write(cmd + '\n')
        for nsteps in step_iterator:
            # dump only at the end of this chunk
            self.set_dumpfreq(cur_step+nsteps)
            if total_steps:
                f.write('run %s start 0 stop %s\n' % (nsteps, total_steps))
            else:
                f.write('run %s\n' % nsteps)
            f.write('print "%s"\n' % CALCULATION_END_MARK)
            f.flush()
            self.lammps_process.read_lammps_output()
            self.read_lammps_trj(update_positions=True, update_cell=update_cell)
            cur_step += nsteps
        self.close_calculation()

    def evaluate_forces(self):
        """Run zero MD steps so LAMMPS computes forces for current positions."""
        self.set_dumpfreq(1)
        f = self.lammps_process
        f.write('run 0\n')
        f.write('print "%s"\n' % CALCULATION_END_MARK)
        f.flush()
        self.lammps_process.read_lammps_output()
        self.read_lammps_trj(update_positions=False)

    def setup_calculation(self, atoms):
        """Start LAMMPS if needed and write input/data files for 'atoms'."""
        filelabel = self.prepare_lammps_io()
        if not self.lammps_process.running():
            self.lammps_process.start(self.tmp_dir, filelabel)
        if np.all(atoms.pbc == False):
            # Make sure the atoms are inside the cell
            inv_cell = np.linalg.inv(atoms.cell)
            frac_positions = np.dot(inv_cell, atoms.positions.T)
            if np.any(frac_positions < 0) or np.any(frac_positions > 1):
                atoms.center(vacuum=1)
        self.atoms = atoms
        self.prism = Prism(self.atoms.cell)
        self.prepare_data()
        self.prepare_calculation(self.atoms, self.data)
        self.write_lammps_input()
        if self.debug: print 'Calculation initialized.'
        self.calls += 1

    def close_calculation(self):
        """Terminate LAMMPS (unless keep_alive) and close temporary files."""
        if not self.keep_alive:
            self.lammps_process.terminate()
        exitcode = self.lammps_process.poll()
        if exitcode and exitcode != 0:
            raise RuntimeError('LAMMPS exited in %s with exit code: %d.' %\
                (self.tmp_dir, exitcode))
        self.lammps_trj_file.close()
        self.lammps_inputdata_file.close()
        if self.debug == True: self.lammps_process.close_logs()

    def prepare_lammps_io(self):
        """Open the trajectory and data files for this run; return the label."""
        label = '%s%06d' % (self.label, self.calls)
        # In python 2.4 the delete kwarg doesn't exist
        #args = dict(dir=self.tmp_dir, delete=(not self.debug))
        #self.lammps_trj_file = NamedTemporaryFile(mode='r', prefix='trj_'+label, **args)
        #self.lammps_inputdata_file = NamedTemporaryFile(prefix='data_'+label, **args)
        if self.debug:
            # create an empty trajectory file, then reopen it for reading
            self.lammps_trj_file = open(os.path.join(self.tmp_dir, 'trj_'+label), 'w')
            self.lammps_trj_file.close()
            self.lammps_trj_file = open(self.lammps_trj_file.name)
            self.lammps_inputdata_file = open(os.path.join(self.tmp_dir, 'data_'+label), 'w')
        else:
            self.lammps_trj_file = NamedTemporaryFile(mode='r', prefix='trj_'+label, dir=self.tmp_dir)
            self.lammps_inputdata_file = NamedTemporaryFile(prefix='data_'+label, dir=self.tmp_dir)
        return label

    def prepare_data(self):
        """ Prepare self.data for write_lammps_data() using self.ff_data """
        atoms = self.atoms
        tables = []
        ff_data = self.ff_data
        # small helpers for progress output in debug mode
        def status_message(msg):
            if not self.debug: return
            print msg,
            sys.stdout.flush()
        def status_done():
            if not self.debug: return
            print 'Done.'
        if not 'bonds' in atoms.info:
            status_message('Detecting bonds...')
            atoms.info['bonds'] = Bonds(atoms, autodetect=True)
            status_done()
        status_message('Detecting atom types...')
        atom_types = self.atom_types(atoms)
        status_done()
        atom_typeorder = list(set(atom_types))
        atom_actualtypes = [ff_data.get_actual_type('atom', tp) for tp in atom_types]
        atom_actualtype_order = [ff_data.get_actual_type('atom', tp) for tp in atom_typeorder]
        # match detected bonded objects against the force-field database;
        # objects with no parameters are dropped (and reported in debug mode)
        def identify_objects(objects, group):
            result = []
            discarded_objects = set()
            for indices in objects:
                if group != 'improper':
                    type = SequenceType([atom_types[i] for i in indices])
                else:
                    a, c, b, d = (atom_types[ind] for ind in indices)
                    type = ImproperType(central_type=a, other_types=(c,b,d))
                actual_indices, actual_type = ff_data.find(group, indices, type)
                if actual_indices == None:
                    discarded_objects.add(actual_type)
                    continue
                if group == 'improper' and ff_data.class2:
                    # class2 force fields put the central atom second
                    actual_indices = [actual_indices[1], actual_indices[0], actual_indices[2], actual_indices[3]]
                result.append(dict(indices=actual_indices, type=actual_type))
            if self.debug:
                for tp in discarded_objects:
                    print 'No parameters for %s. Skipping.' % tp
            return result
        b = atoms.info['bonds']
        parameters = self.parameters
        if parameters.bond_style:
            bonds = identify_objects(b, 'bond')
        else: bonds = []
        if parameters.angle_style:
            status_message('Detecting angles...')
            angles = identify_objects(b.find_angles(), 'angle')
            status_done()
        else: angles = []
        if parameters.dihedral_style:
            status_message('Detecting dihedrals...')
            dihedrals = identify_objects(b.find_dihedrals(), 'dihedral')
            status_done()
        else: dihedrals = []
        if parameters.improper_style:
            status_message('Detecting impropers...')
            impropers = identify_objects(b.find_impropers(), 'improper')
            status_done()
        else: impropers = []
        # Coeffs
        def add_coeff_tables(param_group, objects, typeorder=None, warn_missing=True):
            if not objects: return
            if typeorder:
                used_types = typeorder
            else:
                used_types = set(object['type'] for object in objects)
            available_tables = ff_data.available_tables(param_group)
            new_tables = {}
            for type in used_types:
                params = ff_data.get_params(param_group, type)
                for title, ncols in available_tables:
                    try:
                        values = params[title]
                    except KeyError:
                        if warn_missing: print 'No %s for %s!' % (title, type)
                        values = [0]*ncols
                    table = new_tables.setdefault(title, [])
                    if self.debug:
                        # annotate each row with its type name
                        comment = ' # %s' % type
                        values = values + [comment]
                    table.append(values)
            tables.extend(new_tables.items())
            return list(used_types)
        # Add masses to ff_data
        masses = dict(zip(atom_actualtypes, self.atoms.get_masses()))
        for type in atom_actualtype_order:
            ff_data.add('atom', type, 'Masses', [masses[type]])
        add_coeff_tables('atom', atom_actualtypes, atom_actualtype_order)
        bond_typeorder = add_coeff_tables('bond', bonds, warn_missing=self.debug)
        angle_typeorder = add_coeff_tables('angle', angles, warn_missing=self.debug)
        dihedral_typeorder = add_coeff_tables('dihedral', dihedrals, warn_missing=self.debug)
        improper_typeorder = add_coeff_tables('improper', impropers, warn_missing=self.debug)
        # Atoms
        self.set_charges(atoms, atom_types)
        atom_typeids = [atom_actualtype_order.index(at)+1 for at in atom_actualtypes]
        charges = self.atoms.get_charges()
        positions = self.prism.vector_to_lammps(self.atoms.positions)
        positions = self.from_ase_units(positions, 'distance')
        columns = [atom_typeids, charges, positions[:,0], positions[:,1], positions[:,2]]
        if self.debug:
            comments = [' # %s' % tp for tp in atom_types]
            columns += [comments]
        if self.parameters.atom_style == 'full':
            # 'full' style needs a molecule-id column (all atoms in molecule 1)
            columns.insert(0, ['1']*len(self.atoms))
        elif not self.parameters.atom_style == 'charge':
            raise RuntimeError('Unsupported atom_style: %s' % self.parameters.atom_style)
        tables.append(('Atoms', zip(*columns)))
        # Bonds, Angles, etc.
        def add_object_table(title, objects, typeorder):
            if not objects or not typeorder: return
            table = []
            for obj in objects:
                typeid = typeorder.index(obj['type'])+1
                # LAMMPS atom ids are 1-based
                atoms = [idx+1 for idx in obj['indices']]
                values = [typeid] + atoms
                if self.debug:
                    comment = ' # %s' % obj['type']
                    values += [comment]
                table.append(values)
            tables.append((title, table))
        add_object_table('Bonds', bonds, bond_typeorder)
        add_object_table('Angles', angles, angle_typeorder)
        add_object_table('Dihedrals', dihedrals, dihedral_typeorder)
        add_object_table('Impropers', impropers, improper_typeorder)
        if self.atoms.has('momenta'):
            vel = self.prism.vector_to_lammps(self.atoms.get_velocities())
            lammps_velocities = self.from_ase_units(vel, 'velocity')
            tables.append(('Velocities', lammps_velocities))
        data = self.data
        data.tables = tables
        data.atom_types = atom_types
        data.bonds = bonds
        data.angles = angles
        data.dihedrals = dihedrals
        data.impropers = impropers
        data.atom_typeorder = atom_typeorder
        data.bond_typeorder = bond_typeorder
        data.angle_typeorder = angle_typeorder
        data.dihedral_typeorder = dihedral_typeorder
        data.improper_typeorder = improper_typeorder

    def write_lammps_input(self):
        """Write LAMMPS parameters and input data """
        f = self.lammps_process
        parameters = self.parameters
        self.write_lammps_data()
        f.write('# (written by ASE)\n')
        f.write('clear\n')
        f.write('atom_style %s \n' % parameters.atom_style)
        pbc = self.atoms.get_pbc()
        f.write('units %s \n' % parameters.units)
        # per direction: 'p' if periodic, 'm' otherwise
        f.write('boundary %s %s %s \n' % tuple('mp'[int(x)] for x in pbc))
        if parameters.neighbor:
            f.write('neighbor %s \n' % (parameters.neighbor))
        if parameters.newton:
            f.write('newton %s \n' % (parameters.newton))
        # Write interaction stuff
        f.write('\n### interactions \n')
        if parameters.pair_style:
            f.write('pair_style %s \n' % parameters.pair_style)
        if parameters.bond_style and self.data.bonds:
            f.write('bond_style %s \n' % parameters.bond_style)
        if parameters.angle_style and self.data.angles:
            f.write('angle_style %s \n' % parameters.angle_style)
        if parameters.dihedral_style and self.data.dihedrals:
            f.write('dihedral_style %s \n' % parameters.dihedral_style)
        if parameters.improper_style and self.data.impropers:
            f.write('improper_style %s \n' % parameters.improper_style)
        if parameters.kspace_style:
            f.write('kspace_style %s \n' % parameters.kspace_style)
        if parameters.special_bonds:
            f.write('special_bonds %s \n' % parameters.special_bonds)
        if parameters.pair_modify:
            f.write('pair_modify %s \n' % parameters.pair_modify)
        f.write('\n### read data \n')
        f.write('read_data %s\n' % self.lammps_inputdata_file.name)
        # Extra pair coeffs
        for line in parameters.pair_coeffs:
            f.write('pair_coeff %s \n' % line)
        # Create groups
        for group_id, indices in parameters.groups:
            indices_str = ' '.join([str(i+1) for i in indices])
            f.write('group %s id %s\n' % (group_id, indices_str))
        for cmd in parameters.extra_cmds:
            f.write(cmd + '\n')
        f.write('\nthermo_style custom %s\n' % (' '.join(self._custom_thermo_args)))
        f.write('thermo 0\n')
        f.write('\ndump dump_all all custom ' +
                '1 %s %s\n' % (self.lammps_trj_file.name, ' '.join(self._dump_fields)) )

    def set_dumpfreq(self, freq):
        # dump a trajectory frame every 'freq' steps
        self.lammps_process.write('dump_modify dump_all every %s\n' % freq)

    def write_lammps_data(self):
        """Write system configuration and force field parameters to file to be read
        with read_data by LAMMPS."""
        f = self.lammps_inputdata_file
        data = self.data
        f.write(f.name + ' (written by ASE) \n\n')
        # header: object counts
        f.write('%d \t atoms \n' % len(data.atom_types))
        if data.bonds: f.write('%d \t bonds \n' % len(data.bonds))
        if data.angles: f.write('%d \t angles \n' % len(data.angles))
        if data.dihedrals: f.write('%d \t dihedrals \n' % len(data.dihedrals))
        if data.impropers: f.write('%d \t impropers \n' % len(data.impropers))
        # header: type counts
        f.write('%d atom types\n' % len(data.atom_typeorder))
        if data.bonds: f.write('%d bond types\n' % len(data.bond_typeorder))
        if data.angles: f.write('%d angle types\n' % len(data.angle_typeorder))
        if data.dihedrals: f.write('%d dihedral types\n' % len(data.dihedral_typeorder))
        if data.impropers: f.write('%d improper types\n' % len(data.improper_typeorder))
        # simulation box (triclinic tilt factors only when needed)
        xhi, yhi, zhi, xy, xz, yz = self.prism.get_lammps_prism()
        f.write('0.0 %f xlo xhi\n' % xhi)
        f.write('0.0 %f ylo yhi\n' % yhi)
        f.write('0.0 %f zlo zhi\n' % zhi)
        if self.force_triclinic or self.prism.is_skewed():
            f.write('%f %f %f xy xz yz\n' % (xy, xz, yz))
        f.write('\n\n')
        # body: all prepared tables, rows numbered from 1
        for title, table in data.tables:
            if len(table) == 0: continue
            f.write('%s \n\n' % title)
            for index, row in enumerate(table):
                f.write(('%d'+' %s'*len(row) +'\n') % ((index+1,) + tuple(row)))
            f.write('\n\n')
        f.flush()

    def read_lammps_trj(self, update_positions=False, update_cell=False):
        """Read the dump file: forces always; positions/velocities/cell on request."""
        dump = read_lammps_dump(self.lammps_trj_file)
        rotate = self.prism.vector_to_ase
        self.forces = rotate(self.to_ase_units(dump.info['forces'], 'force'))
        if update_positions:
            dump.positions -= dump.info['celldisp']
            self.atoms.positions = rotate(self.to_ase_units(dump.positions, 'distance'))
            self.atoms.set_velocities(rotate(self.to_ase_units(dump.get_velocities(), 'velocity')))
            if np.isnan(self.atoms.positions).any():
                raise RuntimeError('NaN detected in atomic coordinates!')
            if update_cell:
                self.atoms.set_cell(self.prism.vector_to_ase(dump.cell))
        if self.update_charges:
            self.atoms.set_charges(dump.get_charges())
        # cache the configuration so calculation_required() can compare
        self.atoms_after_last_calc = self.atoms.copy()

    def to_ase_units(self, value, quantity):
        """Convert 'value' from the configured LAMMPS unit system to ASE units."""
        return unitconversion.convert(value, quantity, self.parameters.units, 'ASE')

    def from_ase_units(self, value, quantity):
        """Convert 'value' from ASE units to the configured LAMMPS unit system."""
        return unitconversion.convert(value, quantity, 'ASE', self.parameters.units)
from lammpspython import lammps
class LammpsLibrary:
    """Alternative backend driving LAMMPS through its Python library
    interface instead of a subprocess (API-compatible with LammpsProcess)."""
    def __init__(self, log=False):
        self.lammps = None
        self.inlog = None
        self.log = log
    def start(self, tmp_dir, filelabel=''):
        """(Re)create the lammps object; optionally open an input log file."""
        if self.lammps: self.lammps.close()
        self.lammps = lammps.lammps(cmdargs=['-screen', 'none'])
        if self.log == True:
            # Save LAMMPS input
            # in python 2.4 the delete=False option doesn't exist
            #self.inlog = NamedTemporaryFile(prefix='in_'+filelabel, dir=tmp_dir, delete=False)
            self.inlog = open(os.path.join(tmp_dir, 'in_'+filelabel), 'w')
    def running(self):
        return self.lammps != None
    def poll(self):
        # no subprocess, hence no exit code
        return None
    def terminate(self):
        pass
    def write(self, command):
        """Execute one LAMMPS command (mirrored to the input log if enabled)."""
        if self.inlog: self.inlog.write(command)
        self.lammps.command(command)
    def flush(self): pass
    def close_logs(self):
        if self.inlog: self.inlog.close()
        self.inlog = None
    def get_thermo(self, key):
        """Return a thermo value; stress components are not implemented yet
        (the value is printed and None is returned)."""
        stress_components = ['pxx','pyy','pzz', 'pyz','pxz','pxy']
        if key in stress_components:
            stress = self.lammps.extract_compute('thermo_%s' % key, 0, 2)
            print stress[0]
            #return stress[stress_components.index(key)]
            return None
        return self.lammps.extract_compute('thermo_%s' % key, 0, 0)
    def read_lammps_output(self): pass
class LammpsProcess:
""" A class to handle the lammps process and read thermo output. There are
sometimes errors related to the communication with the process and it
is advisable to restart lammps after every calculation.
"""
def __init__(self, log=False, lammps_command=None,
             output_hack=False):
    """Set up bookkeeping; the actual process is spawned by start()."""
    self.inlog = None
    self.outlog = None
    self.proc = None
    self.log = log
    self.lammps_command = lammps_command
    # history of parsed thermo rows (dicts), most recent last
    self.thermo_output = []
    self.output_hack=output_hack
def __del__(self):
    # make sure the child process does not outlive this object
    if self.running(): self.proc.terminate()
def invoke_lammps(self, tmp_dir, filelabel):
    """Spawn the LAMMPS executable in tmp_dir and return the Popen object.

    The command comes from self.lammps_command or the LAMMPS_COMMAND
    environment variable; raises RuntimeError if neither is set or the
    executable cannot be launched.
    """
    lammps_command = self.lammps_command
    if not lammps_command:
        lammps_command = os.environ.get('LAMMPS_COMMAND')
    if not lammps_command or len(lammps_command.strip()) == 0:
        raise RuntimeError('Please set LAMMPS_COMMAND environment variable')
    lammps_cmd_line = shlex.split(lammps_command)
    # Make sure we execute using the absolute path
    lammps_cmd_line[0] = os.path.abspath(lammps_cmd_line[0])
    if self.output_hack:
        lammps_cmd_line += ['-log', '/dev/stdout'] #old fix
    else:
        lammps_cmd_line += ['-log', 'none'] #, '-screen', 'none']
    if self.log == True:
        # Save LAMMPS input and output for reference
        # in python 2.4 the delete=False option doesn't exist
        #self.inlog = NamedTemporaryFile(prefix='in_'+filelabel, dir=tmp_dir, delete=False)
        #self.outlog = NamedTemporaryFile(prefix='log_'+filelabel, dir=tmp_dir, delete=False)
        self.inlog = open(os.path.join(tmp_dir, 'in_'+filelabel), 'w')
        self.outlog = open(os.path.join(tmp_dir, 'out_'+filelabel), 'w')
    try:
        return Popen(lammps_cmd_line,
                     cwd=tmp_dir, stdin=PIPE, stdout=PIPE, stderr=sys.stderr)
    except OSError as e:
        raise RuntimeError('Unable to run LAMMPS, please check LAMMPS_COMMAND. Error message: %s.' % e.strerror)
def start(self, tmp_dir, filelabel=''):
if self.running(): self.terminate()
self.proc = self.invoke_lammps(tmp_dir, filelabel)
def running(self):
return self.proc and self.proc.poll() == None
def poll(self):
return self.proc.poll()
def terminate(self):
if not self.running(): pass
self.proc.stdin.close()
return self.proc.wait()
def write(self, data):
self.proc.stdin.write(data)
if self.inlog: self.inlog.write(data)
def readline(self):
line = self.proc.stdout.readline()
if self.outlog: self.outlog.write(line)
return line
def flush(self):
self.proc.stdin.flush()
if self.inlog: self.inlog.flush()
if self.output_hack:
self.write('log /dev/stdout\n')
def close_logs(self):
if self.inlog: self.inlog.close()
if self.outlog: self.outlog.close()
self.inlog = None
self.outlog = None
def get_thermo(self, key):
""" Return the value of thermo variable key """
return self.thermo_output[-1][key]
def read_lammps_output(self):
""" Read thermo output from LAMMPS stdout """
f = self
def translate_keys(keys):
result = []
for key in keys:
if key == 'PotEng': k = 'pe'
elif key == 'KinEng': k = 'ke'
elif key == 'TotEng': k = 'etotal'
else: k = key.lower()
result.append(k)
return result
thermo_output = []
line = f.readline()
while line and line.strip() != CALCULATION_END_MARK:
if 'ERROR' in line:
raise RuntimeError('LAMMPS execution failed. LAMMPS %s' % line)
words = line.split()
if words and np.all([w[0].isupper() for w in words]):
# Seems to be the start of thermo output
keys = translate_keys(words)
while True:
line = f.readline()
fields = line.split()
if len(fields) != len(keys): break
try:
fields = map(float, fields) # convert to float
thermo_output.append(dict(zip(keys, fields)))
except ValueError:
break # Wasn't a thermo line after all
else:
line = f.readline()
self.thermo_output = thermo_output
if len(thermo_output) == 0:
raise RuntimeError('No thermo output from LAMMPS!')
class Prism:
    """The representation of the unit cell in LAMMPS.

    LAMMPS works with a lower-triangular cell matrix; the rotation ``R``
    maps between the ASE frame and the LAMMPS frame via
    ``ase_cell * R = lammps_cell``.
    """

    def __init__(self, cell, pbc=(True, True, True), digits=10):
        # LQ-decompose the ASE cell: cell.T = Q * L, so L.T is the
        # lower-triangular LAMMPS cell and Q is the frame rotation.
        rotation, upper = np.linalg.qr(cell.T)
        self.R = rotation
        self.lammps_cell = upper.T
        if self.is_skewed() and not np.all(pbc):
            raise RuntimeError('Skewed lammps cells MUST have '
                               'PBC == True in all directions!')

    def get_lammps_prism(self):
        """Return the six LAMMPS prism parameters (xx, yy, zz, xy, xz, yz)."""
        rows = (0, 1, 2, 1, 2, 2)
        cols = (0, 1, 2, 0, 0, 1)
        return self.lammps_cell[rows, cols]

    def update_cell(self, xyz, offdiag):
        """Store a new LAMMPS cell from its diagonal/off-diagonal parts and
        return the corresponding ASE cell."""
        self.lammps_cell = self.to_cell_matrix(xyz, offdiag)
        return np.dot(self.lammps_cell, self.R.T)

    def to_cell_matrix(self, xyz, offdiag):
        """Assemble a lower-triangular 3x3 cell from (x, y, z) diagonals and
        (xy, xz, yz) tilt factors."""
        diag_x, diag_y, diag_z = xyz
        tilt_xy, tilt_xz, tilt_yz = offdiag
        return np.array([[diag_x, 0, 0],
                         [tilt_xy, diag_y, 0],
                         [tilt_xz, tilt_yz, diag_z]])

    def vector_to_lammps(self, vec):
        """Rotate a vector from the ASE frame into the LAMMPS frame."""
        return np.dot(vec, self.R)

    def vector_to_ase(self, vec):
        """Rotate a vector from the LAMMPS frame back into the ASE frame."""
        return np.dot(vec, self.R.T)

    def is_skewed(self):
        """True when the cell has non-negligible tilt components."""
        tolerance = 1e-6
        squared = self.lammps_cell ** 2
        below_diag = np.sum(np.tril(squared, -1))
        on_diag = np.sum(np.diag(squared))
        return below_diag / on_diag > tolerance
|
csmm/multiase
|
multiasecalc/lammps/lammpsbase.py
|
Python
|
gpl-2.0
| 26,101
|
[
"ASE",
"LAMMPS"
] |
20a01516f64bbe3adcdd202118e9e37f7052ae70634a80cb64e2377981624a6c
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import _pythonpath
import logging
from urllib2 import urlopen
from urllib2 import URLError
import time
import socket
import requests
from nose.tools import assert_equals
from nose.tools import assert_true
from nose.tools import assert_is
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_raises_regexp
import rmock
from rmock.errors import RmockError
from rmock.tools.net import find_random_port
from nose.plugins.skip import SkipTest
from testtools import http_call
class TestRmockGeneric(object):
    """Functional tests for generic rmock behaviour: registration by key,
    port conflicts, server lifecycle, side effects and call recording."""

    def test_rmock_run_with_key(self):
        # A mock started with a key must be retrievable via rmock.get().
        mock = rmock.run(key="myrmock",
                         port="random")
        assert_is(mock, rmock.get("myrmock"))

    def test_server_already_running(self):
        # Starting a second mock on an occupied port must fail loudly.
        port = find_random_port()
        with rmock.run(port=port):
            assert_raises_regexp(
                RmockError,
                'error starting server process.*port.*%s.*' % port,
                rmock.run,
                port=port
            )

    @rmock.patch(port="random")
    def test_stop_server(self, mock):
        # Calls succeed, fail while stopped, and succeed again after restart.
        assert_equals(http_call(mock, "func").text, '')
        mock.stop_server()
        assert_raises(requests.ConnectionError,
                      http_call,
                      mock,
                      'func')
        mock.start_server()
        assert_equals(http_call(mock, 'func').text, '')

    def _func_side_effect(self, arg):
        # Identity side effect used by test_function_side_effect.
        return arg

    def _func_side_effect2(self, a, b, c):
        # Default side effect: join the three arguments with dots.
        return '.'.join([a, b, c])

    @rmock.patch(port="random")
    def test_function_side_effect(self, mock):
        # Per-function and default side effects produce the expected bodies,
        # and each call's args/kwargs are recorded.
        mock.func.side_effect = self._func_side_effect
        mock.set_default_side_effect(self._func_side_effect2)
        assert_equals(http_call(mock, 'func', arg="val").text, 'val')
        assert_equals(http_call(mock, 'func', arg="val2").text, 'val2')
        assert_equals(http_call(mock, 'defaultfunc', a=10, b=20, c=30).text, '10.20.30')
        assert_equals(len(mock.func.calls), 2)
        assert_equals(mock.func.calls[0].args, ())
        assert_equals(mock.func.calls[0].kwargs, {'arg': 'val'})

    @rmock.patch(port="random")
    def test_function_getitem_syntax(self, mock):
        # Function names that are not valid identifiers use item access.
        mock['func-name'].return_value = 'result'
        assert_equals(http_call(mock, 'func-name').text, 'result')
        assert_equals(http_call(mock, 'func2').text, '')
        assert_equals(len(mock['func-name'].calls), 1)
        assert_equals(mock['func-name'].calls[0].args, ())
        assert_equals(mock['func-name'].calls[0].kwargs, {})

    def test_rmock_create(self):
        # rmock.create() builds a mock without starting the server; behaviour
        # can be configured before and between start_server() calls.
        mock = rmock.create("http", port="random")
        mock.func.return_value = "result"
        assert_raises(requests.ConnectionError, http_call, mock, "func")
        mock.start_server()
        assert_equals(http_call(mock, "func").text, "result")
        assert_equals(http_call(mock, "func2").text, "")
        mock.func.return_value = "other result"
        assert_equals(http_call(mock, "func").text, "other result")
        mock.start_server()
        assert_equals(http_call(mock, "func").text, "other result")
@SkipTest
#("mock.patch only support class functions now")
@rmock.patch("http", port="random")
def test_free_func(mock):
    # Skipped: rmock.patch currently only supports methods of a test class,
    # so this module-level variant cannot run.
    http_call(mock, "func")
    mock.assert_called_with()
|
tikan/rmock
|
tests/func_tests/test_generic.py
|
Python
|
lgpl-3.0
| 4,139
|
[
"VisIt"
] |
a4a5bae93cff62fd7a18d1584ea1864e5b73ca7b0fc69903630dd7382ed98103
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 10:18:45 2017
@author: a002028
"""
#import numpy as np
import os
import pandas as pd
import json
import codecs
import pickle
#import netcdf
"""
#==============================================================================
#==============================================================================
"""
class Load(object):
    """
    class to hold various methods for loading different types of files
    Can be settings files, data files, info files..
    """
    def __init__(self):
        pass

    #==========================================================================
    def load_excel(self, file_path=u'', sheetname=u'', header_row=0, fill_nan=u''):
        """Load one sheet of an Excel workbook with every column read as str
        and NaN cells replaced by *fill_nan*.

        NOTE(review): relies on the xlrd-backed ``ExcelFile.book`` attribute
        and the long-deprecated ``sheetname`` keyword -- confirm the pinned
        pandas version still supports both.
        """
        xl = pd.ExcelFile(file_path)
        ncols = xl.book.sheet_by_name(sheetname).ncols
        xl.close()
        return pd.read_excel(file_path, sheetname=sheetname, header=header_row,
                             converters={i: str for i in range(ncols)}).fillna(fill_nan)

    #==========================================================================
    def load_netcdf(self, file_path=u''):
        # Placeholder: NetCDF loading is not implemented yet.
        pass

    #==========================================================================
    def load_txt(self, file_path=u'', sep='\t', encoding='cp1252', fill_nan=u''):
        """Load a delimited text file into a DataFrame of strings.

        Bug fix: ``sep`` and ``encoding`` were previously ignored (the
        read_csv call hard-coded '\\t' and 'cp1252'), so callers passing a
        different separator or encoding silently got a wrong parse. Both
        parameters are now honoured; defaults preserve the old behaviour.
        """
        # Read the header line to force every column to dtype str.
        with codecs.open(file_path, 'r', encoding=encoding) as f:
            header = f.readline().strip('\n').strip('\r').split(sep)
        return pd.read_csv(file_path, sep=sep, encoding=encoding,
                           dtype={key: str for key in header}).fillna(fill_nan)

    #==========================================================================
    def load_json(self, file_path=u''):
        """
        array will be either a list of dictionaries or one single dictionary
        depending on what the json file includes
        """
        with open(file_path, 'r') as f:
            array = json.load(f)
        return array
#==========================================================================
class SaveLoadDelete(object):
    """
    Created: 20180522 by Magnus
    Last modified: 20180525 by Magnus
    class to save and load different structures (attributes). Generally saves/loads
    from txt (if possible) and pickle.
    Method should be added for the specific assignment.
    The following structures are handled: pandas DataFrames, plain dicts
    (pickle or json) and boolean dicts.
    """
    def __init__(self, directory):
        # Root directory that every file is read from / written to.
        self.directory = directory

    #==========================================================================
    def _strip_name(self, file_name):
        # Drop any extension from file_name (text before the first dot).
        return file_name.split('.')[0]

    #==========================================================================
    def _pikle_file_name(self, file_name):
        # Canonical .pkl name for file_name within self.directory.
        return self._strip_name(file_name) + '.pkl'

    #==========================================================================
    def _json_file_name(self, file_name):
        # Canonical .json name for file_name within self.directory.
        return self._strip_name(file_name) + '.json'

    #==========================================================================
    def _txt_file_name(self, file_name):
        # Canonical .txt name for file_name within self.directory.
        return self._strip_name(file_name) + '.txt'

    #==========================================================================
    def old_load_df(self, file_name='df_data', load_txt=False):
        """
        Created: 20180523 by Magnus
        Last modified: 20180523 by Magnus
        Loads a pandas dataframe structure from pickle- and/or txt-file.
        By default the pickle file is loaded if it exists.
        If the corresponding pickle-file does not exists the txt file is loaded
        If load_txt=True the txt-file is loaded even if the pickle excists exists.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        txt_file_path = os.path.join(self.directory, self._txt_file_name(file_name))
        # NOTE(review): when load_txt is True AND a pickle exists, the txt
        # result below is immediately overwritten by the pickle -- confirm
        # whether the txt file was meant to take precedence.
        if load_txt:
            df = load_data_file(file_path=txt_file_path, sep='\t', encoding='cp1252', fill_nan=u'')
        if os.path.exists(pickle_file_path):
            with open(pickle_file_path, "rb") as fid:
                df = pickle.load(fid)
#            df = pd.read_pickle(pickle_file_path)
        elif os.path.exists(txt_file_path):
            df = load_data_file(file_path=txt_file_path, sep='\t', encoding='cp1252', fill_nan=u'')
        return df

    def load_dict_from_pkl(self, file_name):
        """Load a pickled dict from the directory; returns False (after
        printing a notice) when the file does not exist."""
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        if os.path.exists(pickle_file_path):
            with open(pickle_file_path, "rb") as fid:
                dicten = pickle.load(fid)
        else:
            print('{} not in {}'.format(file_name, self.directory))
            return False
        return dicten

    def load_dict_from_json(self, file_name):
        """
        Created: 20181017 by Lena
        Loads a dictionary from a json file; returns False (after printing
        a notice) when the file does not exist.
        """
        json_file_path = os.path.join(self.directory, self._json_file_name(file_name))
        if os.path.exists(json_file_path):
            with codecs.open(json_file_path, 'r', encoding = 'cp1252') as fid:
                dicten = json.load(fid)
        else:
            print('{} not in {}'.format(file_name, self.directory))
            return False
        return dicten

    #==========================================================================
    def load_df(self, file_name='df_data', load_txt=False):
        """
        Created: 20180523 by Magnus
        Last modified: 20180720 by Magnus
        Loads a pandas dataframe structure from pickle- and/or txt-file.
        By default the pickle file is loaded if it exists.
        If the corresponding pickle-file does not exists the txt file is loaded
        If load_txt=True the txt-file is loaded even if the pickle excists exists.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        txt_file_path = os.path.join(self.directory, self._txt_file_name(file_name))
        #TODO: I added None here for it to work with some loading, but discovered it does not work with load_all_data
#        df = None
        # NOTE(review): if neither file exists, 'df' is never bound and the
        # final return raises UnboundLocalError -- see TODO above.
        if load_txt or not os.path.exists(pickle_file_path):
            if os.path.exists(txt_file_path):
                df = load_data_file(file_path=txt_file_path, sep='\t', encoding='cp1252', fill_nan=u'')
                # NOTE(review): load_data_file already promotes 'index_column'
                # to the index; this second set_index looks like it would
                # raise KeyError -- confirm.
                df.set_index('index_column', inplace = True)
        elif os.path.exists(pickle_file_path):
            with open(pickle_file_path, "rb") as fid:
                df = pickle.load(fid)
#            df = pd.read_pickle(pickle_file_path)
        return df

    #==========================================================================
    def save_df(self, df, file_name='df_data', force_save_txt=False, only_pkl=False, **kwargs):
        """
        Created: 20180522 by Magnus
        Last modified: 20180525 by Magnus
        Saves a pandas dataframe structure.
        By default a pickle file is created.
        If the corresponding txt-file does not exists a txt file is created
        If save_txt=True a txt-file is created even if the pickle file exists.
        If only_pkl=True and force_save_txt=True no txt file is saved.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        txt_file_path = os.path.join(self.directory, self._txt_file_name(file_name))
        # Save txt-file
        if only_pkl:
            pass
        elif force_save_txt or not os.path.exists(txt_file_path):
            save_data_file(df=df,
                           directory=self.directory,
                           file_name=self._txt_file_name(file_name), **kwargs)
        # Save pickle file
        with open(pickle_file_path, "wb") as fid:
            pickle.dump(df, fid)
#        df.to_pickle(pickle_file_path)

    #==========================================================================
    def save_dict_to_pkl(self, dicten, file_name=None):
        """
        Created: 20181016 by Lena
        Saves a dictionary as a pickle.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        # Save pickle file
        with open(pickle_file_path, "wb") as fid:
            pickle.dump(dicten, fid)

    def save_dict_to_json(self, dicten = None, file_name=None):
        """
        Created: 20181017 by Lena
        Saves a dictionary as a json.
        """
        json_file_path = os.path.join(self.directory, self._json_file_name(file_name))
        with codecs.open(json_file_path, 'w', encoding = 'cp1252') as fid:
            json.dump(dicten, fid, indent = 4)

    #==========================================================================
    def delete_files(self, file_name='df_data'):
        """
        Created: 20180525 by Magnus
        Last modified: 20180525 by Magnus
        Deletes txt and pkl files matching the file_name.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        txt_file_path = os.path.join(self.directory, self._txt_file_name(file_name))
        print('¤'*50)
        print(pickle_file_path)
        print(txt_file_path)
        if os.path.exists(pickle_file_path):
            os.remove(pickle_file_path)
        if os.path.exists(txt_file_path):
            os.remove(txt_file_path)

    #==========================================================================
    def load_boolean_dict(self, boolean_dict, file_name='boolean_dict'):
        """
        Created: 20180523 by Magnus
        Last modified: 20180523 by Magnus
        Loads a boolean dict from pickle.
        Returns empty dict if non exsisting.
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        if os.path.exists(pickle_file_path):
            return pickle.load(open(pickle_file_path, "rb"))
        else:
            return {}

    #==========================================================================
    def save_boolean_dict(self, boolean_dict, file_name='boolean_dict'):
        """
        Created: 20180523 by Magnus
        Last modified: 20180523 by Magnus
        Saves a boolean dict to pickle
        """
        pickle_file_path = os.path.join(self.directory, self._pikle_file_name(file_name))
        pickle.dump(boolean_dict, open(pickle_file_path, "wb"))
#==========================================================================
def save_data_file(df=None, directory=u'', file_name=u'', **kwargs):
    """Write *df* to <directory>/<file_name> as tab-separated cp1252 text.

    The frame is copied and its index is exported in an explicit
    'index_column' column so that load_data_file() can restore it later.
    """
    # Ensure the directory string ends with a path separator before joining.
    if not directory.endswith(('/', '\\')):
        directory += '/'
    target = directory + file_name
    print(u'Saving data to:', target)
    # Work on a copy so the caller's frame is left untouched.
    export = df.copy()
    export['index_column'] = export.index
    export.to_csv(target, sep='\t', encoding='cp1252', index=False)
#==========================================================================
#==========================================================================
def load_data_file(file_path=None, sep='\t', encoding='cp1252', fill_nan=u''):
    """Load a text table written by save_data_file() and restore its index.

    Reads the file via Load().load_txt, converts the 'index_column' values
    back to int and promotes them to the DataFrame index.
    """
    frame = Load().load_txt(file_path, sep=sep, encoding=encoding, fill_nan=fill_nan)
    frame['index_column'] = frame['index_column'].astype(int)
    return frame.set_index('index_column')
"""
#==============================================================================
#==============================================================================
"""
|
ekostat/ekostat_calculator
|
core/load.py
|
Python
|
mit
| 13,247
|
[
"NetCDF"
] |
b7053822f882ff4e05b08e8aa182795ce7c09b6b5e40bec4a6446e423fd81ca2
|
# Name of the DALI alignment output file to parse.
dali_file = "dali.txt"
# Local cache directory for fetched PDB structure files.
pdb_dir = "dali_pdb"
# Maximum number of aligned structure pairs to load into PyMOL.
max_pairs = 10
def fetch_pdb(pdbCode,outFile):
    """Download a gzipped PDB entry from the RCSB and unpack it to outFile.

    Skips the download when outFile already exists (acts as a local cache).
    Python 2 code (string.upper, print statements, urllib.urlretrieve).
    """
    import urllib
    import gzip
    import os
    import string
    remoteCode = string.upper(pdbCode)
    if not os.path.exists(pdb_dir):
        os.mkdir(pdb_dir)
    if not os.path.exists(outFile):
        try:
            # urlretrieve returns (local_tmp_filename, headers).
            filename = urllib.urlretrieve(
                'http://www.rcsb.org/pdb/cgi/export.cgi/' +
                remoteCode + '.pdb.gz?format=PDB&pdbId=' +
                remoteCode + '&compression=gz')[0]
        except:
            # NOTE(review): bare except silently swallows every error type.
            print "warning: %s not found.\n"%pdbCode
        else:
            if (os.path.getsize(filename) > 0): # If 0, then pdb code was invalid
                try:
                    abort = 0
                    # Decompress the downloaded .gz into the target file.
                    open(outFile, 'w').write(gzip.open(filename).read())
                    print "fetched: %s"%(pdbCode)
                except IOError:
                    abort = 1
                if abort:
                    # Remove the partially written output on failure.
                    os.remove(outFile)
            else:
                print "warning: %s not valid.\n"%pdbCode
            # Always delete the temporary download.
            os.remove(filename)
from pymol import cmd
from string import strip
import os
# Parse the '## MATRICES' section of a DALI result file: for each listed
# structure pair, fetch both PDB entries (cached) and load them into PyMOL,
# stopping after max_pairs pairs.
seen = {}
input = open(dali_file).readlines()
# Simple state machine: 0 = searching for the matrices table header,
# 1 = inside the table, 2 = past the table.
input_state = 0
while 1:
    try:
        line = input.pop(0)
    except IndexError:
        break
    if input_state == 0:
        if line[0:11]=='## MATRICES':
            line = input.pop(0)
            if line[0:12]==' NR. STRID1':
                input_state = 1
    elif input_state == 1:
        if strip(line)=='':
            # Blank line terminates the table.
            input_state = 2
        elif line[4:5]==':':
            # Fixed-column record: target/source structure identifiers.
            trg = strip(line[6:12])
            src = strip(line[13:19])
            trg_code = trg[0:4]
            src_code = src[0:4]
            # Fetch and load each structure only once.
            if not seen.has_key(trg_code):
                trg_file = pdb_dir+os.sep+trg_code+".pdb"
                fetch_pdb(trg_code,trg_file)
                cmd.load(trg_file)
                seen[trg_code]=1
            if not seen.has_key(src_code):
                src_file = pdb_dir+os.sep+src_code+".pdb"
                fetch_pdb(src_code,src_file)
                cmd.load(src_file)
                seen[src_code]=1
            # Collect the 3x4 transform into a 4x4 row-major matrix.
            matrix = []
            for a in range(0,3):
                matrix.append(float(strip(line[29:38])))
                matrix.append(float(strip(line[39:48])))
                matrix.append(float(strip(line[49:58])))
                matrix.append(float(strip(line[59:78])))
            matrix.extend([0.0,0.0,0.0,1.0])
            max_pairs = max_pairs - 1
            if max_pairs<0:
                break
|
gratefulfrog/lib
|
python/pymol/pymol_path/examples/cookbook/dali.py
|
Python
|
gpl-2.0
| 2,564
|
[
"PyMOL"
] |
9150ab57951c67b7afbaa7d2389f31f64e3ea1311c223b3a060cfb7ea53fa259
|
"""
Define steps for bulk email acceptance test.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import mail
from nose.tools import assert_in, assert_equal
from django.core.management import call_command
from django.conf import settings
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given there is a course with a staff, instructor and student')
def make_populated_course(step): # pylint: disable=unused-argument
    """Create a fresh test course populated with an instructor, a staff
    member and a student, and record the email addresses expected for each
    bulk-email send-to option in world.expected_addresses."""
    ## This is different than the function defined in common.py because it enrolls
    ## a staff, instructor, and student member regardless of what `role` is, then
    ## logs `role` in. This is to ensure we have 3 class participants to email.

    # Clear existing courses to avoid conflicts
    world.clear_courses()

    # Create a new course
    course = world.CourseFactory.create(
        org='edx',
        number='888',
        display_name='Bulk Email Test Course'
    )
    world.bulk_email_course_key = course.id

    try:
        # See if we've defined the instructor & staff user yet
        world.bulk_email_instructor
    except AttributeError:
        # Make & register an instructor for the course
        world.bulk_email_instructor = InstructorFactory(course_key=world.bulk_email_course_key)
        world.enroll_user(world.bulk_email_instructor, world.bulk_email_course_key)

        # Make & register a staff member
        world.bulk_email_staff = StaffFactory(course_key=course.id)
        world.enroll_user(world.bulk_email_staff, world.bulk_email_course_key)

    # Make & register a student
    world.register_by_course_key(
        course.id,
        username='student',
        password='test',
        is_staff=False
    )

    # Store the expected recipients
    # given each "send to" option
    staff_emails = [world.bulk_email_staff.email, world.bulk_email_instructor.email]
    world.expected_addresses = {
        'course staff': staff_emails,
        'students, staff, and instructors': staff_emails + ['student@edx.org']
    }
# Dictionary mapping a description of the email recipient
# to the corresponding <option> value in the UI.
# Keys deliberately match those of world.expected_addresses.
SEND_TO_OPTIONS = {
    'myself': 'myself',
    'course staff': 'staff',
    'students, staff, and instructors': 'all'
}
@step(u'I am logged in to the course as "([^"]*)"')
def log_into_the_course(step, role): # pylint: disable=unused-argument
    """Log in as the course instructor or staff member and record the
    logged-in user's address as the 'myself' send-to expectation."""
    # Only these two roles are valid for this scenario.
    assert_in(role, ['instructor', 'staff'])

    # Log in as the an instructor or staff for the course
    my_email = world.bulk_email_instructor.email
    if role == 'instructor':
        world.log_in(
            username=world.bulk_email_instructor.username,
            password='test',
            email=my_email,
            name=world.bulk_email_instructor.profile.name
        )
    else:
        my_email = world.bulk_email_staff.email
        world.log_in(
            username=world.bulk_email_staff.username,
            password='test',
            email=my_email,
            name=world.bulk_email_staff.profile.name
        )

    # Store the "myself" send to option
    world.expected_addresses['myself'] = [my_email]
@step(u'I send email to "([^"]*)"')
def when_i_send_an_email(step, recipient): # pylint: disable=unused-argument
    """Drive the instructor dashboard UI to queue a bulk email to the
    given recipient group and verify the success message appears."""
    # Check that the recipient is valid
    assert_in(
        recipient, SEND_TO_OPTIONS,
        msg="Invalid recipient: {}".format(recipient)
    )

    # Clear the queue of existing emails
    while not mail.queue.empty(): # pylint: disable=no-member
        mail.queue.get() # pylint: disable=no-member

    # Because we flush the database before each run,
    # we need to ensure that the email template fixture
    # is re-loaded into the database
    call_command('loaddata', 'course_email_template.json')

    # Go to the email section of the instructor dash
    url = '/courses/{}'.format(world.bulk_email_course_key)
    world.visit(url)
    world.css_click('a[href="{}/instructor"]'.format(url))
    world.css_click('a[data-section="send_email"]')

    # Select the recipient
    world.select_option('send_to', SEND_TO_OPTIONS[recipient])

    # Enter subject and message
    world.css_fill('input#id_subject', 'Hello')
    # The message body lives in a TinyMCE iframe, not a plain form field.
    with world.browser.get_iframe('mce_0_ifr') as iframe:
        editor = iframe.find_by_id('tinymce')[0]
        editor.fill('test message')

    # Click send
    world.css_click('input[name="send"]', dismiss_alert=True)

    # Expect to see a message that the email was sent
    expected_msg = "Your email was successfully queued for sending."
    world.wait_for_visible('#request-response')
    assert_in(
        expected_msg, world.css_text('#request-response'),
        msg="Could not find email success message."
    )
UNSUBSCRIBE_MSG = 'To stop receiving email like this'
@step(u'Email is sent to "([^"]*)"')
def then_the_email_is_sent(step, recipient): # pylint: disable=unused-argument
    """Verify the count, headers, plain-text body, HTML alternative and
    recipient list of the bulk emails queued for the given option."""
    # Check that the recipient is valid
    assert_in(
        recipient, SEND_TO_OPTIONS,
        msg="Invalid recipient: {}".format(recipient)
    )

    # Retrieve messages. Because we are using celery in "always eager"
    # mode, we expect all messages to be sent by this point.
    messages = []
    while not mail.queue.empty(): # pylint: disable=no-member
        messages.append(mail.queue.get()) # pylint: disable=no-member

    # Check that we got the right number of messages
    assert_equal(
        len(messages), len(world.expected_addresses[recipient]),
        msg="Received {0} instead of {1} messages for {2}".format(
            len(messages), len(world.expected_addresses[recipient]), recipient
        )
    )

    # Check that the message properties were correct
    recipients = []
    for msg in messages:
        assert_in('Hello', msg.subject)
        assert_in(settings.BULK_EMAIL_DEFAULT_FROM_EMAIL, msg.from_email)

        # Message body should have the message we sent
        # and an unsubscribe message
        assert_in('test message', msg.body)
        assert_in(UNSUBSCRIBE_MSG, msg.body)

        # Should have alternative HTML form
        assert_equal(len(msg.alternatives), 1)
        content, mime_type = msg.alternatives[0]
        assert_equal(mime_type, 'text/html')
        assert_in('test message', content)
        assert_in(UNSUBSCRIBE_MSG, content)

        # Store the recipient address so we can verify later
        recipients.extend(msg.recipients())

    # Check that the messages were sent to the right people
    # Because "myself" can vary based on who sent the message,
    # we use the world.expected_addresses dict we configured
    # in an earlier step.
    for addr in world.expected_addresses[recipient]:
        assert_in(addr, recipients)
|
solashirai/edx-platform
|
lms/djangoapps/instructor/features/bulk_email.py
|
Python
|
agpl-3.0
| 6,776
|
[
"VisIt"
] |
fb7896f7468cd2de1fbcff97406751943b6c2f96f9d5479a58edc54e179db365
|
#!/usr/bin/env python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import urllib.request, urllib.parse, urllib.error
import json
import os
import sys
import psycopg2
import urlparse
import emoji
from flask import Flask
from flask import request, render_template
from flask import make_response
# Flask should start in global layout
context = Flask(__name__)
# Facbook Access Token
ACCESS_TOKEN = "EAAXRzkKCxVQBAImZBQo8kEpHVn0YDSVxRcadEHiMlZAcqSpu5pV7wAkZBKUs0eIZBcX1RmZCEV6cxJzuZAp5NO5ZCcJgZBJu4OPrFpKiAPJ5Hxlve2vrSthfMSZC3GqLnzwwRENQSzZAMyBXFCi1LtLWm9PhYucY88zPT4KEwcZCmhLYAZDZD"
#ACCESS_TOKEN = "EAADCpnCTbUoBAMlgDxoEVTifvyD80zCxvfakHu6m3VjYVdS5VnbIdDnZCxxonXJTK2LBMFemzYo2a4DGrz0SxNJIFkMAsU8WBfRS7IRrZAaHRrXEMBEL5wmdUvzawASQWtZAMNBr90Gattw3IGzeJ7pZBBUthMewXDvnmBELCgZDZD"
# Google Access Token
Google_Acces_Token = "key=AIzaSyDNYsLn4JGIR4UaZMFTAgDB9gKN3rty2aM&cx=003066316917117435589%3Avcms6hy5lxs&q="
# NewsAPI Access Token
newspai_access_token = "505c1506aeb94ba69b72a4dbdce31996"
# Weather Update API KeyError
weather_update_key = "747d84ccfe063ba9"
#************************************************************************************#
# #
# All Webhook requests lands within the method --webhook #
# #
#************************************************************************************#
# Webhook requests are coming to this method
@context.route('/webhook', methods=['POST'])
def webhook():
    """Entry point for all Dialogflow webhook POSTs.

    Extracts the fulfilment action name from the request and forwards the
    request context to the matching handler. The long if/elif chain is
    replaced by a dispatch table; per-action behaviour is unchanged.
    Returns whatever the handler returns, or None (after printing
    'Good Bye') for unknown actions, exactly as before.
    """
    reqContext = request.get_json(silent=True, force=True)
    #print(json.dumps(reqContext, indent=4))
    action = reqContext.get("result").get("action")
    print(action)
    print ("webhook is been hit ONCE ONLY")
    # 'input.welcome' is special-cased: its handler takes no arguments.
    if action == "input.welcome":
        return welcome()
    # Map of Dialogflow action names to handlers that take the request context.
    handlers = {
        "firstIntroductionSureOptionStatement": firstIntroductionSureOptionStatement,
        "firstIntroductionNoOptionStatement": firstIntroductionNoOptionStatement,
        "secondExplanationOKStatement": secondExplanationOKStatement,
        "thirdExplanationOKStatement": thirdExplanationOKStatement,
        "fourthExplanationOKStatement": fourthExplanationOKStatement,
        "weather": weather,
        "yahooWeatherForecast": weatherhook,
        "wikipedia": wikipedia_search,
        "GoogleSearch": searchhook,
        "wikipediaInformationSearch": wikipediaInformationSearch,
        "news.category": newsCategory,
        "topnews": news_category_topnews,
        "topfournewsarticle": topFourNewsArticle,
        "youtubeTopic": youtubeTopic,
        "youtubeVideoSearch": youtubeVideoSearch,
        "Help": help,
        "contact.us": contact,
        "requestdemo": requestDemo,
        "forsalebottemplate": forsale,
    }
    handler = handlers.get(action)
    if handler is not None:
        return handler(reqContext)
    print("Good Bye")
#************************************************************************************#
# #
# This method is to get the Facebook User Deatails via graph.facebook.com/v2.6 #
# #
#************************************************************************************#
# First name of the most recent sender, cached for use by later handlers.
user_name = None
def welcome():
    """Handle the Dialogflow 'input.welcome' action.

    Looks up the sender's first name via the Facebook Graph API and builds
    a rich Messenger greeting: a card with the user's name, introduction
    text, a GIF and quick-reply buttons. Returns a Flask JSON response.
    """
    global user_name
    print ("within welcome method")
    data = request.json
    print (data)
    if data is None:
        return {}
    # Dig the sender's page-scoped id out of the original Messenger event.
    entry = data.get('originalRequest')
    dataall = entry.get('data')
    sender = dataall.get('sender')
    id = sender.get('id')
    print ("id :" + id)
    # Fetch the sender's public profile fields from the Graph API.
    fb_info = "https://graph.facebook.com/v2.6/" + id + "?fields=first_name,last_name,profile_pic,locale,timezone,gender&access_token=" + ACCESS_TOKEN
    print (fb_info)
    result = urllib.request.urlopen(fb_info).read()
    print (result)
    data = json.loads(result)
    first_name = data.get('first_name')
    print (first_name)
    user_name = data.get('first_name')
    speech1 = "I am 'Marvin' - your personal assistant"
    # Facebook-specific rich message payload (cards, GIF, quick replies).
    res = {
        "speech": speech1,
        "displayText": speech1,
        "data" : {
            "facebook" : [
                {
                    "sender_action": "typing_on"
                },
                {
                    "attachment" : {
                        "type" : "template",
                        "payload" : {
                            "template_type" : "generic",
                            "elements" : [
                                {
                                    "title" : "Hey " + first_name + "! Thanks for stopping by...",
                                    "image_url" : "http://gdurl.com/vc1o",
                                }
                            ]
                        }
                    }
                },
                {
                    "sender_action": "typing_on"
                },
                {
                    "text": speech1
                },
                {
                    "sender_action": "typing_on"
                },
                {
                    "attachment":{
                        "type":"image",
                        "payload":{
                            "url":"https://media.giphy.com/media/1qO2XGCGx7Rte/giphy.gif"
                        }
                    }
                },
                {
                    "text": "Do you want to know more about me?",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "Yeah Sure",
                            "payload": "Ummm, yeah sure",
                            "image_url": "http://www.thehindubusinessline.com/multimedia/dynamic/02337/bl12_smiley_jpg_2337780e.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "No Thanks",
                            "payload": "No, thank you",
                            "image_url": "https://www.colourbox.com/preview/7036940-exited-emoticon.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                    ]
                }
            ]
        }
    };
    print (res)
    # Serialize and wrap in a response with the JSON content type.
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    print (r)
    return r
def reply(user_id, msg):
    """Push a plain-text message to a Messenger user via the Send API."""
    payload = {
        "recipient": {"id": user_id},
        "message": {"text": msg},
    }
    print ("Data.........")
    print (payload)
    endpoint = "https://graph.facebook.com/v2.6/me/messages?access_token=" + ACCESS_TOKEN
    response = requests.post(endpoint, json=payload)
    print(response.content)
###################################THIS IS THE START OF FIRST BLOCK OF CUSTOMER ENGAGEMENT#########################################
def firstIntroductionSureOptionStatement(reqContext):
    """Funnel step 1, "Yeah Sure" branch: advertise the bot's features.

    Sends four capability messages (each preceded by a typing
    indicator), a GIF, and a follow-up question with quick replies.
    """
    print ("firstIntroductionSureOptionStatement..........YES..........")
    option = reqContext.get("result").get("action")
    typing = {"sender_action": "typing_on"}
    capability_texts = [
        "I can provide weather report with 7 day weather forecast of any city across the world",
        "Ask me any topic, I can bring info from Wikipedia",
        "Read out live newsfeed from 33 Nespapers - choose your favorite category",
        "Looking for something special? Search and watch YouTube videos here :)",
    ]
    messages = []
    for text in capability_texts:
        messages.append(typing)
        messages.append({"text": text})
    messages.append(typing)
    messages.append({
        "attachment": {
            "type": "image",
            "payload": {
                "url": "https://media.giphy.com/media/c6DcchsqBlGCY/giphy.gif"
            }
        }
    })
    messages.append({
        "text": "Do you wanna know more some amazing bot-news?",
        "quick_replies": [
            {
                "content_type": "text",
                "title": "Show More Bots",
                "payload": "Tell me right now",
                "image_url": "https://previews.123rf.com/images/krisdog/krisdog1509/krisdog150900014/44577557-A-cartoon-emoji-emoticon-icon-character-looking-very-happy-with-his-thumbs-up-he-likes-it-Stock-Vector.jpg"
            },
            {
                "content_type": "text",
                "title": "No, thanks",
                "payload": "No, thanks",
                "image_url": "https://www.colourbox.com/preview/7036940-exited-emoticon.jpg"
            },
            {
                "content_type": "text",
                "title": "For Sale",
                "payload": "For Sale",
                "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
            },
            {
                "content_type": "text",
                "title": "Contact Me",
                "payload": "contact",
                "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT82m3I34RXj5OqXvJUqczmgCWoqS9U2EZmdJKXMjZx24Jpp-Z6lQ"
            }
        ]
    })
    body = {
        "speech": "...",
        "displayText": "...",
        "data": {"facebook": messages}
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#####################################################################
def firstIntroductionNoOptionStatement(reqContext):
    """Funnel step 1, "No Thanks" branch: point the user at the menu.

    Sends two short messages and the standard feature quick-reply menu.
    """
    print ("firstIntroductionNoOptionStatement...........NO.........")
    option = reqContext.get("result").get("action")
    # (title, payload, icon) triplets for the standard menu quick replies.
    menu_entries = [
        ("News", "News",
         "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"),
        ("Weather", "Weather",
         "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"),
        ("Wikipedia", "Wikipedia",
         "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"),
        ("YouTube", "YouTube",
         "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"),
        ("For Sale", "For Sale",
         "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"),
        ("Contact Me", "contact",
         "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"),
    ]
    quick_replies = [
        {"content_type": "text", "title": title, "payload": payload, "image_url": icon}
        for title, payload, icon in menu_entries
    ]
    body = {
        "speech": "...",
        "displayText": "...",
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {"text": "Now it's time for you to enjoy tons of features that I offer."},
                {"sender_action": "typing_on"},
                {
                    "text": "Click on 'Menu' option to explore more!!!",
                    "quick_replies": quick_replies
                }
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
################################################THIS IS THE START OF SECOND BLOCK OF CUSTOMER ENGAGEMENT#########################################
def secondExplanationOKStatement(reqContext):
    """Funnel step 2: offer to introduce other chatbots.

    Sends a teaser message (with an emoji), a GIF, and a question with
    quick replies leading to the bot showcase.
    """
    option = reqContext.get("result").get("action")
    teaser = ("I can introduce to some other chatbots worth to give a try"
              + emoji.emojize(':iphone:', use_aliases=True))
    body = {
        "speech": "...",
        "displayText": "...",
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {"text": teaser},
                {"sender_action": "typing_on"},
                {
                    "attachment": {
                        "type": "image",
                        "payload": {
                            "url": "https://media.giphy.com/media/g4AgRBcatHKve/giphy.gif"
                        }
                    }
                },
                {
                    "text": "Want you like to see the chatbots?",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "Show More Bots",
                            "payload": "Tell me right now",
                            "image_url": "https://previews.123rf.com/images/krisdog/krisdog1509/krisdog150900014/44577557-A-cartoon-emoji-emoticon-icon-character-looking-very-happy-with-his-thumbs-up-he-likes-it-Stock-Vector.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Maybe later on",
                            "payload": "Maybe later on",
                            "image_url": "https://www.colourbox.com/preview/7036940-exited-emoticon.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT82m3I34RXj5OqXvJUqczmgCWoqS9U2EZmdJKXMjZx24Jpp-Z6lQ"
                        }
                    ]
                }
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#####################################################################
def thirdExplanationOKStatement(reqContext):
    """Funnel step 3: showcase the purchasable bot templates.

    Sends a carousel (generic template) of five bot-template cards and a
    closing question with "For Sale" / "Contact" quick replies.
    """
    option = reqContext.get("result").get("action")

    def card(title, image_url, subtitle, buttons):
        # One carousel element of the generic template.
        return {
            "title": title,
            "image_url": image_url,
            "subtitle": subtitle,
            "buttons": buttons,
        }

    def buy_and_chat(chat_url):
        # Standard button set: buy on the website, chat with the demo bot,
        # and the native share button.
        return [
            {"type": "web_url", "url": "https://marvinai.live", "title": "Buy Template"},
            {"type": "web_url", "url": chat_url, "title": "Chat"},
            {"type": "element_share"},
        ]

    cards = [
        card("Travel Agency Bot Template",
             "http://www.sunsail.eu/files/Destinations/Mediteranean/Greece/Athens/thira.jpg",
             "Get customized virtual assistant for your Restaurant today",
             buy_and_chat("https://m.me/926146750885580")),
        card("Real Estate Bot Template",
             "https://husvild-static.s3.eu-central-1.amazonaws.com/images/files/000/280/915/large/3674bd34e6c1bc42b690adeacfe9c778507f261a?1516032863",
             "Get qualified buyer and seller leads automatically delivered to your inbox!",
             buy_and_chat("https://m.me/realestatebotai")),
        card("Restaurant Bot Template",
             "https://www.outlookhindi.com/public/uploads/article/gallery/6eb226c14abd79a801172ab8d473e6d2_342_660.jpg",
             "Perfectly crafted bot from assisting online customers to handle orders",
             buy_and_chat("https://m.me/730273667158154")),
        card("Coffee Shop Bot Template",
             "https://images-na.ssl-images-amazon.com/images/I/71Crz9MYPPL._SY355_.jpg",
             "Your bot can deal with online customers, take orders and many more ",
             buy_and_chat("https://m.me/200138490717876")),
        card("VISA Check Bot",
             "http://famousdestinations.in/wp-content/uploads/2016/03/howtogetthere.png",
             "One stop solution for all your VISA requirements...Coming Soon!",
             [
                 {"type": "web_url", "url": "https://marvinai.live", "title": "Visit Website"},
                 {"type": "element_share"},
             ]),
    ]
    body = {
        "speech": "...",
        "displayText": "...",
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {"text": "Ok, Ok, I know you're getting impatient" + emoji.emojize(':sunglasses:', use_aliases=True)},
                {"sender_action": "typing_on"},
                {
                    "attachment": {
                        "type": "template",
                        "payload": {
                            "template_type": "generic",
                            "elements": cards
                        }
                    }
                },
                {
                    "text": "Ready to know more about the deal?",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact",
                            "payload": "contact",
                            "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT82m3I34RXj5OqXvJUqczmgCWoqS9U2EZmdJKXMjZx24Jpp-Z6lQ"
                        }
                    ]
                }
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
##################################################
def fourthExplanationOKStatement(reqContext):
    """Funnel step 4: pitch chatbot marketing and ask for contact.

    Sends a sequence of sales-pitch messages, a GIF, and a final call to
    action with "Contact Me" / "For Sale" quick replies.
    """
    option = reqContext.get("result").get("action")
    typing = {"sender_action": "typing_on"}
    pitch = [
        "Reaching out to all your customers with effective promotion is difficult"
        + emoji.emojize(':loudspeaker:', use_aliases=True),
        "Traditional marketing channels like ad, promotions, email marketing have very low sales conversion rate.",
        "Chatbot can promote sales offer to all your digital customers with highest opening rate. This helps in creating personal bonding.",
    ]
    messages = []
    for line in pitch:
        messages.append(typing)
        messages.append({"text": line})
    messages.append(typing)
    messages.append({
        "attachment": {
            "type": "image",
            "payload": {
                "url": "https://media.giphy.com/media/p2qX0hzOihmp2/giphy.gif"
            }
        }
    })
    messages.append(typing)
    messages.append({"text": "Marvin AI has the best market expertise to guide and grow your business."})
    messages.append({
        "text": "Do you want a chatbot for your business? Ask for a Limited FREE Trial Offer now",
        "quick_replies": [
            {
                "content_type": "text",
                "title": "Contact Me",
                "payload": "contact",
                "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT82m3I34RXj5OqXvJUqczmgCWoqS9U2EZmdJKXMjZx24Jpp-Z6lQ"
            },
            {
                "content_type": "text",
                "title": "For Sale",
                "payload": "For Sale",
                "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
            }
        ]
    })
    body = {
        "speech": "...",
        "displayText": "...",
        "data": {"facebook": messages}
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#************************************************************************************#
# #
# Below method is to get the Facebook Quick Reply Webhook Handling - Weather #
# #
#************************************************************************************#
def weather(reqContext):
    """Quick-reply handler: ask the user which city to report weather for."""
    print (reqContext.get("result").get("action"))
    option = reqContext.get("result").get("action")
    prompt = "Please provide a city name for weather report:"
    body = {
        "speech": prompt,
        "displayText": prompt,
        "data": {
            "facebook": [
                {"text": prompt}
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#************************************************************************************#
# #
# Below 3 methods are to get the Yahoo Weather Report for a location via API #
# #
#************************************************************************************#
def weatherhook(reqContext):
    """Build the weather-report response for a recognised city.

    Reads the city from the Dialogflow parameters ("geo-city", falling
    back to "geo-city-dk"), queries the Yahoo YQL weather API via
    yahoo_weatherapi(), and answers with the current conditions, a
    forecast-link card and the standard quick-reply menu.

    Returns:
        Flask JSON response, or {} when any expected node is missing
        from the API payload.
    """
    result = reqContext.get("result")
    parameters = result.get("parameters")
    city = parameters.get("geo-city")
    if not parameters.get("geo-city"):
        city = parameters.get("geo-city-dk")
    data = yahoo_weatherapi(city)
    # Defensive walk through the YQL envelope; bail out on any gap.
    query = data.get('query')
    if query is None:
        return {}
    result = query.get('results')
    if result is None:
        return {}
    channel = result.get('channel')
    if channel is None:
        return {}
    item = channel.get('item')
    location = channel.get('location')
    units = channel.get('units')
    if (location is None) or (item is None) or (units is None):
        return {}
    condition = item.get('condition')
    if condition is None:
        return {}
    link = item.get('link')
    # The Yahoo item link embeds the human-readable forecast URL after
    # a '*' separator. NOTE(review): raises IndexError if the delimiter
    # is ever absent — confirm against live API output.
    link_forecast = link.split("*", 1)[1]
    condition_get_code = condition.get('code')
    condition_code = weather_code(condition_get_code)
    image_url = "http://gdurl.com/" + condition_code
    speech = "Today in " + location.get('city') + "(" + location.get('country') + ")" + ": " + condition.get('text') + \
             ", the temperature is " + condition.get('temp') + " " + units.get('temperature')
    res = {
        "speech": speech,
        "displayText": speech,
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {"sender_action": "typing_on"},
                {"text": speech},
                {
                    "attachment": {
                        "type": "template",
                        "payload": {
                            "template_type": "generic",
                            "elements": [
                                {
                                    "title": location.get('city') + "-" + location.get('country'),
                                    "image_url": image_url,
                                    "subtitle": "",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": link_forecast,
                                        "title": "Weather Forecast"
                                    }]
                                }
                            ]
                        }
                    }
                },
                {
                    "text": "Click on the below options to start over again",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "News",
                            "payload": "news",
                            "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Weather",
                            "payload": "weather",
                            "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Wikipedia",
                            "payload": "wikipedia",
                            "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
                        },
                        {
                            "content_type": "text",
                            "title": "YouTube",
                            "payload": "youtube",
                            "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
                        }
                    ]
                }
            ]
        }
    }
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def yahoo_weatherapi(city):
    """Fetch the Yahoo YQL weather forecast for *city* (metric units).

    Args:
        city: free-text place name, interpolated into the YQL query.

    Returns:
        The decoded JSON response as a dict.

    NOTE(review): the public YQL endpoint has been retired by Yahoo;
    callers should expect this to fail until migrated to another API.
    """
    # (The original guarded `if yql_query is None`, but a concatenated
    # string literal can never be None — dead check removed.)
    yql_query = ("select * from weather.forecast where woeid in "
                 "(select woeid from geo.places(1) where text='" + city + "') and u='c'")
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_url = baseurl + urllib.parse.urlencode({'q': yql_query}) + "&format=json"
    result = urllib.request.urlopen(yql_url).read()
    return json.loads(result)
# Maps a Yahoo weather condition code to the gdurl.com permalink suffix of
# the matching icon (rendered as "http://gdurl.com/" + suffix).
WEATHER_ICON_CODES = {
    "0": "dCAC", "1": "SREL", "2": "IarO", "3": "7uxP", "4": "bwNv",
    "5": "KAeR", "6": "G2vM", "7": "I5yA", "8": "ZOqK", "9": "1zb0",
    "10": "jqoP", "11": "NN8n", "12": "SrHt", "13": "k-Kn", "14": "GQbF",
    "15": "Vz-n", "16": "Lqmw", "17": "21ph", "18": "caw1", "19": "UHxC",
    "20": "doYD", "21": "bs6H", "22": "lhGL", "23": "G4CG", "24": "5ixA",
    "25": "IuNo", "26": "mjvk", "27": "HvT7", "28": "IRxA", "29": "XjVZ",
    "30": "F1PG", "31": "GEwD", "32": "KIJr", "33": "jlUw", "34": "NfxW",
    "35": "5sRT", "36": "RGml", "37": "fMls", "38": "SCk3", "39": "0PsU",
    "40": "zoPn", "41": "6mNT", "42": "frKG", "43": "QE-9", "45": "TANY",
    "46": "86Sz", "47": "uQ5r", "3200": "mgzs",
}


def weather_code(condition_get_code):
    """Translate a Yahoo weather condition code into an icon suffix.

    Args:
        condition_get_code: condition code as a string (e.g. "32").

    Returns:
        The gdurl.com suffix for the matching icon. Unknown codes
        (e.g. "44", which the original if/elif chain never handled —
        its else-branch printed a message but left the result variable
        unassigned, raising UnboundLocalError) fall back to the
        "not available" icon used for code "3200".
    """
    try:
        return WEATHER_ICON_CODES[condition_get_code]
    except KeyError:
        # Keep the original diagnostic, but return a safe fallback
        # instead of crashing.
        print ("Condition code did not match the sequence")
        return "mgzs"
#************************************************************************************#
# #
# Below method is to get the Facebook Quick Reply Webhook Handling - Wikipedia #
# #
#************************************************************************************#
def wikipedia_search(reqContext):
    """Quick-reply handler: ask the user for a Wikipedia search topic."""
    option = reqContext.get("result").get("action")
    ask = "Please provide the topic you want to search in Wikipedia"
    body = {
        "speech": ask,
        "displayText": ask,
        "data": {
            "facebook": [
                {"text": "Please write the topic you want to search in Wikipedia"}
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#************************************************************************************#
# #
# This method is to get the Wikipedia Information via Google API #
# #
#************************************************************************************#
# Searchhook is for searching for Wkipedia information via Google API
def searchhook(reqContext):
    """Search the Wikipedia custom search engine for the user's topic.

    Takes the "any" parameter from the Dialogflow request, queries the
    Google Custom Search JSON API (num=1), and replies with a card
    (title, thumbnail, "More info" link), the result snippet, and the
    standard quick-reply menu.

    BUG FIXES vs. the original (Python 2 leftovers that crash on
    Python 3):
      * `search_string.encode('ascii')` produced bytes that were then
        concatenated with str fragments -> TypeError.
      * `snippet.encode('utf-8')` produced bytes that json.dumps()
        cannot serialise -> TypeError.
    """
    req = request.get_json(silent=True, force=True)
    print("Within Search function......!!")
    resolvedQuery = reqContext.get("result").get("resolvedQuery")
    print ("resolvedQuery: " + resolvedQuery)
    result = req.get("result")
    parameters = result.get("parameters")
    search_terms = parameters.get("any")
    # repr-of-list round trip keeps the historical quoting behaviour:
    # a single phrase arrives as "'some phrase'".
    quoted = str([str(term) for term in search_terms]).strip('[]')
    # NOTE(review): only spaces were ever escaped here; urllib.parse.quote
    # would be more robust for arbitrary input.
    search_string = quoted.replace(" ", "%20")
    print(search_string)
    baseurl = "https://www.googleapis.com/customsearch/v1?"
    google_query = ("key=AIzaSyDucQkfSMQSkcz8LTcfHhenq2aQ-QTOhGE"
                    "&cx=003066316917117435589:vcms6hy5lxs"
                    "&q=" + search_string + "&num=1")
    google_url = baseurl + google_query
    print("google_url::::" + google_url)
    result = urllib.request.urlopen(google_url).read()
    data = json.loads(result)
    # num=1 -> exactly one result item is expected.
    top_item = data['items'][0]
    speech = top_item['snippet'].strip()
    link_final = top_item['link']
    # Prefer the result's own thumbnail; fall back to a generic image.
    thumbnails = top_item.get('pagemap', {}).get('cse_thumbnail')
    if thumbnails:
        src_brace_removed_final = thumbnails[0]['src']
    else:
        src_brace_removed_final = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTwdc3ra_4N2X5G06Rr5-L0QY8Gi6SuhUb3DiSN_M-C_nalZnVA"
    search_string_final = quoted.strip("'")
    res = {
        "speech": speech,
        "displayText": speech,
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {
                    "attachment": {
                        "type": "template",
                        "payload": {
                            "template_type": "generic",
                            "elements": [
                                {
                                    "title": search_string_final,
                                    "image_url": src_brace_removed_final,
                                    "subtitle": "",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": link_final,
                                        "title": "More info"
                                    }]
                                }
                            ]
                        }
                    }
                },
                {"text": speech},
                {
                    "text": "Click on the below options to start over again",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "News",
                            "payload": "News",
                            "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Weather",
                            "payload": "weather",
                            "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Wikipedia",
                            "payload": "wikipedia",
                            "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
                        },
                        {
                            "content_type": "text",
                            "title": "YouTube",
                            "payload": "YouTube",
                            "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
                        }
                    ]
                }
            ]
        }
    }
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# This method is to get the Wikipedia Information via Google API via Funnel #
# #
#************************************************************************************#
# Searchhook is for searching for Wkipedia information via Google API
def wikipediaInformationSearch(reqContext):
    """Wikipedia search via Google Custom Search (funnel variant).

    Same as searchhook() but driven by the raw resolvedQuery instead of
    the "any" parameter. Replies with a result card, the snippet, and
    the standard quick-reply menu.

    BUG FIXES vs. the original (Python 2 leftovers that crash on
    Python 3): `.encode('ascii')` produced bytes concatenated with str
    fragments (TypeError), and `.encode('utf-8')` produced bytes that
    json.dumps() cannot serialise.
    """
    req = request.get_json(silent=True, force=True)
    resolvedQuery = reqContext.get("result").get("resolvedQuery")
    print ("resolvedQuery: " + resolvedQuery)
    # NOTE(review): only spaces are escaped; urllib.parse.quote would be
    # more robust for arbitrary input.
    search_string = resolvedQuery.replace(" ", "%20")
    google_query = ("https://www.googleapis.com/customsearch/v1?"
                    "key=AIzaSyDucQkfSMQSkcz8LTcfHhenq2aQ-QTOhGE"
                    "&cx=003066316917117435589:vcms6hy5lxs"
                    "&q=" + search_string + "&num=1")
    result = urllib.request.urlopen(google_query).read()
    data = json.loads(result)
    # num=1 -> exactly one result item is expected.
    top_item = data['items'][0]
    speech = top_item['snippet'].strip()
    link_final = top_item['link']
    # Prefer the result's own thumbnail; fall back to a generic image.
    thumbnails = top_item.get('pagemap', {}).get('cse_thumbnail')
    if thumbnails:
        src_brace_removed_final = thumbnails[0]['src']
    else:
        src_brace_removed_final = "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTwdc3ra_4N2X5G06Rr5-L0QY8Gi6SuhUb3DiSN_M-C_nalZnVA"
    search_string_final = resolvedQuery.strip("'")
    res = {
        "speech": speech,
        "displayText": speech,
        "data": {
            "facebook": [
                {"sender_action": "typing_on"},
                {
                    "attachment": {
                        "type": "template",
                        "payload": {
                            "template_type": "generic",
                            "elements": [
                                {
                                    "title": search_string_final,
                                    "image_url": src_brace_removed_final,
                                    "subtitle": "",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": link_final,
                                        "title": "More info"
                                    }]
                                }
                            ]
                        }
                    }
                },
                {"text": speech},
                {
                    "text": "Click on the below options to start over again",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "News",
                            "payload": "News",
                            "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Weather",
                            "payload": "Weather",
                            "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Wikipedia",
                            "payload": "Wikipedia",
                            "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
                        },
                        {
                            "content_type": "text",
                            "title": "YouTube",
                            "payload": "YouTube",
                            "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
                        }
                    ]
                }
            ]
        }
    }
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Below method is to get the Facebook Quick Reply Webhook Handling - YOUTUBE #
# #
#************************************************************************************#
def youtubeTopic(reqContext):
    """Quick-reply handler: ask the user what to search for on YouTube."""
    option = reqContext.get("result").get("action")
    prompt = "Please provide a topic to search in YouTube:"
    body = {
        "speech": prompt,
        "displayText": prompt,
        "data": {
            "facebook": [
                {"text": prompt}
            ]
        }
    }
    response = make_response(json.dumps(body, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#************************************************************************************#
# #
# This method is for searching YouTube videos via YouTube API via Funnel #
# #
#************************************************************************************#
def youtubeVideoSearch(reqContext):
    """Search YouTube for the user's query and reply with video cards.

    Queries the YouTube Data API v3 (search endpoint, num=5 intended)
    and replies with one open_graph template per returned video plus the
    standard quick-reply menu.

    BUG FIXES vs. the original:
      * `resolvedQueryFinal.encode('ascii')` produced bytes that were
        concatenated with str fragments -> TypeError on Python 3.
      * The payload hard-coded id_list[0]..id_list[4] and crashed with
        IndexError whenever fewer than five videos were returned; cards
        are now built from however many results exist (capped at five).
    """
    resolvedQuery = reqContext.get("result").get("resolvedQuery")
    # NOTE(review): only spaces are escaped; urllib.parse.quote would be
    # more robust for arbitrary input.
    search_term = resolvedQuery.replace(" ", "%20")
    youtube_query = ("https://www.googleapis.com/youtube/v3/search?part=id&q="
                     + search_term +
                     "&type=video&fields=items%2Fid"
                     "&key=AIzaSyCndOHGungnxfGosCn_dhkuSHA82FBbkQY"
                     "&cx=003066316917117435589%3Avcms6hy5lxs&num=5")
    result = urllib.request.urlopen(youtube_query).read()
    data = json.loads(result)
    id_list = [id_block['id'] for id_block in data['items']]
    # One open_graph card per returned video.
    video_cards = [
        {
            "attachment": {
                "type": "template",
                "payload": {
                    "template_type": "open_graph",
                    "elements": [
                        {"url": "https://www.youtube.com/watch?v=" + entry.get('videoId')}
                    ]
                }
            }
        }
        for entry in id_list[:5]
    ]
    menu = {
        "text": "Click on the below options to start over again",
        "quick_replies": [
            {
                "content_type": "text",
                "title": "News",
                "payload": "News",
                "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
            },
            {
                "content_type": "text",
                "title": "Weather",
                "payload": "Weather",
                "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
            },
            {
                "content_type": "text",
                "title": "Wikipedia",
                "payload": "Wikipedia",
                "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
            },
            {
                "content_type": "text",
                "title": "YouTube",
                "payload": "YouTube",
                "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
            },
            {
                "content_type": "text",
                "title": "For Sale",
                "payload": "For Sale",
                "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
            },
            {
                "content_type": "text",
                "title": "Contact Me",
                "payload": "contact",
                "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
            }
        ]
    }
    res = {
        "speech": "Video",
        "displayText": "Video",
        "data": {
            "facebook": [{"sender_action": "typing_on"}] + video_cards + [menu]
        }
    }
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Below method is to get the Facebook Quick Reply Webhook Handling - NEWS #
# #
#************************************************************************************#
def newsCategory(reqContext):
    """Return the Facebook quick-reply menu of supported news categories.

    Args:
        reqContext: parsed Dialogflow webhook request body (dict).

    Returns:
        Flask JSON response offering six category quick replies whose
        payloads (topnews/sports/business/...) drive the next turn.
    """
    print (reqContext.get("result").get("action"))
    #option = reqContext.get("result").get("action")
    res = {
        "speech": "Please select the category",
        "displayText": "Please select the category",
        "data" : {
            "facebook" : [
                {
                    "text": "Please select your favourite category:",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "Latest News",
                            "payload": "topnews",
                            "image_url": "http://www.freeiconspng.com/uploads/news-icon-13.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Sports",
                            "payload": "sports",
                            "image_url": "http://thebridgeconference.com/wp-content/uploads/2014/05/main_paragraph_icon.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Finance",
                            "payload": "business",
                            "image_url": "https://phil.ca/wp-content/uploads/2015/12/funraising-icons_fundraising.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Technology",
                            "payload": "technology",
                            "image_url": "https://cdn.pixabay.com/photo/2015/12/04/22/20/gear-1077550_640.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Entertainment",
                            "payload": "entertainment",
                            "image_url": "https://userscontent2.emaze.com/images/2afc7b67-eba3-41c8-adce-b1e2b1c34b02/99782968e977045b1f88f94d0c4e00cf.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Science & Nature",
                            "payload": "science",
                            "image_url": "https://www.designmate.com/images/biology1.png"
                        }
                    ]
                }
            ]
        }
    };
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Below method is to get the provide News Category Quick Replies - Top News #
# #
#************************************************************************************#
def news_category_topnews(reqContext):
    """Return the newspaper quick-reply menu for the selected category.

    Branches on the request's resolvedQuery (topnews/sports/business/
    technology/entertainment/science) and builds the matching list of
    newspaper quick replies whose payloads are NewsAPI source ids.

    NOTE(review): if resolvedQuery matches none of the branches, `res`
    is never assigned and json.dumps raises UnboundLocalError — consider
    adding a fallback branch.
    """
    resolvedQuery = reqContext.get("result").get("resolvedQuery")
    print ("resolvedQuery: " + resolvedQuery)
    if resolvedQuery == "topnews":
        res = {
            "speech": "Please select your favourite Newspaper:",
            "displayText": "Please select your favourite Newspaper:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "The Times Of India",
                                "payload": "the-times-of-india",
                                "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTGUM0uhwsV3vp9ZzMEnjJo4MDZRSC3cgp32qH64zZlWFsAiGNv"
                            },
                            {
                                "content_type": "text",
                                "title": "BBC News",
                                "payload": "bbc-news",
                                "image_url": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSWrLeudSaMDHDclbCjfvVoOdIK9q3XKqbWG5G1aDJzO3z6YZUP"
                            },
                            {
                                "content_type": "text",
                                "title": "CNN",
                                "payload": "cnn",
                                "image_url": "https://qph.ec.quoracdn.net/main-qimg-583846beabeef96102a6f18fc2096a82-c"
                            },
                            {
                                "content_type": "text",
                                "title": "Time",
                                "payload": "time",
                                "image_url": "https://s0.wp.com/wp-content/themes/vip/time2014/img/time-touch_icon_152.png"
                            },
                            {
                                "content_type": "text",
                                "title": "USA Today",
                                "payload": "usa-today",
                                "image_url": "http://www.gmkfreelogos.com/logos/U/img/U_Bahn.gif"
                            },
                            {
                                "content_type": "text",
                                "title": "The Telegraph",
                                "payload": "the-telegraph",
                                "image_url": "https://media.glassdoor.com/sqll/700053/the-telegraph-calcutta-squarelogo-1475068747795.png"
                            },
                            {
                                "content_type": "text",
                                "title": "The Washington Post",
                                "payload": "the-washington-post",
                                "image_url": "https://static1.squarespace.com/static/58505df4579fb348904cdf5f/t/58ab141b20099e74879fe27f/1487606851497/wp.jog"
                            },
                            {
                                "content_type": "text",
                                "title": "The Guardian (UK)",
                                "payload": "the-guardian-uk",
                                "image_url": "http://a2.mzstatic.com/eu/r30/Purple62/v4/0b/a9/56/0ba956de-3621-3585-285e-1141b53d4d51/icon175x175.png"
                            },
                            {
                                "content_type": "text",
                                "title": "The Guardian (AU)",
                                "payload": "the-guardian-au",
                                "image_url": "http://a2.mzstatic.com/eu/r30/Purple62/v4/0b/a9/56/0ba956de-3621-3585-285e-1141b53d4d51/icon175x175.png"
                            },
                            {
                                "content_type": "text",
                                "title": "Reuters",
                                "payload": "reuters",
                                "image_url": "http://www.adweek.com/wp-content/uploads/sites/9/2013/09/reuters-logo.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "The Hindu",
                                "payload": "the-hindu",
                                "image_url": "https://lh4.ggpht.com/_wAwneNQFfcruC-YiUpWKPtBTpzfdqLVTIArJyYRt52xGm4ABVQKT5eeLb_rl6em42kO=w300"
                            }
                        ]
                    }
                ]
            }
        };
    elif resolvedQuery == "sports":
        res = {
            "speech": "Please select the Newspaper of your choice:",
            "displayText": "Please select the Newspaper of your choice:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "ESPN",
                                "payload": "espn",
                                "image_url": "https://www.brandsoftheworld.com/sites/default/files/styles/logo-thumbnail/public/052016/untitled-1_242.png?itok=vy3l2HxD"
                            },
                            {
                                "content_type": "text",
                                "title": "Fox Sports",
                                "payload": "fox-sports",
                                "image_url": "http://i48.tinypic.com/rwroy1.gif"
                            },
                            {
                                "content_type": "text",
                                "title": "BBC Sport",
                                "payload": "bbc-sport",
                                "image_url": "http://yellingperformance.com/wp-content/uploads/2014/08/bbc-sport.png"
                            },
                            {
                                "content_type": "text",
                                "title": "Four Four Two",
                                "payload": "four-four-two",
                                "image_url": "http://www.free-icons-download.net/images/football-icon-53581.png"
                            },
                            {
                                "content_type": "text",
                                "title": "NFL",
                                "payload": "nfl-news",
                                "image_url": "http://orig09.deviantart.net/4d3f/f/2013/087/7/e/nfl_icon_by_slamiticon-d5zbovo.png"
                            },
                            {
                                "content_type": "text",
                                "title": "The Sport Bible",
                                "payload": "the-sport-bible",
                                "image_url": "https://pbs.twimg.com/profile_images/528682495923859456/yuXwYzR4.png"
                            }
                        ]
                    }
                ]
            }
        };
    elif resolvedQuery == "business":
        res = {
            "speech": "Please select the Newspaper of your choice:",
            "displayText": "Please select the Newspaper of your choice:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "The Economist",
                                "payload": "the-economist",
                                "image_url": "https://gs-img.112.ua/original/2016/04/01/221445.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "Financial Times",
                                "payload": "financial-times",
                                "image_url": "http://www.adweek.com/wp-content/uploads/sites/10/2014/03/financial_times_logo304x200.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "CNBC",
                                "payload": "cnbc",
                                "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/e/e3/CNBC_logo.svg/961px-CNBC_logo.svg.png"
                            },
                            {
                                "content_type": "text",
                                "title": "Business Insider",
                                "payload": "business-insider",
                                "image_url": "https://pbs.twimg.com/profile_images/661313209605976064/EjEK7KeO.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "Fortune",
                                "payload": "fortune",
                                "image_url": "https://fortunedotcom.files.wordpress.com/2014/05/f_icon_orange_1.png"
                            },
                            {
                                "content_type": "text",
                                "title": "The Wall Street Journal",
                                "payload": "the-wall-street-journal",
                                "image_url": "https://www.wsj.com/apple-touch-icon.png"
                            }
                        ]
                    }
                ]
            }
        };
    elif resolvedQuery == "technology":
        res = {
            "speech": "Please select the Newspaper of your choice:",
            "displayText": "Please select the Newspaper of your choice:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "TechRadar",
                                "payload": "techradar",
                                "image_url": "http://www.ittiam.com/vividhdr/img/techradar.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "TechCrunch",
                                "payload": "techcrunch",
                                "image_url": "https://tctechcrunch2011.files.wordpress.com/2014/04/tc-logo.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "T3N",
                                "payload": "t3n",
                                "image_url": "https://pbs.twimg.com/profile_images/2267864145/8oalkkbzq6davn5snoi4.png"
                            },
                            {
                                "content_type": "text",
                                "title": "Hacker News",
                                "payload": "hacker-news",
                                "image_url": "https://pbs.twimg.com/profile_images/659012257985097728/AXXMa-X2.png"
                            },
                            {
                                "content_type": "text",
                                "title": "Buzzfeed",
                                "payload": "buzzfeed",
                                "image_url": "https://static-s.aa-cdn.net/img/ios/352969997/d5f0fe265f21af1cffd41964bc7b46ab"
                            },
                            {
                                "content_type": "text",
                                "title": "Recode",
                                "payload": "recode",
                                "image_url": "https://cdn.vox-cdn.com/uploads/hub/sbnu_logo/633/large_mark.64395.png"
                            }
                        ]
                    }
                ]
            }
        };
    elif resolvedQuery == "entertainment":
        res = {
            "speech": "Please select the Newspaper of your choice:",
            "displayText": "Please select the Newspaper of your choice:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "MTV News",
                                "payload": "mtv-news",
                                "image_url": "http://imagesmtv-a.akamaihd.net/uri/mgid:file:http:shared:mtv.com/news/wp-content/uploads/2016/07/staff-author-250-1468362828.png?format=jpg&quality=.8"
                            },
                            {
                                "content_type": "text",
                                "title": "MTV News (UK)",
                                "payload": "mtv-news-uk",
                                "image_url": "http://imagesmtv-a.akamaihd.net/uri/mgid:file:http:shared:mtv.com/news/wp-content/uploads/2016/07/staff-author-250-1468362828.png?format=jpg&quality=.8"
                            }
                        ]
                    }
                ]
            }
        };
    elif resolvedQuery == "science":
        res = {
            "speech": "Please select the Newspaper of your choice:",
            "displayText": "Please select the Newspaper of your choice:",
            "data" : {
                "facebook" : [
                    {
                        "text": "Please select the Newspaper of your choice:",
                        "quick_replies": [
                            {
                                "content_type": "text",
                                "title": "National Geographic",
                                "payload": "national-geographic",
                                "image_url": "https://pbs.twimg.com/profile_images/798181194202566656/U8QbCBdH_400x400.jpg"
                            },
                            {
                                "content_type": "text",
                                "title": "New Scientist",
                                "payload": "new-scientist",
                                "image_url": "http://www.peteraldhous.com/Images/ns.jpg"
                            }
                        ]
                    }
                ]
            }
        };
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Below method is to get the News Details in JSON Format and put as List Template #
# #
#************************************************************************************#
# Module-level defaults. NOTE(review): topFourNewsArticle assigns *local*
# variables with these same names, so these globals appear unused — confirm
# before removing.
newspaper_url = ''
data = ''
def topFourNewsArticle(reqContext):
    """Fetch the top four articles of the selected source and render them.

    Queries the NewsAPI v1 articles endpoint for the source id carried in
    resolvedQuery, then returns a Facebook list template with the first
    four articles, a "View Site" button and a start-over quick-reply menu.

    NOTE(review): assumes the API response always has at least four
    articles with title/urlToImage/description/url keys — an IndexError/
    KeyError is raised otherwise. `newspai_access_token` is a module-level
    API key defined elsewhere in this file — confirm.
    """
    resolvedQuery = reqContext.get("result").get("resolvedQuery")
    #print ("resolvedQuery: " + resolvedQuery)
    newsAPI = "https://newsapi.org/v1/articles?source=" + resolvedQuery + "&sortBy=top&apiKey=" + newspai_access_token
    result = urllib.request.urlopen(newsAPI).read()
    data = json.loads(result)
    # Map the source id to its homepage for the "View Site" button.
    newspaper_url = newsWebsiteIdentification(resolvedQuery)
    #print ("newspaper_url finally: " + newspaper_url)
    res = {
        "speech": "NewsList",
        "displayText": "NewsList",
        "data" : {
            "facebook" : [
                {
                    "sender_action": "typing_on"
                },
                {
                    "attachment" : {
                        "type" : "template",
                        "payload" : {
                            "template_type" : "list",
                            "elements" : [
                                {
                                    # First element intentionally has no subtitle
                                    # (list templates render it as the cover item).
                                    "title": data['articles'][0]['title'],
                                    "image_url": data['articles'][0]['urlToImage'],
                                    "default_action": {
                                        "type": "web_url",
                                        "url": data['articles'][0]['url'],
                                        "webview_height_ratio": "tall",
                                    },
                                    "buttons": [
                                        {
                                            "title": "Read Article",
                                            "type": "web_url",
                                            "url": data['articles'][0]['url'],
                                            "webview_height_ratio": "tall",
                                        }
                                    ]
                                },
                                {
                                    "title": data['articles'][1]['title'],
                                    "image_url": data['articles'][1]['urlToImage'],
                                    "subtitle": data['articles'][1]['description'],
                                    "default_action":
                                    {
                                        "type": "web_url",
                                        "url": data['articles'][1]['url'],
                                        "webview_height_ratio": "tall"
                                    },
                                    "buttons": [
                                        {
                                            "title": "Read Article",
                                            "type": "web_url",
                                            "url": data['articles'][1]['url'],
                                            "webview_height_ratio": "tall"
                                        }
                                    ]
                                },
                                {
                                    "title": data['articles'][2]['title'],
                                    "image_url": data['articles'][2]['urlToImage'],
                                    "subtitle": data['articles'][2]['description'],
                                    "default_action":
                                    {
                                        "type": "web_url",
                                        "url": data['articles'][2]['url'],
                                        "webview_height_ratio": "tall"
                                    },
                                    "buttons": [
                                        {
                                            "title": "Read Article",
                                            "type": "web_url",
                                            "url": data['articles'][2]['url'],
                                            "webview_height_ratio": "tall"
                                        }
                                    ]
                                },
                                {
                                    "title": data['articles'][3]['title'],
                                    "image_url": data['articles'][3]['urlToImage'],
                                    "subtitle": data['articles'][3]['description'],
                                    "default_action":
                                    {
                                        "type": "web_url",
                                        "url": data['articles'][3]['url'],
                                        "webview_height_ratio": "tall"
                                    },
                                    "buttons": [
                                        {
                                            "title": "Read Article",
                                            "type": "web_url",
                                            "url": data['articles'][3]['url'],
                                            "webview_height_ratio": "tall"
                                        }
                                    ]
                                }
                            ],
                            "buttons": [
                                {
                                    "title": "View Site",
                                    "type": "web_url",
                                    "url": newspaper_url
                                }
                            ]
                        }
                    }
                },
                {
                    "text": "Click on the below options to start over again",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "News",
                            "payload": "News",
                            "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Weather",
                            "payload": "Weather",
                            "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Wikipedia",
                            "payload": "Wikipedia",
                            "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
                        },
                        {
                            "content_type": "text",
                            "title": "YouTube",
                            "payload": "YouTube",
                            "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
                        }
                    ]
                }
            ]
        }
    };
    #print (res)
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Identifying Newspaper Website #
# #
#************************************************************************************#
def newsWebsiteIdentification(resolvedQuery):
    """Map a NewsAPI source id to the newspaper's homepage URL.

    Args:
        resolvedQuery: NewsAPI v1 source identifier, e.g. "bbc-news".

    Returns:
        The homepage URL for the source, or an empty string for an
        unknown source id.

    Fixes a bug in the original 35-branch if/elif chain: an unknown
    source id left `newspaper_url` unassigned and the final print
    crashed with UnboundLocalError. A dict lookup also replaces the
    O(n) chain with O(1) access.
    """
    sources = {
        "the-times-of-india": "http://timesofindia.indiatimes.com",
        "bbc-news": "http://www.bbc.com/news",
        "cnn": "http://edition.cnn.com",
        "time": "http://time.com",
        "usa-today": "https://www.usatoday.com",
        "the-telegraph": "http://www.telegraph.co.uk",
        "the-washington-post": "https://www.washingtonpost.com",
        "the-guardian-uk": "https://www.theguardian.com/uk",
        "the-guardian-au": "https://www.theguardian.com/au",
        "reuters": "http://www.reuters.com",
        "the-hindu": "http://www.thehindu.com",
        "espn": "http://espn.go.com",
        "espn-cric-info": "http://www.espncricinfo.com",
        "four-four-two": "https://www.fourfourtwo.com/",
        "bbc-sport": "http://www.bbc.com/sport",
        "fox-sports": "http://www.foxsports.com",
        "the-sport-bible": "http://www.sportbible.com",
        "the-economist": "https://www.economist.com",
        "financial-times": "https://www.ft.com",
        "cnbc": "http://www.cnbc.com",
        "business-insider": "http://nordic.businessinsider.com",
        "fortune": "http://fortune.com",
        "the-wall-street-journal": "https://www.wsj.com",
        "techradar": "http://www.techradar.com",
        "techcrunch": "https://techcrunch.com",
        "t3n": "http://t3n.de",
        "hacker-news": "http://thehackernews.com",
        "buzzfeed": "https://www.buzzfeed.com",
        "entertainment-weekly": "http://ew.com",
        "mtv-news": "http://www.mtv.com",
        "mtv-news-uk": "http://www.mtv.co.uk/news",
        "national-geographic": "http://www.nationalgeographic.com",
        "new-scientist": "https://www.newscientist.com",
        "nfl-news": "https://www.nfl.com/",
    }
    newspaper_url = sources.get(resolvedQuery, "")
    if not newspaper_url:
        print ("Newspaper name did not match the input")
    print ("Within newsWebsiteIdentification Method, the newspaper_url is: " + newspaper_url)
    return newspaper_url
#************************************************************************************#
# #
# Help Information Providing #
# #
#************************************************************************************#
def help(resolvedQuery):
    """Build the usage-help response for the chatbot.

    `resolvedQuery` is accepted for interface compatibility but not used.
    Returns a Flask response carrying the help text both in the generic
    webhook fields and as a Facebook text message.

    NOTE: this function shadows the `help` builtin; the name is kept for
    compatibility with existing intent routing.
    """
    message = "I'm sorry if I make you confused. Please select Quick Reply or Menu to chat with me. \n\n 1. Click on 'News' to read latest news from 33 globally leading newspapers \n 2. Click on 'Weather' and write a city name to get weather forecast \n 3. Click on 'Wikipedia' and write a topic you want to know about. No need to ask a full question. \n 4. Click on 'YouTube' and search for your favourite videos. \n 5. You can still chat directly with Marvin without the quick replies like before for - Weather, Wikipedia & Small Talk."
    payload = {
        "speech": message,
        "displayText": message,
        "data": {
            "facebook": [
                {"text": message}
            ]
        },
    }
    response = make_response(json.dumps(payload, indent=4))
    response.headers['Content-Type'] = 'application/json'
    return response
#************************************************************************************#
# #
# Contact Information #
# #
#************************************************************************************#
def contact(resolvedQuery):
    """Return the founder contact card plus a start-over quick-reply menu.

    `resolvedQuery` is unused; kept for interface compatibility with the
    intent router.
    """
    print ("Within Contact Me method")
    speech = "Marvin.ai is now present from Denmark to help businesses all over the world. \nRequest for a free Demo now."
    res = {
        "speech": speech,
        "displayText": speech,
        "data" : {
            "facebook" : [
                {
                    "text": speech
                },
                {
                    "attachment" : {
                        "type" : "template",
                        "payload" : {
                            "template_type" : "generic",
                            "elements" : [
                                {
                                    "title" : "Swapratim Roy",
                                    "image_url" : "https://marvinchatbot.files.wordpress.com/2017/06/swapratim-roy-founder-owner-of-marvin-ai.jpg?w=700&h=&crop=1",
                                    "subtitle" : "An innovative entrepreneur, founder at Marvin.ai \nAarhus, Denmark \nCall: +45-7182-5584",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://www.messenger.com/t/swapratim.roy",
                                        "title": "Connect on Messenger"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "View Website"
                                    }]
                                }
                            ]
                        }
                    }
                },
                {
                    "sender_action": "typing_on"
                },
                {
                    "text": "Start over again",
                    "quick_replies": [
                        {
                            "content_type": "text",
                            "title": "News",
                            "payload": "News",
                            "image_url": "http://www.freeiconspng.com/uploads/newspaper-icon-20.jpg"
                        },
                        {
                            "content_type": "text",
                            "title": "Weather",
                            "payload": "Weather",
                            "image_url": "https://www.mikeafford.com/store/store-images/ww01_example_light_rain_showers.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Wikipedia",
                            "payload": "Wikipedia",
                            "image_url": "https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg/1122px-Wikipedia-logo-v2.svg.png"
                        },
                        {
                            "content_type": "text",
                            "title": "YouTube",
                            "payload": "YouTube",
                            "image_url": "https://cdn1.iconfinder.com/data/icons/logotypes/32/youtube-512.png"
                        },
                        {
                            "content_type": "text",
                            "title": "For Sale",
                            "payload": "For Sale",
                            "image_url": "http://p.lnwfile.com/_/p/_raw/pg/vn/cm.png"
                        },
                        {
                            "content_type": "text",
                            "title": "Contact Me",
                            "payload": "contact",
                            "image_url": "https://cdn3.iconfinder.com/data/icons/communication-mass-media-news/512/phone_marketing-128.png"
                        }
                    ]
                }
            ]
        }
    };
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
def requestDemo(resolvedQuery):
    """Acknowledge a demo request and return the founder contact card.

    `resolvedQuery` is unused; kept for interface compatibility.
    NOTE(review): `user_name` is a module-level global presumably set by the
    webhook handler for the current sender — confirm it is always populated
    before this runs.
    """
    print ("Within requestDemo method")
    speech = "Marvin.ai is now present from Denmark to help businesses all over the world. \nRequest for a free Demo now."
    res = {
        "speech": speech,
        "displayText": speech,
        "data" : {
            "facebook" : [
                {
                    "sender_action": "typing_on"
                },
                {
                    "text": "Thank you " + user_name + " for requesting a Demo. Please say Hi to Swapratim on Messenger to get him notified. :-)"
                },
                {
                    "attachment" : {
                        "type" : "template",
                        "payload" : {
                            "template_type" : "generic",
                            "elements" : [
                                {
                                    "title" : "Swapratim Roy",
                                    "image_url" : "https://marvinchatbot.files.wordpress.com/2017/06/swapratim-roy-founder-owner-of-marvin-ai.jpg?w=700&h=&crop=1",
                                    "subtitle" : "An innovative entrepreneur, founder at Marvin.ai \nAarhus, Denmark \nCall: +45-7182-5584",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://www.messenger.com/t/swapratim.roy",
                                        "title": "Connect on Messenger"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "View Website"
                                    }]
                                }
                            ]
                        }
                    }
                }
            ]
        }
    };
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
#************************************************************************************#
# #
# Displaying ALL CHATBOTS - For Sale #
# #
#************************************************************************************#
def forsale(resolvedQuery):
    """Return a carousel (generic template) of bot templates for sale.

    `resolvedQuery` is unused; kept for interface compatibility.
    """
    print ("Within forsale method")
    speech = "This bot is been created by marvin.ai. \nDo you like it?"
    res = {
        "speech": speech,
        "displayText": speech,
        "data" : {
            "facebook" : [
                {
                    "sender_action": "typing_on"
                },
                {
                    "attachment" : {
                        "type" : "template",
                        "payload" : {
                            "template_type" : "generic",
                            "elements" : [
                                {
                                    "title" : "You like Personal Assistant Bot Template?",
                                    "image_url" : "https://media.sproutsocial.com/uploads/2017/09/Real-Estate-Marketing-Ideas-1.png",
                                    "subtitle" : "Get customized virtual assistant for your Restaurant today",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Buy Template"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://www.facebook.com/marvinai.live",
                                        "title": "Facebook Page"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                },
                                {
                                    "title" : "Travel Agency Bot Template",
                                    "image_url" : "http://www.sunsail.eu/files/Destinations/Mediteranean/Greece/Athens/thira.jpg",
                                    "subtitle" : "Get customized virtual assistant for your Restaurant today",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Buy Template"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://m.me/926146750885580",
                                        "title": "Chat"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                },
                                {
                                    "title" : "Real Estate Bot Template",
                                    "image_url" : "https://husvild-static.s3.eu-central-1.amazonaws.com/images/files/000/280/915/large/3674bd34e6c1bc42b690adeacfe9c778507f261a?1516032863",
                                    "subtitle" : "Get qualified buyer and seller leads automatically delivered to your inbox!",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Buy Template"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://m.me/realestatebotai",
                                        "title": "Chat"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                },
                                {
                                    "title" : "Restaurant Bot Template",
                                    "image_url" : "https://www.outlookhindi.com/public/uploads/article/gallery/6eb226c14abd79a801172ab8d473e6d2_342_660.jpg",
                                    "subtitle" : "Perfectly crafted bot from assisting online customers to handle orders",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Buy Template"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://m.me/730273667158154",
                                        "title": "Chat"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                },
                                {
                                    "title" : "Coffee Shop Bot Template",
                                    "image_url" : "https://images-na.ssl-images-amazon.com/images/I/71Crz9MYPPL._SY355_.jpg",
                                    "subtitle" : "Your bot can deal with online customers, take orders and many more ",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Buy Template"
                                    },
                                    {
                                        "type": "web_url",
                                        "url": "https://m.me/200138490717876",
                                        "title": "Chat"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                },
                                {
                                    "title" : "VISA Check Bot",
                                    "image_url" : "http://famousdestinations.in/wp-content/uploads/2016/03/howtogetthere.png",
                                    "subtitle" : "One stop solution for all your VISA requirements...Coming Soon!",
                                    "buttons": [{
                                        "type": "web_url",
                                        "url": "https://marvinai.live",
                                        "title": "Visit Website"
                                    },
                                    {
                                        "type": "element_share"
                                    }]
                                }
                            ]
                        }
                    }
                }
            ]
        }
    };
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r
if __name__ == '__main__':
    # Honor the PORT env var (set by PaaS hosts); default to 5000 locally.
    port = int(os.getenv('PORT', 5000))
    print("Starting APPLICATION on port %d" % port)
    # `context` is presumably the Flask app object created earlier in this
    # module -- TODO confirm; 0.0.0.0 binds all interfaces.
    context.run(debug=True, port=port, host='0.0.0.0')
|
Swapratim/marvin-test
|
context.py
|
Python
|
apache-2.0
| 100,371
|
[
"VisIt"
] |
db5bcc3a8e2162b963084fc12f037dcaf38cc9fa2fef1859bca8d1cbe2c0abe0
|
"""Perform GATK based filtering, perferring variant quality score recalibration.
Performs hard filtering when VQSR fails on smaller sets of variant calls.
"""
import os
import toolz as tz
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import vcfutils, vfilter
def run(call_file, ref_file, vrn_files, data):
    """Filter an input call file, handling SNPs and indels independently.

    Splits the calls by variant class, filters each class (VQSR with a
    hard-filter fallback), recombines the two outputs and strips GATK
    merge artifacts from the result.
    """
    config = data["config"]
    snps, indels = vcfutils.split_snps_indels(call_file, ref_file, config)
    filtered = [
        _variant_filtration(snps, ref_file, vrn_files, data, "SNP",
                            vfilter.gatk_snp_hard),
        _variant_filtration(indels, ref_file, vrn_files, data, "INDEL",
                            vfilter.gatk_indel_hard),
    ]
    combined_name = "%scombined.vcf.gz" % os.path.commonprefix(filtered)
    merged = vcfutils.combine_variant_files(filtered, combined_name, ref_file, config)
    return _filter_nonref(merged, data)
# VCF FORMAT header lines for GATK physical phasing (PGT/PID); re-added by
# _filter_nonref because they can be lost when merging outputs.
_MISSING_HEADERS = """##FORMAT=<ID=PGT,Number=1,Type=String,Description="Physical phasing haplotype information, describing how the alternate alleles are phased in relation to one another">
##FORMAT=<ID=PID,Number=1,Type=String,Description="Physical phasing ID information, where each unique ID within a given sample (but not across samples) connects records within a phasing group">
"""
def _filter_nonref(in_file, data):
    """Fixes potential issues from GATK processing and merging

    - Remove NON_REF gVCF items from GATK VCF output; these occasionally sneak
      through in joint calling.
    - Add headers for physical phasing. These are not always present and the
      header definitions can be lost during merging.
    """
    out_file = "%s-gatkclean%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # Side file with the PGT/PID FORMAT definitions, consumed by
            # `bcftools annotate -h` to merge them into the output header.
            header_file = "%s-updateheaders.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(header_file, "w") as out_handle:
                out_handle.write(_MISSING_HEADERS)
            # grep -v drops every line mentioning NON_REF (stray gVCF
            # records), then re-compresses with bgzip for tabix indexing.
            cmd = ("bcftools annotate -h {header_file} -o - {in_file} | "
                   "grep -v NON_REF | bgzip -c > {tx_out_file}")
            do.run(cmd.format(**locals()), "Remove stray NON_REF gVCF information from VCF output", data)
    vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
def _apply_vqsr(in_file, ref_file, recal_file, tranch_file,
                sensitivity_cutoff, filter_type, data):
    """Apply a trained VQSR model at the given tranche sensitivity.

    Runs GATK ApplyRecalibration in SNP or INDEL mode and returns the
    path to the tranche-filtered VCF, reusing an existing output.
    """
    runner = broad.runner_from_config(data["config"])
    base, ext = utils.splitext_plus(in_file)
    out_file = "{base}-{filter}filter{ext}".format(base=base, ext=ext,
                                                   filter=filter_type)
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        args = ["-T", "ApplyRecalibration",
                "-R", ref_file,
                "--input", in_file,
                "--out", tx_out_file,
                "--tranches_file", tranch_file,
                "--recal_file", recal_file,
                "--mode", filter_type]
        # User-configured options, when present, replace the default
        # sensitivity cutoff entirely.
        custom_opts = config_utils.get_resources("gatk_apply_recalibration",
                                                 data["config"]).get("options", [])
        args += custom_opts or ["--ts_filter_level", sensitivity_cutoff]
        runner.run_gatk(args)
    return out_file
def _get_training_data(vrn_files):
"""Retrieve training data, returning an empty set of information if not available.
"""
out = {"SNP": [], "INDEL": []}
# SNPs
for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
("train_omni", "known=false,training=true,truth=true,prior=12.0"),
("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
if name not in vrn_files:
return {}
else:
out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
# Indels
if "train_indels" in vrn_files:
out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
vrn_files["train_indels"]))
else:
return {}
return out
def _have_training_data(vrn_files):
    """True when VQSR training resources are available for this build."""
    return bool(_get_training_data(vrn_files))
def _get_vqsr_training(filter_type, vrn_files):
    """Build GATK resource arguments for VQSR training of SNPs or indels."""
    args = []
    for name, info, path in _get_training_data(vrn_files)[filter_type]:
        args.append("-resource:%s,VCF,%s" % (name, info))
        args.append(path)
    if filter_type == "INDEL":
        args.extend(["--maxGaussians", "4"])
    return args
def _get_vqsr_annotations(filter_type):
"""Retrieve appropriate annotations to use for VQSR based on filter type.
Issues reported with MQ and bwa-mem quality distribution, results in intermittent
failures to use VQSR:
http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
"""
if filter_type == "SNP":
# MQ, MQRankSum
return ["DP", "QD", "FS", "ReadPosRankSum"]
else:
assert filter_type == "INDEL"
# MQRankSum
return ["DP", "QD", "FS", "ReadPosRankSum"]
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
    """Run variant quality score recalibration (GATK VariantRecalibrator).

    Args:
        in_file: input VCF restricted to one variant class.
        ref_file: reference genome FASTA.
        vrn_files: dict of training resource VCFs.
        sensitivity_cutoff: target truth sensitivity tranche (string).
        filter_type: "SNP" or "INDEL".
        data: sample/world information dictionary.

    Returns:
        (recal_file, tranches_file) on success, or (None, None) when GATK
        cannot train a model (typically too few variants).
    """
    cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91",
               "99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
    if sensitivity_cutoff not in cutoffs:
        cutoffs.append(sensitivity_cutoff)
        # NOTE(review): lexical (string) sort; assumed cosmetic for the
        # -tranche argument ordering -- confirm.
        cutoffs.sort()
    broad_runner = broad.runner_from_config(data["config"])
    base = utils.splitext_plus(in_file)[0]
    recal_file = "%s.recal" % base
    tranches_file = "%s.tranches" % base
    if not utils.file_exists(recal_file):
        with file_transaction(data, recal_file, tranches_file) as (tx_recal, tx_tranches):
            params = ["-T", "VariantRecalibrator",
                      "-R", ref_file,
                      "--input", in_file,
                      "--mode", filter_type,
                      "--recal_file", tx_recal,
                      "--tranches_file", tx_tranches]
            params += _get_vqsr_training(filter_type, vrn_files)
            # User-configured options replace the default tranches/annotations.
            resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
            opts = resources.get("options", [])
            if not opts:
                for cutoff in cutoffs:
                    opts += ["-tranche", str(cutoff)]
                for a in _get_vqsr_annotations(filter_type):
                    opts += ["-an", a]
            params += opts
            cores = dd.get_cores(data)
            memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
            try:
                broad_runner.new_resources("gatk-vqsr")
                broad_runner.run_gatk(params, log_error=False, memscale=memscale)
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. GATK fails here when not
                # enough values are present to train the model.
                return None, None
    return recal_file, tranches_file
# ## SNP and indel specific variant filtration
def _already_hard_filtered(in_file, filter_type):
    """Check for a hard-filter output left by a previous VQSR failure."""
    stem = utils.splitext_plus(in_file)[0]
    return utils.file_exists("%s-filter%s.vcf.gz" % (stem, filter_type))
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type,
                        hard_filter_fn):
    """Filter SNP and indel variant calls using GATK best practice recommendations.

    Hard filter if configuration indicates too little data or a hard filtering
    run already finished; otherwise try VQSR, falling back to hard filters when
    training resources are missing or VQSR fails to train.

    :param in_file: input VCF of raw calls
    :param ref_file: reference genome FASTA
    :param vrn_files: known-variant training resources for VQSR
    :param data: bcbio sample data dictionary
    :param filter_type: "SNP" or "INDEL"
    :param hard_filter_fn: callable(in_file, data) applying hard filters
    """
    # Algorithms multiplied by number of input files to check for large enough sample sizes
    algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
    if (not config_utils.use_vqsr(algs) or
            _already_hard_filtered(in_file, filter_type)):
        # Bug fix: log messages previously read "hard filers".
        logger.info("Skipping VQSR, using hard filters: we don't have whole genome input data")
        return hard_filter_fn(in_file, data)
    elif not _have_training_data(vrn_files):
        logger.info("Skipping VQSR, using hard filters: genome build does not have sufficient training data")
        return hard_filter_fn(in_file, data)
    else:
        # Target truth sensitivities per variant type (GATK recommendations).
        sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
        recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
                                              sensitivities[filter_type], filter_type, data)
        if recal_file is None:  # VQSR failed
            logger.info("VQSR failed due to lack of training data. Using hard filtering.")
            return hard_filter_fn(in_file, data)
        else:
            return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
                               sensitivities[filter_type], filter_type, data)
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/variation/gatkfilter.py
|
Python
|
mit
| 9,778
|
[
"BWA"
] |
e4976b63137c60641d539b643bb16ba4706c043d35ee97a81ae460516758ee33
|
##############################################################################
# MSIBI: A package for optimizing coarse-grained force fields using multistate
# iterative Boltzmann inversion.
# Copyright (c) 2017 Vanderbilt University and the Authors
#
# Authors: Christoph Klein, Timothy C. Moore
# Contributors: Davy Yue
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files, to deal
# in MSIBI without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# # copies of MSIBI, and to permit persons to whom MSIBI is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of MSIBI.
#
# MSIBI IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH MSIBI OR THE USE OR OTHER DEALINGS ALONG WITH
# MSIBI.
#
# You should have received a copy of the MIT license.
# If not, see <https://opensource.org/licenses/MIT/>.
##############################################################################
import os
import mdtraj as md
# Run-script preamble for HOOMD-blue v1.x; formatted by State.save_runscript()
# with (xml_filename, kT, table_width).
HOOMD1_HEADER = """
from hoomd_script import *
system = init.read_xml(filename="{0}", wrap_coordinates=True)
T_final = {1:.1f}
pot_width = {2:d}
table = pair.table(width=pot_width)
"""

# Run-script preamble for HOOMD-blue v2.x; same format arguments as above.
HOOMD2_HEADER = """
import hoomd
import hoomd.md
from hoomd.deprecated.init import read_xml
hoomd.context.initialize("")
system = read_xml(filename="{0}", wrap_coordinates=True)
T_final = {1:.1f}
pot_width = {2:d}
nl = hoomd.md.nlist.cell()
table = hoomd.md.pair.table(width=pot_width, nlist=nl)
"""

# One tabulated pair-potential registration; formatted per (type1, type2) pair.
HOOMD_TABLE_ENTRY = """
table.set_from_file('{type1}', '{type2}', filename='{potential_file}')
"""
class State(object):
    """A single state used as part of a multistate optimization.

    Parameters
    ----------
    kT : float
        Thermal energy of this state.
    state_dir : str, optional
        Directory holding this state's input/output files.
    traj_file : str, optional
        Name of the query trajectory file; defaults to 'query.dcd'.
    top_file : str, optional
        Name of a topology file needed to load the trajectory, if any.
    name : str, optional
        Label for the state; defaults to 'state-<kT>'.
    backup_trajectory : bool
        True if each query trajectory is backed up (default=False).

    Attributes
    ----------
    traj : md.Trajectory or None
        The trajectory associated with this state (loaded lazily via
        reload_query_trajectory()).
    """

    def __init__(self, kT, state_dir='', traj_file=None, top_file=None,
                 name=None, backup_trajectory=False):
        self.kT = kT
        self.state_dir = state_dir
        # Bug fix: previously traj_path was only assigned when traj_file was
        # omitted, so passing an explicit traj_file left the attribute
        # undefined and reload_query_trajectory() raised AttributeError.
        self.traj_path = os.path.join(state_dir, traj_file or 'query.dcd')
        # Bug fix: always define top_path so reload_query_trajectory() can
        # safely test it even when no topology file was given.
        self.top_path = os.path.join(state_dir, top_file) if top_file else None
        self.traj = None
        if not name:
            name = 'state-{0:.3f}'.format(self.kT)
        self.name = name
        self.backup_trajectory = backup_trajectory

    def reload_query_trajectory(self):
        """Reload the query trajectory. """
        if self.top_path:
            self.traj = md.load(self.traj_path, top=self.top_path)
        else:
            self.traj = md.load(self.traj_path)

    def save_runscript(self, table_potentials, table_width, engine='hoomd',
                       runscript='hoomd_run_template.py'):
        """Save the input script for the MD engine. """
        header = list()
        # self.HOOMD_VERSION is attached externally by the optimizer —
        # TODO confirm it is always set before this method runs.
        if self.HOOMD_VERSION == 1:
            HOOMD_HEADER = HOOMD1_HEADER
        elif self.HOOMD_VERSION == 2:
            HOOMD_HEADER = HOOMD2_HEADER
        else:
            # Bug fix: previously fell through to an UnboundLocalError below.
            raise ValueError('Unsupported HOOMD version: {0}'.format(self.HOOMD_VERSION))
        header.append(HOOMD_HEADER.format('start.hoomdxml', self.kT, table_width))
        for type1, type2, potential_file in table_potentials:
            header.append(HOOMD_TABLE_ENTRY.format(**locals()))
        header = ''.join(header)
        with open(os.path.join(self.state_dir, runscript)) as fh:
            body = ''.join(fh.readlines())
        runscript_file = os.path.join(self.state_dir, 'run.py')
        with open(runscript_file, 'w') as fh:
            fh.write(header)
            fh.write(body)
|
mosdef-hub/msibi
|
msibi/state.py
|
Python
|
mit
| 4,215
|
[
"MDTraj"
] |
f71f4badf3731d67e71f594c43f69292100ef9e469fa0943c8403fa8dfc3bc65
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Optional single CLI argument: the build (object) directory prefix used when
# documenting an out-of-source build; also make the cwd importable so the
# generated apply_relpath helper module can be loaded.
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))
import apply_relpath
# Relative path from the Sphinx source directory back to the top source tree.
IncludePath = apply_relpath.get_topsrcdir_asrelativepathto_objdirsfnxsource()[1]
def pts(category, pyfile):
    """Announce on stdout that *pyfile* of the given *category* is being documented."""
    message = 'Auto-documenting %s file %s' % (category, pyfile)
    print(message)
# helper fn
def sphinxify_comment(text):
    """Convert option-comment markup into reST: ``@@`` becomes an underscore
    and ``$...$`` delimited math becomes an inline ``:math:`` role."""
    # Order matters: opening-delimiter forms must be rewritten before the
    # closing-delimiter forms consume the remaining '$' characters.
    replacements = (
        ('@@', '_'),
        (' $', ' :math:`'),
        ('($', '(\ :math:`'),
        ('$ ', '` '),
        ('$.', '`.'),
        ('$,', '`,'),
        ('$)', '`\ )'),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
# helper fn
# including the options abbr substitutions file in every SSSOUT option file slows
# compilation by a factor of ten. so, back-translate |%s__%s| into :term:`%s`
def substitute_comment(cmnt):
    """Back-translate each ``|module__option|`` abbreviation in *cmnt* into an
    explicit ``:term:`` reference, repeating until none remain."""
    pattern = re.compile(r'^(.*?)[\s\(]\|(\w+)__(\w+)\|[\s\).,](.*?)$')
    match = pattern.match(cmnt)
    while match:
        option = match.group(3).upper()
        module = match.group(2).upper()
        cmnt = (match.group(1) + ' :term:`' + option + ' <' + option +
                ' (' + module + ')>` ' + match.group(4))
        match = pattern.match(cmnt)
    return cmnt
# helper fn
def determine_options(cfilename):
    """Scan a plugin C++ source file for documented options.

    Parses ``/*- ... -*/`` documentation comments (MODULEDESCRIPTION,
    SUBSECTION and per-option comments) together with the ``add_*("NAME", ...)``
    option registration call that immediately follows each comment, and writes
    reST output through the module-level file handles ``fmodule``,
    ``fglossary`` and ``fabbr`` opened by the driver code below.
    """
    # Recognizers for doc-comment markers and option registration calls.
    module = re.compile(r'^(.*)name\s*==\s*"(.*)"(.*?)$', re.IGNORECASE)
    modulecomment = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    modulecommentstart = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)(\s*?)$', re.IGNORECASE)
    subsection = re.compile(r'^(\s*?)\/\*-\s*SUBSECTION\s*(.*?)\s*-\*\/(\s*?)$', re.IGNORECASE)
    comment = re.compile(r'^(\s*?)\/\*-\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentend = re.compile(r'^(\s*)(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentstart = re.compile(r'^(\s*?)\/\*-\s*(.*)(\s*?)$', re.IGNORECASE)
    kw_string_def_opt = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_opt_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_bool_def = re.compile(r'add_bool\(\s*"(.*)"\s*,\s*("?)([-\w]+)("?)\s*\)')
    kw_double_def = re.compile(r'add_double\(\s*"(.*)"\s*,\s*("?)([-/\.\w]+)("?)\s*\)')
    kw_generic_def = re.compile(r'add_(\w+)\(\s*"(\w+)"\s*,\s*("?)([-\w]+)("?)\s*\)')  # untested
    kw_complicated = re.compile(r'add\(\s*"(\w*)"\s*,\s*new\s+(\w+)\(\)\s*\)')  # untested
    fcfile = open(cfilename)
    contents = fcfile.readlines()
    fcfile.close()
    ii = 0
    while (ii < len(contents)):
        line = contents[ii]
        if module.match(line):
            # New module section: remember the module name for later entries.
            currentmodule = module.match(line).group(2).upper()
            fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (currentmodule.lower()))
        elif modulecommentstart.match(line):
            # Accumulate a (possibly multi-line) MODULEDESCRIPTION comment.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if modulecommentstart.match(line):
                        tag += modulecommentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if modulecomment.match(line):
                        tag += modulecomment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            fglossary.write('**%s**: %s\n\n' % (currentmodule, tag))
        elif subsection.match(line):
            # SUBSECTION marker: start a new glossary subsection.
            currentsubsection = subsection.match(line).group(2)
            fglossary.write('\n%s\n%s\n\n' % (currentsubsection, '^' * len(currentsubsection)))
            fglossary.write('.. glossary::\n :sorted:\n\n')
        elif commentstart.match(line):
            # Accumulate a (possibly multi-line) per-option comment.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if commentstart.match(line):
                        tag += commentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if comment.match(line):
                        tag += comment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            tag = sphinxify_comment(tag)
            # capture option immediately after comment
            kw_name = ''
            kw_default = 'No Default'
            kw_type = ''
            kw_possible = ''
            ii += 1
            line = contents[ii]
            if (not line or line.isspace()):
                ii += 1
                line = contents[ii]
            # Identify which add_* registration form follows the comment and
            # extract the option name, type, default and allowed values.
            if kw_string_def_opt.search(line):
                m = kw_string_def_opt.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def_opt_2.search(line):
                m = kw_string_def_opt_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def.search(line):
                m = kw_string_def.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_string_def_2.search(line):
                m = kw_string_def_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_bool_def.search(line):
                m = kw_bool_def.search(line)
                kw_name = m.group(1)
                kw_type = 'bool'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
                    if kw_default == '1':
                        kw_default = 'true'
                    if kw_default == '0':
                        kw_default = 'false'
            elif kw_double_def.search(line):
                m = kw_double_def.search(line)
                kw_name = m.group(1)
                kw_type = 'double'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
            elif kw_generic_def.search(line):
                m = kw_generic_def.search(line)
                kw_name = m.group(2)
                kw_type = m.group(1)
                if not (not m.group(4) or m.group(4).isspace()):
                    kw_default = m.group(4).lower()
            elif kw_complicated.search(line):
                m = kw_complicated.search(line)
                kw_name = m.group(1)
                kw_type = m.group(2)
                if kw_type == 'ArrayType':
                    kw_type = 'array'
                elif kw_type == 'MapType':
                    kw_type = 'map'
                elif kw_type == 'PythonDataType':
                    kw_type = 'python'
                else:
                    print('ERROR: unrecognized type %s for %s' % (kw_type, kw_name))
                    sys.exit()
            # Normalize short type tags to the names used in the docs.
            if kw_type == 'str': kw_type = 'string'
            elif kw_type == 'int': kw_type = 'integer'
            elif kw_type == 'bool': kw_type = 'boolean'
            elif kw_type == 'double': pass
            elif kw_type == 'array': pass
            elif kw_type == 'map': pass
            elif kw_type == 'python': pass
            else:
                print('ERROR: unrecognized type2 %s for %s' % (kw_type, kw_name))
                sys.exit()
            #print 'kw_name = \t', kw_name
            #print 'kw_type = \t', kw_type
            #print 'kw_dflt = \t', kw_default
            #print 'kw_poss = \t', kw_possible
            #print 'kw_tagl = \t', tag
            #print '\n'
            # substitution list file
            fabbr.write('.. |%s__%s| replace:: :term:`%s <%s (%s)>`\n' %
                (currentmodule.lower(), kw_name.lower(), kw_name.upper(), kw_name.upper(), currentmodule.upper()))
            # individual option file for plugin options. rather pointless but consistent w/regular module options
            fsssdoc = open('source/autodir_plugins/'+currentmodule.lower()+'__'+kw_name.lower()+'.rst', 'w')
            div = '"' * (14 + len(currentmodule) + 2 * len(kw_name))
            fsssdoc.write(':term:`%s <%s (%s)>`\n%s\n\n' % (kw_name.upper(), kw_name.upper(), currentmodule.upper(), div))
            fsssdoc.write(' %s\n\n' % (substitute_comment(tag)))
            fglossary.write(' %s (%s)\n %s\n\n' % (kw_name.upper(), currentmodule.upper(), tag))
            # Emit the type/possible-values/default fields to both outputs.
            if kw_type == 'boolean':
                fglossary.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
                fsssdoc.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
            elif (kw_type == 'double') and ((kw_name.lower().find('conv') > -1) or (kw_name.lower().find('tol') > -1)):
                fglossary.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
                fsssdoc.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
            elif (kw_type == 'string') and ((kw_name.lower() == 'basis') or (kw_name.lower().startswith('df_basis'))):
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
                fglossary.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
                fsssdoc.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
            else:
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
            if not (not kw_possible or kw_possible.isspace()):
                sline = kw_possible.split()
                fglossary.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
                fsssdoc.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
            fglossary.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.close()
        # Stop scanning once the plugin entry point declaration is reached.
        if (line.find('extern "C" PsiReturnType') > -1):
            break
        ii += 1
# Objective #3
# Plugin directories in psi4/plugin/
# Driver page listing every available plugin, plus the shared abbreviation file.
fdriver = open('source/autodoc_available_plugins.rst', 'w')
fdriver.write('\n.. index:: plugins; available\n')
fdriver.write('.. _`sec:availablePlugins`:\n\n')
fdriver.write('====================================================\n')
fdriver.write('Emerging Theoretical Methods: Plugins DFADC to RQCHF\n')
fdriver.write('====================================================\n\n')
fdriver.write('.. toctree::\n :maxdepth: 1\n\n')
fabbr = open('source/autodoc_abbr_options_plugins.rst', 'w')
# from each plugin directory ...
for pydir in glob.glob(DriverPath + '../../plugins/*'):
    dirname = os.path.split(pydir)[1]
    div = '=' * len(dirname)
    if dirname not in []:
        pts('plugin', dirname)
        fdriver.write(' autodir_plugins/module__%s' % (dirname))
        # Per-plugin module page.
        fmodule = open('source/autodir_plugins/module__'+dirname+'.rst', 'w')
        fmodule.write('\n.. _`sec:%s`:\n' % (dirname.lower()))
        fmodule.write('.. index:: plugin; %s\n\n' % (dirname.lower()))
        fmodule.write(':srcplugin:`' + dirname.lower() + '`\n')
        fmodule.write(div + '=============' + '\n\n')
        #fmodule.write(dirname.lower() + '\n')
        #fmodule.write(div + '\n\n')
        #fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (dirname.lower()))
        fmodule.write('.. toctree::\n :hidden:\n\n /autodir_plugins/glossary__%s\n\n' % (dirname.lower()))
        # Per-plugin glossary of documented options.
        fglossary = open('source/autodir_plugins/glossary__'+dirname+'.rst', 'w')
        fglossary.write('\n.. include:: /autodoc_abbr_options_c.rst\n')
        fglossary.write('.. include:: /autodoc_abbr_options_plugins.rst\n\n')
        fglossary.write('.. glossary::\n :sorted:\n\n')
        # ... include doc.rst file
        docfile = '%s/doc.rst' % (pydir)
        if os.path.isfile(docfile):
            fmodule.write('.. include:: %splugins/%s/doc.rst\n\n' % (IncludePath, dirname))
        # ... include docstrings from any *.py files
        pyfiles = glob.glob(pydir + '/*.py')
        if len(pyfiles) > 0:
            fmodule.write('Py-side Documentation\n')
            fmodule.write('---------------------\n\n')
            for pyfile in pyfiles:
                filename = os.path.split(pyfile)[1]
                basename = os.path.splitext(filename)[0]
                fmodule.write('.. automodule:: %s.%s\n' % (dirname, basename))
                fmodule.write(' :members:\n')
                fmodule.write(' :undoc-members:\n\n')
        # ... include keywords section from any *.cc files
        # todo: turn this into a fn and store in a dictionary
        cfiles = glob.glob(pydir + '/*.cc') + glob.glob(pydir + '/*.cc.in')
        if len(cfiles) > 0:
            fmodule.write('C-side Documentation\n')
            fmodule.write('--------------------\n\n')
            for cfile in cfiles:
                determine_options(cfile)
            fmodule.write('.. include:: /autodir_plugins/glossary__%s.rst' % (dirname))
        fmodule.write('\n\n')
        fmodule.close()
        fglossary.write('\n\n')
        fglossary.close()
        fdriver.write('\n')
fdriver.write('\n')
fdriver.close()
fabbr.write('\n')
fabbr.close()
|
andysim/psi4
|
doc/sphinxman/document_plugins.py
|
Python
|
gpl-2.0
| 15,256
|
[
"Psi4"
] |
64aee7ba79dbca05c5e069f89e16f0c7b87863c2ba941f653cfcfe3558ed6b99
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_role
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Role Avi RESTful Object
description:
- This module is used to configure Role object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the object.
required: true
privileges:
description:
- List of permission.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Role object
avi_role:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_role
"""
RETURN = '''
obj:
description: Role (api/role) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: drive the Avi 'role' object through the shared Avi API helper."""
    # Module-specific options; merged below with the common Avi connection options.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        name=dict(type='str', required=True),
        privileges=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    if not HAS_AVI:
        # The Avi SDK is an optional dependency; fail with install guidance.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'role',
                           set([]))


if __name__ == '__main__':
    main()
|
adityacs/ansible
|
lib/ansible/modules/network/avi/avi_role.py
|
Python
|
gpl-3.0
| 3,201
|
[
"VisIt"
] |
a47c704f9c9e612e603462ca3d09c54e7113d1845b3462c63f0888420df87dba
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A master convenience script with many tools for vasp and structure analysis.
"""
import argparse
import itertools
import sys
from tabulate import tabulate, tabulate_formats
from pymatgen import SETTINGS, __version__
from pymatgen.core.structure import Structure
from pymatgen.cli.pmg_analyze import analyze
from pymatgen.cli.pmg_config import configure_pmg
from pymatgen.cli.pmg_plot import plot
from pymatgen.cli.pmg_potcar import generate_potcar
from pymatgen.cli.pmg_query import do_query
from pymatgen.cli.pmg_structure import analyze_structures
from pymatgen.io.vasp import Incar, Potcar
def parse_view(args):
    """
    Handle view commands: load a structure file and display it with VTK.

    :param args: Args from command.
    """
    # Imported lazily so VTK is only required when 'pmg view' is used.
    from pymatgen.vis.structure_vtk import StructureVis

    if args.exclude_bonding:
        excluded = args.exclude_bonding[0].split(",")
    else:
        excluded = []
    structure = Structure.from_file(args.filename[0])
    visualizer = StructureVis(excluded_bonding_elements=excluded)
    visualizer.set_structure(structure)
    visualizer.show()
    return 0
def diff_incar(args):
    """
    Handle diff commands: print a side-by-side comparison of two INCAR files.

    :param args: Args from command. args.incars holds the two file paths.
    """
    filepath1 = args.incars[0]
    filepath2 = args.incars[1]
    incar1 = Incar.from_file(filepath1)
    incar2 = Incar.from_file(filepath2)

    def format_lists(v):
        # Compress repeated numeric values (e.g. MAGMOM) into "count*value" runs.
        if isinstance(v, (tuple, list)):
            return " ".join(["%d*%.2f" % (len(tuple(group)), i) for (i, group) in itertools.groupby(v)])
        return v

    d = incar1.diff(incar2)
    output = [
        ["SAME PARAMS", "", ""],
        ["---------------", "", ""],
        ["", "", ""],
        ["DIFFERENT PARAMS", "", ""],
        ["----------------", "", ""],
    ]
    # Identical parameters show the same value in both columns.
    output.extend(
        [(k, format_lists(d["Same"][k]), format_lists(d["Same"][k])) for k in sorted(d["Same"].keys()) if k != "SYSTEM"]
    )
    output.extend(
        [
            (
                k,
                format_lists(d["Different"][k]["INCAR1"]),
                format_lists(d["Different"][k]["INCAR2"]),
            )
            for k in sorted(d["Different"].keys())
            if k != "SYSTEM"
        ]
    )
    print(tabulate(output, headers=["", filepath1, filepath2]))
    return 0
def main():
    """
    Build the top-level ``pmg`` argument parser, register all subcommands,
    then dispatch to the selected subcommand's handler function.
    """
    parser = argparse.ArgumentParser(
        description="""
    pmg is a convenient script that uses pymatgen to perform many
    analyses, plotting and format conversions. This script works based on
    several sub-commands with their own options. To see the options for the
    sub-commands, type "pmg sub-command -h".""",
        epilog="""Version: {}""".format(__version__),
    )

    subparsers = parser.add_subparsers()

    # -- pmg config: setup/installation helpers --------------------------
    parser_config = subparsers.add_parser(
        "config",
        help="Tools for configuring pymatgen, e.g., " "potcar setup, modifying .pmgrc.yaml " "configuration file.",
    )
    groups = parser_config.add_mutually_exclusive_group(required=True)
    groups.add_argument(
        "-p",
        "--potcar",
        dest="potcar_dirs",
        metavar="dir_name",
        nargs=2,
        help="Initial directory where downloaded VASP "
        "POTCARs are extracted to, and the "
        "output directory where the reorganized "
        "potcars will be stored. The input "
        "directory should be "
        "the parent directory that contains the "
        "POT_GGA_PAW_PBE or potpaw_PBE type "
        "subdirectories.",
    )
    groups.add_argument(
        "-i",
        "--install",
        dest="install",
        metavar="package_name",
        choices=["enumlib", "bader"],
        help="Install various optional command line " "tools needed for full functionality.",
    )
    groups.add_argument(
        "-a",
        "--add",
        dest="var_spec",
        nargs="+",
        help="Variables to add in the form of space "
        "separated key value pairs. E.g., "
        "PMG_VASP_PSP_DIR ~/psps",
    )
    parser_config.set_defaults(func=configure_pmg)

    # -- pmg analyze: VASP run analysis ---------------------------------
    parser_analyze = subparsers.add_parser("analyze", help="Vasp calculation analysis tools.")
    parser_analyze.add_argument(
        "directories",
        metavar="dir",
        default=".",
        type=str,
        nargs="*",
        help="directory to process (default to .)",
    )
    parser_analyze.add_argument(
        "-e",
        "--energies",
        dest="get_energies",
        action="store_true",
        help="Print energies",
    )
    parser_analyze.add_argument(
        "-m",
        "--mag",
        dest="ion_list",
        type=str,
        nargs=1,
        help="Print magmoms. ION LIST can be a range " "(e.g., 1-2) or the string 'All' for all ions.",
    )
    parser_analyze.add_argument(
        "-r",
        "--reanalyze",
        dest="reanalyze",
        action="store_true",
        help="Force reanalysis. Typically, vasp_analyzer"
        " will just reuse a vasp_analyzer_data.gz if "
        "present. This forces the analyzer to reanalyze "
        "the data.",
    )
    parser_analyze.add_argument(
        "-f",
        "--format",
        dest="format",
        choices=tabulate_formats,
        default="simple",
        help="Format for table. Supports all options in tabulate package.",
    )
    parser_analyze.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="Verbose mode. Provides detailed output on progress.",
    )
    parser_analyze.add_argument(
        "-q",
        "--quick",
        dest="quick",
        action="store_true",
        help="Faster mode, but less detailed information. Parses individual vasp files.",
    )
    parser_analyze.add_argument(
        "-s",
        "--sort",
        dest="sort",
        choices=["energy_per_atom", "filename"],
        default="energy_per_atom",
        help="Sort criteria. Defaults to energy / atom.",
    )
    parser_analyze.set_defaults(func=analyze)

    # -- pmg query: Materials Project lookups ---------------------------
    parser_query = subparsers.add_parser("query", help="Search for structures and data from the Materials Project.")
    parser_query.add_argument(
        "criteria",
        metavar="criteria",
        help="Search criteria. Supported formats in formulas, chemical " "systems, Materials Project ids, etc.",
    )
    group = parser_query.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-s",
        "--structure",
        dest="structure",
        metavar="format",
        choices=["poscar", "cif", "cssr"],
        type=str.lower,
        help="Get structures from Materials Project and write them to a " "specified format.",
    )
    group.add_argument(
        "-e",
        "--entries",
        dest="entries",
        metavar="filename",
        help="Get entries from Materials Project and write them to " "serialization file. JSON and YAML supported.",
    )
    group.add_argument(
        "-d",
        "--data",
        dest="data",
        metavar="fields",
        nargs="*",
        help="Print a summary of entries in the Materials Project satisfying "
        "the criteria. Supply field names to include additional data. "
        "By default, the Materials Project id, formula, spacegroup, "
        "energy per atom, energy above hull are shown.",
    )
    parser_query.set_defaults(func=do_query)

    # -- pmg plot: DOS / CHGCAR / XRD plotting --------------------------
    parser_plot = subparsers.add_parser("plot", help="Plotting tool for " "DOS, CHGCAR, XRD, etc.")
    group = parser_plot.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-d",
        "--dos",
        dest="dos_file",
        metavar="vasprun.xml",
        help="Plot DOS from a vasprun.xml",
    )
    group.add_argument(
        "-c",
        "--chgint",
        dest="chgcar_file",
        metavar="CHGCAR",
        help="Generate charge integration plots from any " "CHGCAR",
    )
    group.add_argument(
        "-x",
        "--xrd",
        dest="xrd_structure_file",
        metavar="structure_file",
        help="Generate XRD plots from any supported structure " "file, e.g., CIF, POSCAR, vasprun.xml, etc.",
    )
    parser_plot.add_argument(
        "-s",
        "--site",
        dest="site",
        action="store_const",
        const=True,
        help="Plot site projected DOS",
    )
    parser_plot.add_argument(
        "-e",
        "--element",
        dest="element",
        type=str,
        nargs=1,
        help="List of elements to plot as comma-separated" " values e.g., Fe,Mn",
    )
    parser_plot.add_argument(
        "-o",
        "--orbital",
        dest="orbital",
        action="store_const",
        const=True,
        help="Plot orbital projected DOS",
    )
    parser_plot.add_argument(
        "-i",
        "--indices",
        dest="inds",
        type=str,
        nargs=1,
        help="Comma-separated list of indices to plot "
        "charge integration, e.g., 1,2,3,4. If not "
        "provided, the code will plot the chgint "
        "for all symmetrically distinct atoms "
        "detected.",
    )
    parser_plot.add_argument(
        "-r",
        "--radius",
        dest="radius",
        type=float,
        default=3,
        help="Radius of integration for charge " "integration plot.",
    )
    parser_plot.add_argument(
        "--out_file",
        dest="out_file",
        type=str,
        help="Save plot to file instead of displaying.",
    )
    parser_plot.set_defaults(func=plot)

    # -- pmg structure: conversion and analysis -------------------------
    parser_structure = subparsers.add_parser("structure", help="Structure conversion and analysis tools.")
    parser_structure.add_argument(
        "-f",
        "--filenames",
        dest="filenames",
        metavar="filename",
        nargs="+",
        help="List of structure files.",
    )
    groups = parser_structure.add_mutually_exclusive_group(required=True)
    groups.add_argument(
        "-c",
        "--convert",
        dest="convert",
        action="store_true",
        help="Convert from structure file 1 to structure "
        "file 2. Format determined from filename. "
        "Supported formats include POSCAR/CONTCAR, "
        "CIF, CSSR, etc. If the keyword'prim' is within "
        "the filename, the code will automatically attempt "
        "to find a primitive cell.",
    )
    groups.add_argument(
        "-s",
        "--symmetry",
        dest="symmetry",
        metavar="tolerance",
        type=float,
        help="Determine the spacegroup using the "
        "specified tolerance. 0.1 is usually a good "
        "value for DFT calculations.",
    )
    groups.add_argument(
        "-g",
        "--group",
        dest="group",
        choices=["element", "species"],
        metavar="mode",
        help="Compare a set of structures for similarity. "
        "Element mode does not compare oxidation states. "
        "Species mode will take into account oxidations "
        "states.",
    )
    groups.add_argument(
        "-l",
        "--localenv",
        dest="localenv",
        nargs="+",
        help="Local environment analysis. Provide bonds in the format of"
        "Center Species-Ligand Species=max_dist, e.g., H-O=0.5.",
    )
    parser_structure.set_defaults(func=analyze_structures)

    # -- pmg view: VTK visualization ------------------------------------
    parser_view = subparsers.add_parser("view", help="Visualize structures")
    parser_view.add_argument("filename", metavar="filename", type=str, nargs=1, help="Filename")
    parser_view.add_argument(
        "-e",
        "--exclude_bonding",
        dest="exclude_bonding",
        type=str,
        nargs=1,
        help="List of elements to exclude from bonding " "analysis. E.g., Li,Na",
    )
    parser_view.set_defaults(func=parse_view)

    # -- pmg diff: INCAR comparison -------------------------------------
    parser_diff = subparsers.add_parser("diff", help="Diffing tool. For now, only INCAR supported.")
    parser_diff.add_argument(
        "-i",
        "--incar",
        dest="incars",
        metavar="INCAR",
        required=True,
        nargs=2,
        help="List of INCARs to compare.",
    )
    parser_diff.set_defaults(func=diff_incar)

    # -- pmg potcar: POTCAR generation ----------------------------------
    parser_potcar = subparsers.add_parser("potcar", help="Generate POTCARs")
    parser_potcar.add_argument(
        "-f",
        "--functional",
        dest="functional",
        type=str,
        choices=sorted(Potcar.FUNCTIONAL_CHOICES),
        default=SETTINGS.get("PMG_DEFAULT_FUNCTIONAL", "PBE"),
        help="Functional to use. Unless otherwise " "stated (e.g., US), " "refers to PAW psuedopotential.",
    )
    group = parser_potcar.add_mutually_exclusive_group(required=True)

    group.add_argument(
        "-s",
        "--symbols",
        dest="symbols",
        type=str,
        nargs="+",
        help="List of POTCAR symbols. Use -f to set " "functional. Defaults to PBE.",
    )
    group.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        type=str,
        help="Dirname to find and generate from POTCAR.spec.",
    )
    parser_potcar.set_defaults(func=generate_potcar)

    # Optional shell tab-completion support.
    try:
        import argcomplete

        argcomplete.autocomplete(parser)
    except ImportError:
        # argcomplete not present.
        pass

    args = parser.parse_args()

    try:
        getattr(args, "func")
    except AttributeError:
        # No subcommand given: show help and exit with an error code.
        parser.print_help()
        sys.exit(-1)
    return args.func(args)


if __name__ == "__main__":
    main()
|
davidwaroquiers/pymatgen
|
pymatgen/cli/pmg.py
|
Python
|
mit
| 13,307
|
[
"VASP",
"pymatgen"
] |
838f081f2af72a3baa816c4f5923e869c6f5f23dd7767ae13f777c66b8f6ec3b
|
#
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
# Compile-time declaration for the Cython compiler: pre-declare these
# module-level names as plain objects (a pass-through no-op when the module
# runs as ordinary Python — TODO confirm against the cython.declare docs).
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
               CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
               StringEncoding=object, operator=object,
               Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
               list_type=object, tuple_type=object, set_type=object, dict_type=object,
               unicode_type=object, str_type=object, bytes_type=object, type_type=object,
               Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
               debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
               bytearray_type=object, slice_type=object)
import os.path
import copy
import operator
from .Errors import error, warning, warn_once, InternalError, CompileError
from .Errors import hold_errors, release_errors, held_errors, report_error
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
# Py2/Py3 compatibility shims: resolve names that exist under different
# module names (or not at all) depending on the Python major version.
try:
    from __builtin__ import basestring
except ImportError:
    basestring = str # Python 3
try:
    from builtins import bytes
except ImportError:
    bytes = str # Python 2
class NotConstant(object):
    """Singleton sentinel for "this expression's value is not a compile-time constant"."""

    _obj = None  # lazily-created shared singleton instance

    def __new__(cls):
        # Create the single shared instance on first use, then always return it.
        instance = NotConstant._obj
        if instance is None:
            instance = super(NotConstant, cls).__new__(cls)
            NotConstant._obj = instance
        return instance

    def __repr__(self):
        return "<NOT CONSTANT>"
# Shared sentinel meaning "constant folding was attempted but failed".
not_a_constant = NotConstant()
# Distinct sentinel meaning "constant folding has not been attempted yet".
constant_value_not_set = object()
# Specific error messages for implicit coercions from key[0] to key[1].
# Looked up by find_coercion_error(); a missing entry means the generic
# assignment error message is used instead.
coercion_error_dict = {
    # string related errors
    (Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
    (Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
    (Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
    (Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
    (Builtin.bytes_type, Builtin.basestring_type) : "Cannot convert 'bytes' object to basestring implicitly. This is not portable to Py3.",
    (Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
    (Builtin.basestring_type, Builtin.bytes_type) : "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
    (Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
    (Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
    (Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).",
    (PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}
def find_coercion_error(type_tuple, default, env):
    """Return a specific error message for coercing type_tuple[0] -> type_tuple[1].

    Falls back to *default* when no specific message is registered, or when
    the active ``c_string_encoding`` directive makes the char*/unicode
    coercion legal after all.
    """
    message = coercion_error_dict.get(type_tuple)
    if message is None:
        return default
    char_ptr_involved = (PyrexTypes.c_char_ptr_type in type_tuple
                         or PyrexTypes.c_uchar_ptr_type in type_tuple)
    if char_ptr_involved and env.directives['c_string_encoding']:
        # An explicit string encoding directive can make char* coercions valid.
        if type_tuple[1].is_pyobject:
            return default
        if env.directives['c_string_encoding'] in ('ascii', 'default'):
            return default
        return ("'%s' objects do not support coercion to C types with "
                "non-ascii or non-default c_string_encoding" % type_tuple[0].name)
    return message
def default_str_type(env):
    """Map the active ``c_string_type`` directive to the corresponding Cython type.

    Returns None when the directive holds an unrecognised value.
    """
    directive = env.directives['c_string_type']
    if directive == 'bytes':
        return bytes_type
    if directive == 'bytearray':
        return bytearray_type
    if directive == 'str':
        return str_type
    if directive == 'unicode':
        return unicode_type
    return None
def check_negative_indices(*nodes):
    """
    Raise a warning on nodes that are known to have negative numeric values.
    Used to find (potential) bugs inside of "wraparound=False" sections.

    Accepts any number of nodes; None entries and nodes without a numeric
    constant_result are ignored.
    """
    # BUG FIX: 'long' does not exist on Python 3, which previously raised a
    # NameError as soon as a node with a numeric constant_result was checked.
    # Build the tuple of accepted numeric types once, in a version-safe way.
    try:
        numeric_types = (int, float, long)
    except NameError:  # Python 3: int and long are unified
        numeric_types = (int, float)
    for node in nodes:
        if node is None or not isinstance(node.constant_result, numeric_types):
            continue
        if node.constant_result < 0:
            warning(node.pos,
                    "the result of using negative indices inside of "
                    "code sections marked as 'wraparound=False' is "
                    "undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
    """Try to infer the type of an item of *seq_node* (e.g. for unpacking/iteration).

    When *index_node* has a constant value, the concrete item at that index is
    used; otherwise the common type of all items is returned if they agree.
    Returns None when no single item type can be inferred.
    """
    if not seq_node.is_sequence_constructor:
        if seq_type is None:
            seq_type = seq_node.infer_type(env)
        if seq_type is tuple_type:
            # tuples are immutable => we can safely follow assignments
            if seq_node.cf_state and len(seq_node.cf_state) == 1:
                try:
                    seq_node = seq_node.cf_state[0].rhs
                except AttributeError:
                    # cf_state entry is not an assignment; keep the original node.
                    pass
    if seq_node is not None and seq_node.is_sequence_constructor:
        if index_node is not None and index_node.has_constant_result():
            try:
                item = seq_node.args[index_node.constant_result]
            except (ValueError, TypeError, IndexError):
                # Non-integer or out-of-range constant index: fall through to
                # the common-item-type inference below.
                pass
            else:
                return item.infer_type(env)
        # if we're lucky, all items have the same type
        item_types = set([item.infer_type(env) for item in seq_node.args])
        if len(item_types) == 1:
            return item_types.pop()
    return None
class ExprNode(Node):
    """Abstract base class for all expression nodes in the Cython parse tree.

    Subclasses implement type analysis, coercion and C code generation for a
    particular kind of expression.  The multi-phase processing protocol that
    subclasses participate in is documented in the comments below.
    """
    # subexprs [string] Class var holding names of subexpr node attrs
    # type PyrexType Type of the result
    # result_code string Code fragment
    # result_ctype string C type of result_code if different from type
    # is_temp boolean Result is in a temporary variable
    # is_sequence_constructor
    # boolean Is a list or tuple constructor expression
    # is_starred boolean Is a starred expression (e.g. '*a')
    # saved_subexpr_nodes
    # [ExprNode or [ExprNode or None] or None]
    # Cached result of subexpr_nodes()
    # use_managed_ref boolean use ref-counted temps/assignments/etc.
    # result_is_used boolean indicates that the result will be dropped and the
    # result_code/temp_result can safely be set to None
    result_ctype = None
    type = None
    temp_code = None
    old_temp = None # error checker for multiple frees etc.
    use_managed_ref = True # can be set by optimisation transforms
    result_is_used = True
    # The Analyse Expressions phase for expressions is split
    # into two sub-phases:
    #
    # Analyse Types
    # Determines the result type of the expression based
    # on the types of its sub-expressions, and inserts
    # coercion nodes into the expression tree where needed.
    # Marks nodes which will need to have temporary variables
    # allocated.
    #
    # Allocate Temps
    # Allocates temporary variables where needed, and fills
    # in the result_code field of each node.
    #
    # ExprNode provides some convenience routines which
    # perform both of the above phases. These should only
    # be called from statement nodes, and only when no
    # coercion nodes need to be added around the expression
    # being analysed. In that case, the above two phases
    # should be invoked separately.
    #
    # Framework code in ExprNode provides much of the common
    # processing for the various phases. It makes use of the
    # 'subexprs' class attribute of ExprNodes, which should
    # contain a list of the names of attributes which can
    # hold sub-nodes or sequences of sub-nodes.
    #
    # The framework makes use of a number of abstract methods.
    # Their responsibilities are as follows.
    #
    # Declaration Analysis phase
    #
    # analyse_target_declaration
    # Called during the Analyse Declarations phase to analyse
    # the LHS of an assignment or argument of a del statement.
    # Nodes which cannot be the LHS of an assignment need not
    # implement it.
    #
    # Expression Analysis phase
    #
    # analyse_types
    # - Call analyse_types on all sub-expressions.
    # - Check operand types, and wrap coercion nodes around
    # sub-expressions where needed.
    # - Set the type of this node.
    # - If a temporary variable will be required for the
    # result, set the is_temp flag of this node.
    #
    # analyse_target_types
    # Called during the Analyse Types phase to analyse
    # the LHS of an assignment or argument of a del
    # statement. Similar responsibilities to analyse_types.
    #
    # target_code
    # Called by the default implementation of allocate_target_temps.
    # Should return a C lvalue for assigning to the node. The default
    # implementation calls calculate_result_code.
    #
    # check_const
    # - Check that this node and its subnodes form a
    # legal constant expression. If so, do nothing,
    # otherwise call not_const.
    #
    # The default implementation of check_const
    # assumes that the expression is not constant.
    #
    # check_const_addr
    # - Same as check_const, except check that the
    # expression is a C lvalue whose address is
    # constant. Otherwise, call addr_not_const.
    #
    # The default implementation of calc_const_addr
    # assumes that the expression is not a constant
    # lvalue.
    #
    # Code Generation phase
    #
    # generate_evaluation_code
    # - Call generate_evaluation_code for sub-expressions.
    # - Perform the functions of generate_result_code
    # (see below).
    # - If result is temporary, call generate_disposal_code
    # on all sub-expressions.
    #
    # A default implementation of generate_evaluation_code
    # is provided which uses the following abstract methods:
    #
    # generate_result_code
    # - Generate any C statements necessary to calculate
    # the result of this node from the results of its
    # sub-expressions.
    #
    # calculate_result_code
    # - Should return a C code fragment evaluating to the
    # result. This is only called when the result is not
    # a temporary.
    #
    # generate_assignment_code
    # Called on the LHS of an assignment.
    # - Call generate_evaluation_code for sub-expressions.
    # - Generate code to perform the assignment.
    # - If the assignment absorbed a reference, call
    # generate_post_assignment_code on the RHS,
    # otherwise call generate_disposal_code on it.
    #
    # generate_deletion_code
    # Called on an argument of a del statement.
    # - Call generate_evaluation_code for sub-expressions.
    # - Generate code to perform the deletion.
    # - Call generate_disposal_code on all sub-expressions.
    #
    #
    is_sequence_constructor = False
    is_dict_literal = False
    is_string_literal = False
    is_attribute = False
    is_subscript = False
    saved_subexpr_nodes = None
    is_temp = False
    is_target = False
    is_starred = False
    constant_result = constant_value_not_set
    # whether this node with a memoryview type should be broadcast
    memslice_broadcast = False
    # Expose the per-class 'subexprs' list through the generic child_attrs
    # protocol used by tree traversal/transform code.
    child_attrs = property(fget=operator.attrgetter('subexprs'))
    def not_implemented(self, method_name):
        # Report an abstract method that a subclass failed to override.
        print_call_chain(method_name, "not implemented") ###
        raise InternalError(
            "%s.%s not implemented" %
            (self.__class__.__name__, method_name))
    def is_lvalue(self):
        # Whether this expression can appear on the LHS of an assignment.
        return 0
    def is_addressable(self):
        return self.is_lvalue() and not self.type.is_memoryviewslice
    def is_ephemeral(self):
        # An ephemeral node is one whose result is in
        # a Python temporary and we suspect there are no
        # other references to it. Certain operations are
        # disallowed on such values, since they are
        # likely to result in a dangling pointer.
        return self.type.is_pyobject and self.is_temp
    def subexpr_nodes(self):
        # Extract a list of subexpression nodes based
        # on the contents of the subexprs class attribute.
        nodes = []
        for name in self.subexprs:
            item = getattr(self, name)
            if item is not None:
                if type(item) is list:
                    nodes.extend(item)
                else:
                    nodes.append(item)
        return nodes
    def result(self):
        if self.is_temp:
            #if not self.temp_code:
            # pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
            # raise RuntimeError("temp result name not set in %s at %r" % (
            # self.__class__.__name__, pos))
            return self.temp_code
        else:
            return self.calculate_result_code()
    def is_c_result_required(self):
        """
        Subtypes may return False here if result temp allocation can be skipped.
        """
        return True
    def result_as(self, type = None):
        # Return the result code cast to the specified C type.
        if (self.is_temp and self.type.is_pyobject and
                type != py_object_type):
            # Allocated temporaries are always PyObject *, which may not
            # reflect the actual type (e.g. an extension type)
            return typecast(type, py_object_type, self.result())
        return typecast(type, self.ctype(), self.result())
    def py_result(self):
        # Return the result code cast to PyObject *.
        return self.result_as(py_object_type)
    def ctype(self):
        # Return the native C type of the result (i.e. the
        # C type of the result_code expression).
        return self.result_ctype or self.type
    def get_constant_c_result_code(self):
        # Return the constant value of this node as a result code
        # string, or None if the node is not constant. This method
        # can be called when the constant result code is required
        # before the code generation phase.
        #
        # The return value is a string that can represent a simple C
        # value, a constant C name or a constant C expression. If the
        # node type depends on Python code, this must return None.
        return None
    def calculate_constant_result(self):
        # Calculate the constant compile time result value of this
        # expression and store it in ``self.constant_result``. Does
        # nothing by default, thus leaving ``self.constant_result``
        # unknown. If valid, the result can be an arbitrary Python
        # value.
        #
        # This must only be called when it is assured that all
        # sub-expressions have a valid constant_result value. The
        # ConstantFolding transform will do this.
        pass
    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def compile_time_value(self, denv):
        # Return value of compile-time expression, or report error.
        error(self.pos, "Invalid compile-time expression")
    def compile_time_value_error(self, e):
        error(self.pos, "Error in compile-time expression: %s: %s" % (
            e.__class__.__name__, e))
    # ------------- Declaration Analysis ----------------
    def analyse_target_declaration(self, env):
        error(self.pos, "Cannot assign to or delete this")
    # ------------- Expression Analysis ----------------
    def analyse_const_expression(self, env):
        # Called during the analyse_declarations phase of a
        # constant expression. Analyses the expression's type,
        # checks whether it is a legal const expression,
        # and determines its value.
        node = self.analyse_types(env)
        node.check_const()
        return node
    def analyse_expressions(self, env):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for a whole
        # expression.
        return self.analyse_types(env)
    def analyse_target_expression(self, env, rhs):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for the LHS of
        # an assignment.
        return self.analyse_target_types(env)
    def analyse_boolean_expression(self, env):
        # Analyse expression and coerce to a boolean.
        node = self.analyse_types(env)
        bool = node.coerce_to_boolean(env)
        return bool
    def analyse_temp_boolean_expression(self, env):
        # Analyse boolean expression and coerce result into
        # a temporary. This is used when a branch is to be
        # performed on the result and we won't have an
        # opportunity to ensure disposal code is executed
        # afterwards. By forcing the result into a temporary,
        # we ensure that all disposal has been done by the
        # time we get the result.
        node = self.analyse_types(env)
        return node.coerce_to_boolean(env).coerce_to_simple(env)
    # --------------- Type Inference -----------------
    def type_dependencies(self, env):
        # Returns the list of entries whose types must be determined
        # before the type of self can be inferred.
        if hasattr(self, 'type') and self.type is not None:
            return ()
        return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
    def infer_type(self, env):
        # Attempt to deduce the type of self.
        # Differs from analyse_types as it avoids unnecessary
        # analysis of subexpressions, but can assume everything
        # in self.type_dependencies() has been resolved.
        if hasattr(self, 'type') and self.type is not None:
            return self.type
        elif hasattr(self, 'entry') and self.entry is not None:
            return self.entry.type
        else:
            self.not_implemented("infer_type")
    def nonlocally_immutable(self):
        # Returns whether this variable is a safe reference, i.e.
        # can't be modified as part of globals or closures.
        return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
    def inferable_item_node(self, index=0):
        """
        Return a node that represents the (type) result of an indexing operation,
        e.g. for tuple unpacking or iteration.
        """
        return IndexNode(self.pos, base=self, index=IntNode(
            self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
    # --------------- Type Analysis ------------------
    def analyse_as_module(self, env):
        # If this node can be interpreted as a reference to a
        # cimported module, return its scope, else None.
        return None
    def analyse_as_type(self, env):
        # If this node can be interpreted as a reference to a
        # type, return that type, else None.
        return None
    def analyse_as_extension_type(self, env):
        # If this node can be interpreted as a reference to an
        # extension type or builtin type, return its type, else None.
        return None
    def analyse_types(self, env):
        self.not_implemented("analyse_types")
    def analyse_target_types(self, env):
        return self.analyse_types(env)
    def nogil_check(self, env):
        # By default, any expression based on Python objects is
        # prevented in nogil environments. Subtypes must override
        # this if they can work without the GIL.
        if self.type and self.type.is_pyobject:
            self.gil_error()
    def gil_assignment_check(self, env):
        if env.nogil and self.type.is_pyobject:
            error(self.pos, "Assignment of Python object not allowed without gil")
    def check_const(self):
        self.not_const()
        return False
    def not_const(self):
        error(self.pos, "Not allowed in a constant expression")
    def check_const_addr(self):
        self.addr_not_const()
        return False
    def addr_not_const(self):
        error(self.pos, "Address is not constant")
    # ----------------- Result Allocation -----------------
    def result_in_temp(self):
        # Return true if result is in a temporary owned by
        # this node or one of its subexpressions. Overridden
        # by certain nodes which can share the result of
        # a subnode.
        return self.is_temp
    def target_code(self):
        # Return code fragment for use as LHS of a C assignment.
        return self.calculate_result_code()
    def calculate_result_code(self):
        self.not_implemented("calculate_result_code")
    # def release_target_temp(self, env):
    # # Release temporaries used by LHS of an assignment.
    # self.release_subexpr_temps(env)
    def allocate_temp_result(self, code):
        # Allocate (or skip allocating) the temporary holding this node's result.
        if self.temp_code:
            raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
        type = self.type
        if not type.is_void:
            if type.is_pyobject:
                type = PyrexTypes.py_object_type
            elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
                self.temp_code = None
                return
            self.temp_code = code.funcstate.allocate_temp(
                type, manage_ref=self.use_managed_ref)
        else:
            self.temp_code = None
    def release_temp_result(self, code):
        # Release this node's result temp, guarding against double-release.
        if not self.temp_code:
            if not self.result_is_used:
                # not used anyway, so ignore if not set up
                return
            pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
            if self.old_temp:
                raise RuntimeError("temp %s released multiple times in %s at %r" % (
                    self.old_temp, self.__class__.__name__, pos))
            else:
                raise RuntimeError("no temp, but release requested in %s at %r" % (
                    self.__class__.__name__, pos))
        code.funcstate.release_temp(self.temp_code)
        self.old_temp = self.temp_code
        self.temp_code = None
    # ---------------- Code Generation -----------------
    def make_owned_reference(self, code):
        """
        If result is a pyobject, make sure we own a reference to it.
        If the result is in a temp, it is already a new reference.
        """
        if self.type.is_pyobject and not self.result_in_temp():
            code.put_incref(self.result(), self.ctype())
    def make_owned_memoryviewslice(self, code):
        """
        Make sure we own the reference to this memoryview slice.
        """
        if not self.result_in_temp():
            code.put_incref_memoryviewslice(self.result(),
                have_gil=self.in_nogil_context)
    def generate_evaluation_code(self, code):
        # Generate code to evaluate this node and
        # its sub-expressions, and dispose of any
        # temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)
        code.mark_pos(self.pos)
        if self.is_temp:
            self.allocate_temp_result(code)
        self.generate_result_code(code)
        if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
            # If we are temp we do not need to wait until this node is disposed
            # before disposing children.
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
    def generate_subexpr_evaluation_code(self, code):
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)
    def generate_result_code(self, code):
        self.not_implemented("generate_result_code")
    def generate_disposal_code(self, code):
        if self.is_temp:
            if self.type.is_string or self.type.is_pyunicode_ptr:
                # postponed from self.generate_evaluation_code()
                self.generate_subexpr_disposal_code(code)
                self.free_subexpr_temps(code)
            if self.result():
                if self.type.is_pyobject:
                    code.put_decref_clear(self.result(), self.ctype())
                elif self.type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=not self.in_nogil_context)
        else:
            # Already done if self.is_temp
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_disposal_code(self, code):
        # Generate code to dispose of temporary results
        # of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)
    def generate_post_assignment_code(self, code):
        if self.is_temp:
            if self.type.is_string or self.type.is_pyunicode_ptr:
                # postponed from self.generate_evaluation_code()
                self.generate_subexpr_disposal_code(code)
                self.free_subexpr_temps(code)
            elif self.type.is_pyobject:
                code.putln("%s = 0;" % self.result())
            elif self.type.is_memoryviewslice:
                code.putln("%s.memview = NULL;" % self.result())
                code.putln("%s.data = NULL;" % self.result())
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        # Stub method for nodes which are not legal as
        # the LHS of an assignment. An error will have
        # been reported earlier.
        pass
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        # Stub method for nodes that are not legal as
        # the argument of a del statement. An error
        # will have been reported earlier.
        pass
    def free_temps(self, code):
        if self.is_temp:
            if not self.type.is_void:
                self.release_temp_result(code)
        else:
            self.free_subexpr_temps(code)
    def free_subexpr_temps(self, code):
        for sub in self.subexpr_nodes():
            sub.free_temps(code)
    def generate_function_definitions(self, env, code):
        pass
    # ---------------- Annotation ---------------------
    def annotate(self, code):
        for node in self.subexpr_nodes():
            node.annotate(code)
    # ----------------- Coercion ----------------------
    def coerce_to(self, dst_type, env):
        # Coerce the result so that it can be assigned to
        # something of type dst_type. If processing is necessary,
        # wraps this node in a coercion node and returns that.
        # Otherwise, returns this node unchanged.
        #
        # This method is called during the analyse_expressions
        # phase of the src_node's processing.
        #
        # Note that subclasses that override this (especially
        # ConstNodes) must not (re-)set their own .type attribute
        # here. Since expression nodes may turn up in different
        # places in the tree (e.g. inside of CloneNodes in cascaded
        # assignments), this method must return a new node instance
        # if it changes the type.
        #
        src = self
        src_type = self.type
        if self.check_for_coercion_error(dst_type, env):
            return self
        used_as_reference = dst_type.is_reference
        if used_as_reference and not src_type.is_reference:
            dst_type = dst_type.ref_base_type
        if src_type.is_const:
            src_type = src_type.const_base_type
        if src_type.is_fused or dst_type.is_fused:
            # See if we are coercing a fused function to a pointer to a
            # specialized function
            if (src_type.is_cfunction and not dst_type.is_fused and
                    dst_type.is_ptr and dst_type.base_type.is_cfunction):
                dst_type = dst_type.base_type
                for signature in src_type.get_all_specialized_function_types():
                    if signature.same_as(dst_type):
                        src.type = signature
                        src.entry = src.type.entry
                        src.entry.used = True
                        return self
            if src_type.is_fused:
                error(self.pos, "Type is not specialized")
            else:
                error(self.pos, "Cannot coerce to a type that is not specialized")
            self.type = error_type
            return self
        if self.coercion_type is not None:
            # This is purely for error checking purposes!
            node = NameNode(self.pos, name='', type=self.coercion_type)
            node.coerce_to(dst_type, env)
        if dst_type.is_memoryviewslice:
            from . import MemoryView
            if not src.type.is_memoryviewslice:
                if src.type.is_pyobject:
                    src = CoerceToMemViewSliceNode(src, dst_type, env)
                elif src.type.is_array:
                    src = CythonArrayNode.from_carray(src, env).coerce_to(
                        dst_type, env)
                elif not src_type.is_error:
                    error(self.pos,
                          "Cannot convert '%s' to memoryviewslice" %
                          (src_type,))
            elif not MemoryView.src_conforms_to_dst(
                    src.type, dst_type, broadcast=self.memslice_broadcast):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype
                error(self.pos, msg % tup)
        elif dst_type.is_pyobject:
            if not src.type.is_pyobject:
                if dst_type is bytes_type and src.type.is_int:
                    src = CoerceIntToBytesNode(src, env)
                else:
                    src = CoerceToPyTypeNode(src, env, type=dst_type)
            if not src.type.subtype_of(dst_type):
                if src.constant_result is not None:
                    src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            if used_as_reference and dst_type.is_cpp_class:
                warning(
                    self.pos,
                    "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
            src = CoerceFromPyTypeNode(dst_type, src, env)
        elif (dst_type.is_complex
              and src_type != dst_type
              and dst_type.assignable_from(src_type)):
            src = CoerceToComplexNode(src, dst_type, env)
        else: # neither src nor dst are py types
            # Added the string comparison, since for c types that
            # is enough, but Cython gets confused when the types are
            # in different pxi files.
            if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
                self.fail_assignment(dst_type)
        return src
    def fail_assignment(self, dst_type):
        error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
    def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
        if fail and not default:
            default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
        message = find_coercion_error((self.type, dst_type), default, env)
        if message is not None:
            error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
            return True
        if fail:
            self.fail_assignment(dst_type)
            return True
        return False
    def coerce_to_pyobject(self, env):
        return self.coerce_to(PyrexTypes.py_object_type, env)
    def coerce_to_boolean(self, env):
        # Coerce result to something acceptable as
        # a boolean value.
        # if it's constant, calculate the result now
        if self.has_constant_result():
            bool_value = bool(self.constant_result)
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)
        type = self.type
        if type.is_enum or type.is_error:
            return self
        elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
            return CoerceToBooleanNode(self, env)
        elif type.is_ctuple:
            bool_value = len(type.components) == 0
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)
        else:
            error(self.pos, "Type '%s' not acceptable as a boolean" % type)
            return self
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.type.is_int:
            return self
        else:
            return self.coerce_to(PyrexTypes.c_long_type, env)
    def coerce_to_temp(self, env):
        # Ensure that the result is in a temporary.
        if self.result_in_temp():
            return self
        else:
            return CoerceToTempNode(self, env)
    def coerce_to_simple(self, env):
        # Ensure that the result is simple (see is_simple).
        if self.is_simple():
            return self
        else:
            return self.coerce_to_temp(env)
    def is_simple(self):
        # A node is simple if its result is something that can
        # be referred to without performing any operations, e.g.
        # a constant, local var, C global var, struct member
        # reference, or temporary.
        return self.result_in_temp()
    def may_be_none(self):
        if self.type and not (self.type.is_pyobject or
                              self.type.is_memoryviewslice):
            return False
        if self.has_constant_result():
            return self.constant_result is not None
        return True
    def as_cython_attribute(self):
        return None
    def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
        # Wraps the node in a NoneCheckNode if it is not known to be
        # not-None (e.g. because it is a Python literal).
        if self.may_be_none():
            return NoneCheckNode(self, error, message, format_args)
        else:
            return self
    @classmethod
    def from_node(cls, node, **kwargs):
        """Instantiate this node class from another node, properly
        copying over all attributes that one would forget otherwise.
        """
        attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
        for attr_name in attributes:
            if attr_name in kwargs:
                continue
            try:
                value = getattr(node, attr_name)
            except AttributeError:
                pass
            else:
                kwargs[attr_name] = value
        return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
    # Abstract base class for expression nodes which have
    # no sub-expressions.
    subexprs = []
    # Override to optimize -- we know we have no children
    def generate_subexpr_evaluation_code(self, code):
        # No sub-expressions: nothing to evaluate.
        pass
    def generate_subexpr_disposal_code(self, code):
        # No sub-expressions: nothing to dispose of.
        pass
class PyConstNode(AtomicExprNode):
    # Abstract base class for constant Python values
    # (None, Ellipsis, ...) referenced by a fixed C name.
    is_literal = 1
    type = py_object_type
    def is_simple(self):
        # A constant is always simple (no evaluation needed).
        return 1
    def may_be_none(self):
        # Subclasses representing None override this.
        return False
    def analyse_types(self, env):
        # Type is fixed by the class; nothing to analyse.
        return self
    def calculate_result_code(self):
        # 'value' holds the C name of the constant (e.g. "Py_None").
        return self.value
    def generate_result_code(self, code):
        # No code needed: the constant already exists at C level.
        pass
class NoneNode(PyConstNode):
    # The constant value None
    is_none = 1
    value = "Py_None"
    constant_result = None
    # None is safe to use without the GIL.
    nogil_check = None
    def compile_time_value(self, denv):
        return None
    def may_be_none(self):
        # This node *is* None by definition.
        return True
class EllipsisNode(PyConstNode):
    # '...' in a subscript list.
    value = "Py_Ellipsis"
    constant_result = Ellipsis
    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    # Abstract base type for literal constant nodes.
    #
    # value string C code fragment
    is_literal = 1
    # Constants are always usable without the GIL.
    nogil_check = None
    def is_simple(self):
        return 1
    def nonlocally_immutable(self):
        # A literal can never be rebound from elsewhere.
        return 1
    def may_be_none(self):
        return False
    def analyse_types(self, env):
        return self # Types are held in class variables
    def check_const(self):
        # Literals are, by definition, legal constant expressions.
        return True
    def get_constant_c_result_code(self):
        return self.calculate_result_code()
    def calculate_result_code(self):
        return str(self.value)
    def generate_result_code(self, code):
        # No runtime code needed for a literal.
        pass
class BoolNode(ConstNode):
    """Literal boolean constant (True/False), defaulting to the C bint type."""
    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        # The literal value is its own constant result.
        self.constant_result = self.value

    def compile_time_value(self, denv):
        return self.value

    def calculate_result_code(self):
        # As a Python object, reference the singletons; as a C value, emit 0/1.
        if self.type.is_pyobject:
            return 'Py_True' if self.value else 'Py_False'
        return str(int(self.value))

    def coerce_to(self, dst_type, env):
        # Flip between the C and Python flavours of the node where possible;
        # everything else goes through the generic constant coercion.
        to_py = dst_type.is_pyobject and self.type.is_int
        to_c = dst_type.is_int and self.type.is_pyobject
        if to_py or to_c:
            target_type = Builtin.bool_type if to_py else PyrexTypes.c_bint_type
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=target_type)
        return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
    # The C NULL pointer constant.
    type = PyrexTypes.c_null_ptr_type
    value = "NULL"
    constant_result = 0
    def get_constant_c_result_code(self):
        return self.value
class CharNode(ConstNode):
    # A single C character literal; 'value' holds the character.
    type = PyrexTypes.c_char_type
    def calculate_constant_result(self):
        # The constant result is the character's ordinal value.
        self.constant_result = ord(self.value)
    def compile_time_value(self, denv):
        return ord(self.value)
    def calculate_result_code(self):
        # Emit a C char literal, escaping as needed.
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    # Integer literal node.
    #
    # unsigned     ""  or "U"
    # longness     ""  or "L" or "LL"
    # is_c_literal   True/False/None   creator considers this a C integer literal
    unsigned = ""
    longness = ""
    is_c_literal = None # unknown
    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        # If the creator did not pin a type, infer one from the value.
        if 'type' not in kwds:
            self.type = self.find_suitable_type_for_value()
    def find_suitable_type_for_value(self):
        """Pick a C or Python object type wide enough for this literal."""
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
                self.constant_result in (constant_value_not_set, not_a_constant) or \
                self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if -2**31 <= self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                # does not fit a signed 32 bit int => Python object
                suitable_type = PyrexTypes.py_object_type
        return suitable_type
    def coerce_to(self, dst_type, env):
        """Coerce to float, numeric C types or a Python object as needed."""
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.has_constant_result():
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = dst_type, is_c_literal = True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = PyrexTypes.py_object_type, is_c_literal = False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def coerce_to_boolean(self, env):
        """Reuse the literal but give it C bint type for truth testing."""
        return IntNode(
            self.pos, value=self.value,
            constant_result=self.constant_result,
            type=PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = str(Utils.str_to_number(self.value))
            self.result_code = code.get_py_int(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()
    def get_constant_c_result_code(self):
        # Append the C literal suffixes (U/L/LL) to the digits.
        return self.value_as_c_integer_string() + self.unsigned + self.longness
    def value_as_c_integer_string(self):
        """Return the digits in a form a C compiler accepts."""
        value = self.value
        if len(value) > 2:
            # convert C-incompatible Py3 oct/bin notations
            if value[1] in 'oO':
                value = value[0] + value[2:] # '0o123' => '0123'
            elif value[1] in 'bB':
                # C has no binary literals (pre-C23): emit the decimal value
                value = int(value[2:], 2)
        return str(value)
    def calculate_result_code(self):
        return self.result_code
    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)
    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    # A C double literal.
    type = PyrexTypes.c_double_type
    def calculate_constant_result(self):
        self.constant_result = float(self.value)
    def compile_time_value(self, denv):
        return float(self.value)
    def coerce_to(self, dst_type, env):
        """Switch between the C double and Python float representations."""
        if dst_type.is_pyobject and self.type.is_float:
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=Builtin.float_type)
        if dst_type.is_float and self.type.is_pyobject:
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=dst_type)
        return ConstNode.coerce_to(self, dst_type, env)
    def calculate_result_code(self):
        return self.result_code
    def get_constant_c_result_code(self):
        """Return a C expression for the value.

        nan/inf cannot be written as plain C literals, so they are
        expressed via Py_HUGE_VAL arithmetic instead.
        """
        strval = self.value
        assert isinstance(strval, (str, unicode))
        cmpval = repr(float(strval))
        if cmpval == 'nan':
            return "(Py_HUGE_VAL * 0)"
        elif cmpval == 'inf':
            return "Py_HUGE_VAL"
        elif cmpval == '-inf':
            return "(-Py_HUGE_VAL)"
        else:
            return strval
    def generate_evaluation_code(self, code):
        c_value = self.get_constant_c_result_code()
        if self.type.is_pyobject:
            # pre-allocate a Python float object for the constant
            self.result_code = code.get_py_float(self.value, c_value)
        else:
            self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
    """Try to interpret the string 'name' as a type name.

    First attempts a direct parse as a basic type; failing that, compiles
    a throw-away ``sizeof(name)`` fragment and, if it analyses cleanly,
    extracts the argument type from the resulting SizeofTypeNode.
    Returns the type, or None if 'name' does not denote a type.
    Errors raised during the speculative analysis are held and discarded.
    """
    type = PyrexTypes.parse_basic_type(name)
    if type is not None:
        return type
    hold_errors()
    from .TreeFragment import TreeFragment
    # shift the reported column back over the injected "sizeof(" prefix (7 chars)
    pos = (pos[0], pos[1], pos[2]-7)
    try:
        declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
    except CompileError:
        sizeof_node = None
    else:
        sizeof_node = declaration.root.stats[0].expr
        sizeof_node = sizeof_node.analyse_types(env)
    release_errors(ignore=True)
    if isinstance(sizeof_node, SizeofTypeNode):
        return sizeof_node.arg_type
    return None
class BytesNode(ConstNode):
    # A char* or bytes literal
    #
    # value      BytesLiteral
    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def as_sliced_node(self, start, stop, step=None):
        """Return a new BytesNode for a constant slice of this literal."""
        value = StringEncoding.BytesLiteral(self.value[start:stop:step])
        value.encoding = self.value.encoding
        return BytesNode(
            self.pos, value=value, constant_result=value)
    def compile_time_value(self, denv):
        return self.value
    def analyse_as_type(self, env):
        # ISO8859-1 maps each byte to the same code point, round-trip safe.
        return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def coerce_to(self, dst_type, env):
        """Coerce to an int (single char), Python bytes, or char pointer."""
        if self.type == dst_type:
            return self
        if dst_type.is_int:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value,
                            constant_result=ord(self.value))
        node = BytesNode(self.pos, value=self.value,
                         constant_result=self.constant_result)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
            return node
        elif dst_type == PyrexTypes.c_char_ptr_type:
            node.type = dst_type
            return node
        elif dst_type == PyrexTypes.c_uchar_ptr_type:
            # go via char* and cast, since the literal is stored as char data
            node.type = PyrexTypes.c_char_ptr_type
            return CastNode(node, PyrexTypes.c_uchar_ptr_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_string_const(self.value)
    def get_constant_c_result_code(self):
        return None # FIXME
    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral    the literal parsed as bytes string
    #                              ('-3' unicode literals only)
    is_string_literal = True
    bytes_value = None
    type = unicode_type
    def calculate_constant_result(self):
        self.constant_result = self.value
    def analyse_as_type(self, env):
        return _analyse_name_as_type(self.value, self.pos, env)
    def as_sliced_node(self, start, stop, step=None):
        """Return a new node for a constant slice, or None if unsafe."""
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results
            # in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            bytes_value = StringEncoding.BytesLiteral(
                self.bytes_value[start:stop:step])
            bytes_value.encoding = self.bytes_value.encoding
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)
    def coerce_to(self, dst_type, env):
        """Coerce to a unicode character type, C string or Python object."""
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos,
                      "Only single-character Unicode string literals or "
                      "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value),
                           constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a
                # C char* context
                return BytesNode(self.pos, value=self.bytes_value
                    ).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                node = UnicodeNode(self.pos, value=self.value)
                node.type = dst_type
                return node
            error(self.pos,
                  "Unicode literals do not support coercion to C types other "
                  "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
                  "(for strings).")
        elif dst_type not in (py_object_type, Builtin.basestring_type):
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))
    def coerce_to_boolean(self, env):
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            if self.contains_surrogates():
                # surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                self.result_code = code.get_py_const(py_object_type, 'ustring')
                data_cname = code.get_pyunicode_ptr_const(self.value)
                code = code.get_cached_constants_writer()
                code.mark_pos(self.pos)
                code.putln(
                    "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        code.error_goto_if_null(self.result_code, self.pos)))
                code.put_error_if_neg(
                    self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean
    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None
    def calculate_constant_result(self):
        if self.unicode_value is not None:
            # only the Unicode value is portable across Py2/3
            self.constant_result = self.unicode_value
    def analyse_as_type(self, env):
        # prefer the Unicode text; fall back to a byte-preserving decode
        return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
    def as_sliced_node(self, start, stop, step=None):
        """Return a new StringNode for a constant slice, or None if unsafe."""
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)
    def coerce_to(self, dst_type, env):
        """Coerce to a C string via BytesNode, or check Python targets."""
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
#            if dst_type is Builtin.bytes_type:
#                # special case: bytes = 'str literal'
#                return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            if dst_type is not Builtin.basestring_type:
                self.check_for_coercion_error(dst_type, env, fail=True)
        return self
    def can_coerce_to_char_literal(self):
        return not self.is_identifier and len(self.value) == 1
    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)
    def get_constant_c_result_code(self):
        return None
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in Py3).
    is_identifier = True
class ImagNode(AtomicExprNode):
    # Imaginary number literal
    #
    # value   float    imaginary part
    type = PyrexTypes.c_double_complex_type
    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)
    def compile_time_value(self, denv):
        return complex(0.0, self.value)
    def analyse_types(self, env):
        # make sure the complex type's helper code is emitted
        self.type.create_declaration_utility_code(env)
        return self
    def may_be_none(self):
        return False
    def coerce_to(self, dst_type, env):
        """Coerce to a Python complex object when a pyobject is wanted."""
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.is_temp = 1
            node.type = PyrexTypes.py_object_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)
    gil_message = "Constructing complex number"
    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            # build a C complex value with zero real part
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))
    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass              node                 c++ class to create
    type = None
    def infer_type(self, env):
        """Resolve the target C++ class and its constructor entry."""
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # no declared constructor: synthesise a default one
            func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
            type.scope.declare_cfunction(u'<init>', func_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self
    def may_be_none(self):
        return False
    def generate_result_code(self, code):
        pass
    def calculate_result_code(self):
        return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
    # Reference to a local or global variable name.
    #
    # name            string    Python name of the variable
    # entry           Entry     Symbol table entry
    # type_entry      Entry     For extension type names, the original type entry
    # cf_is_null      boolean   Is uninitialized before this node
    # cf_maybe_null   boolean   Maybe uninitialized before this node
    # allow_null      boolean   Don't raise UnboundLocalError
    # nogil           boolean   Whether it is used in a nogil context
    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    inferred_type = None  # type assigned by type inference, if any
    def as_cython_attribute(self):
        """Return the cython.* attribute name this node refers to, if any."""
        return self.cython_attribute
    def type_dependencies(self, env):
        """Report self as a dependency while our entry's type is unknown."""
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is not None and self.entry.type.is_unspecified:
            return (self,)
        else:
            return ()
    def infer_type(self, env):
        """Infer the expression type of this name reference."""
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is None or self.entry.type is unspecified_type:
            if self.inferred_type is not None:
                return self.inferred_type
            return py_object_type
        elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
                self.name == self.entry.type.name:
            # Unfortunately the type attribute of type objects
            # is used for the pointer to the type they represent.
            return type_type
        elif self.entry.type.is_cfunction:
            if self.entry.scope.is_builtin_scope:
                # special case: optimised builtin functions must be treated as Python objects
                return py_object_type
            else:
                # special case: referring to a C function must return its pointer
                return PyrexTypes.CPtrType(self.entry.type)
        else:
            # If entry is inferred as pyobject it's safe to use local
            # NameNode's inferred_type.
            if self.entry.type.is_pyobject and self.inferred_type:
                # Overflow may happen if integer
                if not (self.inferred_type.is_int and self.entry.might_overflow):
                    return self.inferred_type
            return self.entry.type
    def compile_time_value(self, denv):
        """Look the name up in the compile-time environment."""
        try:
            return denv.lookup(self.name)
        except KeyError:
            error(self.pos, "Compile-time name '%s' not defined" % self.name)
    def get_constant_c_result_code(self):
        # Only non-Python C entries have a usable compile-time C name.
        if not self.entry or self.entry.type.is_pyobject:
            return None
        return self.entry.cname
    def coerce_to(self, dst_type, env):
        # If coercing to a generic pyobject and this is a builtin
        # C function with a Python equivalent, manufacture a NameNode
        # referring to the Python builtin.
        #print "NameNode.coerce_to:", self.name, dst_type ###
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction:
                var_entry = entry.as_variable
                if var_entry:
                    if var_entry.is_builtin and var_entry.is_const:
                        # ensure the builtin is declared (and cached) in this scope
                        var_entry = env.declare_builtin(var_entry.name, self.pos)
                    node = NameNode(self.pos, name = self.name)
                    node.entry = var_entry
                    node.analyse_rvalue_entry(env)
                    return node
        return super(NameNode, self).coerce_to(dst_type, env)
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module.
        # Returns the module scope, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.as_module:
            return entry.as_module
        return None
    def analyse_as_type(self, env):
        """Interpret this name as a type; returns the type or None."""
        if self.cython_attribute:
            type = PyrexTypes.parse_basic_type(self.cython_attribute)
        else:
            type = PyrexTypes.parse_basic_type(self.name)
        if type:
            return type
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            return entry.type
        else:
            return None
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type.
        # Returns the extension type, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            if entry.type.is_extension_type or entry.type.is_builtin_type:
                return entry.type
        return None
    def analyse_target_declaration(self, env):
        """Declare the name in the current scope when used as an assignment target."""
        if not self.entry:
            self.entry = env.lookup_here(self.name)
        if not self.entry:
            if env.directives['warn.undeclared']:
                warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
            if env.directives['infer_types'] != False:
                # leave the type open for the type inferencer
                type = unspecified_type
            else:
                type = py_object_type
            self.entry = env.declare_var(self.name, type, self.pos)
        if self.entry.is_declared_generic:
            self.result_ctype = py_object_type
    def analyse_types(self, env):
        """Resolve the entry for an rvalue use of the name."""
        self.initialized_check = env.directives['initializedcheck']
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if not self.entry:
            # unknown names fall back to (possibly failing) builtin lookup
            self.entry = env.declare_builtin(self.name, self.pos)
        if not self.entry:
            self.type = PyrexTypes.error_type
            return self
        entry = self.entry
        if entry:
            entry.used = 1
            if entry.type.is_buffer:
                from . import Buffer
                Buffer.used_buffer_aux_vars(entry)
        self.analyse_rvalue_entry(env)
        return self
    def analyse_target_types(self, env):
        """Resolve the entry and validate the name as an assignment target."""
        self.analyse_entry(env, is_target=True)
        if self.entry.is_cfunction and self.entry.as_variable:
            # We need this for assigning to cpdef names and for the fused 'def' TreeFragment
            self.entry = self.entry.as_variable
            self.type = self.entry.type
        if self.type.is_const:
            error(self.pos, "Assignment to const '%s'" % self.name)
        if self.type.is_reference:
            error(self.pos, "Assignment to reference '%s'" % self.name)
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
            self.type = PyrexTypes.error_type
        self.entry.used = 1
        if self.entry.type.is_buffer:
            from . import Buffer
            Buffer.used_buffer_aux_vars(self.entry)
        return self
    def analyse_rvalue_entry(self, env):
        #print "NameNode.analyse_rvalue_entry:", self.name ###
        #print "Entry:", self.entry.__dict__ ###
        self.analyse_entry(env)
        entry = self.entry
        if entry.is_declared_generic:
            self.result_ctype = py_object_type
        if entry.is_pyglobal or entry.is_builtin:
            if entry.is_builtin and entry.is_const:
                # cached builtins need no lookup at use time
                self.is_temp = 0
            else:
                self.is_temp = 1
            self.is_used_as_rvalue = 1
        elif entry.type.is_memoryviewslice:
            self.is_temp = False
            self.is_used_as_rvalue = True
            self.use_managed_ref = True
        return self
    def nogil_check(self, env):
        """Report uses that require the GIL inside a nogil section."""
        self.nogil = True
        if self.is_used_as_rvalue:
            entry = self.entry
            if entry.is_builtin:
                if not entry.is_const: # cached builtins are ok
                    self.gil_error()
            elif entry.is_pyglobal:
                self.gil_error()
        elif self.entry.type.is_memoryviewslice:
            if self.cf_is_null or self.cf_maybe_null:
                from . import MemoryView
                MemoryView.err_if_nogil_initialized_check(self.pos, env)
    gil_message = "Accessing Python global or builtin"
    def analyse_entry(self, env, is_target=False):
        #print "NameNode.analyse_entry:", self.name ###
        self.check_identifier_kind()
        entry = self.entry
        type = entry.type
        if (not is_target and type.is_pyobject and self.inferred_type and
                self.inferred_type.is_builtin_type):
            # assume that type inference is smarter than the static entry
            type = self.inferred_type
        self.type = type
    def check_identifier_kind(self):
        # Check that this is an appropriate kind of name for use in an
        # expression.  Also finds the variable entry associated with
        # an extension type.
        entry = self.entry
        if entry.is_type and entry.type.is_extension_type:
            self.type_entry = entry
        if not (entry.is_const or entry.is_variable
                or entry.is_builtin or entry.is_cfunction
                or entry.is_cpp_class):
            if self.entry.as_variable:
                self.entry = self.entry.as_variable
            else:
                error(self.pos,
                      "'%s' is not a constant, variable or function identifier" % self.name)
    def is_simple(self):
        # If it's not a C variable, it'll be in a temp.
        return 1
    def may_be_none(self):
        """Use control flow state to decide whether this name may be None."""
        if self.cf_state and self.type and (self.type.is_pyobject or
                                            self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
            if getattr(self, '_none_checking', False):
                # self-dependency - either this node receives a None
                # value from *another* node, or it can not reference
                # None at this point => safe to assume "not None"
                return False
            self._none_checking = True
            # evaluate control flow state to see if there were any
            # potential None values assigned to the node so far
            may_be_none = False
            for assignment in self.cf_state:
                if assignment.rhs.may_be_none():
                    may_be_none = True
                    break
            del self._none_checking
            return may_be_none
        return super(NameNode, self).may_be_none()
    def nonlocally_immutable(self):
        """True if the value cannot be rebound from another scope."""
        if ExprNode.nonlocally_immutable(self):
            return True
        entry = self.entry
        if not entry or entry.in_closure:
            return False
        return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
    def calculate_target_results(self, env):
        # nothing to pre-compute for a plain name target
        pass
    def check_const(self):
        """Only const/cfunction/builtin names are valid in const expressions."""
        entry = self.entry
        if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
            self.not_const()
            return False
        return True
    def check_const_addr(self):
        """Only global/cfunction/builtin names have a constant address."""
        entry = self.entry
        if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
            self.addr_not_const()
            return False
        return True
    def is_lvalue(self):
        """Writable variables and overridable cpdef functions are lvalues."""
        return (
            self.entry.is_variable and
            not self.entry.is_readonly
        ) or (
            self.entry.is_cfunction and
            self.entry.is_overridable
        )
    def is_addressable(self):
        return self.entry.is_variable and not self.type.is_memoryviewslice
    def is_ephemeral(self):
        # Name nodes are never ephemeral, even if the
        # result is in a temporary.
        return 0
    def calculate_result_code(self):
        entry = self.entry
        if not entry:
            return "<error>" # There was an error earlier
        return entry.cname
    def generate_result_code(self, code):
        """Emit C code that looks the name up at runtime, as required.

        Dispatches on the kind of entry: class-body names use dict/item
        lookup, known builtins use the cached builtin lookup, module
        globals use the module dict, and locals/closure variables only
        need an unbound check.
        """
        assert hasattr(self, 'entry')
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if entry.is_builtin and entry.is_const:
            return # Lookup already cached
        elif entry.is_pyclass_attr:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.is_builtin:
                namespace = Naming.builtins_cname
            else: # entry.is_pyglobal
                namespace = entry.scope.namespace_cname
            if not self.cf_is_null:
                # try the class namespace first
                code.putln(
                    '%s = PyObject_GetItem(%s, %s);' % (
                        self.result(),
                        namespace,
                        interned_cname))
                code.putln('if (unlikely(!%s)) {' % self.result())
                code.putln('PyErr_Clear();')
            # fall back to a module-global lookup
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetModuleGlobalName(%s);' % (
                    self.result(),
                    interned_cname))
            if not self.cf_is_null:
                code.putln("}")
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
        elif entry.is_builtin and not entry.scope.is_module_scope:
            # known builtin
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetBuiltinName(%s); %s' % (
                    self.result(),
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
            # name in class body, global name or unknown builtin
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.scope.is_module_scope:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetModuleGlobalName(%s); %s' % (
                        self.result(),
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                # FIXME: is_pyglobal is also used for class namespace
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
                code.putln(
                    '%s = __Pyx_GetNameInClass(%s, %s); %s' % (
                        self.result(),
                        entry.scope.namespace_cname,
                        interned_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
            # Raise UnboundLocalError for objects and memoryviewslices
            raise_unbound = (
                (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
            null_code = entry.type.check_for_null_code(entry.cname)
            memslice_check = entry.type.is_memoryviewslice and self.initialized_check
            if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
                code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
    def generate_assignment_code(self, rhs, code):
        """Emit C code assigning 'rhs' to this name.

        Python globals and class attributes go through dict/attr setters;
        C-level variables get reference-counted in-place assignment, with
        special handling for memoryview slices and buffers.
        """
        #print "NameNode.generate_assignment_code:", self.name ###
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
                and not self.lhs_of_first_assignment and not rhs.in_module_scope):
            error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module level-globals only.
        # We use this to access class->tp_dict if necessary.
        if entry.is_pyglobal:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            namespace = self.entry.scope.namespace_cname
            if entry.is_member:
                # if the entry is a member we have to cheat: SetAttr does not work
                # on types, so we create a descriptor which is then added to tp_dict
                setter = 'PyDict_SetItem'
                namespace = '%s->tp_dict' % namespace
            elif entry.scope.is_module_scope:
                setter = 'PyDict_SetItem'
                namespace = Naming.moddict_cname
            elif entry.is_pyclass_attr:
                setter = 'PyObject_SetItem'
            else:
                assert False, repr(entry)
            code.put_error_if_neg(
                self.pos,
                '%s(%s, %s, %s)' % (
                    setter,
                    namespace,
                    interned_cname,
                    rhs.py_result()))
            if debug_disposal_code:
                print("NameNode.generate_assignment_code:")
                print("...generating disposal code for %s" % rhs)
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
            if entry.is_member:
                # in Py2.6+, we need to invalidate the method cache
                code.putln("PyType_Modified(%s);" %
                           entry.scope.parent_type.typeptr_cname)
        else:
            if self.type.is_memoryviewslice:
                self.generate_acquire_memoryviewslice(rhs, code)
            elif self.type.is_buffer:
                # Generate code for doing the buffer release/acquisition.
                # This might raise an exception in which case the assignment (done
                # below) will not happen.
                #
                # The reason this is not in a typetest-like node is because the
                # variables that the acquired buffer info is stored to is allocated
                # per entry and coupled with it.
                self.generate_acquire_buffer(rhs, code)
            assigned = False
            if self.type.is_pyobject:
                #print "NameNode.generate_assignment_code: to", self.name ###
                #print "...from", rhs ###
                #print "...LHS type", self.type, "ctype", self.ctype() ###
                #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
                if self.use_managed_ref:
                    rhs.make_owned_reference(code)
                    is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
                    if is_external_ref:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xgotref(self.py_result())
                            else:
                                code.put_gotref(self.py_result())
                    assigned = True
                    if entry.is_cglobal:
                        code.put_decref_set(
                            self.result(), rhs.result_as(self.ctype()))
                    else:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xdecref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                            else:
                                code.put_decref_set(
                                    self.result(), rhs.result_as(self.ctype()))
                        else:
                            assigned = False
                    if is_external_ref:
                        code.put_giveref(rhs.py_result())
            if not self.type.is_memoryviewslice:
                if not assigned:
                    code.putln('%s = %s;' % (
                        self.result(), rhs.result_as(self.ctype())))
                if debug_disposal_code:
                    print("NameNode.generate_assignment_code:")
                    print("...generating post-assignment code for %s" % rhs)
                rhs.generate_post_assignment_code(code)
            elif rhs.result_in_temp():
                rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
    def generate_acquire_memoryviewslice(self, rhs, code):
        """
        Slices, coercions from objects, return values etc are new references.
        We have a borrowed reference in case of dst = src
        """
        from . import MemoryView
        MemoryView.put_acquire_memoryviewslice(
            lhs_cname=self.result(),
            lhs_type=self.type,
            lhs_pos=self.pos,
            rhs=rhs,
            code=code,
            have_gil=not self.in_nogil_context,
            first_assignment=self.cf_is_null)
    def generate_acquire_buffer(self, rhs, code):
        """Emit buffer release/re-acquisition code for a buffer assignment."""
        # rhstmp is only used in case the rhs is a complicated expression leading to
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
        pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
        if pretty_rhs:
            rhstmp = rhs.result_as(self.ctype())
        else:
            rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
            code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
        from . import Buffer
        Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
                                    is_initialized=not self.lhs_of_first_assignment,
                                    pos=self.pos, code=code)
        if not pretty_rhs:
            code.putln("%s = 0;" % rhstmp)
            code.funcstate.release_temp(rhstmp)
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit C code for 'del name'.

        Class attributes use item deletion (with a NameError fix-up on
        KeyError), module globals use attribute deletion, and C-level
        Python objects / memoryview slices are decrefed and cleared.
        """
        if self.entry is None:
            return # There was an error earlier
        elif self.entry.is_pyclass_attr:
            namespace = self.entry.scope.namespace_cname
            interned_cname = code.intern_identifier(self.entry.name)
            if ignore_nonexisting:
                key_error_code = 'PyErr_Clear(); else'
            else:
                # minor hack: fake a NameError on KeyError
                key_error_code = (
                    '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
                    self.entry.name)
            code.putln(
                'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
                ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
                ' %s '
                '}' % (namespace, interned_cname,
                       key_error_code,
                       code.error_goto(self.pos)))
        elif self.entry.is_pyglobal:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            interned_cname = code.intern_identifier(self.entry.name)
            del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                Naming.module_cname, interned_cname)
            if ignore_nonexisting:
                code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
                    del_code,
                    code.error_goto(self.pos)))
            else:
                code.put_error_if_neg(self.pos, del_code)
        elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
            if not self.cf_is_null:
                if self.cf_maybe_null and not ignore_nonexisting:
                    code.put_error_if_unbound(self.pos, self.entry)
                if self.entry.type.is_pyobject:
                    if self.entry.in_closure:
                        # generator
                        if ignore_nonexisting and self.cf_maybe_null:
                            code.put_xgotref(self.result())
                        else:
                            code.put_gotref(self.result())
                    if ignore_nonexisting and self.cf_maybe_null:
                        code.put_xdecref(self.result(), self.ctype())
                    else:
                        code.put_decref(self.result(), self.ctype())
                    code.putln('%s = NULL;' % self.result())
                else:
                    code.put_xdecref_memoryviewslice(self.entry.cname,
                                                     have_gil=not self.nogil)
        else:
            error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
    """Py2 backquote expression `expr` -- evaluates to repr(expr)."""
    #  arg    ExprNode

    type = py_object_type
    gil_message = "Backquote expression"

    subexprs = ['arg']

    def analyse_types(self, env):
        # repr() always produces a Python object, so coerce the operand.
        self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
        self.is_temp = 1
        return self

    def calculate_constant_result(self):
        self.constant_result = repr(self.arg.constant_result)

    def generate_result_code(self, code):
        result = self.result()
        code.putln(
            "%s = PyObject_Repr(%s); %s" % (
                result,
                self.arg.py_result(),
                code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())
class ImportNode(ExprNode):
    #  Used as part of import statement implementation.
    #  Implements result =
    #    __import__(module_name, globals(), None, name_list, level)
    #
    #  module_name   StringNode            dotted name of module. Empty module
    #                       name means importing the parent package according
    #                       to level
    #  name_list     ListNode or None      list of names to be imported
    #  level         int                   relative import level:
    #                       -1: attempt both relative import and absolute import;
    #                        0: absolute import;
    #                       >0: the number of parent directories to search
    #                           relative to the current module.
    #                     None: decide the level according to language level and
    #                           directives
    type = py_object_type
    subexprs = ['module_name', 'name_list']
    def analyse_types(self, env):
        if self.level is None:
            # No explicit level: fall back to Py2 relative-then-absolute
            # semantics unless absolute_import is active (or py2_import forces it).
            if (env.directives['py2_import'] or
                Future.absolute_import not in env.global_scope().context.future_directives):
                self.level = -1
            else:
                self.level = 0
        module_name = self.module_name.analyse_types(env)
        self.module_name = module_name.coerce_to_pyobject(env)
        if self.name_list:
            name_list = self.name_list.analyse_types(env)
            self.name_list = name_list.coerce_to_pyobject(env)
        self.is_temp = 1
        env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
        return self
    gil_message = "Python import"
    def generate_result_code(self, code):
        # __Pyx_Import(name, fromlist, level) returns a new reference or NULL.
        if self.name_list:
            name_list_code = self.name_list.py_result()
        else:
            name_list_code = "0"
        code.putln(
            "%s = __Pyx_Import(%s, %s, %d); %s" % (
                self.result(),
                self.module_name.py_result(),
                name_list_code,
                self.level,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
    #  Used as part of for statement implementation.
    #
    #  Implements result = iter(sequence)
    #
    #  sequence   ExprNode
    type = py_object_type
    iter_func_ptr = None          # temp holding tp_iternext for generic iterables
    counter_cname = None          # temp index for list/tuple fast path
    cpp_iterator_cname = None     # temp holding a pointer to a C++ container
    reversed = False    # currently only used for list/tuple types (see Optimize.py)
    subexprs = ['sequence']
    def analyse_types(self, env):
        """Type the sequence and decide which iteration strategy applies:
        raw C array/pointer, C++ begin()/end(), or generic Python protocol."""
        self.sequence = self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        elif self.sequence.type.is_cpp_class:
            self.analyse_cpp_types(env)
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type is list_type or \
                    self.sequence.type is tuple_type:
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
        return self
    gil_message = "Iterating over Python object"
    # C type of the cached tp_iternext slot: PyObject *(*)(PyObject *)
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))
    def type_dependencies(self, env):
        return self.sequence.type_dependencies(env)
    def infer_type(self, env):
        """Infer the iterator's type without full analysis (used by type
        inference passes)."""
        sequence_type = self.sequence.infer_type(env)
        if sequence_type.is_array or sequence_type.is_ptr:
            return sequence_type
        elif sequence_type.is_cpp_class:
            begin = sequence_type.scope.lookup("begin")
            if begin is not None:
                return begin.type.return_type
        elif sequence_type.is_pyobject:
            return sequence_type
        return py_object_type
    def analyse_cpp_types(self, env):
        """Validate the C++ iteration protocol on the sequence type:
        begin()/end() plus operator!=, operator++ and operator* on the
        iterator type (or pointer iterators with matching types)."""
        sequence_type = self.sequence.type
        if sequence_type.is_ptr:
            sequence_type = sequence_type.base_type
        begin = sequence_type.scope.lookup("begin")
        end = sequence_type.scope.lookup("end")
        if (begin is None
            or not begin.type.is_cfunction
            or begin.type.args):
            error(self.pos, "missing begin() on %s" % self.sequence.type)
            self.type = error_type
            return
        if (end is None
            or not end.type.is_cfunction
            or end.type.args):
            error(self.pos, "missing end() on %s" % self.sequence.type)
            self.type = error_type
            return
        iter_type = begin.type.return_type
        if iter_type.is_cpp_class:
            if env.lookup_operator_for_types(
                    self.pos,
                    "!=",
                    [iter_type, end.type.return_type]) is None:
                error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
                error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
                error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            self.type = iter_type
        elif iter_type.is_ptr:
            if not (iter_type == end.type.return_type):
                error(self.pos, "incompatible types for begin() and end()")
            self.type = iter_type
        else:
            error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
            self.type = error_type
            return
    def generate_result_code(self, code):
        """Emit C code acquiring the iterator: begin() for C++ containers,
        an index counter fast path for (possible) list/tuple, or
        PyObject_GetIter() with a cached tp_iternext for everything else."""
        sequence_type = self.sequence.type
        if sequence_type.is_cpp_class:
            if self.sequence.is_name:
                # safe: C++ won't allow you to reassign to class references
                begin_func = "%s.begin" % self.sequence.result()
            else:
                sequence_type = PyrexTypes.c_ptr_type(sequence_type)
                self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
                code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
                begin_func = "%s->begin" % self.cpp_iterator_cname
            # TODO: Limit scope.
            code.putln("%s = %s();" % (self.result(), begin_func))
            return
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")
        is_builtin_sequence = sequence_type is list_type or \
                              sequence_type is tuple_type
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            # runtime type check selects between indexed and generic iteration
            code.putln(
                "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))
        if is_builtin_sequence or self.may_be_a_sequence:
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln(
                "%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                    self.result(),
                    self.sequence.py_result(),
                    self.result(),
                    self.counter_cname,
                    init_value
                    ))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iter_func_ptr marks the indexed fast path at runtime
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)
            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
            # makes it visible to the C compiler that the pointer really isn't NULL, so that
            # it can distinguish between the special cases and the generic case
            code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
                self.iter_func_ptr, self.py_result(),
                code.error_goto_if_null(self.iter_func_ptr, self.pos)))
        if self.may_be_a_sequence:
            code.putln("}")
    def generate_next_sequence_item(self, test_name, result_name, code):
        """Emit the fast-path fetch of the next list/tuple item by index.
        test_name is 'List' or 'Tuple' (used to build the C macro names)."""
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
        if self.sequence.is_sequence_constructor:
            item_count = len(self.sequence.args)
            if self.sequence.mult_factor is None:
                final_size = item_count
            # NOTE(review): 'long' is Py2-only; verify how Py3 builds of this
            # file handle the name before touching it.
            elif isinstance(self.sequence.mult_factor.constant_result, (int, long)):
                final_size = item_count * self.sequence.mult_factor.constant_result
        code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec,
                # use the error label to avoid C compiler warnings if we only use it below
                code.error_goto_if_neg('0', self.pos)
                ))
        code.putln("#else")
        code.putln(
            "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
                result_name,
                self.py_result(),
                self.counter_cname,
                self.counter_cname,
                inc_dec,
                code.error_goto_if_null(result_name, self.pos)))
        code.putln("#endif")
    def generate_iter_next_result_code(self, result_name, code):
        """Emit the per-iteration 'fetch next item or break' code into the
        surrounding loop, mirroring the strategy chosen in
        generate_result_code()."""
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type.is_cpp_class:
            if self.cpp_iterator_cname:
                end_func = "%s->end" % self.cpp_iterator_cname
            else:
                end_func = "%s.end" % self.sequence.result()
            # TODO: Cache end() call?
            code.putln("if (!(%s != %s())) break;" % (
                self.result(),
                end_func))
            code.putln("%s = *%s;" % (
                result_name,
                self.result()))
            code.putln("++%s;" % self.result())
            return
        elif sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return
        if self.may_be_a_sequence:
            code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
            code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
            self.generate_next_sequence_item('List', result_name, code)
            code.putln("} else {")
            self.generate_next_sequence_item('Tuple', result_name, code)
            code.putln("}")
            code.put("} else ")
        code.putln("{")
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        code.putln("if (unlikely(!%s)) {" % result_name)
        # NULL from tp_iternext means either exhaustion (StopIteration or no
        # exception at all) or a real error; only the latter propagates.
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                   " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name)
        code.putln("}")
    def free_temps(self, code):
        # release the extra temps this node allocated outside the normal cycle
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        if self.cpp_iterator_cname:
            code.funcstate.release_temp(self.cpp_iterator_cname)
        ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    #  Used as part of for statement implementation.
    #  Implements result = iterator.next()
    #  Created during analyse_types phase.
    #  The iterator is not owned by this node.
    #
    #  iterator   IteratorNode
    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator
    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)
    def infer_type(self, env, iterator_type = None):
        """Infer the item type produced by one iteration step.

        For C pointers/arrays this is the element type, for C++ iterators
        the (de-const'ed, de-ref'ed) result of operator*, and otherwise the
        type a generic index access on the sequence would produce.
        """
        if iterator_type is None:
            iterator_type = self.iterator.infer_type(env)
        if iterator_type.is_ptr or iterator_type.is_array:
            return iterator_type.base_type
        elif iterator_type.is_cpp_class:
            item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
            if item_type.is_reference:
                item_type = item_type.ref_base_type
            if item_type.is_const:
                item_type = item_type.const_base_type
            return item_type
        else:
            # Avoid duplication of complicated logic.
            fake_index_node = IndexNode(
                self.pos,
                base=self.iterator.sequence,
                index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
                              type=PyrexTypes.c_py_ssize_t_type))
            return fake_index_node.infer_type(env)
    def analyse_types(self, env):
        self.type = self.infer_type(env, self.iterator.type)
        self.is_temp = 1
        return self
    def generate_result_code(self, code):
        # the iterator node emits the actual fetch-or-break code
        self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement.  Used in both the
    # except and finally clauses.
    # with_stat  WithStatNode                the surrounding 'with' statement
    # args       TupleNode or ResultStatNode the exception info tuple
    subexprs = ['args']
    # guard the call with "if (exit_var)" so __exit__ runs at most once
    test_if_run = True
    def analyse_types(self, env):
        self.args = self.args.analyse_types(env)
        self.type = PyrexTypes.c_bint_type
        self.is_temp = True
        return self
    def generate_evaluation_code(self, code):
        """Emit the guarded __exit__(*exc_info) call; the boolean result
        (truthiness of the return value) tells the caller whether the
        exception was suppressed."""
        if self.test_if_run:
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)
        self.args.generate_evaluation_code(code)
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.mark_pos(self.pos)
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        # clear exit_var first so a second pass through this code is a no-op
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        self.args.generate_disposal_code(code)
        self.args.free_temps(code)
        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var)
        if self.result_is_used:
            self.allocate_temp_result(code)
            code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        if self.result_is_used:
            code.put_error_if_neg(self.pos, self.result())
        code.funcstate.release_temp(result_var)
        if self.test_if_run:
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    """Synthetic node created during the analyse_types phase of an
    ExceptClauseNode; it exposes the current exception value through a
    caller-supplied C variable name."""

    type = py_object_type

    def __init__(self, pos):
        ExprNode.__init__(self, pos)

    def set_var(self, var):
        # 'var' is the cname that already holds the exception value.
        self.var = var

    def calculate_result_code(self):
        return self.var

    def analyse_types(self, env):
        # Fully typed at construction time -- nothing to analyse.
        return self

    def generate_result_code(self, code):
        # The value already lives in self.var; no code to emit.
        pass
class TempNode(ExprNode):
    """Node created during the analyse_types phase of some nodes to hold
    a temporary value.

    Note: one must call "allocate" and "release" on the node during code
    generation to get/release the temp, because the temp result is often
    used outside of the regular code-generation cycle.
    """

    subexprs = []

    def __init__(self, pos, type, env=None):
        # 'env' is accepted for interface compatibility but unused here.
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1

    def analyse_types(self, env):
        return self

    def analyse_target_declaration(self, env):
        pass

    def generate_result_code(self, code):
        pass

    def allocate(self, code):
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)

    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None

    def result(self):
        try:
            return self.temp_cname
        except AttributeError:
            # Narrowed from a bare 'except:' -- only the missing-attribute
            # case (allocate() never called) should trigger this assertion;
            # other exceptions (e.g. KeyboardInterrupt) must not be masked.
            assert False, "Remember to call allocate/release on TempNode"
            raise

    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass

    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    #  TempNode holding a Python value.
    def __init__(self, pos, env):
        # 'env' is passed through to TempNode, which ignores it.
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    """Expression node that evaluates to a raw, caller-supplied C name.

    The cname may be provided at construction time or injected later via
    set_cname(); no code is generated for it.
    """

    subexprs = []

    def __init__(self, pos, type=None, cname=None):
        ExprNode.__init__(self, pos, type=type)
        if cname is not None:
            self.cname = cname

    def set_cname(self, cname):
        self.cname = cname

    def analyse_types(self, env):
        # Already fully typed -- nothing to do.
        return self

    def result(self):
        return self.cname

    def generate_result_code(self, code):
        pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment

    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """

    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self

    def generate_result_code(self, code):
        code.putln("#ifdef _OPENMP")
        # Inside a parallel region, omp_get_num_threads() reports the size
        # of the current team; in the sequential part it would always be 1,
        # so there we ask omp_get_max_threads() for the team size a parallel
        # region would get.  (The original emitted the two calls swapped,
        # contradicting the docstring above.)
        code.putln("if (omp_in_parallel()) %s = omp_get_num_threads();" %
                   self.temp_code)
        code.putln("else %s = omp_get_max_threads();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")

    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self
    def generate_result_code(self, code):
        # omp_get_thread_num() is only available when compiled with OpenMP;
        # without it there is a single thread with id 0.
        code.putln("#ifdef _OPENMP")
        code.putln("%s = omp_get_thread_num();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 0;" % self.temp_code)
        code.putln("#endif")
    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
    #  Sequence indexing.
    #
    #  base     ExprNode
    #  index    ExprNode
    #  indices  [ExprNode]
    #  type_indices  [PyrexType]
    #  is_buffer_access boolean Whether this is a buffer access.
    #
    #  indices is used on buffer access, index on non-buffer access.
    #  The former contains a clean list of index parameters, the
    #  latter whatever Python object is needed for index access.
    #
    #  is_fused_index boolean   Whether the index is used to specialize a
    #                           c(p)def function
    subexprs = ['base', 'index', 'indices']
    indices = None
    type_indices = None
    is_subscript = True
    is_fused_index = False
    # Whether we're assigning to a buffer (in that case it needs to be
    # writable)
    writable_needed = False
    # Whether we are indexing or slicing a memoryviewslice
    memslice_index = False
    memslice_slice = False
    is_memslice_copy = False
    memslice_ellipsis_noop = False
    # class-level flag: the "untyped index" warning is only issued once
    warned_untyped_idx = False
    # set by SingleAssignmentNode after analyse_types()
    is_memslice_scalar_assignment = False
    def __init__(self, pos, index, **kw):
        ExprNode.__init__(self, pos, index=index, **kw)
        # keep the original (unanalysed) index around; self.index may later
        # be replaced or set to None during analysis
        self._index = index
    def calculate_constant_result(self):
        # constant folding: evaluate base[index] on the Python values
        self.constant_result = \
            self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception, e:
self.compile_time_value_error(e)
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
basestring_type, str_type, bytes_type, unicode_type)
def is_simple(self):
if self.is_buffer_access or self.memslice_index:
return False
elif self.memslice_slice:
return True
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
    def analyse_target_declaration(self, env):
        # indexing targets declare nothing; analysis happens in
        # analyse_target_types()
        pass
    def analyse_as_type(self, env):
        """Try to interpret this subscript as a type: either a templated
        C++ class instantiation (Base[T1, T2]) or a C array type
        (Base[const_size]).  Returns the type or None."""
        base_type = self.base.analyse_as_type(env)
        if base_type and not base_type.is_pyobject:
            if base_type.is_cpp_class:
                if isinstance(self.index, TupleNode):
                    template_values = self.index.args
                else:
                    template_values = [self.index]
                type_node = Nodes.TemplatedTypeNode(
                    pos = self.pos,
                    positional_args = template_values,
                    keyword_args = None)
                return type_node.analyse(env, base_type = base_type)
            else:
                # C array: the index must be a compile-time constant size
                index = self.index.compile_time_value(env)
                if index is not None:
                    return PyrexTypes.CArrayType(base_type, int(index))
                error(self.pos, "Array size must be a compile time constant")
        return None
    def type_dependencies(self, env):
        # type inference depends on both the base and the index expression
        return self.base.type_dependencies(env) + self.index.type_dependencies(env)
    def infer_type(self, env):
        """Infer the result type of base[index] without full analysis.

        Distinguishes slicing (returns a same-typed or Python object),
        integer indexing (element types of strings, sequences, pointers,
        ctuples, C++ operator[]) and the unknown case (py_object_type).
        """
        base_type = self.base.infer_type(env)
        if isinstance(self.index, SliceNode):
            # slicing!
            if base_type.is_string:
                # sliced C strings must coerce to Python
                return bytes_type
            elif base_type.is_pyunicode_ptr:
                # sliced Py_UNICODE* strings must coerce to Python
                return unicode_type
            elif base_type in (unicode_type, bytes_type, str_type,
                               bytearray_type, list_type, tuple_type):
                # slicing these returns the same type
                return base_type
            else:
                # TODO: Handle buffers (hopefully without too much redundancy).
                return py_object_type
        index_type = self.index.infer_type(env)
        if index_type and index_type.is_int or isinstance(self.index, IntNode):
            # indexing!
            if base_type is unicode_type:
                # Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is safe. We only infer Py_UCS4
                # when the index is a C integer type. Otherwise, we may
                # need to use normal Python item access, in which case
                # it's faster to return the one-char unicode string than
                # to receive it, throw it away, and potentially rebuild it
                # on a subsequent PyObject coercion.
                return PyrexTypes.c_py_ucs4_type
            elif base_type is str_type:
                # always returns str - Py2: bytes, Py3: unicode
                return base_type
            elif base_type is bytearray_type:
                return PyrexTypes.c_uchar_type
            elif isinstance(self.base, BytesNode):
                #if env.global_scope().context.language_level >= 3:
                #    # inferring 'char' can be made to work in Python 3 mode
                #    return PyrexTypes.c_char_type
                # Py2/3 return different types on indexing bytes objects
                return py_object_type
            elif base_type in (tuple_type, list_type):
                # if base is a literal, take a look at its values
                item_type = infer_sequence_item_type(
                    env, self.base, self.index, seq_type=base_type)
                if item_type is not None:
                    return item_type
            elif base_type.is_ptr or base_type.is_array:
                return base_type.base_type
            elif base_type.is_ctuple and isinstance(self.index, IntNode):
                # constant ctuple index: look up the component type directly
                if self.index.has_constant_result():
                    index = self.index.constant_result
                    if index < 0:
                        index += base_type.size
                    if 0 <= index < base_type.size:
                        return base_type.components[index]
        if base_type.is_cpp_class:
            # probe for operator[] with lightweight stand-in operands
            class FakeOperand:
                def __init__(self, **kwds):
                    self.__dict__.update(kwds)
            operands = [
                FakeOperand(pos=self.pos, type=base_type),
                FakeOperand(pos=self.pos, type=index_type),
                ]
            index_func = env.lookup_operator('[]', operands)
            if index_func is not None:
                return index_func.type.return_type
        # may be slicing or indexing, we don't know
        if base_type in (unicode_type, str_type):
            # these types always returns their own type on Python indexing/slicing
            return base_type
        else:
            # TODO: Handle buffers (hopefully without too much redundancy).
            return py_object_type
    def analyse_types(self, env):
        # rvalue context: analyse for reading (getting=True)
        return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
    def analyse_base_and_index_types(self, env, getting=False, setting=False,
                                     analyse_base=True):
        """Main type analysis for subscripts, shared by rvalue (getting)
        and assignment-target (setting) contexts.

        Classifies the access as one of: memoryview copy ([...]),
        memoryview index/slice, buffer access, generic Python indexing,
        C pointer/array indexing, C++ operator[], fused/template function
        indexing, or ctuple indexing -- and sets self.type accordingly.
        """
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only construct the tuple if
        # needed.
        # Note that this function must leave IndexNode in a cloneable state.
        # For buffers, self.index is packed out on the initial analysis, and
        # when cloning self.indices is copied.
        self.is_buffer_access = False
        # a[...] = b
        self.is_memslice_copy = False
        # incomplete indexing, Ellipsis indexing or slicing
        self.memslice_slice = False
        # integer indexing
        self.memslice_index = False
        if analyse_base:
            self.base = self.base.analyse_types(env)
        if self.base.type.is_error:
            # Do not visit child tree if base is undeclared to avoid confusing
            # error messages
            self.type = PyrexTypes.error_type
            return self
        is_slice = isinstance(self.index, SliceNode)
        if not env.directives['wraparound']:
            if is_slice:
                check_negative_indices(self.index.start, self.index.stop)
            else:
                check_negative_indices(self.index)
        # Potentially overflowing index value.
        if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
            self.index = self.index.coerce_to_pyobject(env)
        is_memslice = self.base.type.is_memoryviewslice
        # Handle the case where base is a literal char* (and we expect a string, not an int)
        if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
            if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
                self.base = self.base.coerce_to_pyobject(env)
        skip_child_analysis = False
        buffer_access = False
        # normalise the index expression(s) into a flat list
        if self.indices:
            indices = self.indices
        elif isinstance(self.index, TupleNode):
            indices = self.index.args
        else:
            indices = [self.index]
        if (is_memslice and not self.indices and
                isinstance(self.index, EllipsisNode)):
            # Memoryviewslice copying
            self.is_memslice_copy = True
        elif is_memslice:
            # memoryviewslice indexing or slicing
            from . import MemoryView
            skip_child_analysis = True
            newaxes = [newaxis for newaxis in indices if newaxis.is_none]
            have_slices, indices = MemoryView.unellipsify(indices,
                                                          newaxes,
                                                          self.base.type.ndim)
            self.memslice_index = (not newaxes and
                                   len(indices) == self.base.type.ndim)
            axes = []
            index_type = PyrexTypes.c_py_ssize_t_type
            new_indices = []
            if len(indices) - len(newaxes) > self.base.type.ndim:
                self.type = error_type
                error(indices[self.base.type.ndim].pos,
                      "Too many indices specified for type %s" %
                      self.base.type)
                return self
            axis_idx = 0
            for i, index in enumerate(indices[:]):
                index = index.analyse_types(env)
                if not index.is_none:
                    access, packing = self.base.type.axes[axis_idx]
                    axis_idx += 1
                if isinstance(index, SliceNode):
                    self.memslice_slice = True
                    if index.step.is_none:
                        axes.append((access, packing))
                    else:
                        axes.append((access, 'strided'))
                    # Coerce start, stop and step to temps of the right type
                    for attr in ('start', 'stop', 'step'):
                        value = getattr(index, attr)
                        if not value.is_none:
                            value = value.coerce_to(index_type, env)
                            #value = value.coerce_to_temp(env)
                            setattr(index, attr, value)
                            new_indices.append(value)
                elif index.is_none:
                    # None inserts a new axis (numpy-style newaxis)
                    self.memslice_slice = True
                    new_indices.append(index)
                    axes.append(('direct', 'strided'))
                elif index.type.is_int or index.type.is_pyobject:
                    if index.type.is_pyobject and not self.warned_untyped_idx:
                        warning(index.pos, "Index should be typed for more "
                                           "efficient access", level=2)
                        IndexNode.warned_untyped_idx = True
                    self.memslice_index = True
                    index = index.coerce_to(index_type, env)
                    indices[i] = index
                    new_indices.append(index)
                else:
                    self.type = error_type
                    error(index.pos, "Invalid index for memoryview specified")
                    return self
            # full integer indexing only if no slice component was seen
            self.memslice_index = self.memslice_index and not self.memslice_slice
            self.original_indices = indices
            # All indices with all start/stop/step for slices.
            # We need to keep this around
            self.indices = new_indices
            self.env = env
        elif self.base.type.is_buffer:
            # Buffer indexing
            if len(indices) == self.base.type.ndim:
                buffer_access = True
                skip_child_analysis = True
                for x in indices:
                    x = x.analyse_types(env)
                    if not x.type.is_int:
                        buffer_access = False
            if buffer_access and not self.base.type.is_memoryviewslice:
                assert hasattr(self.base, "entry") # Must be a NameNode-like node
        # On cloning, indices is cloned. Otherwise, unpack index into indices
        assert not (buffer_access and isinstance(self.index, CloneNode))
        self.nogil = env.nogil
        if buffer_access or self.memslice_index:
            #if self.base.type.is_memoryviewslice and not self.base.is_name:
            #    self.base = self.base.coerce_to_temp(env)
            self.base = self.base.coerce_to_simple(env)
            self.indices = indices
            self.index = None
            self.type = self.base.type.dtype
            self.is_buffer_access = True
            self.buffer_type = self.base.type #self.base.entry.type
            if getting and self.type.is_pyobject:
                self.is_temp = True
            if setting and self.base.type.is_memoryviewslice:
                self.base.type.writable_needed = True
            elif setting:
                if not self.base.entry.type.writable:
                    error(self.pos, "Writing to readonly buffer")
                else:
                    self.writable_needed = True
                    if self.base.type.is_buffer:
                        self.base.entry.buffer_aux.writable_needed = True
        elif self.is_memslice_copy:
            self.type = self.base.type
            if getting:
                self.memslice_ellipsis_noop = True
            else:
                self.memslice_broadcast = True
        elif self.memslice_slice:
            self.index = None
            self.is_temp = True
            self.use_managed_ref = True
            if not MemoryView.validate_axes(self.pos, axes):
                self.type = error_type
                return self
            self.type = PyrexTypes.MemoryViewSliceType(
                self.base.type.dtype, axes)
            if (self.base.type.is_memoryviewslice and not
                    self.base.is_name and not
                    self.base.result_in_temp()):
                self.base = self.base.coerce_to_temp(env)
            if setting:
                self.memslice_broadcast = True
        else:
            base_type = self.base.type
            if not base_type.is_cfunction:
                if isinstance(self.index, TupleNode):
                    self.index = self.index.analyse_types(
                        env, skip_children=skip_child_analysis)
                elif not skip_child_analysis:
                    self.index = self.index.analyse_types(env)
                self.original_index_type = self.index.type
            if base_type.is_unicode_char:
                # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
                # cases, but indexing must still work for them
                if setting:
                    warning(self.pos, "cannot assign to Unicode string index", level=1)
                elif self.index.constant_result in (0, -1):
                    # uchar[0] => uchar
                    return self.base
                self.base = self.base.coerce_to_pyobject(env)
                base_type = self.base.type
            if base_type.is_pyobject:
                if self.index.type.is_int and base_type is not dict_type:
                    # direct item access can drop the temp when bounds/wraparound
                    # checks are provably unnecessary
                    if (getting
                        and (base_type in (list_type, tuple_type, bytearray_type))
                        and (not self.index.type.signed
                             or not env.directives['wraparound']
                             or (isinstance(self.index, IntNode) and
                                 self.index.has_constant_result() and self.index.constant_result >= 0))
                        and not env.directives['boundscheck']):
                        self.is_temp = 0
                    else:
                        self.is_temp = 1
                    self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
                    self.original_index_type.create_to_py_utility_code(env)
                else:
                    self.index = self.index.coerce_to_pyobject(env)
                    self.is_temp = 1
                if self.index.type.is_int and base_type is unicode_type:
                    # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
                    # if required, so this is fast and safe
                    self.type = PyrexTypes.c_py_ucs4_type
                elif self.index.type.is_int and base_type is bytearray_type:
                    if setting:
                        self.type = PyrexTypes.c_uchar_type
                    else:
                        # not using 'uchar' to enable fast and safe error reporting as '-1'
                        self.type = PyrexTypes.c_int_type
                elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
                    self.type = base_type
                else:
                    item_type = None
                    if base_type in (list_type, tuple_type) and self.index.type.is_int:
                        item_type = infer_sequence_item_type(
                            env, self.base, self.index, seq_type=base_type)
                    if item_type is None:
                        item_type = py_object_type
                    self.type = item_type
                    if base_type in (list_type, tuple_type, dict_type):
                        # do the None check explicitly (not in a helper) to allow optimising it away
                        self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
            else:
                if base_type.is_ptr or base_type.is_array:
                    self.type = base_type.base_type
                    if is_slice:
                        self.type = base_type
                    elif self.index.type.is_pyobject:
                        self.index = self.index.coerce_to(
                            PyrexTypes.c_py_ssize_t_type, env)
                    elif not self.index.type.is_int:
                        error(self.pos,
                              "Invalid index type '%s'" %
                              self.index.type)
                elif base_type.is_cpp_class:
                    function = env.lookup_operator("[]", [self.base, self.index])
                    if function is None:
                        error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
                        self.type = PyrexTypes.error_type
                        self.result_code = "<error>"
                        return self
                    func_type = function.type
                    if func_type.is_ptr:
                        func_type = func_type.base_type
                    self.index = self.index.coerce_to(func_type.args[0].type, env)
                    self.type = func_type.return_type
                    if setting and not func_type.return_type.is_reference:
                        error(self.pos, "Can't set non-reference result '%s'" % self.type)
                elif base_type.is_cfunction:
                    # subscripting a cfunction specializes fused or template args
                    if base_type.is_fused:
                        self.parse_indexed_fused_cdef(env)
                    else:
                        self.type_indices = self.parse_index_as_types(env)
                        if base_type.templates is None:
                            error(self.pos, "Can only parameterize template functions.")
                        elif len(base_type.templates) != len(self.type_indices):
                            error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
                                    (len(base_type.templates), len(self.type_indices))))
                        self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
                elif base_type.is_ctuple:
                    if isinstance(self.index, IntNode) and self.index.has_constant_result():
                        index = self.index.constant_result
                        if -base_type.size <= index < base_type.size:
                            if index < 0:
                                index += base_type.size
                            self.type = base_type.components[index]
                        else:
                            error(self.pos,
                                  "Index %s out of bounds for '%s'" %
                                  (index, base_type))
                            self.type = PyrexTypes.error_type
                    else:
                        # non-constant ctuple index: fall back to Python semantics
                        self.base = self.base.coerce_to_pyobject(env)
                        return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
                else:
                    error(self.pos,
                          "Attempting to index non-array type '%s'" %
                          base_type)
                    self.type = PyrexTypes.error_type
        self.wrap_in_nonecheck_node(env, getting)
        return self
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
if self.base.type.is_memoryviewslice:
if self.is_memslice_copy and not getting:
msg = "Cannot assign to None memoryview slice"
elif self.memslice_slice:
msg = "Cannot slice None memoryview slice"
else:
msg = "Cannot index None memoryview slice"
else:
msg = "'NoneType' object is not subscriptable"
self.base = self.base.as_none_safe_node(msg)
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
    def parse_indexed_fused_cdef(self, env):
        """
        Interpret fused_cdef_func[specific_type1, ...]

        Note that if this method is called, we are an indexed cdef function
        with fused argument types, and this IndexNode will be replaced by the
        NameNode with specific entry just after analysis of expressions by
        AnalyseExpressionsTransform.

        Reports an error (leaving self.type as error_type) when the index
        does not name a valid, complete specialization of the fused types.
        """
        # Default to error_type; overwritten below on a successful match.
        self.type = PyrexTypes.error_type
        self.is_fused_index = True
        base_type = self.base.type
        specific_types = []
        positions = []
        if self.index.is_name or self.index.is_attribute:
            positions.append(self.index.pos)
        elif isinstance(self.index, TupleNode):
            for arg in self.index.args:
                positions.append(arg.pos)
        specific_types = self.parse_index_as_types(env, required=False)
        if specific_types is None:
            # Index is not a type: only valid for cpdef functions, which can
            # also be indexed as ordinary Python objects via their entry.
            self.index = self.index.analyse_types(env)
            if not self.base.entry.as_variable:
                error(self.pos, "Can only index fused functions with types")
            else:
                # A cpdef function indexed with Python objects
                self.base.entry = self.entry = self.base.entry.as_variable
                self.base.type = self.type = self.entry.type
                self.base.is_temp = True
                self.is_temp = True
                self.entry.used = True
            self.is_fused_index = False
            return
        for i, type in enumerate(specific_types):
            specific_types[i] = type.specialize_fused(env)
        fused_types = base_type.get_fused_types()
        if len(specific_types) > len(fused_types):
            return error(self.pos, "Too many types specified")
        elif len(specific_types) < len(fused_types):
            t = fused_types[len(specific_types)]
            return error(self.pos, "Not enough types specified to specialize "
                                   "the function, %s is still fused" % t)
        # See if our index types form valid specializations
        for pos, specific_type, fused_type in zip(positions,
                                                  specific_types,
                                                  fused_types):
            if not any([specific_type.same_as(t) for t in fused_type.types]):
                return error(pos, "Type not in fused type")
            if specific_type is None or specific_type.is_error:
                return
        fused_to_specific = dict(zip(fused_types, specific_types))
        type = base_type.specialize(fused_to_specific)
        if type.is_fused:
            # Only partially specific, this is invalid
            error(self.pos,
                  "Index operation makes function only partially specific")
        else:
            # Fully specific, find the signature with the specialized entry
            for signature in self.base.type.get_all_specialized_function_types():
                if type.same_as(signature):
                    self.type = signature
                    if self.base.is_attribute:
                        # Pretend to be a normal attribute, for cdef extension
                        # methods
                        self.entry = signature.entry
                        self.is_attribute = True
                        self.obj = self.base.obj
                    self.type.entry.used = True
                    self.base.type = signature
                    self.base.entry = signature.entry
                    break
            else:
                # This is a bug
                raise InternalError("Couldn't find the right signature")
    gil_message = "Indexing Python object"
    def nogil_check(self, env):
        """Validate use of this index expression in a nogil section.

        Buffer/memoryview indexing is permitted without the GIL unless the
        dtype is a Python object; everything else falls back to the generic
        ExprNode check, which errors using gil_message above.
        """
        if self.is_buffer_access or self.memslice_index or self.memslice_slice:
            if not self.memslice_slice and env.directives['boundscheck']:
                # error(self.pos, "Cannot check buffer index bounds without gil; "
                #                 "use boundscheck(False) directive")
                # Bounds checking needs the GIL for raising, so only warn here.
                warning(self.pos, "Use boundscheck(False) for faster access",
                        level=1)
            if self.type.is_pyobject:
                error(self.pos, "Cannot access buffer with object dtype without gil")
            return
        super(IndexNode, self).nogil_check(env)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
# Just about everything else returned by the index operator
# can be an lvalue.
return True
    def calculate_result_code(self):
        """Return the C expression string for a non-temp index result.

        Covers direct buffer access, memoryview copies, fast macro access
        into list/tuple/bytearray, C++ template function parameterization,
        ctuple field access, and plain C array/pointer indexing.
        """
        if self.is_buffer_access:
            return "(*%s)" % self.buffer_ptr_code
        elif self.is_memslice_copy:
            return self.base.result()
        elif self.base.type in (list_type, tuple_type, bytearray_type):
            # Use the non-checking CPython macros; safety was established
            # during type analysis (boundscheck/wraparound off, index >= 0).
            if self.base.type is list_type:
                index_code = "PyList_GET_ITEM(%s, %s)"
            elif self.base.type is tuple_type:
                index_code = "PyTuple_GET_ITEM(%s, %s)"
            elif self.base.type is bytearray_type:
                index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
            else:
                assert False, "unexpected base type in indexing: %s" % self.base.type
        elif self.base.type.is_cfunction:
            # Template function specialization: emit f<T1,T2,...>.
            return "%s<%s>" % (
                self.base.result(),
                ",".join([param.empty_declaration_code() for param in self.type_indices]))
        elif self.base.type.is_ctuple:
            index = self.index.constant_result
            if index < 0:
                index += self.base.type.size
            return "%s.f%s" % (self.base.result(), index)
        else:
            if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
                error(self.pos, "Invalid use of pointer slice")
                return
            index_code = "(%s[%s])"
        return index_code % (self.base.result(), self.index.result())
    def extra_index_params(self, code):
        """Return the extra C argument string for the __Pyx_*ItemInt helpers.

        For integer indices this encodes the index's C type, signedness,
        to-Python conversion function, and the is_list/wraparound/boundscheck
        flags the helper templates expect; for object indices it is empty.
        """
        if self.index.type.is_int:
            is_list = self.base.type is list_type
            # Wraparound handling can be skipped for provably non-negative
            # constant indices even when the directive is enabled.
            wraparound = (
                bool(code.globalstate.directives['wraparound']) and
                self.original_index_type.signed and
                not (isinstance(self.index.constant_result, (int, long))
                     and self.index.constant_result >= 0))
            boundscheck = bool(code.globalstate.directives['boundscheck'])
            return ", %s, %d, %s, %d, %d, %d" % (
                self.original_index_type.empty_declaration_code(),
                self.original_index_type.signed and 1 or 0,
                self.original_index_type.to_py_function,
                is_list, wraparound, boundscheck)
        else:
            return ""
def generate_subexpr_evaluation_code(self, code):
self.base.generate_evaluation_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_evaluation_code(code)
else:
for i in self.indices:
i.generate_evaluation_code(code)
def generate_subexpr_disposal_code(self, code):
self.base.generate_disposal_code(code)
if self.type_indices is not None:
pass
elif self.indices is None:
self.index.generate_disposal_code(code)
else:
for i in self.indices:
i.generate_disposal_code(code)
def free_subexpr_temps(self, code):
self.base.free_temps(code)
if self.indices is None:
self.index.free_temps(code)
else:
for i in self.indices:
i.free_temps(code)
    def generate_result_code(self, code):
        """Emit the C code that computes this index expression's value.

        Dispatches on the access kind: raw buffer / memoryview element,
        memoryview slice, or a temp result fetched through one of the
        __Pyx_GetItemInt* / PyObject_GetItem helper functions.
        """
        if self.is_buffer_access or self.memslice_index:
            buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
            if self.type.is_pyobject:
                # is_temp is True, so must pull out value and incref it.
                # NOTE: object temporary results for nodes are declared
                #       as PyObject *, so we need a cast
                code.putln("%s = (PyObject *) *%s;" % (self.temp_code,
                                                       self.buffer_ptr_code))
                code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code)
        elif self.memslice_slice:
            self.put_memoryviewslice_slice_code(code)
        elif self.is_temp:
            if self.type.is_pyobject:
                error_value = 'NULL'
                if self.index.type.is_int:
                    # Specialized int-index helpers for known sequence types.
                    if self.base.type is list_type:
                        function = "__Pyx_GetItemInt_List"
                    elif self.base.type is tuple_type:
                        function = "__Pyx_GetItemInt_Tuple"
                    else:
                        function = "__Pyx_GetItemInt"
                    code.globalstate.use_utility_code(
                        TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
                else:
                    if self.base.type is dict_type:
                        function = "__Pyx_PyDict_GetItem"
                        code.globalstate.use_utility_code(
                            UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
                    else:
                        function = "PyObject_GetItem"
            elif self.type.is_unicode_char and self.base.type is unicode_type:
                assert self.index.type.is_int
                function = "__Pyx_GetItemInt_Unicode"
                error_value = '(Py_UCS4)-1'
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
            elif self.base.type is bytearray_type:
                assert self.index.type.is_int
                assert self.type.is_int
                function = "__Pyx_GetItemInt_ByteArray"
                # '-1' doubles as the error return, see type analysis.
                error_value = '-1'
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
            else:
                assert False, "unexpected type %s and base type %s for indexing" % (
                    self.type, self.base.type)
            if self.index.type.is_int:
                index_code = self.index.result()
            else:
                index_code = self.index.py_result()
            code.putln(
                "%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % (
                    self.result(),
                    function,
                    self.base.py_result(),
                    index_code,
                    self.extra_index_params(code),
                    self.result(),
                    error_value,
                    code.error_goto(self.pos)))
            if self.type.is_pyobject:
                code.put_gotref(self.py_result())
    def generate_setitem_code(self, value_code, code):
        """Emit C code assigning *value_code* into base[index].

        Chooses between the integer-index helpers, PyDict_SetItem and the
        generic PyObject_SetItem, with error handling attached.
        """
        if self.index.type.is_int:
            if self.base.type is bytearray_type:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
                function = "__Pyx_SetItemInt_ByteArray"
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
                function = "__Pyx_SetItemInt"
            index_code = self.index.result()
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_SetItem"
            # It would seem that we could specialized lists/tuples, but that
            # shouldn't happen here.
            # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
            # index instead of an object, and bad conversion here would give
            # the wrong exception. Also, tuples are supposed to be immutable,
            # and raise a TypeError when trying to set their entries
            # (PyTuple_SetItem() is for creating new tuples from scratch).
            else:
                function = "PyObject_SetItem"
        code.putln(
            "if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % (
                function,
                self.base.py_result(),
                index_code,
                value_code,
                self.extra_index_params(code),
                code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
    def generate_assignment_code(self, rhs, code):
        """Emit C code for 'base[index] = rhs', then dispose of operands.

        Memoryview scalar/slice assignments evaluate this whole node;
        all other forms only evaluate the subexpressions and dispatch to
        the appropriate buffer/object/bytearray/C setitem path.
        """
        generate_evaluation_code = (self.is_memslice_scalar_assignment or
                                    self.memslice_slice)
        if generate_evaluation_code:
            self.generate_evaluation_code(code)
        else:
            self.generate_subexpr_evaluation_code(code)
        if self.is_buffer_access or self.memslice_index:
            self.generate_buffer_setitem_code(rhs, code)
        elif self.is_memslice_scalar_assignment:
            self.generate_memoryviewslice_assign_scalar_code(rhs, code)
        elif self.memslice_slice or self.is_memslice_copy:
            self.generate_memoryviewslice_setslice_code(rhs, code)
        elif self.type.is_pyobject:
            self.generate_setitem_code(rhs.py_result(), code)
        elif self.base.type is bytearray_type:
            # Range-check the value before storing it as a byte.
            value_code = self._check_byte_value(code, rhs)
            self.generate_setitem_code(value_code, code)
        else:
            code.putln(
                "%s = %s;" % (
                    self.result(), rhs.result()))
        if generate_evaluation_code:
            self.generate_disposal_code(code)
        else:
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    def _check_byte_value(self, code, rhs):
        """Return a C expression for *rhs* validated as a byte (0..255).

        Constants are checked at compile time (warning only); runtime
        values get an emitted range check (skipped in nogil sections)
        and a cast to unsigned char where the C type requires it.
        """
        # TODO: should we do this generally on downcasts, or just here?
        assert rhs.type.is_int, repr(rhs.type)
        value_code = rhs.result()
        if rhs.has_constant_result():
            if 0 <= rhs.constant_result < 256:
                return value_code
            needs_cast = True  # make at least the C compiler happy
            warning(rhs.pos,
                    "value outside of range(0, 256)"
                    " when assigning to byte: %s" % rhs.constant_result,
                    level=1)
        else:
            needs_cast = rhs.type != PyrexTypes.c_uchar_type
        if not self.nogil:
            conditions = []
            if rhs.is_literal or rhs.type.signed:
                conditions.append('%s < 0' % value_code)
            if (rhs.is_literal or not
                    (rhs.is_temp and rhs.type in (
                         PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
                         PyrexTypes.c_schar_type))):
                conditions.append('%s > 255' % value_code)
            if conditions:
                code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
                code.putln(
                    'PyErr_SetString(PyExc_ValueError,'
                    ' "byte must be in range(0, 256)"); %s' %
                    code.error_goto(self.pos))
                code.putln("}")
        if needs_cast:
            value_code = '((unsigned char)%s)' % value_code
        return value_code
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit C code for 'del base[index]' with error handling.

        Uses the integer-index helper, PyDict_DelItem for dicts, or the
        generic PyObject_DelItem otherwise.
        """
        self.generate_subexpr_evaluation_code(code)
        #if self.type.is_pyobject:
        if self.index.type.is_int:
            function = "__Pyx_DelItemInt"
            index_code = self.index.result()
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_DelItem"
            else:
                function = "PyObject_DelItem"
        code.putln(
            "if (%s(%s, %s%s) < 0) %s" % (
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(code),
                code.error_goto(self.pos)))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
    def buffer_entry(self):
        """Build a Buffer/MemoryView entry wrapper for the base expression.

        Unwraps a None-check node first; for non-name bases a synthetic
        Symtab entry is created around the base's C result name.
        """
        from . import Buffer, MemoryView
        base = self.base
        if self.base.is_nonecheck:
            base = base.arg
        if base.is_name:
            entry = base.entry
        else:
            # SimpleCallNode is_simple is not consistent with coerce_to_simple
            assert base.is_simple() or base.is_temp
            cname = base.result()
            entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)
        if entry.type.is_buffer:
            buffer_entry = Buffer.BufferEntry(entry)
        else:
            buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)
        return buffer_entry
    def buffer_lookup_code(self, code):
        """ndarray[1, 2, 3] and memslice[1, 2, 3]

        Assign each index to a temp, then emit the element-pointer lookup.
        Returns (buffer_entry, C expression for the element pointer).
        """
        # Assign indices to temps
        index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
                       for i in self.indices]
        for temp, index in zip(index_temps, self.indices):
            code.putln("%s = %s;" % (temp, index.result()))
        # Generate buffer access code using these temps
        from . import Buffer
        buffer_entry = self.buffer_entry()
        if buffer_entry.type.is_buffer:
            negative_indices = buffer_entry.type.negative_indices
        else:
            negative_indices = Buffer.buffer_defaults['negative_indices']
        return buffer_entry, Buffer.put_buffer_lookup_code(
            entry=buffer_entry,
            index_signeds=[i.type.signed for i in self.indices],
            index_cnames=index_temps,
            directives=code.globalstate.directives,
            pos=self.pos, code=code,
            negative_indices=negative_indices,
            in_nogil_context=self.in_nogil_context)
    def put_memoryviewslice_slice_code(self, code):
        """memslice[:]

        Re-attach the analysed index nodes (held flat in self.indices) to
        the start/stop/step slots of the original slice nodes, then emit
        the slicing code for the memoryview.
        """
        buffer_entry = self.buffer_entry()
        have_gil = not self.in_nogil_context
        have_slices = False
        it = iter(self.indices)
        for index in self.original_indices:
            is_slice = isinstance(index, SliceNode)
            have_slices = have_slices or is_slice
            if is_slice:
                if not index.start.is_none:
                    index.start = next(it)
                if not index.stop.is_none:
                    index.stop = next(it)
                if not index.step.is_none:
                    index.step = next(it)
            else:
                next(it)
        assert not list(it)
        buffer_entry.generate_buffer_slice_code(code, self.original_indices,
                                                self.result(),
                                                have_gil=have_gil,
                                                have_slices=have_slices,
                                                directives=code.globalstate.directives)
    def generate_memoryviewslice_setslice_code(self, rhs, code):
        "memslice1[...] = memslice2 or memslice1[:] = memslice2"
        from . import MemoryView
        MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
    def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
        "memslice1[...] = 0.0 or memslice1[:] = 0.0"
        from . import MemoryView
        MemoryView.assign_scalar(self, rhs, code)
class SliceIndexNode(ExprNode):
    """2-element slice indexing: base[start:stop] (no step).

    Attributes:
      base   ExprNode
      start  ExprNode or None
      stop   ExprNode or None
      slice  ExprNode or None -- cached constant slice object
    """
    subexprs = ['base', 'start', 'stop', 'slice']
    slice = None
    def infer_type(self, env):
        """Infer the slice result type from the (inferred) base type."""
        base_type = self.base.infer_type(env)
        if base_type.is_string or base_type.is_cpp_class:
            return bytes_type
        elif base_type.is_pyunicode_ptr:
            return unicode_type
        elif base_type in (bytes_type, str_type, unicode_type,
                           basestring_type, list_type, tuple_type):
            return base_type
        elif base_type.is_ptr or base_type.is_array:
            return PyrexTypes.c_array_type(base_type.base_type, None)
        return py_object_type
    def inferable_item_node(self, index=0):
        # slicing shouldn't change the result type of the base, but the index might
        if index is not not_a_constant and self.start:
            if self.start.has_constant_result():
                index += self.start.constant_result
            else:
                index = not_a_constant
        return self.base.inferable_item_node(index)
    def may_be_none(self):
        """Slicing a known sequence/string type can never yield None."""
        base_type = self.base.type
        if base_type:
            if base_type.is_string:
                return False
            if base_type in (bytes_type, str_type, unicode_type,
                             basestring_type, list_type, tuple_type):
                return False
        return ExprNode.may_be_none(self)
    def calculate_constant_result(self):
        """Fold the slice at compile time when base/start/stop are constant."""
        if self.start is None:
            start = None
        else:
            start = self.start.constant_result
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.constant_result
        self.constant_result = self.base.constant_result[start:stop]
    def compile_time_value(self, denv):
        """Evaluate the slice in the compile-time environment (DEF/IF)."""
        base = self.base.compile_time_value(denv)
        if self.start is None:
            start = 0
        else:
            start = self.start.compile_time_value(denv)
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.compile_time_value(denv)
        try:
            return base[start:stop]
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_target_declaration(self, env):
        """Slice targets declare nothing; intentionally a no-op."""
        pass
    def analyse_target_types(self, env):
        node = self.analyse_types(env, getting=False)
        # when assigning, we must accept any Python type
        if node.type.is_pyobject:
            node.type = py_object_type
        return node
    def analyse_types(self, env, getting=True):
        """Determine the result type and coerce start/stop accordingly.

        Memoryview bases are re-routed through IndexNode with a SliceNode
        index; C arrays/pointers/strings get specialized C types; anything
        else is coerced to a Python object slice, optionally caching a
        constant slice object for literal bounds.
        """
        self.base = self.base.analyse_types(env)
        if self.base.type.is_memoryviewslice:
            none_node = NoneNode(self.pos)
            index = SliceNode(self.pos,
                              start=self.start or none_node,
                              stop=self.stop or none_node,
                              step=none_node)
            index_node = IndexNode(self.pos, index, base=self.base)
            return index_node.analyse_base_and_index_types(
                env, getting=getting, setting=not getting,
                analyse_base=False)
        if self.start:
            self.start = self.start.analyse_types(env)
        if self.stop:
            self.stop = self.stop.analyse_types(env)
        if not env.directives['wraparound']:
            check_negative_indices(self.start, self.stop)
        base_type = self.base.type
        if base_type.is_array and not getting:
            # cannot assign directly to C array => try to assign by making a copy
            if not self.start and not self.stop:
                self.type = base_type
            else:
                self.type = PyrexTypes.CPtrType(base_type.base_type)
        elif base_type.is_string or base_type.is_cpp_string:
            self.type = default_str_type(env)
        elif base_type.is_pyunicode_ptr:
            self.type = unicode_type
        elif base_type.is_ptr:
            self.type = base_type
        elif base_type.is_array:
            # we need a ptr type here instead of an array type, as
            # array types can result in invalid type casts in the C
            # code
            self.type = PyrexTypes.CPtrType(base_type.base_type)
        else:
            self.base = self.base.coerce_to_pyobject(env)
            self.type = py_object_type
        if base_type.is_builtin_type:
            # slicing builtin types returns something of the same type
            self.type = base_type
            self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
        if self.type is py_object_type:
            if (not self.start or self.start.is_literal) and \
                    (not self.stop or self.stop.is_literal):
                # cache the constant slice object, in case we need it
                none_node = NoneNode(self.pos)
                self.slice = SliceNode(
                    self.pos,
                    start=copy.deepcopy(self.start or none_node),
                    stop=copy.deepcopy(self.stop or none_node),
                    step=none_node
                ).analyse_types(env)
        else:
            c_int = PyrexTypes.c_py_ssize_t_type
            if self.start:
                self.start = self.start.coerce_to(c_int, env)
            if self.stop:
                self.stop = self.stop.coerce_to(c_int, env)
        self.is_temp = 1
        return self
    nogil_check = Node.gil_error
    gil_message = "Slicing Python object"
    get_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Get'})
    set_slice_utility_code = TempitaUtilityCode.load(
        "SliceObject", "ObjectHandling.c", context={'access': 'Set'})
    def coerce_to(self, dst_type, env):
        """Allow direct coercion of C string slices to Python string types,
        and skip redundant full-array copies between equal C array types."""
        if ((self.base.type.is_string or self.base.type.is_cpp_string)
                and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
            if (dst_type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(self.pos,
                    "default encoding required for conversion from '%s' to '%s'" %
                    (self.base.type, dst_type))
            self.type = dst_type
        if dst_type.is_array and self.base.type.is_array:
            if not self.start and not self.stop:
                # redundant slice building, copy C arrays directly
                return self.base.coerce_to(dst_type, env)
            # else: check array size if possible
        return super(SliceIndexNode, self).coerce_to(dst_type, env)
    def generate_result_code(self, code):
        """Emit the C code computing the slice value for each base kind:
        C char*/Py_UNICODE* buffers, unicode objects, generic objects via
        __Pyx_PyObject_GetSlice, or the list/tuple fast paths."""
        if not self.type.is_pyobject:
            error(self.pos,
                "Slicing is not currently supported for '%s'." % self.type)
            return
        base_result = self.base.result()
        result = self.result()
        start_code = self.start_code()
        stop_code = self.stop_code()
        if self.base.type.is_string:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_char_ptr_type:
                base_result = '((const char*)%s)' % base_result
            if self.type is bytearray_type:
                type_name = 'ByteArray'
            else:
                type_name = self.type.name.title()
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
                        result,
                        type_name,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type.is_pyunicode_ptr:
            base_result = self.base.result()
            if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
                base_result = '((const Py_UNICODE*)%s)' % base_result
            if self.stop is None:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
            else:
                code.putln(
                    "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
                        result,
                        base_result,
                        start_code,
                        stop_code,
                        start_code,
                        code.error_goto_if_null(result, self.pos)))
        elif self.base.type is unicode_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
            code.putln(
                "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
                    result,
                    base_result,
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        elif self.type is py_object_type:
            code.globalstate.use_utility_code(self.get_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.putln(
                "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
                    result,
                    self.base.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound']),
                    code.error_goto_if_null(result, self.pos)))
        else:
            if self.base.type is list_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyList_GetSlice'
            elif self.base.type is tuple_type:
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
                cfunc = '__Pyx_PyTuple_GetSlice'
            else:
                cfunc = 'PySequence_GetSlice'
            code.putln(
                "%s = %s(%s, %s, %s); %s" % (
                    result,
                    cfunc,
                    self.base.py_result(),
                    start_code,
                    stop_code,
                    code.error_goto_if_null(result, self.pos)))
        code.put_gotref(self.py_result())
    def generate_assignment_code(self, rhs, code):
        """Emit C code for 'base[start:stop] = rhs'.

        Python objects go through __Pyx_PyObject_SetSlice; C arrays are
        copied with memcpy after an optional length guard.
        """
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            code.globalstate.use_utility_code(self.set_slice_utility_code)
            (has_c_start, has_c_stop, c_start, c_stop,
             py_start, py_stop, py_slice) = self.get_slice_config()
            code.put_error_if_neg(self.pos,
                "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                    self.base.py_result(),
                    rhs.py_result(),
                    c_start, c_stop,
                    py_start, py_stop, py_slice,
                    has_c_start, has_c_stop,
                    bool(code.globalstate.directives['wraparound'])))
        else:
            start_offset = self.start_code() if self.start else '0'
            if rhs.type.is_array:
                array_length = rhs.type.size
                self.generate_slice_guard_code(code, array_length)
            else:
                array_length = '%s - %s' % (self.stop_code(), start_offset)
            code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
            code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
                self.base.result(), start_offset,
                rhs.result(),
                self.base.result(), array_length
            ))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit C code for 'del base[start:stop]' (Python objects only)."""
        if not self.base.type.is_pyobject:
            error(self.pos,
                "Deleting slices is only supported for Python types, not '%s'." % self.type)
            return
        self.generate_subexpr_evaluation_code(code)
        code.globalstate.use_utility_code(self.set_slice_utility_code)
        (has_c_start, has_c_stop, c_start, c_stop,
         py_start, py_stop, py_slice) = self.get_slice_config()
        code.put_error_if_neg(self.pos,
            "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
                self.base.py_result(),
                c_start, c_stop,
                py_start, py_stop, py_slice,
                has_c_start, has_c_stop,
                bool(code.globalstate.directives['wraparound'])))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
    def get_slice_config(self):
        """Return the 7-tuple of C arguments shared by the slice helpers:
        (has_c_start, has_c_stop, c_start, c_stop, py_start, py_stop,
        py_slice) -- each bound is passed either as a C value or as a
        pointer to its Python object, plus the cached slice if any."""
        has_c_start, c_start, py_start = False, '0', 'NULL'
        if self.start:
            has_c_start = not self.start.type.is_pyobject
            if has_c_start:
                c_start = self.start.result()
            else:
                py_start = '&%s' % self.start.py_result()
        has_c_stop, c_stop, py_stop = False, '0', 'NULL'
        if self.stop:
            has_c_stop = not self.stop.type.is_pyobject
            if has_c_stop:
                c_stop = self.stop.result()
            else:
                py_stop = '&%s' % self.stop.py_result()
        py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
        return (has_c_start, has_c_stop, c_start, c_stop,
                py_start, py_stop, py_slice)
    def generate_slice_guard_code(self, code, target_size):
        """Emit a length check before assigning an array into a C array slice.

        Resolves as much of the slice length as possible at compile time
        (constant start/stop, known array size) and reports a compile-time
        error for provably wrong lengths; otherwise emits a runtime check
        that raises ValueError on mismatch.
        """
        if not self.base.type.is_array:
            return
        slice_size = self.base.type.size
        try:
            total_length = slice_size = int(slice_size)
        except ValueError:
            total_length = None
        start = stop = None
        if self.stop:
            stop = self.stop.result()
            try:
                stop = int(stop)
                if stop < 0:
                    if total_length is None:
                        slice_size = '%s + %d' % (slice_size, stop)
                    else:
                        slice_size += stop
                else:
                    slice_size = stop
                stop = None
            except ValueError:
                pass
        if self.start:
            start = self.start.result()
            try:
                start = int(start)
                if start < 0:
                    if total_length is None:
                        start = '%s + %d' % (self.base.type.size, start)
                    else:
                        start += total_length
                if isinstance(slice_size, (int, long)):
                    slice_size -= start
                else:
                    slice_size = '%s - (%s)' % (slice_size, start)
                start = None
            except ValueError:
                pass
        runtime_check = None
        compile_time_check = False
        try:
            int_target_size = int(target_size)
        except ValueError:
            int_target_size = None
        else:
            compile_time_check = isinstance(slice_size, (int, long))
        if compile_time_check and slice_size < 0:
            if int_target_size > 0:
                error(self.pos, "Assignment to empty slice.")
        elif compile_time_check and start is None and stop is None:
            # we know the exact slice length
            if int_target_size != slice_size:
                error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
                      slice_size, target_size))
        elif start is not None:
            if stop is None:
                stop = slice_size
            runtime_check = "(%s)-(%s)" % (stop, start)
        elif stop is not None:
            runtime_check = stop
        else:
            runtime_check = slice_size
        if runtime_check:
            code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
            code.putln(
                'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
                ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
                ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
                    target_size, runtime_check))
            code.putln(code.error_goto(self.pos))
            code.putln("}")
    def start_code(self):
        """C expression for the slice start, defaulting to 0."""
        if self.start:
            return self.start.result()
        else:
            return "0"
    def stop_code(self):
        """C expression for the slice stop; a C array's size if known,
        PY_SSIZE_T_MAX otherwise."""
        if self.stop:
            return self.stop.result()
        elif self.base.type.is_array:
            return self.base.type.size
        else:
            return "PY_SSIZE_T_MAX"
    def calculate_result_code(self):
        # self.result() is not used, but this method must exist
        return "<unused>"
class SliceNode(ExprNode):
    """start:stop:step in a subscript list -- a Python slice object.

    Attributes:
      start  ExprNode
      stop   ExprNode
      step   ExprNode
    """
    subexprs = ['start', 'stop', 'step']
    type = slice_type
    is_temp = 1
    def calculate_constant_result(self):
        """Fold into a real slice() when all three bounds are constant."""
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)
    def compile_time_value(self, denv):
        """Evaluate the slice in the compile-time environment (DEF/IF)."""
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception, e:
            self.compile_time_value_error(e)
    def may_be_none(self):
        """A freshly constructed slice object is never None."""
        return False
    def analyse_types(self, env):
        """Coerce start/stop/step to Python objects; a slice with all
        literal bounds becomes a cached module-level constant."""
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            self.is_literal = True
            self.is_temp = False
        return self
    gil_message = "Constructing Python slice object"
    def calculate_result_code(self):
        return self.result_code
    def generate_result_code(self, code):
        """Emit PySlice_New(); literal slices are created once in the
        cached-constants section and given away to the module state."""
        if self.is_literal:
            self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
            code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            code.put_giveref(self.py_result())
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
    def infer_type(self, env):
        """Infer the call's result type without a full analysis pass.

        Resolves C function return types, C++ 'new' expressions, and calls
        to extension/builtin type constructors; falls back to object.
        """
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(function, NewExprNode):
            # note: needs call to infer_type() above
            return PyrexTypes.CPtrType(function.class_type)
        if func_type is py_object_type:
            # function might have lied for safety => try to find better type
            entry = getattr(function, 'entry', None)
            if entry is not None:
                func_type = entry.type or func_type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    # float() is special-cased to a C double result.
                    if function.entry.name == 'float':
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type
def type_dependencies(self, env):
# TODO: Update when Danilo's C++ code merged in to handle the
# the case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
# C function calls could be considered simple, but they may
# have side-effects that may hit when multiple operations must
# be effected in order, e.g. when constructing the argument
# sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.empty_declaration_code())
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
    # Function call without keyword, * or ** args.
    #
    # function       ExprNode
    # args           [ExprNode]
    # arg_tuple      ExprNode or None     used internally
    # self           ExprNode or None     used internally
    # coerced_self   ExprNode or None     used internally
    # wrapper_call   bool                 used internally
    # has_optional_args  bool             used internally
    # nogil          bool                 used internally
    subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
    self = None
    coerced_self = None
    arg_tuple = None
    wrapper_call = False
    has_optional_args = False
    nogil = False
    analysed = False
    def compile_time_value(self, denv):
        # Evaluate the call at compile time (DEF/IF context).
        function = self.function.compile_time_value(denv)
        args = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return function(*args)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_as_type(self, env):
        # Support cython.pointer(T) used in a type position.
        attr = self.function.as_cython_attribute()
        if attr == 'pointer':
            if len(self.args) != 1:
                error(self.args.pos, "only one type allowed.")
            else:
                type = self.args[0].analyse_as_type(env)
                if not type:
                    error(self.args[0].pos, "Unknown type")
                else:
                    return PyrexTypes.CPtrType(type)
    def explicit_args_kwds(self):
        # Simple calls never carry keyword arguments.
        return self.args, None
    def analyse_types(self, env):
        if self.analyse_as_type_constructor(env):
            return self
        if self.analysed:
            return self
        self.analysed = True
        self.function.is_called = 1
        self.function = self.function.analyse_types(env)
        function = self.function
        if function.is_attribute and function.entry and function.entry.is_cmethod:
            # Take ownership of the object from which the attribute
            # was obtained, because we need to pass it as 'self'.
            self.self = function.obj
            function.obj = CloneNode(self.self)
        func_type = self.function_type()
        if func_type.is_pyobject:
            # Python call: pack the positional args into a tuple.
            self.arg_tuple = TupleNode(self.pos, args = self.args)
            self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
            self.args = None
            if func_type is Builtin.type_type and function.is_name and \
                   function.entry and \
                   function.entry.is_builtin and \
                   function.entry.name in Builtin.types_that_construct_their_instance:
                # calling a builtin type that returns a specific object type
                if function.entry.name == 'float':
                    # the following will come true later on in a transform
                    self.type = PyrexTypes.c_double_type
                    self.result_ctype = PyrexTypes.c_double_type
                else:
                    self.type = Builtin.builtin_types[function.entry.name]
                    self.result_ctype = py_object_type
                self.may_return_none = False
            elif function.is_name and function.type_entry:
                # We are calling an extension type constructor.  As
                # long as we do not support __new__(), the result type
                # is clear
                self.type = function.type_entry.type
                self.result_ctype = py_object_type
                self.may_return_none = False
            else:
                self.type = py_object_type
            self.is_temp = 1
        else:
            # C call: analyse arguments individually and match them
            # against the C signature.
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            self.analyse_c_function_call(env)
        return self
    def function_type(self):
        # Return the type of the function being called, coercing a function
        # pointer to a function if necessary. If the function has fused
        # arguments, return the specific type.
        func_type = self.function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        return func_type
    def analyse_c_function_call(self, env):
        """Match the arguments against the C function signature.

        Resolves overloads and fused specialisations, inserts None checks
        and argument coercions, and forces evaluation-order safety by
        temp-ifying arguments where needed (issuing a warning where the
        order would otherwise be undefined).
        """
        func_type = self.function.type
        if func_type is error_type:
            self.type = error_type
            return
        if func_type.is_cfunction and func_type.is_static_method:
            if self.self and self.self.type.is_extension_type:
                # To support this we'd need to pass self to determine whether
                # it was overloaded in Python space (possibly via a Cython
                # superclass turning a cdef method into a cpdef one).
                error(self.pos, "Cannot call a static method on an instance variable.")
            args = self.args
        elif self.self:
            args = [self.self] + self.args
        else:
            args = self.args
        if func_type.is_cpp_class:
            # Calling a C++ object => operator() overload resolution.
            overloaded_entry = self.function.type.scope.lookup("operator()")
            if overloaded_entry is None:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        elif hasattr(self.function, 'entry'):
            overloaded_entry = self.function.entry
        elif (isinstance(self.function, IndexNode) and
              self.function.is_fused_index):
            overloaded_entry = self.function.type.entry
        else:
            overloaded_entry = None
        if overloaded_entry:
            if self.function.type.is_fused:
                functypes = self.function.type.get_all_specialized_function_types()
                alternatives = [f.entry for f in functypes]
            else:
                alternatives = overloaded_entry.all_alternatives()
            entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
            if not entry:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
            entry.used = True
            self.function.entry = entry
            self.function.type = entry.type
            func_type = self.function_type()
        else:
            entry = None
            func_type = self.function_type()
            if not func_type.is_cfunction:
                error(self.pos, "Calling non-function type '%s'" % func_type)
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        # Check no. of args
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(args)
        if func_type.optional_arg_count and expected_nargs != actual_nargs:
            self.has_optional_args = 1
            self.is_temp = 1
        # check 'self' argument
        if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
            formal_arg = func_type.args[0]
            arg = args[0]
            if formal_arg.not_none:
                if self.self:
                    self.self = self.self.as_none_safe_node(
                        "'NoneType' object has no attribute '%s'",
                        error='PyExc_AttributeError',
                        format_args=[entry.name])
                else:
                    # unbound method
                    arg = arg.as_none_safe_node(
                        "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                        format_args=[entry.name, formal_arg.type.name])
            if self.self:
                if formal_arg.accept_builtin_subtypes:
                    arg = CMethodSelfCloneNode(self.self)
                else:
                    arg = CloneNode(self.self)
                arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
            elif formal_arg.type.is_builtin_type:
                # special case: unbound methods of builtins accept subtypes
                arg = arg.coerce_to(formal_arg.type, env)
                if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
                    arg.exact_builtin_type = False
            args[0] = arg
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(min(max_nargs, actual_nargs)):
            formal_arg = func_type.args[i]
            formal_type = formal_arg.type
            arg = args[i].coerce_to(formal_type, env)
            if formal_arg.not_none:
                # C methods must do the None checks at *call* time
                arg = arg.as_none_safe_node(
                    "cannot pass None into a C function argument that is declared 'not None'")
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if i == 0 and self.self is not None:
                    # a method's cloned "self" argument is ok
                    pass
                elif arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            args[i] = arg
        # handle additional varargs parameters
        for i in xrange(max_nargs, actual_nargs):
            arg = args[i]
            if arg.type.is_pyobject:
                # Python objects cannot travel through C varargs directly;
                # coerce to the default C type if one exists.
                arg_ctype = arg.type.default_coerced_ctype()
                if arg_ctype is None:
                    error(self.args[i].pos,
                          "Python object cannot be passed as a varargs parameter")
                else:
                    args[i] = arg = arg.coerce_to(arg_ctype, env)
            if arg.is_temp and i > 0:
                some_args_in_temps = True
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                if i == 0 and self.self is not None:
                    continue # self is ok
                arg = args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0 or i == 1 and self.self is not None: # skip first arg
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        self.args[:] = args
        # Calc result type and code fragment
        if isinstance(self.function, NewExprNode):
            self.type = PyrexTypes.CPtrType(self.function.class_type)
        else:
            self.type = func_type.return_type
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
            self.is_temp = 1
        elif func_type.exception_value is not None or func_type.exception_check:
            self.is_temp = 1
        elif self.type.is_memoryviewslice:
            self.is_temp = 1
            # func_type.exception_check = True
        if self.is_temp and self.type.is_reference:
            self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
        # Called in 'nogil' context?
        self.nogil = env.nogil
        if (self.nogil and
            func_type.exception_check and
            func_type.exception_check != '+'):
            env.use_utility_code(pyerr_occurred_withgil_utility_code)
        # C++ exception handler
        if func_type.exception_check == '+':
            if func_type.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
    def calculate_result_code(self):
        return self.c_call_code()
    def c_call_code(self):
        """Build the C call expression 'func(arg1, arg2, ...)'."""
        func_type = self.function_type()
        if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
            return "<error>"
        formal_args = func_type.args
        arg_list_code = []
        args = list(zip(formal_args, self.args))
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        for formal_arg, actual_arg in args[:expected_nargs]:
            arg_code = actual_arg.result_as(formal_arg.type)
            arg_list_code.append(arg_code)
        if func_type.is_overridable:
            # cpdef functions take an extra flag to skip the Python dispatch
            arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
        if func_type.optional_arg_count:
            # Optional args travel in a struct (or NULL if all defaulted).
            if expected_nargs == actual_nargs:
                optional_args = 'NULL'
            else:
                optional_args = "&%s" % self.opt_arg_struct
            arg_list_code.append(optional_args)
        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result())
        result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
        return result
    def is_c_result_required(self):
        func_type = self.function_type()
        if not func_type.exception_value or func_type.exception_check == '+':
            return False # skip allocation of unused result temp
        return True
    def generate_result_code(self, code):
        """Emit the C code for the call, including Python-call packing,
        optional-argument structs, error checks and C++ exception
        translation."""
        func_type = self.function_type()
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                code.globalstate.use_utility_code(self.function.entry.utility_code)
        if func_type.is_pyobject:
            if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
                # No-argument call: avoid building an empty tuple.
                code.globalstate.use_utility_code(UtilityCode.load_cached(
                    "PyObjectCallNoArg", "ObjectHandling.c"))
                code.putln(
                    "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                        self.result(),
                        self.function.py_result(),
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                arg_code = self.arg_tuple.py_result()
                code.globalstate.use_utility_code(UtilityCode.load_cached(
                    "PyObjectCall", "ObjectHandling.c"))
                code.putln(
                    "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                        self.result(),
                        self.function.py_result(),
                        arg_code,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif func_type.is_cfunction:
            if self.has_optional_args:
                # Fill the optional-argument struct, starting with the
                # count of optional args actually supplied.
                actual_nargs = len(self.args)
                expected_nargs = len(func_type.args) - func_type.optional_arg_count
                self.opt_arg_struct = code.funcstate.allocate_temp(
                    func_type.op_arg_struct.base_type, manage_ref=True)
                code.putln("%s.%s = %s;" % (
                        self.opt_arg_struct,
                        Naming.pyrex_prefix + "n",
                        len(self.args) - expected_nargs))
                args = list(zip(func_type.args, self.args))
                for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                    code.putln("%s.%s = %s;" % (
                            self.opt_arg_struct,
                            func_type.opt_arg_cname(formal_arg.name),
                            actual_arg.result_as(formal_arg.type)))
            exc_checks = []
            if self.type.is_pyobject and self.is_temp:
                exc_checks.append("!%s" % self.result())
            elif self.type.is_memoryviewslice:
                assert self.is_temp
                exc_checks.append(self.type.error_condition(self.result()))
            else:
                exc_val = func_type.exception_value
                exc_check = func_type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result(), exc_val))
                if exc_check:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
            if self.is_temp or exc_checks:
                rhs = self.c_call_code()
                if self.result():
                    lhs = "%s = " % self.result()
                    if self.is_temp and self.type.is_pyobject:
                        #return_type = self.type # func_type.return_type
                        #print "SimpleCallNode.generate_result_code: casting", rhs, \
                        #    "from", return_type, "to pyobject" ###
                        rhs = typecast(py_object_type, self.type, rhs)
                else:
                    lhs = ""
                if func_type.exception_check == '+':
                    # Wrap the call in try/catch and translate C++
                    # exceptions into Python exceptions.
                    if func_type.exception_value is None:
                        raise_py_exception = "__Pyx_CppExn2PyErr();"
                    elif func_type.exception_value.type.is_pyobject:
                        raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
                            func_type.exception_value.entry.cname,
                            func_type.exception_value.entry.cname)
                    else:
                        raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
                    code.putln("try {")
                    code.putln("%s%s;" % (lhs, rhs))
                    code.putln("} catch(...) {")
                    if self.nogil:
                        # Need the GIL to raise the Python exception.
                        code.put_ensure_gil(declare_gilstate=True)
                    code.putln(raise_py_exception)
                    if self.nogil:
                        code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
                else:
                    if exc_checks:
                        goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                    else:
                        goto_error = ""
                    code.putln("%s%s; %s" % (lhs, rhs, goto_error))
                if self.type.is_pyobject and self.result():
                    code.put_gotref(self.py_result())
            if self.has_optional_args:
                code.funcstate.release_temp(self.opt_arg_struct)
class PyMethodCallNode(SimpleCallNode):
    # Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
    # Allows the self argument to be injected directly instead of repacking a tuple for it.
    #
    # function    ExprNode      the function/method object to call
    # arg_tuple   TupleNode     the arguments for the args tuple
    subexprs = ['function', 'arg_tuple']
    is_temp = True
    def generate_evaluation_code(self, code):
        """Emit C code that unwraps a bound method at runtime.

        If the callee turns out to be a PyMethodObject (checked in the
        generated C, CPython only), its __self__ is extracted and passed
        as an extra leading argument instead of being re-packed into a
        new bound call, avoiding a tuple allocation for 0/1-arg calls.
        """
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.function.generate_evaluation_code(code)
        assert self.arg_tuple.mult_factor is None
        args = self.arg_tuple.args
        for arg in args:
            arg.generate_evaluation_code(code)
        # make sure function is in temp so that we can replace the reference below if it's a method
        reuse_function_temp = self.function.is_temp
        if reuse_function_temp:
            function = self.function.result()
        else:
            function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            self.function.make_owned_reference(code)
            code.put("%s = %s; " % (function, self.function.py_result()))
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)
        self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln("%s = NULL;" % self_arg)
        arg_offset_cname = None
        if len(args) > 1:
            # Runtime offset (0 or 1) for positioning args after 'self'.
            arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln("%s = 0;" % arg_offset_cname)
        def attribute_is_likely_method(attr):
            # Heuristic only: guides the C-level likely()/unlikely() hint.
            obj = attr.obj
            if obj.is_name and obj.entry.is_pyglobal:
                return False # more likely to be a function
            return True
        if self.function.is_attribute:
            likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
        elif self.function.is_name and self.function.cf_state:
            # not an attribute itself, but might have been assigned from one (e.g. bound method)
            for assignment in self.function.cf_state:
                value = assignment.rhs
                if value and value.is_attribute and value.obj.type.is_pyobject:
                    if attribute_is_likely_method(value):
                        likely_method = 'likely'
                        break
            else:
                likely_method = 'unlikely'
        else:
            likely_method = 'unlikely'
        code.putln("if (CYTHON_COMPILING_IN_CPYTHON && %s(PyMethod_Check(%s))) {" % (likely_method, function))
        code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
        # the following is always true in Py3 (kept only for safety),
        # but is false for unbound methods in Py2
        code.putln("if (likely(%s)) {" % self_arg)
        code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
        code.put_incref(self_arg, py_object_type)
        code.put_incref("function", py_object_type)
        # free method object as early to possible to enable reuse from CPython's freelist
        code.put_decref_set(function, "function")
        if len(args) > 1:
            code.putln("%s = 1;" % arg_offset_cname)
        code.putln("}")
        code.putln("}")
        if not args:
            # fastest special case: try to avoid tuple creation
            code.putln("if (%s) {" % self_arg)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                    self.result(),
                    function, self_arg,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_decref_clear(self_arg, py_object_type)
            code.funcstate.release_temp(self_arg)
            code.putln("} else {")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                    self.result(),
                    function,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.putln("}")
            code.put_gotref(self.py_result())
        else:
            if len(args) == 1:
                # Single arg and no extracted self => one-arg fast path.
                code.putln("if (!%s) {" % self_arg)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
                arg = args[0]
                code.putln(
                    "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                        self.result(),
                        function, arg.py_result(),
                        code.error_goto_if_null(self.result(), self.pos)))
                arg.generate_disposal_code(code)
                code.put_gotref(self.py_result())
                code.putln("} else {")
                arg_offset = 1
            else:
                arg_offset = arg_offset_cname
            # General case: pack (self +) args into a fresh tuple.
            args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln("%s = PyTuple_New(%d+%s); %s" % (
                args_tuple, len(args), arg_offset,
                code.error_goto_if_null(args_tuple, self.pos)))
            code.put_gotref(args_tuple)
            if len(args) > 1:
                code.putln("if (%s) {" % self_arg)
            code.putln("PyTuple_SET_ITEM(%s, 0, %s); __Pyx_GIVEREF(%s); %s = NULL;" % (
                args_tuple, self_arg, self_arg, self_arg))  # stealing owned ref in this case
            code.funcstate.release_temp(self_arg)
            if len(args) > 1:
                code.putln("}")
            for i, arg in enumerate(args):
                arg.make_owned_reference(code)
                code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
                    args_tuple, i, arg_offset, arg.py_result()))
                code.put_giveref(arg.py_result())
            if len(args) > 1:
                code.funcstate.release_temp(arg_offset_cname)
            for arg in args:
                arg.generate_post_assignment_code(code)
                arg.free_temps(code)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                    self.result(),
                    function, args_tuple,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.put_decref_clear(args_tuple, py_object_type)
            code.funcstate.release_temp(args_tuple)
            if len(args) == 1:
                code.putln("}")
        if reuse_function_temp:
            self.function.generate_disposal_code(code)
            self.function.free_temps(code)
        else:
            code.put_decref_clear(function, py_object_type)
            code.funcstate.release_temp(function)
class InlinedDefNodeCallNode(CallNode):
    # Inline call to defnode
    #
    # Calls a Python 'def' function directly through its C implementation,
    # bypassing the Python-level call machinery.
    #
    # function       PyCFunctionNode
    # function_name  NameNode
    # args           [ExprNode]
    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None
    def can_be_inlined(self):
        # Only simple signatures qualify: no *args/**kwargs, no
        # keyword-only args, and an exact positional-argument match.
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        if func_type.num_kwonly_args:
            return False  # actually wrong number of arguments
        return True
    def analyse_types(self, env):
        """Coerce arguments to the def node's declared types and enforce
        a safe evaluation order (same temp-ifying logic as C calls)."""
        self.function_name = self.function_name.analyse_types(env)
        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self
    def generate_result_code(self, code):
        # Emit a direct C call to the def node's implementation function.
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
    # Reference to a C-API (or utility) function by cname, optionally
    # pulling in the utility code that defines it.
    subexprs = []
    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        ExprNode.__init__(self, pos, name=py_name, cname=cname,
                          type=func_type, utility_code=utility_code)
    def analyse_types(self, env):
        # Fully typed at construction time; nothing to analyse.
        return self
    def generate_result_code(self, code):
        # Ensure the function's defining utility code is emitted.
        if self.utility_code:
            code.globalstate.use_utility_code(self.utility_code)
    def calculate_result_code(self):
        return self.cname
class PythonCapiCallNode(SimpleCallNode):
    # Python C-API Function call (only created in transforms)
    # By default, we assume that the call never returns None, as this
    # is true for most C-API functions in CPython.  If this does not
    # apply to a call, set the following to True (or None to inherit
    # the default behaviour).
    may_return_none = False
    def __init__(self, pos, function_name, func_type,
                 utility_code = None, py_name=None, **kwargs):
        self.type = func_type.return_type
        self.result_ctype = self.type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code = utility_code)
        # call this last so that we can override the constructed
        # attributes above with explicit keyword arguments if required
        SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception, e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
not isinstance(self.positional_args, TupleNode)):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not isinstance(self.keyword_args, DictNode):
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
    def generate_result_code(self, code):
        # Emit C code that performs the call through the generic Python
        # call protocol (__Pyx_PyObject_Call), passing the positional
        # argument tuple and, if present, the keyword argument dict.
        if self.type.is_error: return
        if self.keyword_args:
            kwargs = self.keyword_args.py_result()
        else:
            # no keyword dict => pass NULL as the kwargs argument
            kwargs = 'NULL'
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
    #  Convert argument to tuple. Used for normalising
    #  the * argument of a function call.
    #
    #  arg    ExprNode

    subexprs = ['arg']

    def calculate_constant_result(self):
        # fold the wrapped constant into a tuple at compile time
        self.constant_result = tuple(self.arg.constant_result)

    def compile_time_value(self, denv):
        arg = self.arg.compile_time_value(denv)
        try:
            return tuple(arg)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_types(self, env):
        # the argument must be a Python object before PySequence_Tuple()
        # can be applied to it
        self.arg = self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.type = tuple_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        # PySequence_Tuple() always returns a real tuple (or raises)
        return False

    nogil_check = Node.gil_error
    gil_message = "Constructing Python tuple"

    def generate_result_code(self, code):
        code.putln(
            "%s = PySequence_Tuple(%s); %s" % (
                self.result(),
                self.arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj          ExprNode
    #  attribute    string
    #  needs_none_check boolean        Used if obj is an extension type.
    #                                  If set to True, it is known that the type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr           boolean   Is a Python getattr operation
    #  member               string    C name of struct member
    #  is_called            boolean   Function call is being done on result
    #  entry                Entry     Symbol table entry of attribute

    is_attribute = 1
    subexprs = ['obj']

    type = PyrexTypes.error_type
    entry = None
    is_called = 0
    needs_none_check = True
    is_memslice_transpose = False
    is_special_lookup = False

    def as_cython_attribute(self):
        # Return the qualified name of a "cython.*" attribute that this
        # node refers to, or None if it is not a cython module attribute.
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and not
                self.attribute == u"parallel"):
            return self.attribute
        cy = self.obj.as_cython_attribute()
        if cy:
            return "%s.%s" % (cy, self.attribute)
        return None

    def coerce_to(self, dst_type, env):
        #  If coercing to a generic pyobject and this is a cpdef function
        #  we can create the corresponding attribute
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)

    def calculate_constant_result(self):
        attr = self.attribute
        # never constant-fold special "__*__" attributes
        if attr.startswith("__") and attr.endswith("__"):
            return
        self.constant_result = getattr(self.obj.constant_result, attr)

    def compile_time_value(self, denv):
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            error(self.pos,
                  "Invalid attribute name '%s' in compile-time expression" % attr)
            return None
        obj = self.obj.compile_time_value(denv)
        try:
            return getattr(obj, attr)
        except Exception, e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return self.obj.type_dependencies(env)

    def infer_type(self, env):
        # FIXME: this is way too redundant with analyse_types()
        node = self.analyse_as_cimported_attribute_node(env, target=False)
        if node is not None:
            return node.entry.type
        node = self.analyse_as_unbound_cmethod_node(env)
        if node is not None:
            return node.entry.type
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        return self.type

    def analyse_target_declaration(self, env):
        pass

    def analyse_target_types(self, env):
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node

    def analyse_types(self, env, target = 0):
        # Dispatch to the different interpretations of "obj.attribute":
        # a cimported C name, an unbound C method, or an ordinary
        # (C struct or Python) attribute lookup.
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_unbound_cmethod_node(env)
        if node is None:
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if node.entry:
            node.entry.used = True
        if node.is_attribute:
            node.wrap_obj_in_nonecheck(env)
        return node

    def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # the corresponding NameNode, otherwise returns None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                    entry.is_cglobal or entry.is_cfunction
                    or entry.is_type or entry.is_const):
                return self.as_name_node(env, entry, target)
        return None

    def analyse_as_unbound_cmethod_node(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type or builtin type.  If successful,
        # creates a corresponding NameNode and returns it, otherwise
        # returns None.
        if self.obj.is_string_literal:
            return
        type = self.obj.analyse_as_type(env)
        if type and (type.is_extension_type or type.is_builtin_type or type.is_cpp_class):
            entry = type.scope.lookup_here(self.attribute)
            if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
                if type.is_builtin_type:
                    if not self.is_called:
                        # must handle this as Python object
                        return None
                    ubcm_entry = entry
                else:
                    # Create a temporary entry describing the C method
                    # as an ordinary function.
                    if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
                        cname = entry.func_cname
                        if entry.type.is_static_method:
                            ctype = entry.type
                        elif type.is_cpp_class:
                            error(self.pos, "%s not a static member of %s" % (entry.name, type))
                            ctype = PyrexTypes.error_type
                        else:
                            # Fix self type.
                            ctype = copy.copy(entry.type)
                            ctype.args = ctype.args[:]
                            ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
                    else:
                        cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
                        ctype = entry.type
                    ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
                    ubcm_entry.is_cfunction = 1
                    ubcm_entry.func_cname = entry.func_cname
                    ubcm_entry.is_unbound_cmethod = 1
                return self.as_name_node(env, ubcm_entry, target=False)
        return None

    def analyse_as_type(self, env):
        # Try to interpret this attribute access as naming a type
        # (e.g. a type cimported from a module, or a nested type).
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            return module_scope.lookup_type(self.attribute)
        if not self.obj.is_string_literal:
            base_type = self.obj.analyse_as_type(env)
            if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
                return base_type.scope.lookup_type(self.attribute)
        return None

    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type:
                if entry.type.is_extension_type or entry.type.is_builtin_type:
                    return entry.type
        return None

    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None

    def as_name_node(self, env, entry, target):
        # Create a corresponding NameNode from this node and complete the
        # analyse_types phase.
        node = NameNode.from_node(self, name=self.attribute, entry=entry)
        if target:
            node = node.analyse_target_types(env)
        else:
            node = node.analyse_rvalue_entry(env)
        node.entry.used = 1
        return node

    def analyse_as_ordinary_attribute_node(self, env, target):
        # Analyse as a plain attribute lookup on an already analysed
        # object expression (struct member or Python attribute access).
        self.obj = self.obj.analyse_types(env)
        self.analyse_attribute(env)
        if self.entry and self.entry.is_cmethod and not self.is_called:
#            error(self.pos, "C method can only be called")
            pass
        ## Reference to C array turns into pointer to first element.
        #while self.type.is_array:
        #    self.type = self.type.element_ptr_type()
        if self.is_py_attr:
            if not target:
                self.is_temp = 1
                self.result_ctype = py_object_type
        elif target and self.obj.type.is_builtin_type:
            error(self.pos, "Assignment to an immutable object field")
        #elif self.type.is_memoryviewslice and not target:
        #    self.is_temp = True
        return self

    def analyse_attribute(self, env, obj_type = None):
        # Look up attribute and set self.type and self.member.
        immutable_obj = obj_type is not None # used during type inference
        self.is_py_attr = 0
        self.member = self.attribute
        if obj_type is None:
            if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
                self.obj = self.obj.coerce_to_pyobject(env)
            obj_type = self.obj.type
        else:
            if obj_type.is_string or obj_type.is_pyunicode_ptr:
                obj_type = py_object_type
        if obj_type.is_ptr or obj_type.is_array:
            obj_type = obj_type.base_type
            self.op = "->"
        elif obj_type.is_extension_type or obj_type.is_builtin_type:
            self.op = "->"
        elif obj_type.is_reference and obj_type.is_fake_reference:
            self.op = "->"
        else:
            self.op = "."
        if obj_type.has_attributes:
            if obj_type.attributes_known():
                # memoryview slices have special pseudo-attributes (e.g. "T")
                # that are not in their scope
                if (obj_type.is_memoryviewslice and not
                        obj_type.scope.lookup_here(self.attribute)):
                    if self.attribute == 'T':
                        self.is_memslice_transpose = True
                        self.is_temp = True
                        self.use_managed_ref = True
                        self.type = self.obj.type
                        return
                    else:
                        obj_type.declare_attribute(self.attribute, env, self.pos)
                entry = obj_type.scope.lookup_here(self.attribute)
                if entry and entry.is_member:
                    entry = None
            else:
                error(self.pos,
                      "Cannot select attribute of incomplete type '%s'"
                      % obj_type)
                self.type = PyrexTypes.error_type
                return
            self.entry = entry
            if entry:
                if obj_type.is_extension_type and entry.name == "__weakref__":
                    error(self.pos, "Illegal use of special attribute __weakref__")
                # def methods need the normal attribute lookup
                # because they do not have struct entries
                # fused function go through assignment synthesis
                # (foo = pycfunction(foo_func_obj)) and need to go through
                # regular Python lookup as well
                if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
                    self.type = entry.type
                    self.member = entry.cname
                    return
                else:
                    # If it's not a variable or C method, it must be a Python
                    # method of an extension type, so we treat it like a Python
                    # attribute.
                    pass
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
        # attribute reference.
        self.analyse_as_python_attribute(env, obj_type, immutable_obj)

    def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
        if obj_type is None:
            obj_type = self.obj.type
        # mangle private '__*' Python attributes used inside of a class
        self.attribute = env.mangle_class_private_name(self.attribute)
        self.member = self.attribute
        self.type = py_object_type
        self.is_py_attr = 1
        if not obj_type.is_pyobject and not obj_type.is_error:
            if obj_type.can_coerce_to_pyobject(env):
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
                    and self.obj.entry.as_variable
                    and self.obj.entry.as_variable.type.is_pyobject):
                # might be an optimised builtin function => unpack it
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            else:
                error(self.pos,
                      "Object of type '%s' has no attribute '%s'" %
                      (obj_type, self.attribute))

    def wrap_obj_in_nonecheck(self, env):
        # Insert a None check around the object expression when the
        # 'nonecheck' directive is active and the access could crash
        # on a None value.
        if not env.directives['nonecheck']:
            return
        msg = None
        format_args = ()
        if (self.obj.type.is_extension_type and self.needs_none_check and not
                self.is_py_attr):
            msg = "'NoneType' object has no attribute '%s'"
            format_args = (self.attribute,)
        elif self.obj.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                msg = "Cannot transpose None memoryview slice"
            else:
                entry = self.obj.type.scope.lookup_here(self.attribute)
                if entry:
                    # copy/is_c_contig/shape/strides etc
                    msg = "Cannot access '%s' attribute of None memoryview slice"
                    format_args = (entry.name,)
        if msg:
            self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
                                                  format_args=format_args)

    def nogil_check(self, env):
        if self.is_py_attr:
            self.gil_error()
        elif self.type.is_memoryviewslice:
            from . import MemoryView
            MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')

    gil_message = "Accessing Python attribute"

    def is_simple(self):
        if self.obj:
            return self.result_in_temp() or self.obj.is_simple()
        else:
            return NameNode.is_simple(self)

    def is_lvalue(self):
        if self.obj:
            return True
        else:
            return NameNode.is_lvalue(self)

    def is_ephemeral(self):
        if self.obj:
            return self.obj.is_ephemeral()
        else:
            return NameNode.is_ephemeral(self)

    def calculate_result_code(self):
        #print "AttributeNode.calculate_result_code:", self.member ###
        #print "...obj node =", self.obj, "code", self.obj.result() ###
        #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
        obj = self.obj
        obj_code = obj.result_as(obj.type)
        #print "...obj_code =", obj_code ###
        if self.entry and self.entry.is_cmethod:
            if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
                if self.entry.final_func_cname:
                    return self.entry.final_func_cname
                if self.type.from_fused:
                    # If the attribute was specialized through indexing, make
                    # sure to get the right fused name, as our entry was
                    # replaced by our parent index node
                    # (AnalyseExpressionsTransform)
                    self.member = self.entry.cname
                # call the method through the vtable slot
                return "((struct %s *)%s%s%s)->%s" % (
                    obj.type.vtabstruct_cname, obj_code, self.op,
                    obj.type.vtabslot_cname, self.member)
            elif self.result_is_used:
                return self.member
            # Generating no code at all for unused access to optimised builtin
            # methods fixes the problem that some optimisations only exist as
            # macros, i.e. there is no function pointer to them, so we would
            # generate invalid C code here.
            return
        elif obj.type.is_complex:
            return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
        else:
            if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
                # accessing a field of a builtin type, need to cast better than result_as() does
                obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
            return "%s%s%s" % (obj_code, self.op, self.member)

    def generate_result_code(self, code):
        if self.is_py_attr:
            if self.is_special_lookup:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_LookupSpecial'
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_GetAttrStr'
            code.putln(
                '%s = %s(%s, %s); %s' % (
                    self.result(),
                    lookup_func_name,
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                # transpose the slice
                for access, packing in self.type.axes:
                    if access == 'ptr':
                        error(self.pos, "Transposing not supported for slices "
                                        "with indirect dimensions")
                        return
                code.putln("%s = %s;" % (self.result(), self.obj.result()))
                if self.obj.is_name or (self.obj.is_attribute and
                                        self.obj.is_memslice_transpose):
                    code.put_incref_memoryviewslice(self.result(), have_gil=True)
                T = "__pyx_memslice_transpose(&%s) == 0"
                code.putln(code.error_goto_if(T % self.result(), self.pos))
            elif self.initialized_check:
                code.putln(
                    'if (unlikely(!%s.memview)) {'
                    'PyErr_SetString(PyExc_AttributeError,'
                    '"Memoryview is not initialized");'
                    '%s'
                    '}' % (self.result(), code.error_goto(self.pos)))
        else:
            # result_code contains what is needed, but we may need to insert
            # a check and raise an exception
            if self.obj.type.is_extension_type:
                pass
            elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
                # C method implemented as function call with utility code
                code.globalstate.use_utility_code(self.entry.utility_code)

    def generate_disposal_code(self, code):
        if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
            # mirror condition for putting the memview incref here:
            if self.obj.is_name or (self.obj.is_attribute and
                                    self.obj.is_memslice_transpose):
                code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=True)
        else:
            ExprNode.generate_disposal_code(self, code)

    def generate_assignment_code(self, rhs, code):
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    rhs.py_result()))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        elif self.obj.type.is_complex:
            code.putln("__Pyx_SET_C%s(%s, %s);" % (
                self.member.upper(),
                self.obj.result_as(self.obj.type),
                rhs.result_as(self.ctype())))
        else:
            select_code = self.result()
            if self.type.is_pyobject and self.use_managed_ref:
                rhs.make_owned_reference(code)
                code.put_giveref(rhs.py_result())
                code.put_gotref(select_code)
                code.put_decref(select_code, self.ctype())
            elif self.type.is_memoryviewslice:
                from . import MemoryView
                MemoryView.put_assign_to_memviewslice(
                        select_code, rhs, rhs.result(), self.type, code)
            if not self.type.is_memoryviewslice:
                code.putln(
                    "%s = %s;" % (
                        select_code,
                        rhs.result_as(self.ctype())))
                        #rhs.result()))
            rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def generate_deletion_code(self, code, ignore_nonexisting=False):
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr or (self.entry.scope.is_property_scope
                               and u'__del__' in self.entry.scope.entries):
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)

    def annotate(self, code):
        if self.is_py_attr:
            style, text = 'py_attr', 'python attribute (%s)'
        else:
            style, text = 'c_attr', 'c attribute (%s)'
        code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
    #  A starred expression like "*a" on the left-hand side of a
    #  sequence assignment, e.g.
    #
    #    a, *b = (1,2,3,4)  =>  a = 1 ; b = [2,3,4]
    #
    #  Only valid as an assignment target; type analysis removes the
    #  wrapper again (or reports an error when the node shows up in any
    #  other position).
    #
    #  target    ExprNode    the wrapped target expression

    subexprs = ['target']

    is_temp = 1
    is_starred = 1
    type = py_object_type

    def __init__(self, pos, target):
        super(StarredTargetNode, self).__init__(pos)
        self.target = target

    def analyse_declarations(self, env):
        # a starred node outside of an assignment target is an error
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_declarations(env)

    def analyse_types(self, env):
        # same error as above; keep analysing to report further problems
        error(self.pos, "can use starred expression only as assignment target")
        self.target = self.target.analyse_types(env)
        self.type = self.target.type
        return self

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        self.target = self.target.analyse_target_types(env)
        self.type = self.target.type
        return self

    def calculate_result_code(self):
        return ""

    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
    #  Base class for list and tuple constructor nodes.
    #  Contains common code for performing sequence unpacking.
    #
    #  args                    [ExprNode]   the item expressions
    #  unpacked_items          [ExprNode] or None   temps holding unpacked values
    #  coerced_unpacked_items  [ExprNode] or None   the temps coerced to target types
    #  mult_factor             ExprNode     the integer number of content repetitions ([1,2]*3)

    subexprs = ['args', 'mult_factor']

    is_sequence_constructor = 1
    unpacked_items = None
    mult_factor = None
    slow = False  # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
# replace a starred node in the targets by the contained expression
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i in range(len(self.args)):
arg = self.args[i]
if not skip_children: arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
    def may_be_none(self):
        # a sequence constructor always yields a real object, never None
        return False
    def analyse_target_types(self, env):
        # Analyse this node as the target of a sequence assignment and
        # set up parallel lists of unpacking temps: unpacked_items[i]
        # receives the raw value for args[i], coerced_unpacked_items[i]
        # is the same temp coerced to the target's type.
        if self.mult_factor:
            error(self.pos, "can't assign to multiplied sequence")
        self.unpacked_items = []
        self.coerced_unpacked_items = []
        self.any_coerced_items = False
        for i, arg in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_types(env)
            if arg.is_starred:
                # the starred target collects the rest into a list
                if not arg.type.assignable_from(Builtin.list_type):
                    error(arg.pos,
                          "starred target must have Python object (list) type")
                if arg.type is py_object_type:
                    arg.type = Builtin.list_type
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            if unpacked_item is not coerced_unpacked_item:
                self.any_coerced_items = True
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = py_object_type
        return self
    def generate_result_code(self, code):
        # delegate to the subclass-specific construction code
        self.generate_operation_code(code)
    def generate_sequence_packing_code(self, code, target=None, plain=False):
        # Emit C code that builds the list/tuple in 'target' (defaults
        # to this node's result), honouring an optional repetition
        # factor ([1,2]*3) unless 'plain' is set.
        if target is None:
            target = self.result()
        size_factor = c_mult = ''
        mult_factor = None

        if self.mult_factor and not plain:
            mult_factor = self.mult_factor
            if mult_factor.type.is_int:
                c_mult = mult_factor.result()
                if isinstance(mult_factor.constant_result, (int,long)) \
                       and mult_factor.constant_result > 0:
                    size_factor = ' * %s' % mult_factor.constant_result
                elif mult_factor.type.signed:
                    # clamp negative factors to an empty sequence
                    size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
                else:
                    size_factor = ' * (%s)' % (c_mult,)

        if self.type is Builtin.tuple_type and (self.is_literal or self.slow) and not c_mult:
            # use PyTuple_Pack() to avoid generating huge amounts of one-time code
            code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
                target,
                len(self.args),
                ', '.join([ arg.py_result() for arg in self.args ]),
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)
        elif self.type.is_ctuple:
            # C tuples are plain structs => assign the fields directly
            for i, arg in enumerate(self.args):
                code.putln("%s.f%s = %s;" % (
                    target, i, arg.result()))
        else:
            # build the tuple/list step by step, potentially multiplying it as we go
            if self.type is Builtin.list_type:
                create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
            elif self.type is Builtin.tuple_type:
                create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
            else:
                raise InternalError("sequence packing for unexpected type %s" % self.type)
            arg_count = len(self.args)
            code.putln("%s = %s(%s%s); %s" % (
                target, create_func, arg_count, size_factor,
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)

            if c_mult:
                # FIXME: can't use a temp variable here as the code may
                # end up in the constant building function.  Temps
                # currently don't work there.
                #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
                counter = Naming.quick_temp_cname
                code.putln('{ Py_ssize_t %s;' % counter)
                if arg_count == 1:
                    offset = counter
                else:
                    offset = '%s * %s' % (counter, arg_count)
                code.putln('for (%s=0; %s < %s; %s++) {' % (
                    counter, counter, c_mult, counter
                    ))
            else:
                offset = ''

            for i in xrange(arg_count):
                arg = self.args[i]
                if c_mult or not arg.result_in_temp():
                    # the SET_ITEM macros steal a reference
                    code.put_incref(arg.result(), arg.ctype())
                code.putln("%s(%s, %s, %s);" % (
                    set_item_func,
                    target,
                    (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
                    arg.py_result()))
                code.put_giveref(arg.py_result())

            if c_mult:
                code.putln('}')
                #code.funcstate.release_temp(counter)
                code.putln('}')

        if mult_factor is not None and mult_factor.type.is_pyobject:
            # apply a Python object repetition factor after construction
            code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
                Naming.quick_temp_cname, target, mult_factor.py_result(),
                code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
                ))
            code.put_gotref(Naming.quick_temp_cname)
            code.put_decref(target, py_object_type)
            code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
            code.putln('}')
    def generate_subexpr_disposal_code(self, code):
        # Dispose of the argument subexpressions.  When the items were
        # stored into the sequence via the reference-stealing SET_ITEM
        # macros, we must not decref them again here.
        if self.mult_factor and self.mult_factor.type.is_int:
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        elif self.type is Builtin.tuple_type and (self.is_literal or self.slow):
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        else:
            # We call generate_post_assignment_code here instead
            # of generate_disposal_code, because values were stored
            # in the tuple using a reference-stealing operation.
            for arg in self.args:
                arg.generate_post_assignment_code(code)
            # Should NOT call free_temps -- this is invoked by the default
            # generate_evaluation_code which will do that.
            if self.mult_factor:
                self.mult_factor.generate_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        # Emit the unpacking assignment "a, b, c = rhs", dispatching to
        # the starred variant when one of the targets is "*x".
        if self.starred_assignment:
            self.generate_starred_assignment_code(rhs, code)
        else:
            self.generate_parallel_assignment_code(rhs, code)

        for item in self.unpacked_items:
            item.release(code)
        rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
    def generate_parallel_assignment_code(self, rhs, code):
        # Unpack 'rhs' into the target temps, either via the fast
        # tuple/list path or the generic iterator protocol, then assign
        # the (coerced) values to the actual targets.
        #
        # Need to work around the fact that generate_evaluation_code
        # allocates the temps in a rather hacky way -- the assignment
        # is evaluated twice, within each if-block.
        for item in self.unpacked_items:
            item.allocate(code)
        special_unpack = (rhs.type is py_object_type
                          or rhs.type in (tuple_type, list_type)
                          or not rhs.type.is_builtin_type)
        long_enough_for_a_loop = len(self.unpacked_items) > 3

        if special_unpack:
            self.generate_special_parallel_unpacking_code(
                code, rhs, use_loop=long_enough_for_a_loop)
        else:
            code.putln("{")
            self.generate_generic_parallel_unpacking_code(
                code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
            code.putln("}")
        for value_node in self.coerced_unpacked_items:
            value_node.generate_evaluation_code(code)
        for i in range(len(self.args)):
            self.args[i].generate_assignment_code(
                self.coerced_unpacked_items[i], code)
    def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
        # Emit a fast unpacking path for rhs values that are (or may be)
        # exact tuples/lists: check the size once, then read the items
        # directly with GET_ITEM.  Falls back to the generic iterator
        # protocol when the runtime type check fails.
        sequence_type_test = '1'
        none_check = "likely(%s != Py_None)" % rhs.py_result()
        if rhs.type is list_type:
            sequence_types = ['List']
            if rhs.may_be_none():
                sequence_type_test = none_check
        elif rhs.type is tuple_type:
            sequence_types = ['Tuple']
            if rhs.may_be_none():
                sequence_type_test = none_check
        else:
            sequence_types = ['Tuple', 'List']
            tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
            list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
            sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)

        code.putln("if (%s) {" % sequence_type_test)
        code.putln("PyObject* sequence = %s;" % rhs.py_result())

        # list/tuple => check size
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln("Py_ssize_t size = Py_SIZE(sequence);")
        code.putln("#else")
        code.putln("Py_ssize_t size = PySequence_Size(sequence);")  # < 0 => exception
        code.putln("#endif")
        code.putln("if (unlikely(size != %d)) {" % len(self.args))
        code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
        code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
            len(self.args), len(self.args)))
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
        code.putln(code.error_goto(self.pos))
        code.putln("}")

        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        # unpack items from list/tuple in unrolled loop (can't fail)
        if len(sequence_types) == 2:
            code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                item.result(), sequence_types[0], i))
        if len(sequence_types) == 2:
            code.putln("} else {")
            for i, item in enumerate(self.unpacked_items):
                code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                    item.result(), sequence_types[1], i))
            code.putln("}")
        for item in self.unpacked_items:
            code.put_incref(item.result(), item.ctype())
        code.putln("#else")
        # in non-CPython, use the PySequence protocol (which can fail)
        if not use_loop:
            for i, item in enumerate(self.unpacked_items):
                code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
                    item.result(), i,
                    code.error_goto_if_null(item.result(), self.pos)))
                code.put_gotref(item.result())
        else:
            code.putln("{")
            code.putln("Py_ssize_t i;")
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in self.unpacked_items])))
            code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
            code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
                code.error_goto_if_null('item', self.pos)))
            code.put_gotref('item')
            code.putln("*(temps[i]) = item;")
            code.putln("}")
            code.putln("}")

        code.putln("#endif")
        rhs.generate_disposal_code(code)

        if sequence_type_test == '1':
            code.putln("}")  # all done
        elif sequence_type_test == none_check:
            # either tuple/list or None => save some code by generating the error directly
            code.putln("} else {")
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
            code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
            code.putln("}")  # all done
        else:
            code.putln("} else {")  # needs iteration fallback code
            self.generate_generic_parallel_unpacking_code(
                code, rhs, self.unpacked_items, use_loop=use_loop)
            code.putln("}")
    def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
        # Emit unpacking code that uses the general iterator protocol:
        # get an iterator from rhs, call tp_iternext once per target,
        # and raise the appropriate error on too few/too many values.
        # When terminate is False, the iterator temp is returned to the
        # caller instead of being exhausted and released here (used for
        # starred assignments that consume the rest of the iterator).
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
        code.putln("Py_ssize_t index = -1;")  # must be at the start of a C block!

        if use_loop:
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in unpacked_items])))

        iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln(
            "%s = PyObject_GetIter(%s); %s" % (
                iterator_temp,
                rhs.py_result(),
                code.error_goto_if_null(iterator_temp, self.pos)))
        code.put_gotref(iterator_temp)
        rhs.generate_disposal_code(code)

        # cache the tp_iternext slot to avoid repeated type lookups
        iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
        code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
            iternext_func, iterator_temp))

        unpacking_error_label = code.new_label('unpacking_failed')
        unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
        if use_loop:
            code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
            code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
            code.put_goto(unpacking_error_label)
            code.put_gotref("item")
            code.putln("*(temps[index]) = item;")
            code.putln("}")
        else:
            for i, item in enumerate(unpacked_items):
                code.put(
                    "index = %d; %s = %s; if (unlikely(!%s)) " % (
                        i,
                        item.result(),
                        unpack_code,
                        item.result()))
                code.put_goto(unpacking_error_label)
                code.put_gotref(item.py_result())

        if terminate:
            # check that the iterator is exhausted after the last item
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
                unpack_code,
                len(unpacked_items)))
            code.putln("%s = NULL;" % iternext_func)
            code.put_decref_clear(iterator_temp, py_object_type)

        unpacking_done_label = code.new_label('unpacking_done')
        code.put_goto(unpacking_done_label)

        code.put_label(unpacking_error_label)
        code.put_decref_clear(iterator_temp, py_object_type)
        code.putln("%s = NULL;" % iternext_func)
        # tp_iternext returning NULL may mean StopIteration (too few
        # values) or a real error; IterFinish() distinguishes the two
        code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
        code.putln(code.error_goto(self.pos))
        code.put_label(unpacking_done_label)

        code.funcstate.release_temp(iternext_func)
        if terminate:
            code.funcstate.release_temp(iterator_temp)
            iterator_temp = None

        return iterator_temp
    def generate_starred_assignment_code(self, rhs, code):
        """Unpack 'rhs' into assignment targets of which exactly one is
        starred, e.g. "a, *b, c = rhs".  The fixed items left of the star
        are pulled from an iterator first, the remainder is collected into
        a list, and the fixed items right of the star are then split off
        the tail of that list.
        """
        # Locate the (single) starred target and split the remaining
        # targets into the fixed items before and after it.
        for i, arg in enumerate(self.args):
            if arg.is_starred:
                starred_target = self.unpacked_items[i]
                unpacked_fixed_items_left = self.unpacked_items[:i]
                unpacked_fixed_items_right = self.unpacked_items[i+1:]
                break
        else:
            assert False  # transform guarantees a starred target exists

        iterator_temp = None
        if unpacked_fixed_items_left:
            for item in unpacked_fixed_items_left:
                item.allocate(code)
            code.putln('{')
            # terminate=False keeps the iterator alive so the list
            # collection below can consume the rest of it.
            iterator_temp = self.generate_generic_parallel_unpacking_code(
                code, rhs, unpacked_fixed_items_left,
                use_loop=True, terminate=False)
            for i, item in enumerate(unpacked_fixed_items_left):
                value_node = self.coerced_unpacked_items[i]
                value_node.generate_evaluation_code(code)
            code.putln('}')

        starred_target.allocate(code)
        target_list = starred_target.result()
        # Collect whatever is left in the iterator (or all of rhs if there
        # were no fixed items on the left) into a fresh list.
        code.putln("%s = PySequence_List(%s); %s" % (
            target_list,
            iterator_temp or rhs.py_result(),
            code.error_goto_if_null(target_list, self.pos)))
        code.put_gotref(target_list)
        if iterator_temp:
            code.put_decref_clear(iterator_temp, py_object_type)
            code.funcstate.release_temp(iterator_temp)
        else:
            rhs.generate_disposal_code(code)

        if unpacked_fixed_items_right:
            code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
            length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
            # The collected list must hold at least the fixed right-hand items.
            code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
            code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
                 len(unpacked_fixed_items_left), length_temp,
                 code.error_goto(self.pos)))
            code.putln('}')

            for item in unpacked_fixed_items_right[::-1]:
                item.allocate(code)
            # Split the fixed right-hand items off the end of the list,
            # last item first.  coerced_unpacked_items is parallel to
            # unpacked_items, so its reversed prefix lines up with the
            # reversed right-hand items (zip truncates to the shorter).
            for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                        self.coerced_unpacked_items[::-1])):
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                # resize the list the hard way
                code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
                code.putln('#else')
                code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                code.putln('#endif')
                code.put_gotref(item.py_result())
                coerced_arg.generate_evaluation_code(code)

            # Without access to CPython list internals, build the shortened
            # list as a slice instead of truncating it in place.
            code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
            sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
                sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
                code.error_goto_if_null(sublist_temp, self.pos)))
            code.put_gotref(sublist_temp)
            code.funcstate.release_temp(length_temp)
            code.put_decref(target_list, py_object_type)
            code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
            code.putln('#else')
            code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
            code.funcstate.release_temp(sublist_temp)
            code.putln('#endif')

        for i, arg in enumerate(self.args):
            arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
    # Tuple constructor.
    #
    # is_partly_literal  boolean  underlying tuple is constant but it is
    #                             multiplied by a non-constant factor

    type = tuple_type
    is_partly_literal = False

    gil_message = "Constructing Python tuple"

    def infer_type(self, env):
        # A tuple of purely C-typed (non-fused) values can be inferred as
        # a C tuple; anything else stays a Python tuple.
        if self.mult_factor or not self.args:
            return tuple_type
        arg_types = [arg.infer_type(env) for arg in self.args]
        if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
            return tuple_type
        else:
            type = PyrexTypes.c_tuple_type(arg_types)
            env.declare_tuple_type(self.pos, type)
            return type

    def analyse_types(self, env, skip_children=False):
        if len(self.args) == 0:
            # the empty tuple is a module level constant
            self.is_temp = False
            self.is_literal = True
            return self
        else:
            if not skip_children:
                self.args = [arg.analyse_types(env) for arg in self.args]
            if not self.mult_factor and not any(arg.type.is_pyobject or arg.type.is_fused for arg in self.args):
                # use a C tuple when no Python objects are involved
                self.type = PyrexTypes.c_tuple_type(arg.type for arg in self.args)
                env.declare_tuple_type(self.pos, self.type)
                self.is_temp = 1
                return self
            else:
                node = SequenceNode.analyse_types(self, env, skip_children=True)
                for child in node.args:
                    if not child.is_literal:
                        break
                else:
                    # all items are literals => cache the tuple as a global
                    # constant, unless it is multiplied by a dynamic factor
                    if not node.mult_factor or node.mult_factor.is_literal and \
                           isinstance(node.mult_factor.constant_result, (int, long)):
                        node.is_temp = False
                        node.is_literal = True
                    else:
                        if not node.mult_factor.type.is_pyobject:
                            node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
                        node.is_temp = True
                        node.is_partly_literal = True
                return node

    def coerce_to(self, dst_type, env):
        if self.type.is_ctuple:
            if dst_type.is_ctuple and self.type.size == dst_type.size:
                if self.type == dst_type:
                    return self
                # coerce each component to the target component type
                coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
                return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=1)
            elif dst_type is tuple_type or dst_type is py_object_type:
                coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
                return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
            else:
                # go through a Python tuple as an intermediate step
                return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
        else:
            return SequenceNode.coerce_to(self, dst_type, env)

    def as_list(self):
        # Build an equivalent ListNode for this tuple literal.
        t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, tuple):
            t.constant_result = list(self.constant_result)
        return t

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
            arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return
        if self.is_partly_literal:
            # underlying tuple is const, but factor is not
            tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
                ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
            self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
            self.generate_sequence_packing_code(code)
            code.put_giveref(self.py_result())
        else:
            self.type.entry.used = True
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    # List constructor.
    #
    # obj_conversion_errors    [PyrexError]   used internally
    # original_args            [ExprNode]     used internally

    obj_conversion_errors = []
    type = list_type
    in_module_scope = False

    gil_message = "Constructing Python list"

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type

    def analyse_expressions(self, env):
        node = SequenceNode.analyse_expressions(self, env)
        return node.coerce_to_pyobject(env)

    def analyse_types(self, env):
        # Hold Python object conversion errors back: the list may later be
        # coerced to a C array or struct, in which case they do not apply.
        hold_errors()
        self.original_args = list(self.args)
        node = SequenceNode.analyse_types(self, env)
        node.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        if env.is_module_scope:
            self.in_module_scope = True
        return node

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # report the previously held conversion errors only now
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
            # treat the list literal as a C array initialiser
            array_length = len(self.args)
            if self.mult_factor:
                if isinstance(self.mult_factor.constant_result, (int, long)):
                    if self.mult_factor.constant_result <= 0:
                        error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
                    else:
                        array_length *= self.mult_factor.constant_result
                else:
                    error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, array_length)
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    # undo the previous object coercion of the item
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_cpp_class:
            # TODO(robertwb): Avoid object conversion for vector/list/set.
            return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_struct:
            # treat the list literal as a struct initialiser
            if len(self.args) > len(dst_type.scope.var_entries):
                error(self.pos, "Too many members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self

    def as_list(self):  # dummy for compatibility with TupleNode
        return self

    def as_tuple(self):
        # Build an equivalent TupleNode for this list literal.
        t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, list):
            t.constant_result = tuple(self.constant_result)
        return t

    def allocate_temp_result(self, code):
        if self.type.is_array and self.in_module_scope:
            # a module level array literal lives for the whole program
            # run => allocate it statically
            self.temp_code = code.funcstate.allocate_temp(
                self.type, manage_ref=False, static=True)
        else:
            SequenceNode.allocate_temp_result(self, code)

    def release_temp_result(self, env):
        if self.type.is_array:
            # To be valid C++, we must allocate the memory on the stack
            # manually and be sure not to reuse it for something else.
            # Yes, this means that we leak a temp array variable.
            pass
        else:
            SequenceNode.release_temp_result(self, env)

    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError()  # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]

    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l

    def generate_operation_code(self, code):
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            if self.mult_factor:
                # repeat the assignments in a generated C loop
                code.putln("{")
                code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
                code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
                    i=Naming.quick_temp_cname, count=self.mult_factor.result()))
                offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
            else:
                offset = ''
            for i, arg in enumerate(self.args):
                if arg.type.is_array:
                    # nested array item => copy the element data over
                    code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
                    code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
                        self.result(), i, offset,
                        arg.result(), self.result()
                    ))
                else:
                    code.putln("%s[%s%s] = %s;" % (
                        self.result(),
                        i,
                        offset,
                        arg.result()))
            if self.mult_factor:
                code.putln("}")
                code.putln("}")
        elif self.type.is_struct:
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    member.cname,
                    arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    # Abstract base class for ExprNodes that have their own local
    # scope, such as generator expressions.
    #
    # expr_scope    Scope  the inner scope of the expression

    subexprs = []
    expr_scope = None

    # does this node really have a local scope, e.g. does it leak loop
    # variables or not? non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True

    def init_scope(self, outer_scope, expr_scope=None):
        # Set up the inner scope (or disable it for leaking Py2 semantics).
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None

    def analyse_declarations(self, env):
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass

    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        return self

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        return self

    def generate_evaluation_code(self, code):
        # set up local variables and free their references on exit
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return

        code.putln('{ /* enter inner scope */')
        py_entries = []
        for entry in self.expr_scope.var_entries:
            if not entry.in_closure:
                code.put_var_declaration(entry)
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return

        # must free all local Python references at each exit point;
        # redirect the loop and error labels so we get control first
        old_loop_labels = tuple(code.new_loop_labels())
        old_error_label = code.new_error_label()

        generate_inner_evaluation_code(code)

        # normal (non-error) exit: decref the inner scope's Python vars
        for entry in py_entries:
            code.put_var_decref(entry)

        # error/loop body exit points: clean up, then jump to the
        # original (outer) label
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        # restore the outer labels
        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
    """A list/set/dict comprehension expression.

    The container is created here; 'loop' holds the for-loop whose inner
    ComprehensionAppendNode fills this node's result.
    """
    child_attrs = ["loop"]

    is_temp = True

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        # the append node inside the loop writes into our result
        self.append.target = self
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # Create the (initially empty) result container, then run the loop.
        node_type = self.type
        if node_type is Builtin.list_type:
            constructor = 'PyList_New(0)'
        elif node_type is Builtin.set_type:
            constructor = 'PySet_New(NULL)'
        elif node_type is Builtin.dict_type:
            constructor = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % node_type)
        code.putln('%s = %s; %s' % (
            self.result(), constructor,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    """Appends the value of 'expr' to a list/set comprehension result.

    'target' deliberately stays out of child_attrs/subexprs to avoid
    infinite recursion -- it points back at the comprehension node.
    """
    child_attrs = ['expr']
    target = None

    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        expr = self.expr.analyse_expressions(env)
        if not expr.type.is_pyobject:
            expr = expr.coerce_to_pyobject(env)
        self.expr = expr
        return self

    def generate_execution_code(self, code):
        # Pick the C append function matching the target container type.
        target_type = self.target.type
        if target_type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            append_function = "__Pyx_ListComp_Append"
        elif target_type is set_type:
            append_function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        append_call = "%s(%s, (PyObject*)%s)" % (
            append_function,
            self.target.result(),
            self.expr.result())
        code.putln(code.error_goto_if(append_call, self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    """Inserts a key/value pair into a dict comprehension result."""
    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        key = self.key_expr.analyse_expressions(env)
        if not key.type.is_pyobject:
            key = key.coerce_to_pyobject(env)
        self.key_expr = key
        value = self.value_expr.analyse_expressions(env)
        if not value.type.is_pyobject:
            value = value.coerce_to_pyobject(env)
        self.value_expr = value
        return self

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        setitem_call = "PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result())
        code.putln(code.error_goto_if(setitem_call, self.pos))
        for subexpr in (self.key_expr, self.value_expr):
            subexpr.generate_disposal_code(code)
            subexpr.free_temps(code)

    def generate_function_definitions(self, env, code):
        for subexpr in (self.key_expr, self.value_expr):
            subexpr.generate_function_definitions(env, code)

    def annotate(self, code):
        for subexpr in (self.key_expr, self.value_expr):
            subexpr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
    """An inlined generator expression whose result is accumulated
    directly inside of the loop.  Only created by transforms when
    replacing builtin calls on generator expressions.

    loop         ForStatNode    the for-loop, not containing any YieldExprNodes
    result_node  ResultRefNode  the reference to the result value temp
    orig_func    String         the name of the builtin function this node replaces
    """
    child_attrs = ["loop"]
    loop_analysed = False
    type = py_object_type

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def may_be_none(self):
        return False

    def annotate(self, code):
        self.loop.annotate(code)

    def infer_type(self, env):
        return self.result_node.infer_type(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop_analysed = True
            self.loop = self.loop.analyse_expressions(env)
        self.type = self.result_node.type
        self.is_temp = True
        return self

    def analyse_scoped_expressions(self, env):
        self.loop_analysed = True
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def coerce_to(self, dst_type, env):
        if dst_type.is_numeric and self.orig_func == 'sum' and not self.loop_analysed:
            # Drop the aggregation variable and the add operations into
            # plain C.  Only safe before the loop body is analysed; after
            # that, the result reference type will have infected
            # expressions and assignments.
            self.result_node.type = self.type = dst_type
            return self
        return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)
class SetNode(ExprNode):
# Set constructor.
type = set_type
subexprs = ['args']
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
    # Dictionary constructor.
    #
    # key_value_pairs          [DictItemNode]
    # exclude_null_values      [boolean]       Do not add NULL values to dict
    #
    # obj_conversion_errors    [PyrexError]    used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type
    is_dict_literal = True

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        # Alternative constructor from a sequence of (key, value) node pairs.
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
            for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception, e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Hold object conversion errors back: the dict may later be
        # coerced to a struct, in which case they do not apply.
        hold_errors()
        self.key_value_pairs = [ item.analyse_types(env)
                                 for item in self.key_value_pairs ]
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            self.release_errors()
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # treat the dict literal as a struct/union initialiser
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value) # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            # undo the object coercion of the value
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        # Report the previously held conversion errors.
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        if self.type.is_pyobject:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if self.type.is_pyobject:
                if self.exclude_null_values:
                    # only add the value if it is non-NULL
                    code.putln('if (%s) {' % item.value.py_result())
                code.put_error_if_neg(self.pos,
                    "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        item.key.py_result(),
                        item.value.py_result()))
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # struct target => direct member assignment
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    item.key.value,
                    item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    """A single key/value pair inside a DictNode.

    key    ExprNode
    value  ExprNode
    """
    subexprs = ['key', 'value']

    nogil_check = None  # parent DictNode takes care of it

    def calculate_constant_result(self):
        self.constant_result = (self.key.constant_result,
                                self.value.constant_result)

    def analyse_types(self, env):
        key, value = self.key.analyse_types(env), self.value.analyse_types(env)
        self.key = key.coerce_to_pyobject(env)
        self.value = value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_disposal_code(code)

    def free_temps(self, code):
        for subexpr in (self.key, self.value):
            subexpr.free_temps(code)

    def __iter__(self):
        return iter((self.key, self.value))
class SortedDictKeysNode(ExprNode):
    # build sorted list of dict keys, e.g. for dir()
    subexprs = ['arg']

    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        arg = self.arg.analyse_types(env)
        if arg.type is Builtin.dict_type:
            # guard the direct PyDict_Keys() call below against None
            arg = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = arg
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        dict_result = self.arg.py_result()
        if self.arg.type is Builtin.dict_type:
            # real dict => fast path
            code.putln('%s = PyDict_Keys(%s); %s' % (
                self.result(), dict_result,
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            # originally used PyMapping_Keys() here, but that may return a tuple
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                'PyObjectCallMethod0', 'ObjectHandling.c'))
            keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
            code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
                self.result(), dict_result, keys_cname,
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            # arbitrary mappings may return any iterable from keys()
            # => convert to a list if necessary
            code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
            code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
            code.putln("}")
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
    """Mixin providing interned-name helpers for class-creation nodes."""

    def get_py_mod_name(self, code):
        # Interned Python string constant for the defining module's name.
        return code.get_py_string_const(self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        # Interned Python string constant for the qualified class name.
        return code.get_py_string_const(self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    """Helper node used to implement Python (Py2 style) class
    definitions.  Constructs a class object given a name, a tuple of
    bases and the class dictionary.

    name         EncodedString     name of the class
    bases        ExprNode          base class tuple
    dict         ExprNode          class dict (not owned by this node)
    doc          ExprNode or None  doc string
    module_name  EncodedString     name of defining module
    """
    subexprs = ['bases', 'doc']
    type = py_object_type
    is_temp = True

    gil_message = "Constructing Python class"

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        self.bases = self.bases.analyse_types(env)
        if self.doc:
            doc = self.doc.analyse_types(env)
            self.doc = doc.coerce_to_pyobject(env)
        env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        class_cname = code.intern_identifier(self.name)
        if self.doc:
            # store the docstring into the class dict up front
            doc_key = code.intern_identifier(
                StringEncoding.EncodedString("__doc__"))
            code.put_error_if_neg(self.pos,
                'PyDict_SetItem(%s, %s, %s)' % (
                    self.dict.py_result(),
                    doc_key,
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.bases.py_result(),
                self.dict.py_result(),
                class_cname,
                qualname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
    """Helper node used to implement Python3+ class definitions.
    Constructs a class object given a name, a tuple of bases and the
    class dictionary, optionally via a metaclass.

    name                 EncodedString  name of the class
    dict                 ExprNode       class dict (not owned by this node)
    module_name          EncodedString  name of defining module
    calculate_metaclass  bool           should call CalculateMetaclass()
    allow_py2_metaclass  bool           should look for Py2 metaclass
    """
    subexprs = []
    type = py_object_type
    is_temp = True

    gil_message = "Constructing Python class"

    def infer_type(self, env):
        # TODO: could return 'type' in some cases
        return py_object_type

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        return True

    def generate_result_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
        class_cname = code.intern_identifier(self.name)
        mkw = self.mkw.py_result() if self.mkw else 'NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            # fall back to the default class type at runtime
            metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
                self.result(),
                metaclass,
                class_cname,
                self.bases.py_result(),
                self.dict.py_result(),
                mkw,
                self.calculate_metaclass,
                self.allow_py2_metaclass,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
    # Helper class for keyword arguments.
    #
    # starstar_arg      DictNode
    # keyword_args      [DictItemNode]

    subexprs = ['starstar_arg', 'keyword_args']
    is_temp = 1
    type = dict_type

    def calculate_constant_result(self):
        # Merge the ** mapping with the explicit keyword arguments,
        # rejecting duplicates like CPython does.
        result = dict(self.starstar_arg.constant_result)
        for item in self.keyword_args:
            key, value = item.constant_result
            if key in result:
                raise ValueError("duplicate keyword argument found: %s" % key)
            result[key] = value
        self.constant_result = result

    def compile_time_value(self, denv):
        result = self.starstar_arg.compile_time_value(denv)
        pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                  for item in self.keyword_args ]
        try:
            result = dict(result)
            for key, value in pairs:
                if key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        except Exception, e:
            self.compile_time_value_error(e)
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return dict_type

    def analyse_types(self, env):
        arg = self.starstar_arg.analyse_types(env)
        arg = arg.coerce_to_pyobject(env)
        self.starstar_arg = arg.as_none_safe_node(
            # FIXME: CPython's error message starts with the runtime function name
            'argument after ** must be a mapping, not NoneType')
        self.keyword_args = [ item.analyse_types(env)
                              for item in self.keyword_args ]
        return self

    def may_be_none(self):
        return False

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.starstar_arg.generate_evaluation_code(code)
        if self.starstar_arg.type is not Builtin.dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_Check(%s))) {' %
                       self.starstar_arg.py_result())
        if self.keyword_args:
            # keys will be added below => work on a copy of the ** dict
            code.putln(
                "%s = PyDict_Copy(%s); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            code.putln("%s = %s;" % (
                self.result(),
                self.starstar_arg.py_result()))
            code.put_incref(self.result(), py_object_type)
        if self.starstar_arg.type is not Builtin.dict_type:
            # non-dict mapping => fall back to dict(starstar_arg)
            code.putln('} else {')
            code.putln(
                "%s = PyObject_CallFunctionObjArgs("
                "(PyObject*)&PyDict_Type, %s, NULL); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln('}')
        self.starstar_arg.generate_disposal_code(code)
        self.starstar_arg.free_temps(code)

        if not self.keyword_args:
            return

        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
        for item in self.keyword_args:
            item.generate_evaluation_code(code)
            # reject keys that were already passed through the ** mapping
            code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
                self.result(),
                item.key.py_result()))
            # FIXME: find out function name at runtime!
            code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                item.key.py_result(),
                code.error_goto(self.pos)))
            code.putln("}")
            code.put_error_if_neg(self.pos,
                "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        self.starstar_arg.annotate(code)
        for item in self.keyword_args:
            item.annotate(code)
class PyClassMetaclassNode(ExprNode):
    # Helper class holds Python3 metaclass object
    #
    # bases        ExprNode           Base class tuple (not owned by this node)
    # mkw          ExprNode           Class keyword arguments (not owned by this node)
    subexprs = []
    def analyse_types(self, env):
        # Result is always a generic Python object held in a temporary.
        self.type = py_object_type
        self.is_temp = True
        return self
    def may_be_none(self):
        return True
    def generate_result_code(self, code):
        # With class keyword arguments a 'metaclass' keyword may override
        # the metaclass; otherwise it is calculated from the bases alone.
        if self.mkw:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
            call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
                self.bases.result(),
                self.mkw.result())
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
            call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
                self.bases.result())
        code.putln(
            "%s = %s; %s" % (
                self.result(), call,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All of these are not owned by this node (except 'doc')
    #  metaclass         ExprNode           Metaclass object
    #  bases             ExprNode           Base class tuple
    #  mkw               ExprNode           Class keyword arguments
    #  doc               ExprNode or None   Doc string (owned)
    subexprs = ['doc']
    def analyse_types(self, env):
        # Only the owned doc string needs analysis; the other sub-nodes
        # are analysed by their owners.
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        return self
    def may_be_none(self):
        return True
    def generate_result_code(self, code):
        # Calls __Pyx_Py3MetaclassPrepare(), which implements the PEP 3115
        # metaclass.__prepare__() protocol and returns the class namespace.
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = '(PyObject *) NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            metaclass = "(PyObject *) NULL"
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                metaclass,
                self.bases.result(),
                cname,
                qualname,
                mkw,
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj
    #
    # Evaluates to a fresh list that collects the CyFunction objects of a
    # class body; generate_injection_code() later distributes the finished
    # class object to all of them (needed for no-argument super()).
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False  # only set when the class body actually needs __class__
    def analyse_expressions(self, env):
        if self.is_active:
            env.use_utility_code(
                UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        return self
    def generate_evaluation_code(self, code):
        # Create an empty list to register the class body's functions in.
        if self.is_active:
            self.allocate_temp_result(code)
            code.putln(
                '%s = PyList_New(0); %s' % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())
    def generate_injection_code(self, code, classobj_cname):
        # Called after the class object exists: patch it into each function.
        if self.is_active:
            code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
                self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    #
    # Evaluates to the __class__ cell of the enclosing method or generator,
    # raising SystemError at runtime when the cell is empty.
    subexprs = []
    is_temp = True
    is_generator = False  # True when read from inside a generator body
    type = py_object_type
    def analyse_types(self, env):
        return self
    def generate_result_code(self, code):
        # Generators store the class object on the generator struct;
        # regular methods fetch it from the CyFunction object.
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
    # object from a class and a function.
    #
    # function      ExprNode   Function object
    # self_object   ExprNode   self object
    subexprs = ['function']
    def analyse_types(self, env):
        # self_object is analysed by its owner, not listed in subexprs.
        self.function = self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
        return self
    gil_message = "Constructing a bound method"
    def generate_result_code(self, code):
        # Binds 'function' to 'self_object' using the object's type as the
        # method's class argument.
        code.putln(
            "%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs an unbound method
    # object from a class and a function.
    #
    # function      ExprNode   Function object
    type = py_object_type
    is_temp = 1
    subexprs = ['function']
    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        return self
    def may_be_none(self):
        return False
    gil_message = "Constructing an unbound method"
    def generate_result_code(self, code):
        # The class currently being built is found on the code generator's
        # pyclass stack; pass 0 as 'self' to create an unbound method.
        class_cname = code.pyclass_stack[-1].classobj.result()
        code.putln(
            "%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
                self.result(),
                self.function.py_result(),
                class_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # functions.  Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname   string             PyMethodDef structure
    # self_object       ExprNode or None
    # binding           bool               create a CyFunction (binding) object
    # def_node          DefNode            the Python function node
    # module_name       EncodedString      Name of defining module
    # code_object       CodeObjectNode     the PyCodeObject creator node
    subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
                'annotations_dict']
    self_object = None
    code_object = None
    binding = False
    def_node = None
    defaults = None            # [(arg, struct entry)] for non-literal defaults
    defaults_struct = None     # C struct scope holding non-literal defaults
    defaults_pyobjects = 0     # number of PyObject fields in that struct
    defaults_tuple = None      # becomes __defaults__
    defaults_kwdict = None     # becomes __kwdefaults__
    annotations_dict = None    # becomes __annotations__
    type = py_object_type
    is_temp = 1
    specialized_cpdefs = None
    is_specialization = False
    @classmethod
    def from_defnode(cls, node, binding):
        # Convenience constructor used when wrapping a DefNode.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))
    def analyse_types(self, env):
        if self.binding:
            self.analyse_default_args(env)
        return self
    def analyse_default_args(self, env):
        """
        Handle non-literal function's default arguments.

        Non-literal defaults are stored in a per-function C struct; literal
        defaults go straight into the __defaults__ tuple / __kwdefaults__
        dict.  Also collects argument and return annotations.
        """
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        default_kwargs = []
        annotations = []
        for arg in self.def_node.args:
            if arg.default:
                if not arg.default.is_literal:
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                if arg.kw_only:
                    default_kwargs.append(arg)
                else:
                    default_args.append(arg)
            if arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
            if arg and arg.annotation:
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        if self.def_node.return_type_annotation:
            annotations.append((self.def_node.return_type_annotation.pos,
                                StringEncoding.EncodedString("return"),
                                self.def_node.return_type_annotation))
        if nonliteral_objects or nonliteral_other:
            # Declare a module-level struct that holds the evaluated
            # non-literal default values for this function.
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name
        if default_args or default_kwargs:
            if self.defaults_struct is None:
                # All defaults are literals: build static containers.
                if default_args:
                    defaults_tuple = TupleNode(self.pos, args=[
                        arg.default for arg in default_args])
                    self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
                if default_kwargs:
                    defaults_kwdict = DictNode(self.pos, key_value_pairs=[
                        DictItemNode(
                            arg.pos,
                            key=IdentifierStringNode(arg.pos, value=arg.name),
                            value=arg.default)
                        for arg in default_kwargs])
                    self.defaults_kwdict = defaults_kwdict.analyse_types(env)
            else:
                # Dynamic defaults: expose them through a synthesized
                # '__defaults__' getter that reads the struct at call time.
                if default_args:
                    defaults_tuple = DefaultsTupleNode(
                        self.pos, default_args, self.defaults_struct)
                else:
                    defaults_tuple = NoneNode(self.pos)
                if default_kwargs:
                    defaults_kwdict = DefaultsKwDictNode(
                        self.pos, default_kwargs, self.defaults_struct)
                else:
                    defaults_kwdict = NoneNode(self.pos)
                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=TupleNode(
                            self.pos, args=[defaults_tuple, defaults_kwdict])),
                    decorators=None,
                    name=StringEncoding.EncodedString("__defaults__"))
                defaults_getter.analyse_declarations(env)
                defaults_getter = defaults_getter.analyse_expressions(env)
                defaults_getter.body = defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
        if annotations:
            annotations_dict = DictNode(self.pos, key_value_pairs=[
                DictItemNode(
                    pos, key=IdentifierStringNode(pos, value=name),
                    value=value)
                for pos, name, value in annotations])
            self.annotations_dict = annotations_dict.analyse_types(env)
    def may_be_none(self):
        return False
    gil_message = "Constructing Python function"
    def self_result_code(self):
        # C expression for the function's 'self' slot ("NULL" if unbound).
        if self.self_object is None:
            self_result = "NULL"
        else:
            self_result = self.self_object.py_result()
        return self_result
    def generate_result_code(self, code):
        if self.binding:
            self.generate_cyfunction_code(code)
        else:
            self.generate_pycfunction_code(code)
    def generate_pycfunction_code(self, code):
        # Plain (non-binding) function: a simple PyCFunction object.
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.self_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def generate_cyfunction_code(self, code):
        # Binding function: create a CyFunction (or FusedFunction for
        # fused-type specializations) and attach defaults/annotations.
        if self.specialized_cpdefs:
            def_node = self.specialized_cpdefs[0]
        else:
            def_node = self.def_node
        if self.specialized_cpdefs or self.is_specialization:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
            constructor = "__pyx_FusedFunction_NewEx"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
            constructor = "__Pyx_CyFunction_NewEx"
        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'
        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
        if def_node.local_scope.parent_scope.is_c_class_scope:
            flags.append('__Pyx_CYFUNCTION_CCLASS')
        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'
        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.get_py_qualified_name(code),
                self.self_result_code(),
                self.get_py_mod_name(code),
                Naming.moddict_cname,
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if def_node.requires_classobj:
            # Register in the class-cell list for no-argument super().
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            code.put_giveref(self.py_result())
        if self.defaults:
            # Allocate and fill the dynamic-defaults struct.
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))
        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if self.defaults_kwdict:
            code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
                self.result(), self.defaults_kwdict.py_result()))
        if def_node.defaults_getter:
            code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                self.result(), def_node.defaults_getter.entry.pyfunc_cname))
        if self.annotations_dict:
            code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
                self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
    # A PyCFunctionNode for functions defined inside a closure: the
    # enclosing scope object serves as the function's 'self' slot.
    binding = True
    needs_self_code = True

    def self_result_code(self):
        if not self.needs_self_code:
            return "NULL"
        return "((PyObject*)%s)" % Naming.cur_scope_cname
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node   DefNode    the Python function node
    # varnames   TupleNode  a tuple with all local variable names
    subexprs = ['varnames']
    is_temp = False
    result_code = None  # cached constant cname, allocated lazily
    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        args = list(def_node.args)
        # if we have args/kwargs, then the first two in var_entries are those
        local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
        self.varnames = TupleNode(
            def_node.pos,
            args=[IdentifierStringNode(arg.pos, value=arg.name)
                  for arg in args + local_vars],
            is_temp=0,
            is_literal=1)
    def may_be_none(self):
        return False
    def calculate_result_code(self, code=None):
        # The code object is a cached module-level constant.
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        return self.result_code
    def generate_result_code(self, code):
        if self.result_code is None:
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        # Emit into the cached-constants section, not the current function.
        code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
        flags = []
        if self.def_node.star_arg:
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')
        # Argument order mirrors __Pyx_PyCode_New() / PyCode_New().
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args) - func.num_kwonly_args,  # argcount
            func.num_kwonly_args,      # kwonlyargcount (Py3 only)
            len(self.varnames.args),   # nlocals
            '|'.join(flags) or '0',    # flags
            Naming.empty_bytes,        # code
            Naming.empty_tuple,        # consts
            Naming.empty_tuple,        # names (FIXME)
            self.varnames.result(),    # varnames
            Naming.empty_tuple,        # freevars (FIXME)
            Naming.empty_tuple,        # cellvars (FIXME)
            file_path_const,           # filename
            func_name,                 # name
            self.pos[1],               # firstlineno
            Naming.empty_bytes,        # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    # Wraps a CyFunction's literal default argument value so that the
    # literal's evaluation code is emitted at most once, however often
    # this wrapper is asked to evaluate.
    subexprs = []
    is_literal = True
    is_temp = False

    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = arg.type
        self.evaluated = False

    def analyse_types(self, env):
        # The wrapped literal was analysed already.
        return self

    def generate_result_code(self, code):
        pass

    def generate_evaluation_code(self, code):
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True

    def result(self):
        # Cast keeps the declared argument type even for plain literals.
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    # A CyFunction default argument whose value is not a literal: it is
    # read back from the function's dynamic-defaults C struct.
    subexprs = []

    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct

    def analyse_types(self, env):
        self.type = self.arg.type
        self.is_temp = False
        return self

    def generate_result_code(self, code):
        pass

    def result(self):
        entry = self.defaults_struct.lookup(self.arg.name)
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            self.defaults_struct.name, Naming.self_cname, entry.cname)
class DefaultsTupleNode(TupleNode):
    # CyFunction's __defaults__ tuple: literal defaults are used directly,
    # non-literal ones are read from the dynamic-defaults struct.

    def __init__(self, pos, defaults, defaults_struct):
        args = [
            arg.default if arg.default.is_literal
            else DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            for arg in defaults]
        super(DefaultsTupleNode, self).__init__(pos, args=args)

    def analyse_types(self, env, skip_children=False):
        node = super(DefaultsTupleNode, self).analyse_types(env, skip_children)
        return node.coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
    # CyFunction's __kwdefaults__ dict: maps keyword-only argument names
    # to their default values (literal or struct-backed).

    def __init__(self, pos, defaults, defaults_struct):
        items = []
        for arg in defaults:
            key = IdentifierStringNode(arg.pos, value=arg.name)
            if arg.default.is_literal:
                value = arg.default
            else:
                value = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            items.append(DictItemNode(value.pos, key=key, value=value))
        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args          [CArgDeclNode]         formal arguments
    # star_arg      PyArgDeclNode or None  * argument
    # starstar_arg  PyArgDeclNode or None  ** argument
    # lambda_name   string                 a module-globally unique lambda name
    # result_expr   ExprNode
    # def_node      DefNode                the underlying function 'def' node
    child_attrs = ['def_node']
    name = StringEncoding.EncodedString('<lambda>')
    def analyse_declarations(self, env):
        # The lambda is compiled like a nested def, but without a name
        # assignment in the enclosing scope.
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.analyse_declarations(env)
        self.def_node.is_cyfunction = True
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)
    def analyse_types(self, env):
        self.def_node = self.def_node.analyse_expressions(env)
        return super(LambdaNode, self).analyse_types(env)
    def generate_result_code(self, code):
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g.  (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode   the for-loop, containing a YieldExprNode
    # def_node  DefNode       the underlying generator 'def' node
    name = StringEncoding.EncodedString('genexpr')
    binding = False
    def analyse_declarations(self, env):
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required: the generator is called directly below.
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs
    def generate_result_code(self, code):
        # Call the generator function immediately; its return value (the
        # generator object) is the expression's result.
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg         ExprNode   the value to return from the generator
    # label_num   integer    yield label number
    # is_yield_from  boolean is a YieldFromExprNode to delegate to another generator
    subexprs = ['arg']
    type = py_object_type
    label_num = 0  # 0 means: not inside a generator (error)
    is_yield_from = False
    def analyse_types(self, env):
        if not self.label_num:
            error(self.pos, "'yield' not supported here")
        self.is_temp = 1
        if self.arg is not None:
            self.arg = self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.coerce_yield_argument(env)
        return self
    def coerce_yield_argument(self, env):
        # Overridden in YieldFromExprNode with extra type checks.
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        # Store the yielded value (or None) in the return variable, then
        # suspend via generate_yield_code().
        if self.arg:
            self.arg.generate_evaluation_code(code)
            self.arg.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.arg.result_as(py_object_type)))
            self.arg.generate_post_assignment_code(code)
            self.arg.free_temps(code)
        else:
            code.put_init_to_py_none(Naming.retval_cname, py_object_type)
        self.generate_yield_code(code)
    def generate_yield_code(self, code):
        """
        Generate the code to return the argument in 'Naming.retval_cname'
        and to continue at the yield label.
        """
        label_num, label_name = code.new_yield_label()
        code.use_label(label_name)
        saved = []
        code.funcstate.closure_temps.reset()
        # Save all live temps into the closure so they survive suspension.
        for cname, type, manage_ref in code.funcstate.temps_in_use():
            save_cname = code.funcstate.closure_temps.allocate_temp(type)
            saved.append((cname, save_cname, type))
            if type.is_pyobject:
                code.put_xgiveref(cname)
            code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
        code.put_xgiveref(Naming.retval_cname)
        code.put_finish_refcount_context()
        code.putln("/* return from generator, yielding value */")
        # Record where to resume, then return the yielded value.
        code.putln("%s->resume_label = %d;" % (
            Naming.generator_cname, label_num))
        code.putln("return %s;" % Naming.retval_cname)
        code.put_label(label_name)
        # On resumption: restore saved temps and clear the closure slots.
        for cname, save_cname, type in saved:
            code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
                code.put_xgotref(cname)
        code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
        if self.result_is_used:
            # The value sent in via generator.send() becomes the result.
            self.allocate_temp_result(code)
            code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
            code.put_incref(self.result(), py_object_type)
class YieldFromExprNode(YieldExprNode):
    # "yield from GEN" expression
    is_yield_from = True
    def coerce_yield_argument(self, env):
        if not self.arg.type.is_string:
            # FIXME: support C arrays and C++ iterators?
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))
        self.arg.generate_evaluation_code(code)
        # Delegate one step into the sub-generator; non-NULL result means
        # the sub-generator yielded a value to pass on.
        code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
            Naming.retval_cname,
            Naming.generator_cname,
            self.arg.result_as(py_object_type)))
        self.arg.generate_disposal_code(code)
        self.arg.free_temps(code)
        code.put_xgotref(Naming.retval_cname)
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            # YieldExprNode has allocated the result temp for us
            code.putln("%s = NULL;" % self.result())
            code.putln("if (unlikely(__Pyx_PyGen_FetchStopIterationValue(&%s) < 0)) %s" % (
                self.result(),
                code.error_goto(self.pos)))
            code.put_gotref(self.result())
        else:
            # Result unused: just distinguish StopIteration from real errors.
            code.putln("PyObject* exc_type = PyErr_Occurred();")
            code.putln("if (exc_type) {")
            code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                       " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
            code.putln("else %s" % code.error_goto(self.pos))
            code.putln("}")
        code.putln("}")
class GlobalsExprNode(AtomicExprNode):
    # The builtin globals() expression: returns the module dict.
    type = dict_type
    is_temp = 1
    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)
        return self
    gil_message = "Constructing globals dict"
    def may_be_none(self):
        return False
    def generate_result_code(self, code):
        code.putln('%s = __Pyx_Globals(); %s' % (
            self.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
    # Dict item for locals(): drops values whose C type cannot be
    # represented as a Python object by setting them to None.
    def analyse_types(self, env):
        key = self.key.analyse_types(env)
        value = self.value.analyse_types(env)
        self.key = key.coerce_to_pyobject(env)
        if value.type.can_coerce_to_pyobject(env):
            value = value.coerce_to_pyobject(env)
        else:
            value = None
        self.value = value
        return self
class FuncLocalsExprNode(DictNode):
    # locals() inside a function: a dict of the function's local variables,
    # built in sorted name order; unrepresentable values are dropped.
    def __init__(self, pos, env):
        local_vars = [entry.name for entry in env.entries.values() if entry.name]
        local_vars.sort()
        items = []
        for var_name in local_vars:
            items.append(LocalsDictItemNode(
                pos, key=IdentifierStringNode(pos, value=var_name),
                value=NameNode(pos, name=var_name, allow_null=True)))
        DictNode.__init__(self, pos, key_value_pairs=items,
                          exclude_null_values=True)

    def analyse_types(self, env):
        node = super(FuncLocalsExprNode, self).analyse_types(env)
        # LocalsDictItemNode sets .value to None for non-coercible types.
        node.key_value_pairs = [pair for pair in node.key_value_pairs
                                if pair.value is not None]
        return node
class PyClassLocalsExprNode(AtomicExprNode):
    # locals() inside a Python class body: simply exposes the namespace
    # dict that the class body is being executed in.
    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        self.is_temp = False
        self.type = self.pyclass_dict.type
        return self

    def may_be_none(self):
        # The class namespace dict always exists while the body runs.
        return False

    def result(self):
        # Delegate straight to the underlying dict node's result.
        return self.pyclass_dict.result()

    def generate_result_code(self, code):
        # Nothing to emit: the dict is evaluated by its owner.
        pass
def LocalsExprNode(pos, scope_node, env):
    # Factory: choose the locals() implementation matching the scope kind.
    if env.is_module_scope:
        return GlobalsExprNode(pos)
    elif env.is_py_class_scope:
        return PyClassLocalsExprNode(pos, scope_node.dict)
    else:
        return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
# Maps unary operator source text to the compile-time evaluation function
# from the 'operator' module, used for DEF/IF expression folding.
compile_time_unary_operators = {
    'not': operator.not_,
    '~': operator.inv,
    '-': operator.neg,
    '+': operator.pos,
}
class UnopNode(ExprNode):
    # Base class for unary operator nodes.
    #
    # operator     string
    # operand      ExprNode
    #
    # Processing during analyse_expressions phase:
    #
    #   analyse_c_operation
    #     Called when the operand is not a pyobject.
    #     - Check operand type and coerce if needed.
    #     - Determine result type and result code fragment.
    #     - Allocate temporary for result if needed.
    subexprs = ['operand']
    infix = True
    def calculate_constant_result(self):
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)
    def compile_time_value(self, denv):
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                "Unary '%s' not supported in compile-time expression"
                    % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception as e:  # fixed Py2-only "except Exception, e" syntax
            self.compile_time_value_error(e)
    def infer_type(self, env):
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cpp_class or operand_type.is_ptr:
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                return cpp_type
        return self.infer_unop_type(env, operand_type)
    def infer_unop_type(self, env, operand_type):
        # Default: unary ops preserve the operand's type (Python objects
        # always produce Python objects).
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type
    def may_be_none(self):
        if self.operand.type and self.operand.type.is_builtin_type:
            if self.operand.type is not type_type:
                return False
        return ExprNode.may_be_none(self)
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
        return self
    def check_const(self):
        return self.operand.check_const()
    def is_py_operation(self):
        return self.operand.type.is_pyobject or self.operand.type.is_ctuple
    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()
    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class
    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)
    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)
    def generate_py_operation_code(self, code):
        # Python-object operands go through a PyNumber_*() call provided
        # by the subclass's py_operation_function().
        function = self.py_operation_function(code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type
    def analyse_cpp_operation(self, env):
        cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
        if cpp_type is None:
            # Bug fix: previously interpolated the *builtin* 'type' into the
            # message instead of the operand's type.
            error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
            self.type_error()
            return
        self.type = cpp_type
class NotNode(UnopNode):
    # 'not' operator
    #
    # operand   ExprNode
    operator = '!'
    type = PyrexTypes.c_bint_type
    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result
    def compile_time_value(self, denv):
        operand = self.operand.compile_time_value(denv)
        try:
            return not operand
        except Exception as e:  # fixed Py2-only "except Exception, e" syntax
            self.compile_time_value_error(e)
    def infer_unop_type(self, env, operand_type):
        # 'not' always produces a C boolean.
        return PyrexTypes.c_bint_type
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class:
            # C++ classes may overload operator!().
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if not cpp_type:
                error(self.pos, "'!' operator not defined for %s" % operand_type)
                self.type = PyrexTypes.error_type
                return
            self.type = cpp_type
        else:
            self.operand = self.operand.coerce_to_boolean(env)
        return self
    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()
    def generate_result_code(self, code):
        # Result is computed inline by calculate_result_code().
        pass
class UnaryPlusNode(UnopNode):
    # Unary '+' operator.
    operator = '+'

    def analyse_c_operation(self, env):
        # Like C, unary '+' promotes the operand to at least 'int'.
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self, code):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        operand_code = self.operand.result()
        if not self.is_cpp_operation():
            # For plain numeric C types, unary '+' is a no-op.
            return operand_code
        return "(+%s)" % operand_code
class UnaryMinusNode(UnopNode):
    # unary '-' operator
    operator = '-'
    def analyse_c_operation(self, env):
        if self.operand.type.is_numeric:
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            # Complex negation is a function call, not an infix operator.
            self.infix = False
    def py_operation_function(self, code):
        return "PyNumber_Negative"
    def calculate_result_code(self):
        if self.infix:
            return "(-%s)" % self.operand.result()
        else:
            return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
    def get_constant_c_result_code(self):
        # Returns None (implicitly) when the operand has no constant code.
        value = self.operand.get_constant_c_result_code()
        if value:
            return "(-%s)" % value
class TildeNode(UnopNode):
    #  Unary '~' (bitwise complement) operator.
    #  NOTE: the 'operator' attribute is supplied at construction time
    #  (see unop_node() below), not as a class attribute here.
    def analyse_c_operation(self, env):
        if self.operand.type.is_int:
            # C integer promotion: result is at least an int.
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
    def py_operation_function(self, code):
        return "PyNumber_Invert"
    def calculate_result_code(self):
        return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
    #  Base class for C-only unary operators (never a Python operation).
    def is_py_operation(self):
        return False
class DereferenceNode(CUnopNode):
    #  Unary '*' (pointer dereference) operator.
    operator = '*'
    def infer_unop_type(self, env, operand_type):
        # Dereferencing a pointer yields its base type; anything else is an error.
        if operand_type.is_ptr:
            return operand_type.base_type
        else:
            return PyrexTypes.error_type
    def analyse_c_operation(self, env):
        if self.operand.type.is_ptr:
            self.type = self.operand.type.base_type
        else:
            self.type_error()
    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    #  Unary '++'/'--' operator (C only); 'is_prefix' selects pre/post form.
    def analyse_c_operation(self, env):
        if self.operand.type.is_numeric:
            # C integer promotion: result is at least an int.
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_ptr:
            # Pointer arithmetic keeps the pointer type.
            self.type = self.operand.type
        else:
            self.type_error()
    def calculate_result_code(self):
        if self.is_prefix:
            return "(%s%s)" % (self.operator, self.operand.result())
        else:
            return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
    """Return a node factory for the given prefix/postfix flavour of the
    C '++' or '--' operator, producing DecrementIncrementNode instances."""
    def make_node(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
class AmpersandNode(CUnopNode):
    #  The C address-of operator ('&').
    #
    #  operand  ExprNode
    operator = '&'
    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_ptr_type(operand_type)
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        argtype = self.operand.type
        if argtype.is_cpp_class:
            # A C++ class may overload 'operator&'; honour that first.
            cpp_type = argtype.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                self.type = cpp_type
                return self
        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
            if argtype.is_memoryviewslice:
                self.error("Cannot take address of memoryview slice")
            else:
                self.error("Taking address of non-lvalue")
            return self
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return self
        self.type = PyrexTypes.c_ptr_type(argtype)
        return self
    def check_const(self):
        return self.operand.check_const_addr()
    def error(self, mess):
        # Report the error and put the node into a recognisable error state.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = "<error>"
    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()
    def generate_result_code(self, code):
        # Result is produced inline by calculate_result_code(); nothing to emit.
        pass
# Dispatch table mapping unary operator strings to their node classes;
# consumed by unop_node() below.  '!' and C-only operators are handled
# by dedicated classes instead.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct unop node of appropriate class for
    # given operator.
    if isinstance(operand, IntNode) and operator == '-':
        # Fold '-<int literal>' directly into a negated integer literal node.
        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
                       longness=operand.longness, unsigned=operand.unsigned)
    elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
        # Chained comparison: (operand.operator == operator) and (operator in '+-'),
        # i.e. a doubled '+' or '-' such as '++x' / '--x', which is a no-op in
        # Python — warn the user who probably expected C increment/decrement.
        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
        operator = operator,
        operand = operand)
class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #  typecheck    boolean
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None
    subexprs = ['operand']
    base_type = declarator = type = None
    def type_dependencies(self, env):
        return ()
    def infer_type(self, env):
        # Resolve the declared target type lazily (unless set by a transform).
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if self.type is PyrexTypes.c_bint_type:
            # short circuit this to a coercion
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            # Casting a temporary Python object to a C pointer-ish type would
            # leave a dangling reference once the temp is decref'ed.
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object.
            if self.type is bytes_type and self.operand.type.is_int:
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                base_type = self.base_type.analyse(env)
                self.operand = self.operand.coerce_to(base_type, env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value.
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            # Python object -> (other) Python object type.
            if self.typecheck:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        return self
    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()
    def is_ephemeral(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_ephemeral()
    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()
    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()
    def check_const(self):
        return self.operand.check_const()
    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)
    def calculate_result_code(self, operand_result = None):
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                # complex -> complex: cast real and imaginary parts separately.
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                # real -> complex: imaginary part is zero.
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                    self.type.from_parts,
                    real_part,
                    imag_part)
        else:
            return self.type.cast_code(operand_result)
    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)
    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            #  Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)
    def generate_result_code(self, code):
        if self.is_temp:
            # Temp result: take a new owned reference to the cast object.
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())
# Error message constants used by CythonArrayNode when validating a
# '<base_type[:...]>' cast from a pointer or C array to cython.array.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type. i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles <int[:, :]> my_c_array

    operand             ExprNode                 the thing we're casting
    base_type_node      MemoryViewSliceTypeNode  the cast expression node
    """
    subexprs = ['operand', 'shapes']
    shapes = None
    is_temp = True
    mode = "c"
    array_dtype = None
    shape_type = PyrexTypes.c_py_ssize_t_type
    def analyse_types(self, env):
        from . import MemoryView
        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes
        MemoryView.validate_memslice_dtype(self.pos, array_dtype)
        # Default to error_type; only set the real type once all checks pass.
        self.type = error_type
        self.shapes = []
        ndim = len(axes)
        # Base type of the pointer or C array we are converting
        base_type = self.operand.type
        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self
        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            # Strip nested array types, collecting each dimension's size.
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self
        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                  (ndim, len(array_dimension_sizes)))
            return self
        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self
            if axis.stop.is_none:
                if array_dimension_sizes:
                    # C array: take the shape from the declared dimension size.
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=str(dimsize),
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self
            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                # NOTE(review): coerce_to_temp() returns a new node which is
                # discarded here; verify whether the result should be rebound.
                shape.coerce_to_temp(env)
            self.shapes.append(shape)
            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self
                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self
                if axis_no == 0:
                    # A step of 1 on the *first* axis means Fortran order.
                    self.mode = "fortran"
            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self
        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)
        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')
        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self
    def allocate_temp_result(self, code):
        if self.temp_code:
            # Fixed typo in the error message ("mulitple" -> "multiple").
            raise RuntimeError("temp allocated multiple times")
        self.temp_code = code.funcstate.allocate_temp(self.type, True)
    def infer_type(self, env):
        return self.get_cython_array_type(env)
    def get_cython_array_type(self, env):
        # Look up the cython.view.array extension type in the shared scope.
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type
    def generate_result_code(self, code):
        from . import Buffer
        shapes = [self.shape_type.cast_code(shape.result())
                      for shape in self.shapes]
        dtype = self.coercion_type.dtype
        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)
        itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
        type_info = Buffer.get_type_information_cname(code, dtype)
        if self.operand.type.is_ptr:
            # Guard against constructing an array from a NULL pointer.
            code.putln("if (!%s) {" % self.operand.result())
            code.putln(    'PyErr_SetString(PyExc_ValueError,'
                                '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")
        code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                                                (format_temp, type_info))
        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
            shapes_temp, buildvalue_fmt, ", ".join(shapes)))
        err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                       shapes_temp,
                                                       format_temp)
        code.putln(code.error_goto_if(err, self.pos))
        code.put_gotref(format_temp)
        code.put_gotref(shapes_temp)
        tup = (self.result(), shapes_temp, itemsize, format_temp,
               self.mode, self.operand.result())
        code.putln('%s = __pyx_array_new('
                            '%s, %s, PyBytes_AS_STRING(%s), '
                            '(char *) "%s", (char *) %s);' % tup)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.result())
        def dispose(temp):
            # Release a borrowed helper temp (decref + free the C temp slot).
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)
        dispose(shapes_temp)
        dispose(format_temp)
    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type
        none_node = NoneNode(pos)
        axes = []
        # One slice axis per C array dimension; assumes src_node.type is
        # actually an array type (otherwise 'axes' stays empty below).
        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node, stop=none_node,
                                       step=none_node))
            base_type = base_type.base_type
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result = result.analyse_types(env)
        return result
class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.
    #  sizeof() is always a compile-time constant of C size_t type.
    type = PyrexTypes.c_size_t_type
    def check_const(self):
        return True
    def generate_result_code(self, code):
        # Result is produced inline by calculate_result_code(); nothing to emit.
        pass
class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode
    subexprs = []
    arg_type = None
    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE: the 'if 0' branch below is deliberately disabled dead code,
        # kept as a sketch of that alternative handling.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                self.__class__ = SizeofVarNode
                node = self.analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self
    def check_type(self):
        # Reject types whose size is not meaningful or not known.
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.empty_declaration_code()
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable
    #
    #  operand   ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # Morph into a SizeofTypeNode now that the operand turned out
            # to name a type.
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand = self.operand.analyse_types(env)
        return self
    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()
    def generate_result_code(self, code):
        # Result is produced inline by calculate_result_code(); nothing to emit.
        pass
class TypeofNode(ExprNode):
    #  Compile-time type of an expression, as a string.
    #
    #  operand   ExprNode
    #  literal   StringNode # internal
    literal = None
    type = py_object_type
    subexprs = ['literal'] # 'operand' will be ignored after type analysis!
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        # Freeze the operand's analysed type into a string literal node.
        value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        literal = StringNode(self.pos, value=value)
        literal = literal.analyse_types(env)
        self.literal = literal.coerce_to_pyobject(env)
        return self
    def may_be_none(self):
        # Always a string, never None.
        return False
    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)
    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
    # Python 3.5+: use the stdlib matrix multiplication operator directly.
    matmul_operator = operator.matmul
except AttributeError:
    def matmul_operator(a, b):
        """Emulate 'a @ b' via the __matmul__/__rmatmul__ protocol on
        Pythons that predate operator.matmul (PEP 465).

        Bug fix: the previous version called the *bound* methods as
        func(a, b), passing one argument too many; bound __matmul__
        takes only the right operand and __rmatmul__ only the left.
        """
        try:
            func = a.__matmul__
        except AttributeError:
            # Left operand has no __matmul__; use the reflected form.
            return b.__rmatmul__(a)
        result = func(b)
        if result is NotImplemented:
            # Left operand declined; fall back to the reflected operation.
            return b.__rmatmul__(a)
        return result
# Maps binary operator strings to the functions that evaluate them at
# compile time (used for constant folding of compile-time expressions).
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    '@': matmul_operator,
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
    """Look up the compile-time evaluation function for *node*'s binary
    operator; reports an error (and returns None) if it is unsupported."""
    func = compile_time_binary_operators.get(node.operator)
    if func is None:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression"
                  % node.operator)
    return func
class BinopNode(ExprNode):
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.
    subexprs = ['operand1', 'operand2']
    inplace = False
    def calculate_constant_result(self):
        # Constant-fold using the compile-time operator table.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))
    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self
    def analyse_operation(self, env):
        # Dispatch to the Python / C++ / plain C handling of the operation.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)
    def is_py_operation_types(self, type1, type2):
        return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
            or self.operand2.type.is_cpp_class)
    def analyse_cpp_operation(self, env):
        # Resolve a C++ operator overload and coerce the operands to its
        # declared argument types.
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # Member operator: operand1 is 'this', only operand2 is an argument.
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def result_type(self, type1, type2):
        if self.is_py_operation_types(type1, type2):
            if type2.is_string:
                type2 = Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            if type1.is_builtin_type or type2.is_builtin_type:
                if type1 is type2 and self.operator in '**%+|&^':
                    # FIXME: at least these operators should be safe - others?
                    return type1
                result_type = self.infer_builtin_types_operation(type1, type2)
                if result_type is not None:
                    return result_type
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # Subclasses may refine the inferred result type for builtin operands.
        return None
    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()
    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def is_ephemeral(self):
        return (super(BinopNode, self).is_ephemeral() or
                self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function(code)
            if self.operator == '**':
                # PyNumber_Power takes a third (modulus) argument.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.is_temp:
            code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
    def type_error(self):
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                    self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    #  Binary operator restricted to C operands (Python operands are an error).
    def analyse_types(self, env):
        node = BinopNode.analyse_types(self, env)
        if node.is_py_operation():
            # C-only operator applied to Python objects: flag as an error type.
            node.type = PyrexTypes.error_type
        return node
    def py_operation_function(self, code):
        return ""
    def calculate_result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result(),
            self.operator,
            self.operand2.result())
    def compute_c_result_type(self, type1, type2):
        cpp_type = None
        if type1.is_cpp_class or type1.is_ptr:
            cpp_type = type1.find_cpp_operation_type(self.operator, type2)
        # FIXME: handle the reversed case?
        #if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
        #    cpp_type = type2.find_cpp_operation_type(self.operator, type1)
        # FIXME: do we need to handle other cases here?
        return cpp_type
def c_binop_constructor(operator):
    """Return a node factory producing CBinopNode instances for *operator*."""
    return lambda pos, **operands: CBinopNode(pos, operator=operator, **operands)
class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.
    infix = True
    overflow_check = False
    overflow_bit_node = None
    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # Complex arithmetic is emitted as function calls, not infix C.
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                # Commutative op: put the constant on the right so the
                # const_rhs fast path below applies.
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)
    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None
    def may_be_none(self):
        if self.type and self.type.is_builtin_type:
            # if we know the result type, we know the operation, so it can't be None
            return False
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()
    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None
    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric  or type1.is_enum) \
            and (type2.is_numeric  or type2.is_enum)
    def generate_evaluation_code(self, code):
        if self.overflow_check:
            # Allocate a flag the overflow-checking helper sets on overflow,
            # then raise OverflowError if it fired.
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)
    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())
    def is_py_operation_types(self, type1, type2):
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))
    def py_operation_function(self, code):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            # In-place variants share the same name with an 'InPlace' infix.
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name
    # C-API function for each Python-level operator.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "@":        "__Pyx_PyNumber_MatrixMultiply",
        "/":        "__Pyx_PyNumber_Divide",
        "//":       "PyNumber_FloorDivide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power",
    }
    # Operators that have generated overflow-checking helpers.
    overflow_op_names = {
        "+":  "add",
        "-":  "sub",
        "*":  "mul",
        "<<":  "lshift",
    }
class IntBinopNode(NumBinopNode):
    #  Binary operation taking integer arguments.
    def c_types_okay(self, type1, type2):
        #print "IntBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_int or type1.is_enum) \
            and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
    #  '+' operator.
    def is_py_operation_types(self, type1, type2):
        # String/unicode-pointer concatenation is a Python operation.
        if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # b'abc' + 'abc' raises an exception in Py3,
        # so we can safely infer the Py2 type for bytes here
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2 in string_types:
            # Result is the "wider" of the two string types in the hierarchy.
            return string_types[max(string_types.index(type1),
                                    string_types.index(type2))]
        return None
    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # pointer +/- integer keeps the pointer type (C pointer arithmetic).
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
            return type2
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)
    def py_operation_function(self, code):
        type1, type2 = self.operand1.type, self.operand2.type
        if type1 is unicode_type or type2 is unicode_type:
            if type1.is_builtin_type and type2.is_builtin_type:
                # Specialised unicode concat helper; the 'Safe' variant
                # additionally handles a possibly-None operand.
                if self.operand1.may_be_none() or self.operand2.may_be_none():
                    return '__Pyx_PyUnicode_ConcatSafe'
                else:
                    return '__Pyx_PyUnicode_Concat'
        return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
    #  '-' operator.
    def compute_c_result_type(self, type1, type2):
        # pointer - integer keeps the pointer type;
        # pointer - pointer yields ptrdiff_t (C pointer arithmetic).
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
            return PyrexTypes.c_ptrdiff_t_type
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)
class MulNode(NumBinopNode):
    #  '*' operator.
    def is_py_operation_types(self, type1, type2):
        # string * int (repetition) is a Python operation.
        if ((type1.is_string and type2.is_int) or
                (type2.is_string and type1.is_int)):
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # let's assume that whatever builtin type you multiply a string with
        # will either return a string of the same type or fail with an exception
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2.is_builtin_type:
            return type1
        if type2 in string_types and type1.is_builtin_type:
            return type2
        # multiplication of containers/numbers with an integer value
        # always (?) returns the same type
        if type1.is_int:
            return type2
        if type2.is_int:
            return type1
        return None
class MatMultNode(NumBinopNode):
    #  '@' (matrix multiplication) operator — always a Python operation.
    def is_py_operation_types(self, type1, type2):
        return True
    def generate_evaluation_code(self, code):
        # Requires the __Pyx_PyNumber_MatrixMultiply utility helper.
        code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
        super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
    #  '/' or '//' operator.
    #
    #  Implements both true division and floor division for Python objects
    #  and C numeric operands.  For C operands, the 'cdivision' directive
    #  selects between plain C semantics (truncation towards zero, no
    #  zero-division check) and Python semantics (floor division, runtime
    #  ZeroDivisionError / OverflowError checks).
    cdivision = None          # use plain C division semantics (None == decide later)
    truedivision = None       # == "unknown" if operator == '/'
    ctruedivision = False     # request C-level true (float) division
    cdivision_warnings = False
    zerodivision_check = None # whether a runtime divisor==0 check is emitted
    def find_compile_time_binary_operator(self, op1, op2):
        # Select the Python function used to constant-fold this division.
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
                func = compile_time_binary_operators['//']
        return func
    def calculate_constant_result(self):
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_operation(self, env):
        # The 'cdivision' directive (or a per-node override) disables
        # Python division semantics for C operands.
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            # C++ operator/ overloads always use plain C semantics.
            self.cdivision = True
        if not self.type.is_pyobject:
            # A zero-division check is needed unless cdivision was requested
            # or the divisor is a known non-zero compile-time constant.
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)
    def compute_c_result_type(self, type1, type2):
        if self.operator == '/' and self.ctruedivision:
            # True division of C integers promotes to at least double.
            if not type1.is_float and not type2.is_float:
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                # Default: plain C division is already correct for unsigned
                # and float types; signed ints need the generated helper.
                self.cdivision = (code.globalstate.directives['cdivision']
                                  or not self.type.signed
                                  or self.type.is_float)
            if not self.cdivision:
                code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def generate_div_warning_code(self, code):
        # Emit the runtime checks that accompany Python-semantics division:
        # ZeroDivisionError, signed negation overflow (e.g. INT_MIN / -1),
        # and the optional 'cdivision_warnings' runtime warning.
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                # Raising a Python exception requires holding the GIL,
                # which may have been released here.
                code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
            if self.type.is_int and self.type.signed and self.operator != '%':
                code.globalstate.use_utility_code(division_overflow_test_code)
                if self.operand2.type.signed == 2:
                    # explicitly signed, no runtime check needed
                    minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
                else:
                    type_of_op2 = self.operand2.type.empty_declaration_code()
                    minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
                        type_of_op2, self.operand2.result(), type_of_op2)
                # Negating the minimum signed value overflows; raise
                # OverflowError like CPython does in that case.
                code.putln("else if (sizeof(%s) == sizeof(long) && %s "
                           " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                           self.type.empty_declaration_code(),
                           minus1_check,
                           self.operand1.result()))
                code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
                code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # Warn when C and Python semantics would differ, i.e. when
                # exactly one operand is negative.
                code.globalstate.use_utility_code(cdivision_warning_utility_code)
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                                self.operand1.result(),
                                self.operand2.result()))
                code.put_ensure_gil()
                code.putln(code.set_error_info(self.pos, used=True))
                code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
                                                       "%(LINENO)s)) {" % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO': Naming.lineno_cname,
                })
                code.put_release_ensured_gil()
                code.put_goto(code.error_label)
                code.putln("}")
                code.put_release_ensured_gil()
                code.putln("}")
    def calculate_result_code(self):
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            # Floor division on C floats maps to floor(a / b) from math.h.
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # Cast operands up to the (float) result type so that C
                # performs float rather than integer division.
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            # Python floor-division semantics via the generated helper.
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
class ModNode(DivNode):
    #  '%' operator.
    #
    #  Covers both numeric modulo and string formatting ("fmt" % args).
    def is_py_operation_types(self, type1, type2):
        # Any string operand means %-formatting, a Python-level operation.
        return (type1.is_string
                or type2.is_string
                or NumBinopNode.is_py_operation_types(self, type1, type2))
    def infer_builtin_types_operation(self, type1, type2):
        # b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
        if type1 is unicode_type:
            # None % xyz may be implemented by the RHS, so only infer when
            # the LHS cannot be None (or the RHS is a plain builtin).
            if type2.is_builtin_type or not self.operand1.may_be_none():
                return type1
        elif type1 in (bytes_type, str_type, basestring_type):
            if type2 is unicode_type:
                return type2
            elif type2.is_numeric:
                return type1
            elif type1 is bytes_type and not type2.is_builtin_type:
                return None # RHS might implement the '%' operator differently in Py3
            else:
                return basestring_type # either str or unicode, can't tell
        return None
    def zero_division_message(self):
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"
    def analyse_operation(self, env):
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                # Unsigned C modulo already matches Python semantics.
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)
    def generate_evaluation_code(self, code):
        if not self.type.is_pyobject and not self.cdivision:
            # Python-semantics modulo needs a generated helper function.
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    mod_int_utility_code.specialize(self.type))
            else: # float
                code.globalstate.use_utility_code(
                    mod_float_utility_code.specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # note: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def calculate_result_code(self):
        if self.cdivision:
            if self.type.is_float:
                # C's '%' is undefined for floats; use fmod() from math.h.
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            return "__Pyx_mod_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
    def py_operation_function(self, code):
        # Use the direct unicode/str formatting C-API calls where possible;
        # the *Safe variants also accept None as the format object.
        if self.operand1.type is unicode_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyUnicode_FormatSafe'
            else:
                return 'PyUnicode_Format'
        elif self.operand1.type is str_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyString_FormatSafe'
            else:
                return '__Pyx_PyString_Format'
        return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
    #  '**' operator.
    def analyse_c_operation(self, env):
        # Select the C-level power function that matches the result type.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            # C has no integer pow(); generate a type-specialized helper.
            self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
            env.use_utility_code(
                int_pow_utility_code.specialize(
                    func_name=self.pow_func,
                    type=self.type.empty_declaration_code(),
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                            (self.operand1.type, self.operand2.type))
    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))
    def py_operation_function(self, code):
        # Optimize the common "2 ** N" case for Python objects with a
        # dedicated helper function.
        if (self.type.is_pyobject and
                self.operand1.constant_result == 2 and
                isinstance(self.operand1.constant_result, (int, long)) and
                self.operand2.type is py_object_type):
            code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
            if self.inplace:
                return '__Pyx_PyNumber_InPlacePowerOf2'
            else:
                return '__Pyx_PyNumber_PowerOf2'
        return super(PowNode, self).py_operation_function(code)
class BoolBinopNode(ExprNode):
    """
    Short-circuiting boolean operation.
    Note that this node provides the same code generation method as
    BoolBinopResultNode to simplify expression nesting.

    operator  string                             "and"/"or"
    operand1  BoolBinopNode/BoolBinopResultNode  left operand
    operand2  BoolBinopNode/BoolBinopResultNode  right operand
    """
    subexprs = ['operand1', 'operand2']
    is_temp = True
    operator = None
    operand1 = None
    operand2 = None
    def infer_type(self, env):
        # The result may be either operand, so span both inferred types.
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)
    def may_be_none(self):
        # 'or' only returns its last operand when all previous ones were
        # falsy, so only the last operand can contribute None; 'and' may
        # short-circuit to either operand.
        if self.operator == 'or':
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()
    def calculate_constant_result(self):
        operand1 = self.operand1.constant_result
        operand2 = self.operand2.constant_result
        if self.operator == 'and':
            self.constant_result = operand1 and operand2
        else:
            self.constant_result = operand1 or operand2
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        if self.operator == 'and':
            return operand1 and operand2
        else:
            return operand1 or operand2
    def is_ephemeral(self):
        return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
    def analyse_types(self, env):
        # Note: we do not do any coercion here as we most likely do not know the final type anyway.
        # We even accept to set self.type to ErrorType if both operands do not have a spanning type.
        # The coercion to the final type and to a "simple" value is left to coerce_to().
        operand1 = self.operand1.analyse_types(env)
        operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(
            operand1.type, operand2.type)
        self.operand1 = self._wrap_operand(operand1, env)
        self.operand2 = self._wrap_operand(operand2, env)
        return self
    def _wrap_operand(self, operand, env):
        # Wrap plain operands so that every child provides the
        # generate_bool_evaluation_code() interface.
        if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
            operand = BoolBinopResultNode(operand, self.type, env)
        return operand
    def wrap_operands(self, env):
        """
        Must get called by transforms that want to create a correct BoolBinopNode
        after the type analysis phase.
        """
        self.operand1 = self._wrap_operand(self.operand1, env)
        self.operand2 = self._wrap_operand(self.operand2, env)
    def coerce_to_boolean(self, env):
        return self.coerce_to(PyrexTypes.c_bint_type, env)
    def coerce_to(self, dst_type, env):
        # Push the coercion down into the operands so each side is coerced
        # where its value is produced; build a new node for the new type.
        operand1 = self.operand1.coerce_to(dst_type, env)
        operand2 = self.operand2.coerce_to(dst_type, env)
        return BoolBinopNode.from_node(
            self, type=dst_type,
            operator=self.operator,
            operand1=operand1, operand2=operand2)
    def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
        # Emit short-circuiting code.  Each operand jumps to 'and_label'
        # (continue an 'and' chain), 'or_label' (continue an 'or' chain) or
        # 'end_label' once the overall result is known; 'fall_through' is
        # the label that directly follows the operand's code.
        code.mark_pos(self.pos)
        outer_labels = (and_label, or_label)
        if self.operator == 'and':
            my_label = and_label = code.new_label('next_and')
        else:
            my_label = or_label = code.new_label('next_or')
        self.operand1.generate_bool_evaluation_code(
            code, final_result_temp, and_label, or_label, end_label, my_label)
        and_label, or_label = outer_labels
        code.put_label(my_label)
        self.operand2.generate_bool_evaluation_code(
            code, final_result_temp, and_label, or_label, end_label, fall_through)
    def generate_evaluation_code(self, code):
        self.allocate_temp_result(code)
        or_label = and_label = None
        end_label = code.new_label('bool_binop_done')
        self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
        code.put_label(end_label)
    gil_message = "Truth-testing Python object"
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_subexpr_disposal_code(self, code):
        pass # nothing to do here, all done in generate_evaluation_code()
    def free_subexpr_temps(self, code):
        pass # nothing to do here, all done in generate_evaluation_code()
    def generate_operand1_test(self, code):
        # Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            # Python truth testing can fail, so capture the result in a
            # C temp and check for the error return value.
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
    """
    Intermediate result of a short-circuiting and/or expression.
    Tests the result for 'truthiness' and takes care of coercing the final result
    of the overall expression to the target type.

    Note that this node provides the same code generation method as
    BoolBinopNode to simplify expression nesting.

    arg      ExprNode    the argument to test
    value    ExprNode    the coerced result value node
    """
    subexprs = ['arg', 'value']
    is_temp = True
    arg = None
    value = None
    def __init__(self, arg, result_type, env):
        # using 'arg' multiple times, so it must be a simple/temp value
        arg = arg.coerce_to_simple(env)
        # wrap in ProxyNode, in case a transform wants to replace self.arg later
        arg = ProxyNode(arg)
        super(BoolBinopResultNode, self).__init__(
            arg.pos, arg=arg, type=result_type,
            value=CloneNode(arg).coerce_to(result_type, env))
    def coerce_to_boolean(self, env):
        return self.coerce_to(PyrexTypes.c_bint_type, env)
    def coerce_to(self, dst_type, env):
        # unwrap, coerce, rewrap
        arg = self.arg.arg
        if dst_type is PyrexTypes.c_bint_type:
            arg = arg.coerce_to_boolean(env)
        # TODO: unwrap more coercion nodes?
        return BoolBinopResultNode(arg, dst_type, env)
    def nogil_check(self, env):
        # let's leave all errors to BoolBinopNode
        pass
    def generate_operand_test(self, code):
        # Generate code to test the truth of the first operand.
        if self.arg.type.is_pyobject:
            # Python truth testing can fail; capture into a C temp and
            # check for the negative error return value.
            test_result = code.funcstate.allocate_temp(
                PyrexTypes.c_bint_type, manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.arg.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.arg.result()
        return (test_result, self.arg.type.is_pyobject)
    def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
        # Emits one link of the short-circuit chain.  The 'disposal:'
        # comments below track the conditions under which self.arg has
        # already been disposed of, so each path disposes exactly once.
        code.mark_pos(self.pos)
        # x => x
        # x and ... or ... => next 'and' / 'or'
        # False ... or x => next 'or'
        # True and x => next 'and'
        # True or x => True (operand)
        self.arg.generate_evaluation_code(code)
        if and_label or or_label:
            test_result, uses_temp = self.generate_operand_test(code)
            if uses_temp and (and_label and or_label):
                # cannot become final result => free early
                # disposal: uses_temp and (and_label and or_label)
                self.arg.generate_disposal_code(code)
            sense = '!' if or_label else ''
            code.putln("if (%s%s) {" % (sense, test_result))
            if uses_temp:
                code.funcstate.release_temp(test_result)
            if not uses_temp or not (and_label and or_label):
                # disposal: (not uses_temp) or {not (and_label and or_label) [if]}
                self.arg.generate_disposal_code(code)
            if or_label and or_label != fall_through:
                # value is false => short-circuit to next 'or'
                code.put_goto(or_label)
            if and_label:
                # value is true => go to next 'and'
                if or_label:
                    code.putln("} else {")
                    if not uses_temp:
                        # disposal: (not uses_temp) and {(and_label and or_label) [else]}
                        self.arg.generate_disposal_code(code)
                if and_label != fall_through:
                    code.put_goto(and_label)
        if not and_label or not or_label:
            # if no next 'and' or 'or', we provide the result
            if and_label or or_label:
                code.putln("} else {")
            self.value.generate_evaluation_code(code)
            self.value.make_owned_reference(code)
            code.putln("%s = %s;" % (final_result_temp, self.value.result()))
            self.value.generate_post_assignment_code(code)
            # disposal: {not (and_label and or_label) [else]}
            self.arg.generate_disposal_code(code)
            self.value.free_temps(code)
            if end_label != fall_through:
                code.put_goto(end_label)
        if and_label or or_label:
            code.putln("}")
        self.arg.free_temps(code)
class CondExprNode(ExprNode):
    #  Short-circuiting conditional expression:
    #  "true_val if test else false_val".
    #
    #  test       ExprNode
    #  true_val   ExprNode
    #  false_val  ExprNode
    true_val = None
    false_val = None
    subexprs = ['test', 'true_val', 'false_val']
    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
    def infer_type(self, env):
        # The result type must span both branches without favouring either.
        return PyrexTypes.independent_spanning_type(
            self.true_val.infer_type(env),
            self.false_val.infer_type(env))
    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result
    def is_ephemeral(self):
        return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        self.is_temp = 1
        return self.analyse_result_type(env)
    def analyse_result_type(self, env):
        # Derive the common result type from the two branches and coerce
        # the branches to it where a Python object is involved.
        self.type = PyrexTypes.independent_spanning_type(
            self.true_val.type, self.false_val.type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
            error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        if self.type.is_error:
            self.type_error()
        return self
    def coerce_to(self, dst_type, env):
        # Push the coercion into both branches, then re-derive the result type.
        self.true_val = self.true_val.coerce_to(dst_type, env)
        self.false_val = self.false_val.coerce_to(dst_type, env)
        self.result_ctype = None
        return self.analyse_result_type(env)
    def type_error(self):
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type
    def check_const(self):
        return (self.test.check_const()
            and self.true_val.check_const()
            and self.false_val.check_const())
    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result())
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)
    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its (owned) value to our result temp.
        expr.generate_evaluation_code(code)
        if self.type.is_memoryviewslice:
            expr.make_owned_memoryviewslice(code)
        else:
            expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        pass # done explicitly above (cleanup must separately happen within the if/else blocks)
    def free_subexpr_temps(self, code):
        pass # done explicitly above (cleanup must separately happen within the if/else blocks)
# Mapping from Cython comparison operator strings to the CPython rich
# comparison constants (Py_LT ... Py_GE) used by PyObject_RichCompare().
richcmp_constants = {
    "<" : "Py_LT",
    "<=": "Py_LE",
    "==": "Py_EQ",
    "!=": "Py_NE",
    "<>": "Py_NE",
    ">" : "Py_GT",
    ">=": "Py_GE",
    # the following are faked by special compare functions
    "in" : "Py_EQ",
    "not_in": "Py_NE",
}
class CmpNode(object):
    #  Mixin class containing code common to PrimaryCmpNodes
    #  and CascadedCmpNodes.
    special_bool_cmp_function = None      # C function name for an optimized bool-result compare
    special_bool_cmp_utility_code = None  # UtilityCode providing that function
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def calculate_cascaded_constant_result(self, operand1_result):
        # Constant-fold one comparison step of a (possibly cascaded) chain.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        if (isinstance(operand1_result, (bytes, unicode)) and
                isinstance(operand2_result, (bytes, unicode)) and
                type(operand1_result) != type(operand2_result)):
            # string comparison of different types isn't portable
            return
        if self.operator in ('in', 'not_in'):
            if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
                if not self.operand2.args:
                    # containment in an empty literal is decidable now
                    self.constant_result = self.operator == 'not_in'
                    return
                elif isinstance(self.operand2, ListNode) and not self.cascade:
                    # tuples are more efficient to store than lists
                    self.operand2 = self.operand2.as_tuple()
            elif isinstance(self.operand2, DictNode):
                if not self.operand2.key_value_pairs:
                    self.constant_result = self.operator == 'not_in'
                    return
        self.constant_result = func(operand1_result, operand2_result)
    def cascaded_compile_time_value(self, operand1, denv):
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                # short-circuit like Python: only continue the cascade while
                # the preceding comparison was true
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result
    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False
        # single-character string literals can be compared as char values
        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True
        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                # two char-like literals: pick a UCS4 or uchar comparison
                if Builtin.unicode_type in (type1, type2):
                    return PyrexTypes.c_py_ucs4_type
                else:
                    return PyrexTypes.c_uchar_type
        return None
    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type in which this comparison (and, recursively,
        # any cascaded comparisons) should be carried out.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type
        new_common_type = None
        # catch general errors
        if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
                type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type
        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            if op not in ('==', '!=') \
                    and (type1.is_complex or type1.is_numeric) \
                    and (type2.is_complex or type2.is_numeric):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = type1
            elif type2.is_pyobject:
                new_common_type = type2
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)
        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1 == type2:
                if type1.is_ctuple:
                    # ctuples are compared via Python object semantics
                    new_common_type = py_object_type
                else:
                    new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type
        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type
        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)
        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
        return common_type
    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))
    def is_python_comparison(self):
        # True if this comparison must be dispatched through Python.
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))
    def coerce_operands_to(self, dst_type, env):
        # Coerce operand2 (and recursively all cascaded operands) to dst_type.
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)
    def is_python_result(self):
        # True if the comparison produces a Python object (rich compare
        # result) rather than a C bint.
        return ((self.has_python_operands() and
                 self.special_bool_cmp_function is None and
                 self.operator not in ('is', 'is_not', 'in', 'not_in') and
                 not self.is_c_string_contains() and
                 not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))
    def is_c_string_contains(self):
        # char-in-bytes / unicode-char-in-unicode can use a C-level search.
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))
    def is_ptr_contains(self):
        # NOTE: implicitly returns None (falsy) for other operators.
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string
    def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
        # Try to replace a generic rich comparison by an optimized C helper
        # that returns a C boolean directly.  Returns True on success.
        # note: currently operand1 must get coerced to a Python object if we succeed here!
        if self.operator in ('==', '!='):
            type1, type2 = operand1.type, self.operand2.type
            if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
                    # basestring equality reuses the unicode helper
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        elif self.operator in ('in', 'not_in'):
            if self.operand2.type is Builtin.dict_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
                return True
            elif self.operand2.type is Builtin.unicode_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
                self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
                return True
            else:
                if not self.operand2.type.is_pyobject:
                    self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
                return True
        return False
    def generate_operation_code(self, code, result_code,
            operand1, op , operand2):
        # Emit the C code for a single comparison, storing into result_code.
        if self.type.is_pyobject:
            error_clause = code.error_goto_if_null
            got_ref = "__Pyx_XGOTREF(%s); " % result_code
            if self.special_bool_cmp_function:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            else:
                coerce_result = "__Pyx_PyBool_FromLong"
        else:
            error_clause = code.error_goto_if_neg
            got_ref = ""
            coerce_result = ""
        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            if self.special_bool_cmp_utility_code:
                code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
            code.putln(
                "%s = %s(%s(%s, %s, %s)); %s%s" % (
                    result_code,
                    coerce_result,
                    self.special_bool_cmp_function,
                    result1, result2, richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
            assert op not in ('in', 'not_in'), op
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_complex:
            # complex values only support (in)equality; '!=' negates 'eq'
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                op == "!=" and "!" or "",
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))
        else:
            # plain C comparison; pick a common type for the operands
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            code.putln("%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2))
    def c_operator(self, op):
        # Map Cython's operator spelling to C ('is' => pointer equality).
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
class PrimaryCmpNode(ExprNode, CmpNode):
    """Non-cascaded comparison, or the first comparison of a cascaded
    sequence such as ``a < b < c`` (the rest hangs off ``self.cascade``).
    """
    # Non-cascaded comparison or first comparison of
    # a cascaded sequence.
    #
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    # cascade CascadedCmpNode
    # We don't use the subexprs mechanism, because
    # things here are too complicated for it to handle.
    # Instead, we override all the framework methods
    # which use it.
    child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
    cascade = None
    coerced_operand2 = None
    # True when this is an (in)equality test of a memoryview slice against None.
    is_memslice_nonecheck = False
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def calculate_constant_result(self):
        # Constant folding is only attempted for a plain, non-cascaded
        # comparison; the shared helper lives on CmpNode.
        assert not self.cascade
        self.calculate_cascaded_constant_result(self.operand1.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)
    def analyse_types(self, env):
        """Analyse operand types and decide whether this compiles to a
        Python-level rich comparison or a plain C comparison.

        Sets ``self.is_pycmp``, coerces operands to a common type where
        needed, and returns the (possibly unchanged) node.
        """
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return self
        if self.analyse_memoryviewslice_comparison(env):
            return self
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                # 'c_char in bytes' / 'Py_UCS4 in unicode': use a dedicated
                # C helper instead of a Python membership test.
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                # Generic Python membership test via PySequence_Contains.
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            # operand2 is shared with the cascade, so it must be reusable.
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        cdr = self.cascade
        while cdr:
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self
    def analyse_cpp_comparison(self, env):
        """Resolve an overloaded C++ comparison operator and coerce the
        operands to the parameter types of the chosen overload."""
        type1 = self.operand1.type
        type2 = self.operand2.type
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # member operator: only the right-hand side is a call argument
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.is_pycmp = False
        self.type = func_type.return_type
    def analyse_memoryviewslice_comparison(self, env):
        # Comparing a memoryview slice against None compiles to a plain
        # C pointer test on the underlying memview object.
        have_none = self.operand1.is_none or self.operand2.is_none
        have_slice = (self.operand1.type.is_memoryviewslice or
                      self.operand2.type.is_memoryviewslice)
        ops = ('==', '!=', 'is', 'is_not')
        if have_slice and have_none and self.operator in ops:
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            self.is_memslice_nonecheck = True
            return True
        return False
    def coerce_to_boolean(self, env):
        if self.is_pycmp:
            # coercing to bool => may allow for more efficient comparison code
            if self.find_special_bool_compare_function(
                    env, self.operand1, result_is_bool=True):
                self.is_pycmp = False
                self.type = PyrexTypes.c_bint_type
                self.is_temp = 1
                if self.cascade:
                    operand2 = self.cascade.optimise_comparison(
                        self.operand2, env, result_is_bool=True)
                    if operand2 is not self.operand2:
                        self.coerced_operand2 = operand2
                return self
        # TODO: check if we can optimise parts of the cascade here
        return ExprNode.coerce_to_boolean(self, env)
    def has_python_operands(self):
        return (self.operand1.type.is_pyobject
                or self.operand2.type.is_pyobject)
    def check_const(self):
        if self.cascade:
            self.not_const()
            return False
        else:
            return self.operand1.check_const() and self.operand2.check_const()
    def calculate_result_code(self):
        """Return the C expression for a non-temp comparison result."""
        if self.operand1.type.is_complex:
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
            else:
                negation = ""
            # note: the helper takes (haystack, needle), hence operand order
            return "(%s%s(%s, %s))" % (
                negation,
                method,
                self.operand2.result(),
                self.operand1.result())
        else:
            result1 = self.operand1.result()
            result2 = self.operand2.result()
            if self.is_memslice_nonecheck:
                if self.operand1.type.is_memoryviewslice:
                    result1 = "((PyObject *) %s.memview)" % result1
                else:
                    result2 = "((PyObject *) %s.memview)" % result2
            return "(%s %s %s)" % (
                result1,
                self.c_operator(self.operator),
                result2)
    def generate_evaluation_code(self, code):
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
            self.generate_operation_code(code, self.result(),
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                self.cascade.generate_evaluation_code(
                    code, self.result(), self.coerced_operand2 or self.operand2,
                    needs_evaluation=self.coerced_operand2 is not None)
            # operands were only needed to compute the temp result
            self.operand1.generate_disposal_code(code)
            self.operand1.free_temps(code)
            self.operand2.generate_disposal_code(code)
            self.operand2.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        # If this is called, it is a non-cascaded cmp,
        # so only need to dispose of the two main operands.
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_disposal_code(code)
    def free_subexpr_temps(self, code):
        # If this is called, it is a non-cascaded cmp,
        # so only need to dispose of the two main operands.
        self.operand1.free_temps(code)
        self.operand2.free_temps(code)
    def annotate(self, code):
        self.operand1.annotate(code)
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    """One link of a cascaded comparison chain (``a < b < c``)."""
    # A CascadedCmpNode is not a complete expression node. It
    # hangs off the side of another comparison node, shares
    # its left operand with that node, and shares its result
    # with the PrimaryCmpNode at the head of the chain.
    #
    # operator string
    # operand2 ExprNode
    # cascade CascadedCmpNode
    child_attrs = ['operand2', 'coerced_operand2', 'cascade']
    cascade = None
    coerced_operand2 = None
    constant_result = constant_value_not_set # FIXME: where to calculate this?
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def analyse_types(self, env):
        # Note: operand1 belongs to the preceding node in the chain and
        # is analysed there, not here.
        self.operand2 = self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        return self
    def has_python_operands(self):
        return self.operand2.type.is_pyobject
    def optimise_comparison(self, operand1, env, result_is_bool=False):
        """Try to replace the generic rich comparison with a specialised
        boolean helper; returns the (possibly coerced) left operand."""
        if self.find_special_bool_compare_function(env, operand1, result_is_bool):
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            if not operand1.type.is_pyobject:
                operand1 = operand1.coerce_to_pyobject(env)
        if self.cascade:
            operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        return operand1
    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)
    def coerce_cascaded_operands_to_temp(self, env):
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            # operand2 is reused as the left operand of the next link,
            # so it must be safely re-evaluatable.
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
    def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
        """Emit short-circuiting code: only evaluate this link if the
        previous comparison in the chain was true."""
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        if needs_evaluation:
            operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        if needs_evaluation:
            operand1.generate_disposal_code(code)
            operand1.free_temps(code)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")
    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Dispatch table mapping each binary operator spelling to the ExprNode
# subclass that implements it; consumed by binop_node() below.
binop_node_classes = {
    "or": BoolBinopNode,
    "and": BoolBinopNode,
    "|": IntBinopNode,
    "^": IntBinopNode,
    "&": IntBinopNode,
    "<<": IntBinopNode,
    ">>": IntBinopNode,
    "+": AddNode,
    "-": SubNode,
    "*": MulNode,
    "@": MatMultNode,
    "/": DivNode,
    "//": DivNode,
    "%": ModNode,
    "**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
    """Construct a binary operator node of the class appropriate for
    *operator*, as selected via the binop_node_classes dispatch table.
    Extra keyword arguments are forwarded to the node constructor.
    """
    node_class = binop_node_classes[operator]
    return node_class(pos,
                      operator=operator,
                      operand1=operand1,
                      operand2=operand2,
                      inplace=inplace,
                      **kwargs)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    """Abstract base class for coercion nodes; wraps the coerced node."""
    # Abstract base class for coercion nodes.
    #
    # arg ExprNode node being coerced
    subexprs = ['arg']
    constant_result = not_a_constant
    def __init__(self, arg):
        # note: the coercion inherits the source position of its argument
        super(CoercionNode, self).__init__(arg.pos)
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))
    def calculate_constant_result(self):
        # constant folding can break type coercion, so this is disabled
        pass
    def annotate(self, code):
        # Record a 'coerce' annotation for the HTML annotation output
        # whenever the coercion actually changes the type.
        self.arg.annotate(code)
        if self.arg.type != self.type:
            file, line, col = self.pos
            code.annotate((file, line, col-1), AnnotationItem(
                style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
    """
    Coerce an object to a memoryview slice. This holds a new reference in
    a managed temp.
    """
    def __init__(self, arg, dst_type, env):
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.is_temp = 1
        self.env = env
        self.use_managed_ref = True
        self.arg = arg
    def generate_result_code(self, code):
        # Emit a call to the from-Python conversion function for this
        # memoryview slice type, followed by its error check.
        self.type.create_from_py_utility_code(self.env)
        code.putln("%s = %s(%s);" % (self.result(),
                                     self.type.from_py_function,
                                     self.arg.py_result()))
        error_cond = self.type.error_condition(self.result())
        code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
    """Wrap a node in a C type cast; no code is generated beyond the
    argument's own, the cast happens in the result expression."""
    # Wrap a node in a C type cast.
    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type
    def may_be_none(self):
        return self.arg.may_be_none()
    def calculate_result_code(self):
        return self.arg.result_as(self.type)
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    """Runtime type check of a generic Python object against an
    extension/builtin type; borrows the argument's result."""
    # This node is used to check that a generic Python
    # object is an instance of a particular extension type.
    # This node borrows the result of its argument node.
    # When True, a builtin type test rejects subtypes as well.
    exact_builtin_type = True
    def __init__(self, arg, dst_type, env, notnone=False):
        # The arg is know to be a Python object, and
        # the dst_type is known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone
    nogil_check = Node.gil_error
    gil_message = "Python type test"
    def analyse_types(self, env):
        return self
    def may_be_none(self):
        if self.notnone:
            return False
        return self.arg.may_be_none()
    def is_simple(self):
        return self.arg.is_simple()
    def result_in_temp(self):
        return self.arg.result_in_temp()
    def is_ephemeral(self):
        return self.arg.is_ephemeral()
    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()
    def calculate_constant_result(self):
        # FIXME
        pass
    def calculate_result_code(self):
        # borrow the argument's result; only a check is generated
        return self.arg.result()
    def generate_result_code(self, code):
        if self.type.typeobj_is_available():
            if self.type.is_builtin_type:
                type_test = self.type.type_test_code(
                    self.arg.py_result(),
                    self.notnone, exact=self.exact_builtin_type)
            else:
                type_test = self.type.type_test_code(
                    self.arg.py_result(), self.notnone)
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
            code.putln("if (!(%s)) %s" % (
                type_test, code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")
    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
    """Check that a Python object is not None, raising the exception
    configured by the creating transform; borrows the argument's result."""
    # This node is used to check that a Python object is not None and
    # raises an appropriate exception (as specified by the creating
    # transform).
    is_nonecheck = True
    def __init__(self, arg, exception_type_cname, exception_message,
                 exception_format_args):
        CoercionNode.__init__(self, arg)
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        self.exception_format_args = tuple(exception_format_args or ())
    nogil_check = None # this node only guards an operation that would fail already
    def analyse_types(self, env):
        return self
    def may_be_none(self):
        # that's exactly what this node guarantees
        return False
    def is_simple(self):
        return self.arg.is_simple()
    def result_in_temp(self):
        return self.arg.result_in_temp()
    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()
    def calculate_result_code(self):
        return self.arg.result()
    def condition(self):
        """Return the C expression that is compared against Py_None."""
        if self.type.is_pyobject:
            return self.arg.py_result()
        elif self.type.is_memoryviewslice:
            return "((PyObject *) %s.memview)" % self.arg.result()
        else:
            raise Exception("unsupported type")
    def put_nonecheck(self, code):
        # Emit the None test; the GIL is acquired around the exception
        # raising when we are in a nogil context.
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.condition())
        if self.in_nogil_context:
            code.put_ensure_gil()
        escape = StringEncoding.escape_byte_string
        if self.exception_format_args:
            code.putln('PyErr_Format(%s, "%s", %s);' % (
                self.exception_type_cname,
                StringEncoding.escape_byte_string(
                    self.exception_message.encode('UTF-8')),
                ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
                            for arg in self.exception_format_args ])))
        else:
            code.putln('PyErr_SetString(%s, "%s");' % (
                self.exception_type_cname,
                escape(self.exception_message.encode('UTF-8'))))
        if self.in_nogil_context:
            code.put_release_ensured_gil()
        code.putln(code.error_goto(self.pos))
        code.putln("}")
    def generate_result_code(self, code):
        self.put_nonecheck(code)
    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    """Convert a C value to a Python object of (by default) unspecific
    type; the result is held in a new owned reference."""
    # This node is used to convert a C data type
    # to a Python object.
    type = py_object_type
    target_type = py_object_type
    is_temp = 1
    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # special case: complex coercion is so complex that it
            # uses a macro ("__pyx_PyComplex_FromComplex()"), for
            # which the argument must be simple
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # be specific about some known types
            if arg.type.is_string or arg.type.is_cpp_string:
                self.type = default_str_type(env)
            elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
            self.target_type = self.type
        elif arg.type.is_string or arg.type.is_cpp_string:
            if (type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(arg.pos,
                    "default encoding required for conversion from '%s' to '%s'" %
                    (arg.type, type))
            self.type = self.target_type = type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            self.target_type = type
    gil_message = "Converting to Python object"
    def may_be_none(self):
        # FIXME: is this always safe?
        return False
    def coerce_to_boolean(self, env):
        # A C bint (or a Python bool) can be truth-tested directly.
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
                (arg_type.is_pyobject and arg_type.name == 'bool')):
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)
    def analyse_types(self, env):
        # The arg is always already analysed
        return self
    def generate_result_code(self, code):
        code.putln('%s; %s' % (
            self.arg.type.to_py_call_code(
                self.arg.result(),
                self.result(),
                self.target_type),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    """Convert a C int into a single-character Python bytes object,
    with a runtime range check for values outside 0..255."""
    # This node is used to convert a C int type to a Python bytes
    # object.
    is_temp = 1
    def __init__(self, arg, env):
        arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type
    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        if arg.type not in (PyrexTypes.c_char_type,
                            PyrexTypes.c_uchar_type,
                            PyrexTypes.c_schar_type):
            # wider int types need a range check before narrowing to a byte
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_SetString(PyExc_OverflowError, '
                       '"value too large to pack into a byte"); %s' % (
                           code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize needs a char*, so stage the value
            # in a char temp first
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
    """Convert a Python object to a C value of the given result type."""
    # This node is used to convert a Python object
    # to a C data type.
    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                  "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string or self.type.is_pyunicode_ptr:
            if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
                # the char* would dangle if the global is rebound later
                warning(arg.pos,
                        "Obtaining '%s' from externally modifiable global Python value" % result_type,
                        level=1)
    def analyse_types(self, env):
        # The arg is always already analysed
        return self
    def is_ephemeral(self):
        # a pointer into a Python object dies with that object
        return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
    def generate_result_code(self, code):
        code.putln(self.type.from_py_call_code(
            self.arg.py_result(), self.result(), self.pos, code))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())
    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    """Truth-test a value for use in a boolean context; for some known
    builtin container types this uses their C-level size macro instead
    of a generic __Pyx_PyObject_IsTrue() call."""
    # This node is used when a result needs to be used
    # in a boolean context.
    type = PyrexTypes.c_bint_type
    # builtin types whose truth value can be read via a size macro
    _special_builtins = {
        Builtin.list_type : 'PyList_GET_SIZE',
        Builtin.tuple_type : 'PyTuple_GET_SIZE',
        Builtin.bytes_type : 'PyBytes_GET_SIZE',
        Builtin.unicode_type : 'PyUnicode_GET_SIZE',
    }
    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            self.is_temp = 1
    def nogil_check(self, env):
        # the size-macro fast path does not need the GIL check message
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()
    gil_message = "Truth-testing Python object"
    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()
    def calculate_result_code(self):
        return "(%s != 0)" % self.arg.result()
    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is not None:
            # None-safe: an object of a builtin type may still be None here
            code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
                       self.result(),
                       self.arg.py_result(),
                       test_func,
                       self.arg.py_result()))
        else:
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
    """Build a C complex value from a real or complex argument; the
    construction happens entirely in the result expression."""
    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            # the macro-based accessors below need a simple argument
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)
    def calculate_result_code(self):
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % self.arg.result()
            imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
        else:
            # a real argument becomes (value + 0j)
            real_part = self.arg.result()
            imag_part = "0"
        return "%s(%s, %s)" % (
            self.type.from_parts,
            real_part,
            imag_part)
    def generate_result_code(self, code):
        pass
class CoerceToTempNode(CoercionNode):
    """Force another node's result into a temporary variable."""
    # This node is used to force the result of another node
    # to be stored in a temporary. It is only used if the
    # argument node's result is not already in a temporary.
    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type.as_argument_type()
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
    gil_message = "Creating temporary Python reference"
    def analyse_types(self, env):
        # The arg is always already analysed
        return self
    def coerce_to_boolean(self, env):
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            # no temp needed for a simple boolean expression
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self
    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            if self.type.is_pyobject:
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_incref_memoryviewslice(self.result(),
                                                not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode -> ProxyNode -> ArgNode
              CloneNode -^
    """
    nogil_check = None
    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self.constant_result = arg.constant_result
        self._proxy_type()
    def analyse_types(self, env):
        # note: runs full expression analysis on the wrapped node
        self.arg = self.arg.analyse_expressions(env)
        self._proxy_type()
        return self
    def infer_type(self, env):
        return self.arg.infer_type(env)
    def _proxy_type(self):
        # Mirror the wrapped node's type information onto the proxy.
        if hasattr(self.arg, 'type'):
            self.type = self.arg.type
            self.result_ctype = self.arg.result_ctype
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
    def result(self):
        return self.arg.result()
    def is_simple(self):
        return self.arg.is_simple()
    def may_be_none(self):
        return self.arg.may_be_none()
    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)
    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    """Reuse another node's (temp-held) result without re-evaluating or
    disposing of it; the original owner manages its lifetime."""
    # This node is employed when the result of another node needs
    # to be used multiple times. The argument node's result must
    # be in a temporary. This node "borrows" the result from the
    # argument node, and does not generate any evaluation or
    # disposal code for it. The original owner of the argument
    # node is responsible for doing those things.
    subexprs = [] # Arg is not considered a subexpr
    nogil_check = None
    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.constant_result = arg.constant_result
        if hasattr(arg, 'type'):
            self.type = arg.type
            self.result_ctype = arg.result_ctype
        if hasattr(arg, 'entry'):
            self.entry = arg.entry
    def result(self):
        return self.arg.result()
    def may_be_none(self):
        return self.arg.may_be_none()
    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)
    def infer_type(self, env):
        return self.arg.infer_type(env)
    def analyse_types(self, env):
        # re-sync type info with the (already analysed) argument node
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
        return self
    def coerce_to(self, dest_type, env):
        # literals are cheap to re-create, so coerce them directly
        if self.arg.is_literal:
            return self.arg.coerce_to(dest_type, env)
        return super(CloneNode, self).coerce_to(dest_type, env)
    def is_simple(self):
        return True # result is always in a temp (or a name)
    def generate_evaluation_code(self, code):
        pass
    def generate_result_code(self, code):
        pass
    def generate_disposal_code(self, code):
        pass
    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    # Special CloneNode for the self argument of builtin C methods
    # that accepts subtypes of the builtin type. This is safe only
    # for 'final' subtypes, as subtypes of the declared type may
    # override the C method.
    def coerce_to(self, dst_type, env):
        # no coercion needed when self is already a subtype of the target
        if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
            return self
        return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
    """Expression node that yields the current module object."""
    # Simple returns the module object
    type = py_object_type
    is_temp = False
    subexprs = []
    def analyse_types(self, env):
        return self
    def may_be_none(self):
        return False
    def calculate_result_code(self):
        # the module object lives in a C global, no evaluation needed
        return Naming.module_cname
    def generate_result_code(self, code):
        pass
class DocstringRefNode(ExprNode):
    """Expression node that extracts the __doc__ attribute of another
    (Python object) expression at runtime."""
    # Extracts the docstring of the body element
    subexprs = ['body']
    type = py_object_type
    is_temp = True
    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        assert body.type.is_pyobject
        self.body = body
    def analyse_types(self, env):
        return self
    def generate_result_code(self, code):
        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
            self.result(), self.body.result(),
            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
# Cached utility code for tuple unpacking error messages.
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
filename++; // avoid compiler warnings
lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
""")
# from intobject.c
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/Cython/Compiler/ExprNodes.py
|
Python
|
gpl-2.0
| 469,145
|
[
"VisIt"
] |
e17a6b83921b4ea7f488219459f5cf9400b29e468399bf7e2505d1d6171febe8
|
"""
Courseware views functions
"""
import logging
import json
import textwrap
import urllib
from collections import OrderedDict
from datetime import datetime
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Q
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from ipware.ip import get_ip
from markupsafe import escape
from rest_framework import status
import newrelic.agent
from courseware import grades
from courseware.access import has_access, _adjust_start_date_for_beta_testers
from courseware.access_response import StartDateError
from courseware.access_utils import in_preview_mode
from courseware.courses import (
get_courses,
get_course,
get_course_by_id,
get_permission_for_course_about,
get_studio_url,
get_course_overview_with_access,
get_course_with_access,
sort_by_announcement,
sort_by_start_date,
UserNotEnrolled
)
from courseware.masquerade import setup_masquerade
from openedx.core.djangoapps.credit.api import (
get_credit_requirement_status,
is_user_eligible_for_credit,
is_credit_course
)
from courseware.models import StudentModuleHistory
from courseware.model_data import FieldDataCache, ScoresClient
from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
from util.date_utils import strftime_localized
from util.db import outer_atomic
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from util.views import _record_feedback_in_zendesk
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
from courseware.url_helpers import get_redirect_url
log = logging.getLogger("edx.courseware")

# Extra names made available to templates rendered by this module.
template_imports = {'urllib': urllib}

# Depth to which the course tree is loaded for navigation:
# course -> chapter -> section.
CONTENT_DEPTH = 2
def user_groups(user):
    """
    Return the names of the UserTestGroups that ``user`` belongs to.

    Results are memoized in the cache for one hour; caching is bypassed
    when ``settings.DEBUG`` is on, since groups change often on dev boxes.

    TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60

    # Kill caching on dev machines -- we switch groups a lot
    names = cache.get(cache_key)
    if settings.DEBUG:
        names = None

    if names is None:
        names = [group.name for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, names, one_hour)

    return names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
    """
    Render the "find courses" page.

    The actual course selection work happens in courseware.courses; this
    view only decides whether to show the list and how it is sorted.
    """
    course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})

    courses_list = []
    if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
        courses_list = get_courses(request.user, request.META.get('HTTP_HOST'))

        sort_by_start = microsite.get_value(
            "ENABLE_COURSE_SORTING_BY_START_DATE",
            settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
        )
        if sort_by_start:
            courses_list = sort_by_start_date(courses_list)
        else:
            courses_list = sort_by_announcement(courses_list)

    return render_to_response(
        "courseware/courses.html",
        {'courses': courses_list, 'course_discovery_meanings': course_discovery_meanings}
    )
def render_accordion(user, request, course, chapter, section, field_data_cache):
    """
    Render the course navigation accordion HTML.

    ``chapter`` and ``section`` are url_names marking the current position;
    when they are '' or None a default accordion is rendered.

    Returns the rendered HTML string.
    """
    # grab the table of contents
    toc = toc_for_course(user, request, course, chapter, section, field_data_cache)

    context = dict(template_imports.items())
    context.update({
        'toc': toc,
        'course_id': course.id.to_deprecated_string(),
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    })
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
    """
    Return the display item that ``xmodule.position`` points at.

    If ``xmodule`` has no position, or the position is out of bounds, fall
    back to the first child that has children extending down to
    ``min_depth``.

    For example, if chapter_one has no position set, with two child sections,
    section-A having no children and section-B having a discussion unit,
    `get_current_child(chapter, min_depth=1)` will return section-B.

    Returns None only if there are no children at all.
    """
    if not hasattr(xmodule, 'position'):
        return None

    def _default_child(children):
        """First child of xmodule, subject to min_depth."""
        if not children:
            return None
        if not min_depth > 0:
            return children[0]
        deep_enough = [
            c for c in children
            if c.has_children_at_depth(min_depth - 1) and c.get_display_items()
        ]
        return deep_enough[0] if deep_enough else None

    children = xmodule.get_display_items()
    if xmodule.position is None:
        return _default_child(children)

    # position is 1-indexed.
    index = xmodule.position - 1
    if 0 <= index < len(children):
        return children[index]
    # module has a set position, but the position is out of range:
    # fall back to the default child.
    return _default_child(children)
def redirect_to_course_position(course_module, content_depth):
    """
    Redirect the user to their current place in the course.

    First-time visitors are sent to COURSE/CHAPTER/SECTION; returning
    visitors are sent to COURSE/CHAPTER, where the view finds the saved
    section and shows a message about resuming from the stored position.

    If there is no current position in the course or chapter, the first
    child is selected.
    """
    url_kwargs = {'course_id': course_module.id.to_deprecated_string()}

    chapter = get_current_child(course_module, min_depth=content_depth)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")
    url_kwargs['chapter'] = chapter.url_name

    if course_module.position is not None:
        # Returning visitor: a chapter-level redirect is enough.
        return redirect(reverse('courseware_chapter', kwargs=url_kwargs))

    # First visit: also pick a section, relying on the default of
    # returning the first child.
    section = get_current_child(chapter, min_depth=content_depth - 1)
    if section is None:
        raise Http404("No section found when loading current position in course")
    url_kwargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=url_kwargs))
def save_child_position(seq_module, child_name):
    """
    Persist the 1-indexed position of the child whose location name equals
    ``child_name``.

    ``child_name``: url_name of the child.  The position is written (and the
    module saved to the underlying KeyValueStore) only if it changed.
    """
    index = 0
    for child in seq_module.get_display_items():
        index += 1
        if child.location.name != child_name:
            continue
        # Only save if position changed
        if index != seq_module.position:
            seq_module.position = index
            seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
    """
    Walk up the course tree from a leaf module, updating each ancestor's
    position so that it points at the node we came from.
    """
    node = xmodule
    while node:
        parent_location = modulestore().get_parent_location(node.location)

        parent = None
        if parent_location:
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user,
                request,
                parent_descriptor,
                field_data_cache,
                node.location.course_key,
                course=course
            )

        # Modules without a 'position' field (e.g. non-sequential containers)
        # are traversed but not updated.
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, node.location.name)

        node = parent
@transaction.non_atomic_requests
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@outer_atomic(read_committed=True)
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if they
    are invalid.

    If section is not specified, displays the accordion opened to the right
    chapter.

    If neither chapter or section are specified, redirects to user's most
    recent chapter, or the first chapter if this is the user's first visit.

    Arguments:
     - request    : HTTP request
     - course_id  : course id (str: ORG/course/URL_NAME)
     - chapter    : chapter url_name (str)
     - section    : section url_name (str)
     - position   : position in module, eg of <sequential> module (str)

    Returns:
     - HTTPresponse
    """
    course_key = CourseKey.from_string(course_id)

    # Gather metrics for New Relic so we can slice data in New Relic Insights
    newrelic.agent.add_custom_parameter('course_id', unicode(course_key))
    newrelic.agent.add_custom_parameter('org', unicode(course_key.org))

    student = User.objects.prefetch_related("groups").get(id=request.user.id)

    redeemed_codes = CourseRegistrationCode.objects.filter(
        course_id=course_key,
        registrationcoderedemption__redeemed_by=request.user
    )

    # Redirect to dashboard if the course is blocked due to non-payment.
    if is_course_blocked(request, redeemed_codes, course_key):
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        log.warning(
            u'User %s cannot access the course %s because payment has not yet been received',
            student,
            course_key.to_deprecated_string()
        )
        return redirect(reverse('dashboard'))

    request.user = student  # keep just one instance of User
    with modulestore().bulk_operations(course_key):
        return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
    """
    Render the index page for the specified course.

    Runs inside a modulestore bulk-operations block (see ``index``).  The
    flow is: validate ``position``; check enrollment, prerequisites,
    entrance-exam and survey gates (each may redirect away); then build the
    accordion context and render either the requested section, a
    "welcome back" chapter page, or a redirect to the stored position.
    """
    # Verify that position a string is in fact an int
    if position is not None:
        try:
            int(position)
        except ValueError:
            raise Http404(u"Position {} is not an integer!".format(position))

    course = get_course_with_access(request.user, 'load', course_key, depth=2)
    staff_access = has_access(request.user, 'staff', course)

    # `user` may differ from request.user when staff masquerades as a student.
    masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)

    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
        return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

    # see if all pre-requisites (as per the milestones app feature) have been fulfilled
    # Note that if the pre-requisite feature flag has been turned off (default) then this check will
    # always pass
    if not has_access(user, 'view_courseware_with_prerequisites', course):
        # prerequisites have not been fulfilled therefore redirect to the Dashboard
        log.info(
            u'User %d tried to view course %s '
            u'without fulfilling prerequisites',
            user.id, unicode(course.id))
        return redirect(reverse('dashboard'))

    # Entrance Exam Check
    # If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
    # the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
    if chapter and course_has_entrance_exam(course):
        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
                and user_must_complete_entrance_exam(request, user, course):
            log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
            return redirect(reverse('courseware', args=[unicode(course.id)]))

    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key, user, course, depth=2)

        course_module = get_module_for_descriptor(
            user, request, course, field_data_cache, course_key, course=course
        )

        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

        studio_url = get_studio_url(course, 'course')

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(user, request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'studio_url': studio_url,
            'masquerade': masquerade,
            'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
        }

        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True

        has_content = course.has_children_at_depth(CONTENT_DEPTH)
        if not has_content:
            # Show empty courseware for a course with no units
            return render_to_response('courseware/courseware.html', context)
        elif chapter is None:
            # Check first to see if we should instead redirect the user to an Entrance Exam
            if course_has_entrance_exam(course):
                exam_chapter = get_entrance_exam_content(request, course)
                if exam_chapter:
                    exam_section = None
                    if exam_chapter.get_children():
                        exam_section = exam_chapter.get_children()[0]
                        if exam_section:
                            return redirect('courseware_section',
                                            course_id=unicode(course_key),
                                            chapter=exam_chapter.url_name,
                                            section=exam_section.url_name)
            # passing CONTENT_DEPTH avoids returning 404 for a course with an
            # empty first section and a second section with content
            return redirect_to_course_position(course_module, CONTENT_DEPTH)

        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor is not None:
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masquerade and masquerade.role == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masquerading as student: no chapter %s', chapter)
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            raise Http404

        if course_has_entrance_exam(course):
            # Message should not appear outside the context of entrance exam subsection.
            # if section is none then we don't need to show message on welcome back screen also.
            if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
                context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
                context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masquerade and masquerade.role == 'student':  # don't 404 if staff is masquerading as student
                    log.debug('staff masquerading as student: no section %s', section)
                    return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
                raise Http404

            ## Allow chromeless operation
            if section_descriptor.chrome:
                chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    context['disable_tabs'] = True

            if section_descriptor.default_tab:
                context['default_tab'] = section_descriptor.default_tab

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            field_data_cache.add_descriptor_descendents(
                section_descriptor, depth=None
            )

            section_module = get_module_for_descriptor(
                user,
                request,
                section_descriptor,
                field_data_cache,
                course_key,
                position,
                course=course
            )

            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter.
            save_child_position(chapter_module, section)
            section_render_context = {'activate_block_id': request.GET.get('activate_block_id')}
            context['fragment'] = section_module.render(STUDENT_VIEW, section_render_context)
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            studio_url = get_studio_url(course, 'course')
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user.
                # Clearing out the last-visited state and showing "first-time" view by redirecting
                # to courseware.
                course_module.position = None
                course_module.save()
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            prev_section_url = reverse('courseware_section', kwargs={
                'course_id': course_key.to_deprecated_string(),
                'chapter': chapter_descriptor.url_name,
                'section': prev_section.url_name
            })
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'studio_url': studio_url,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        # Doesn't bar Unicode characters from URL, but if Unicode characters do
        # cause an error it is a graceful failure.
        if isinstance(e, UnicodeEncodeError):
            raise Http404("URL contains Unicode characters")

        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception(
                u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s",
                request.user, user, course, chapter, section, position
            )
            try:
                result = render_to_response('courseware/courseware-error.html', {
                    'staff_access': staff_access,
                    'course': course
                })
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
    """
    Shorter version of jump_to where only the id of the element is passed in,
    assuming that id is unique within the course_id namespace.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    items = modulestore().get_items(course_key, qualifiers={'name': module_id})

    if not items:
        raise Http404(
            u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
                module_id, course_id, request.META.get("HTTP_REFERER", "")
            ))
    if len(items) > 1:
        # Ambiguous id: warn, then fall through to the first match.
        log.warning(
            u"Multiple items found with id: %s in course_id: %s. Referer: %s. Using first: %s",
            module_id,
            course_id,
            request.META.get("HTTP_REFERER", ""),
            items[0].location.to_deprecated_string()
        )

    return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
    """
    Show the page that contains a specific location.

    If the location is invalid or not in any class, return a 404.

    Otherwise, delegates to the index view to figure out whether this user
    has access, and what they should see.
    """
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(location).replace(course_key=course_key)
    except InvalidKeyError:
        # Either key failed to parse; don't reveal which one.
        raise Http404(u"Invalid course_key or usage_key")
    try:
        redirect_url = get_redirect_url(course_key, usage_key)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(usage_key))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(usage_key))

    return redirect(redirect_url)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.  Users who are denied access
    because the course is not yet live are redirected to the dashboard with
    a ``notlive`` query parameter; all other access denials yield a 404.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    with modulestore().bulk_operations(course_key):
        course = get_course_by_id(course_key, depth=2)
        access_response = has_access(request.user, 'load', course, course_key)

        if not access_response:

            # The user doesn't have access to the course. If they're
            # denied permission due to the course not being live yet,
            # redirect to the dashboard page.
            if isinstance(access_response, StartDateError):
                start_date = strftime_localized(course.start, 'SHORT_DATE')
                params = urllib.urlencode({'notlive': start_date})
                return redirect('{0}?{1}'.format(reverse('dashboard'), params))
            # Otherwise, give a 404 to avoid leaking info about access
            # control.
            raise Http404("Course not found.")

        staff_access = has_access(request.user, 'staff', course)
        # `user` may differ from request.user when staff masquerades as a student.
        masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)

        # If the user needs to take an entrance exam to access this course, then we'll need
        # to send them to that specific course module before allowing them into other areas
        if user_must_complete_entrance_exam(request, user, course):
            return redirect(reverse('courseware', args=[unicode(course.id)]))

        # check to see if there is a required survey that must be taken before
        # the user can access the course.
        if request.user.is_authenticated() and survey.utils.must_answer_survey(course, user):
            return redirect(reverse('course_survey', args=[unicode(course.id)]))

        studio_url = get_studio_url(course, 'course_info')

        # link to where the student should go to enroll in the course:
        # about page if there is not marketing site, SITE_NAME if there is
        url_to_enroll = reverse(course_about, args=[course_id])
        if settings.FEATURES.get('ENABLE_MKTG_SITE'):
            url_to_enroll = marketing_link('COURSES')

        show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(user, course.id)

        context = {
            'request': request,
            'course_id': course_key.to_deprecated_string(),
            'cache': None,
            'course': course,
            'staff_access': staff_access,
            'masquerade': masquerade,
            'studio_url': studio_url,
            'show_enroll_banner': show_enroll_banner,
            'url_to_enroll': url_to_enroll,
        }

        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True

        return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
    """
    Display the courses tab with the given name.

    Assumes the course_id is in a valid format; 404s when the tab does not
    exist or has no contents.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
    if tab is None:
        raise Http404

    contents = get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404

    context = {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
    }
    return render_to_response('courseware/static_tab.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))

    context = {
        'course': course,
        'staff_access': staff_access,
    }
    return render_to_response('courseware/syllabus.html', context)
def registered_for_course(course, user):
    """
    Return True if ``user`` is registered for ``course``, else False.

    Anonymous users and ``None`` are never registered.
    """
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
def get_cosmetic_display_price(course, registration_price):
    """
    Return the course price as a string preceded by the correct currency
    symbol, or 'Free' when there is no price.

    A positive ``registration_price`` overrides the course's cosmetic
    display price.
    """
    currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]

    price = registration_price if registration_price > 0 else course.cosmetic_display_price
    if not price:
        # Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
        return _('Free')

    # Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
    # numerical amount in that currency. Adjust this display as needed for your language.
    return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
    """
    Display the course's about page.

    Assumes the course_id is in a valid format.  When the marketing site is
    enabled, redirects to the course info page instead of rendering about.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    with modulestore().bulk_operations(course_key):
        permission = get_permission_for_course_about()
        course = get_course_with_access(request.user, permission, course_key)

        if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
            return redirect(reverse('info', args=[course.id.to_deprecated_string()]))

        registered = registered_for_course(course, request.user)

        staff_access = bool(has_access(request.user, 'staff', course))
        studio_url = get_studio_url(course, 'settings/details')

        # Enrolled (or otherwise loadable) users go straight to the info page.
        if has_access(request.user, 'load', course):
            course_target = reverse('info', args=[course.id.to_deprecated_string()])
        else:
            course_target = reverse('about_course', args=[course.id.to_deprecated_string()])

        show_courseware_link = bool(
            (
                has_access(request.user, 'load', course)
                and has_access(request.user, 'view_courseware_with_prerequisites', course)
            )
            or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
        )

        # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
        registration_price = 0
        in_cart = False
        reg_then_add_to_cart_link = ""

        _is_shopping_cart_enabled = is_shopping_cart_enabled()
        if _is_shopping_cart_enabled:
            registration_price = CourseMode.min_course_price_for_currency(course_key,
                                                                          settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
            if request.user.is_authenticated():
                cart = shoppingcart.models.Order.get_cart_for_user(request.user)
                in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
                    shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)

            reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
                reg_url=reverse('register_user'), course_id=urllib.quote(str(course_id)))

        course_price = get_cosmetic_display_price(course, registration_price)
        can_add_course_to_cart = _is_shopping_cart_enabled and registration_price

        # Used to provide context to message to student if enrollment not allowed
        can_enroll = bool(has_access(request.user, 'enroll', course))
        invitation_only = course.invitation_only
        is_course_full = CourseEnrollment.objects.is_course_full(course)

        # Register button should be disabled if one of the following is true:
        # - Student is already registered for course
        # - Course is already full
        # - Student cannot enroll in course
        active_reg_button = not(registered or is_course_full or not can_enroll)

        is_shib_course = uses_shib(course)

        # get prerequisite courses display names
        pre_requisite_courses = get_prerequisite_courses_display(course)

        return render_to_response('courseware/course_about.html', {
            'course': course,
            'staff_access': staff_access,
            'studio_url': studio_url,
            'registered': registered,
            'course_target': course_target,
            'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
            'course_price': course_price,
            'in_cart': in_cart,
            'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
            'show_courseware_link': show_courseware_link,
            'is_course_full': is_course_full,
            'can_enroll': can_enroll,
            'invitation_only': invitation_only,
            'active_reg_button': active_reg_button,
            'is_shib_course': is_shib_course,
            # We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefor explicitly set to render the appropriate header.
            'disable_courseware_header': True,
            'can_add_course_to_cart': can_add_course_to_cart,
            'cart_link': reverse('shoppingcart.views.show_cart'),
            'pre_requisite_courses': pre_requisite_courses
        })
@transaction.non_atomic_requests
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
    """
    Display the progress page.

    Thin wrapper around ``_progress`` that parses the course id and opens a
    modulestore bulk-operations block for the duration of the request.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    with modulestore().bulk_operations(course_key):
        return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
    """
    Unwrapped version of "progress".

    User progress. We show the grade bar and every problem score.

    Course staff are allowed to see the progress of students in their class.

    Raises:
        Http404: if a non-staff user requests another student's progress,
            if `student_id` is invalid/unknown, or if the requested student
            has no access to the course.
    """
    course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)

    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, request.user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))

    staff_access = bool(has_access(request.user, 'staff', course))

    if student_id is None or student_id == request.user.id:
        # always allowed to see your own profile
        student = request.user
    else:
        # Requesting access to a different student's profile
        if not staff_access:
            raise Http404
        try:
            student = User.objects.get(id=student_id)
        # Check for ValueError if 'student_id' cannot be converted to integer.
        except (ValueError, User.DoesNotExist):
            raise Http404

    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.

    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)

    # Grading may write score state, so it is kept outside any enclosing
    # transaction (the view is decorated with non_atomic_requests).
    with outer_atomic():
        field_data_cache = grades.field_data_cache_for_grading(course, student)
        scores_client = ScoresClient.from_field_data_cache(field_data_cache)
        courseware_summary = grades.progress_summary(
            student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
        )
        grade_summary = grades.grade(
            student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
        )

    studio_url = get_studio_url(course, 'settings/grading')

    if courseware_summary is None:
        #This means the student didn't have access to the course (which the instructor requested)
        raise Http404

    # checking certificate generation configuration
    show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)

    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'studio_url': studio_url,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'passed': is_course_passed(course, grade_summary),
        'show_generate_cert_btn': show_generate_cert_btn,
        'credit_course_requirements': _credit_course_requirements(course_key, student),
    }

    if show_generate_cert_btn:
        cert_status = certs_api.certificate_downloadable_status(student, course_key)
        context.update(cert_status)
        # showing the certificate web view button if feature flags are enabled.
        if certs_api.has_html_certificates_enabled(course_key, course):
            if certs_api.get_active_web_certificate(course) is not None:
                context.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': certs_api.get_certificate_url(course_id=course_key, uuid=cert_status['uuid']),
                })
            else:
                # HTML certs are enabled but no active web certificate is
                # configured yet: present the cert as still "generating".
                context.update({
                    'is_downloadable': False,
                    'is_generating': True,
                    'download_url': None
                })

    with outer_atomic():
        response = render_to_response('courseware/progress.html', context)

    return response
def _credit_course_requirements(course_key, student):
    """Return information about which credit requirements a user has satisfied.

    Arguments:
        course_key (CourseKey): Identifier for the course.
        student (User): Currently logged in user.

    Returns: dict with 'eligibility_status' and 'requirements' keys, or
        None when credit requirements should NOT be shown on the progress
        page (feature disabled or not a credit course).

    """
    # Short-circuit unless credit eligibility is enabled AND this course is
    # actually a credit course; None tells the caller to hide the section.
    credit_enabled = settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY", False)
    if not credit_enabled or not is_credit_course(course_key):
        return None

    # Statuses after which the user can no longer become eligible for credit.
    disqualifying_statuses = ('failed', 'declined')

    # Per-requirement status for this user. Each entry's 'status' is
    # "satisfied", "failed", "declined", or None -- None meaning we don't yet
    # know, either because the user hasn't acted (e.g. hasn't submitted
    # verification photos) or because we're awaiting an external response
    # (e.g. from the photo verification service).
    requirement_statuses = get_credit_requirement_status(course_key, student.username)

    # Once marked "eligible" a user stays eligible unless someone manually
    # intervenes (by deleting CreditEligibility records). If requirements
    # change post-launch this can look odd -- the user may appear eligible
    # while a newly-added requirement is still pending -- but we prefer that
    # over revoking eligibility, and mitigate it by training course teams.
    if is_user_eligible_for_credit(student.username, course_key):
        eligibility_status = "eligible"
    elif any(req['status'] in disqualifying_statuses for req in requirement_statuses):
        # Any failed/declined requirement (e.g. a denied photo verification)
        # makes the user ineligible.
        eligibility_status = "not_eligible"
    else:
        # Possibly eligible, but not all requirements are complete yet.
        eligibility_status = "partial_eligible"

    return {
        'eligibility_status': eligibility_status,
        'requirements': requirement_statuses,
    }
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.

    Right now this only works for problems because that's all
    StudentModuleHistory records.

    Arguments:
        request (HttpRequest)
        course_id (str): deprecated-style course id from the URL.
        student_username (str): user whose history is requested; must be
            the requester unless the requester has staff access.
        location (str): deprecated-style usage key of the problem.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    try:
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid location.')))

    course = get_course_overview_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))

    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied

    user_state_client = DjangoXBlockUserStateClient()
    try:
        history_entries = list(user_state_client.get_history(student_username, usage_key))
    except DjangoXBlockUserStateClient.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
            username=student_username,
            location=location
        )))

    # This is ugly, but until we have a proper submissions API that we can use to provide
    # the scores instead, it will have to do.
    scores = list(StudentModuleHistory.objects.filter(
        student_module__module_state_key=usage_key,
        student_module__student__username=student_username,
        student_module__course_id=course_key
    ).order_by('-id'))

    if len(scores) != len(history_entries):
        log.warning(
            "Mismatch when fetching scores for student "
            "history for course %s, user %s, xblock %s. "
            "%d scores were found, and %d history entries were found. "
            "Matching scores to history entries by date for display.",
            course_id,
            student_username,
            location,
            len(scores),
            len(history_entries),
        )
        # Fall back to matching each history entry to the score with the
        # exact same timestamp.
        # NOTE(review): this raises KeyError if a history entry's `updated`
        # timestamp has no exact-match score `created` timestamp -- confirm
        # the two tables always share timestamps before relying on this.
        scores_by_date = {
            score.created: score
            for score in scores
        }
        scores = [
            scores_by_date[history.updated]
            for history in history_entries
        ]

    context = {
        'history_entries': history_entries,
        'scores': scores,
        'username': student_username,
        'location': location,
        'course_id': course_key.to_deprecated_string()
    }

    return render_to_response('courseware/submission_history.html', context)
def get_static_tab_contents(request, course, tab):
    """
    Returns the contents for the given static tab.

    Arguments:
        request (HttpRequest)
        course: course descriptor that owns the tab.
        tab: static tab object; its `type` and `url_slug` locate the tab's
            usage key within the course.

    Returns:
        Rendered HTML for the tab; a generic error fragment if rendering
        raised; empty string if the tab module could not be loaded.
    """
    loc = course.id.make_usage_key(
        tab.type,
        tab.url_slug,
    )
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, modulestore().get_item(loc), depth=0
    )
    tab_module = get_module(
        request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course
    )

    logging.debug('course_module = %s', tab_module)

    html = ''
    if tab_module is not None:
        try:
            html = tab_module.render(STUDENT_VIEW).content
        except Exception:  # pylint: disable=broad-except
            # Show a generic error fragment rather than failing the page;
            # the real traceback is preserved in the log below.
            html = render_to_string('courseware/error-message.html', None)
            log.exception(
                u"Error rendering course=%s, tab=%s", course, tab['url_slug']
            )

    return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
    """
    View that, given a course_id, returns the a JSON object that enumerates all of the LTI endpoints for that course.

    The LTI 2.0 result service spec at
    http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
    says "This specification document does not prescribe a method for discovering the endpoint URLs."  This view
    function implements one way of discovering these endpoints, returning a JSON array when accessed.

    Arguments:
        request (django request object):  the HTTP request object that triggered this view function
        course_id (unicode):  id associated with the course

    Returns:
        (django response object):  HTTP response.  404 if course is not found, otherwise 200 with JSON body.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    try:
        # depth=2 so LTI descriptors nested below chapters/sections load.
        course = get_course(course_key, depth=2)
    except ValueError:
        return HttpResponse(status=404)

    anonymous_user = AnonymousUser()
    anonymous_user.known = False  # make these "noauth" requests like module_render.handle_xblock_callback_noauth
    lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})

    # Instantiate each LTI descriptor as an anonymous-user module so we can
    # ask it for its outcome service URLs.
    lti_noauth_modules = [
        get_module_for_descriptor(
            anonymous_user,
            request,
            descriptor,
            FieldDataCache.cache_for_descriptor_descendents(
                course_key,
                anonymous_user,
                descriptor
            ),
            course_key,
            course=course
        )
        for descriptor in lti_descriptors
    ]

    endpoints = [
        {
            'display_name': module.display_name,
            'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
                service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
            'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
                service_name='grade_handler'),
        }
        for module in lti_noauth_modules
    ]

    return HttpResponse(json.dumps(endpoints), content_type='application/json')
@login_required
def course_survey(request, course_id):
    """
    URL endpoint to present a survey that is associated with a course_id.

    Note that the actual implementation of course survey is handled in the
    views.py file in the Survey Djangoapp.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    redirect_url = reverse('info', args=[course_id])

    # Courses without an associated survey just bounce to the course info page.
    if course.course_survey_name:
        return survey.views.view_student_survey(
            request.user,
            course.course_survey_name,
            course=course,
            redirect_url=redirect_url,
            is_required=course.course_survey_required,
        )
    return redirect(redirect_url)
def is_course_passed(course, grade_summary=None, student=None, request=None):
    """
    Check the user's course passing status; return True if passed.

    Arguments:
        course: course object; only ``course.grade_cutoffs`` (a dict of
            grade name -> fractional cutoff) is read here.
        grade_summary (dict): student grade details; must contain a
            'percent' key. If None, it is computed via ``grades.grade``.
        student: user object (used only when ``grade_summary`` is None).
        request (HttpRequest): used only when ``grade_summary`` is None.

    Returns:
        bool: True if the student's percent is at or above the smallest
        nonzero cutoff; False otherwise, including when the course defines
        no nonzero cutoffs (passing is then undefined).
    """
    # Ignore zero cutoffs: a cutoff of 0 would make everyone "pass".
    nonzero_cutoffs = [cutoff for cutoff in course.grade_cutoffs.values() if cutoff > 0]
    success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None

    if grade_summary is None:
        grade_summary = grades.grade(student, request, course)

    # Return an actual bool: the previous `cutoff and pct >= cutoff` form
    # leaked None to callers when no nonzero cutoffs existed.
    return success_cutoff is not None and grade_summary['percent'] >= success_cutoff
# Grades can potentially be written - if so, let grading manage the transaction.
@transaction.non_atomic_requests
@require_POST
def generate_user_cert(request, course_id):
    """Start generating a new certificate for the user.

    Certificate generation is allowed if:
    * The user has passed the course, and
    * The user does not already have a pending/completed certificate.

    Note that if an error occurs during certificate generation
    (for example, if the queue is down), then we simply mark the
    certificate generation task status as "error" and re-run
    the task with a management command. To students, the certificate
    will appear to be "generating" until it is re-run.

    Args:
        request (HttpRequest): The POST request to this view.
        course_id (unicode): The identifier for the course.

    Returns:
        HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
    """

    if not request.user.is_authenticated():
        log.info(u"Anon user trying to generate certificate for %s", course_id)
        return HttpResponseBadRequest(
            _('You must be signed in to {platform_name} to create a certificate.').format(
                platform_name=settings.PLATFORM_NAME
            )
        )

    student = request.user
    course_key = CourseKey.from_string(course_id)

    # depth=2 loads the course structure needed for the pass check below.
    course = modulestore().get_course(course_key, depth=2)
    if not course:
        return HttpResponseBadRequest(_("Course is not valid"))

    if not is_course_passed(course, None, student, request):
        return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))

    certificate_status = certs_api.certificate_downloadable_status(student, course.id)

    if certificate_status["is_downloadable"]:
        return HttpResponseBadRequest(_("Certificate has already been created."))
    elif certificate_status["is_generating"]:
        return HttpResponseBadRequest(_("Certificate is being created."))
    else:
        # If the certificate is not already in-process or completed,
        # then create a new certificate generation task.
        # If the certificate cannot be added to the queue, this will
        # mark the certificate with "error" status, so it can be re-run
        # with a management command. From the user's perspective,
        # it will appear that the certificate task was submitted successfully.
        certs_api.generate_user_certificates(student, course.id, course=course, generation_mode='self')
        _track_successful_certificate_generation(student.id, course.id)
        return HttpResponse()
def _track_successful_certificate_generation(user_id, course_id):  # pylint: disable=invalid-name
    """
    Track a successful certificate generation event.

    Emits an 'edx.bi.user.certificate.generate' analytics event via Segment,
    if (and only if) a Segment key is configured.

    Arguments:
        user_id (str): The ID of the user generating the certificate.
        course_id (CourseKey): Identifier for the course.
    Returns:
        None

    """
    if settings.LMS_SEGMENT_KEY:
        event_name = 'edx.bi.user.certificate.generate'
        # Pull client ip / GA client id from the current tracking context so
        # the analytics event can be attributed to the originating session.
        tracking_context = tracker.get_tracker().resolve_context()

        analytics.track(
            user_id,
            event_name,
            {
                'category': 'certificates',
                'label': unicode(course_id)
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
@require_http_methods(["GET", "POST"])
def render_xblock(request, usage_key_string, check_if_enrolled=True):
    """
    Returns an HttpResponse with HTML content for the xBlock with the given usage_key.
    The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).

    Arguments:
        request (HttpRequest): only the 'view' GET parameter is honored and
            it must be 'student_view' (anything else yields a 400).
        usage_key_string (str): serialized UsageKey of the block to render.
        check_if_enrolled (bool): when True, the requesting user must be
            enrolled in (or otherwise have access to) the containing course.
    """
    usage_key = UsageKey.from_string(usage_key_string)
    # Fill in the course run for old-style (run-less) usage keys.
    usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
    course_key = usage_key.course_key

    requested_view = request.GET.get('view', 'student_view')
    if requested_view != 'student_view':
        return HttpResponseBadRequest("Rendering of the xblock view '{}' is not supported.".format(requested_view))

    with modulestore().bulk_operations(course_key):
        # verify the user has access to the course, including enrollment check
        try:
            course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
        except UserNotEnrolled:
            raise Http404("Course not found.")

        # get the block, which verifies whether the user has access to the block.
        block, _ = get_module_by_usage_id(
            request, unicode(course_key), unicode(usage_key), disable_staff_debug_info=True, course=course
        )

        # Strip all courseware chrome so the fragment can be embedded
        # elsewhere (e.g. in an iframe -- note 'allow_iframing').
        context = {
            'fragment': block.render('student_view', context=request.GET),
            'course': course,
            'disable_accordion': True,
            'allow_iframing': True,
            'disable_header': True,
            'disable_footer': True,
            'disable_window_wrap': True,
            'disable_preview_menu': True,
            'staff_access': bool(has_access(request.user, 'staff', course)),
            'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
        }
        return render_to_response('courseware/courseware-chromeless.html', context)
# Text shown on the financial-assistance landing page and application form.
# split('\n') turns the translated text into a list of paragraphs for the
# template.
# Translators: "percent_sign" is the symbol "%". "platform_name" is a
# string identifying the name of this installation, such as "edX".
FINANCIAL_ASSISTANCE_HEADER = _(
    '{platform_name} now offers financial assistance for learners who want to earn Verified Certificates but'
    ' who may not be able to pay the Verified Certificate fee. Eligible learners may receive up to 90{percent_sign} off'
    ' the Verified Certificate fee for a course.\nTo apply for financial assistance, enroll in the'
    ' audit track for a course that offers Verified Certificates, and then complete this application.'
    ' Note that you must complete a separate application for each course you take.\n We plan to use this'
    ' information to evaluate your application for financial assistance and to further develop our'
    ' financial assistance program.'
).format(
    percent_sign="%",
    platform_name=settings.PLATFORM_NAME
).split('\n')

# Field labels / instructions shared between the application form
# (financial_assistance_form) and the Zendesk ticket body
# (financial_assistance_request).
FA_INCOME_LABEL = _('Annual Household Income')
FA_REASON_FOR_APPLYING_LABEL = _(
    'Tell us about your current financial situation. Why do you need assistance?'
)
FA_GOALS_LABEL = _(
    'Tell us about your learning or professional goals. How will a Verified Certificate in'
    ' this course help you achieve these goals?'
)
FA_EFFORT_LABEL = _(
    'Tell us about your plans for this course. What steps will you take to help you complete'
    ' the course work and receive a certificate?'
)
FA_SHORT_ANSWER_INSTRUCTIONS = _('Use between 250 and 500 words or so in your response.')
@login_required
def financial_assistance(_request):
    """Render the initial financial assistance page."""
    context = {'header_text': FINANCIAL_ASSISTANCE_HEADER}
    return render_to_response('financial-assistance/financial-assistance.html', context)
@login_required
@require_POST
def financial_assistance_request(request):
    """Submit a request for financial assistance to Zendesk.

    Expects a JSON body with the applicant's details (username, course,
    name, email, country, income, essay answers, marketing permission).

    Returns:
        204 on success; 400 for malformed JSON, bad course key, or missing
        fields; 403 if the submitted username does not match the session
        user; 500 if the Zendesk call fails.
    """
    try:
        data = json.loads(request.body)
        # Simple sanity check that the session belongs to the user
        # submitting an FA request
        username = data['username']
        if request.user.username != username:
            return HttpResponseForbidden()

        course_id = data['course']
        course = modulestore().get_course(CourseKey.from_string(course_id))
        legal_name = data['name']
        email = data['email']
        country = data['country']
        income = data['income']
        reason_for_applying = data['reason_for_applying']
        goals = data['goals']
        effort = data['effort']
        marketing_permission = data['mktg-permission']
        ip_address = get_ip(request)
    except ValueError:
        # Thrown if JSON parsing fails
        return HttpResponseBadRequest(u'Could not parse request JSON.')
    except InvalidKeyError:
        # Thrown if course key parsing fails
        return HttpResponseBadRequest(u'Could not parse request course key.')
    except KeyError as err:
        # Thrown if fields are missing
        return HttpResponseBadRequest(u'The field {} is required.'.format(err.message))

    zendesk_submitted = _record_feedback_in_zendesk(
        legal_name,
        email,
        u'Financial assistance request for learner {username} in course {course_name}'.format(
            username=username,
            course_name=course.display_name
        ),
        u'Financial Assistance Request',
        {'course_id': course_id},
        # Send the application as additional info on the ticket so
        # that it is not shown when support replies. This uses
        # OrderedDict so that information is presented in the right
        # order.
        OrderedDict((
            ('Username', username),
            ('Full Name', legal_name),
            ('Course ID', course_id),
            ('Annual Household Income', income),
            ('Country', country),
            ('Allowed for marketing purposes', 'Yes' if marketing_permission else 'No'),
            (FA_REASON_FOR_APPLYING_LABEL, '\n' + reason_for_applying + '\n\n'),
            (FA_GOALS_LABEL, '\n' + goals + '\n\n'),
            (FA_EFFORT_LABEL, '\n' + effort + '\n\n'),
            ('Client IP', ip_address),
        )),
        group_name='Financial Assistance',
        require_update=True
    )

    if not zendesk_submitted:
        # The call to Zendesk failed. The frontend will display a
        # message to the user.
        return HttpResponse(status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required
def financial_assistance_form(request):
    """Render the financial assistance application form page.

    Builds the form-field metadata consumed by the frontend, including the
    list of courses the user may apply for: courses where the user is
    enrolled, a verified mode is currently available (no expiration or not
    yet expired), and the user is not already in the verified track.
    """
    user = request.user
    enrolled_courses = [
        {'name': enrollment.course_overview.display_name, 'value': unicode(enrollment.course_id)}
        for enrollment in CourseEnrollment.enrollments_for_user(user).order_by('-created')

        if CourseMode.objects.filter(
            Q(_expiration_datetime__isnull=True) | Q(_expiration_datetime__gt=datetime.now(UTC())),
            course_id=enrollment.course_id,
            mode_slug=CourseMode.VERIFIED
        ).exists()
        and enrollment.mode != CourseMode.VERIFIED
    ]
    return render_to_response('financial-assistance/apply.html', {
        'header_text': FINANCIAL_ASSISTANCE_HEADER,
        'student_faq_url': marketing_link('FAQ'),
        'dashboard_url': reverse('dashboard'),
        'account_settings_url': reverse('account_settings'),
        'platform_name': settings.PLATFORM_NAME,
        'user_details': {
            'email': user.email,
            'username': user.username,
            'name': user.profile.name,
            'country': str(user.profile.country.name),
        },
        'submit_url': reverse('submit_financial_assistance_request'),
        # Declarative description of each form field; the frontend renders
        # these in order and applies the length restrictions client-side.
        'fields': [
            {
                'name': 'course',
                'type': 'select',
                'label': _('Course'),
                'placeholder': '',
                'defaultValue': '',
                'required': True,
                'options': enrolled_courses,
                'instructions': _(
                    'Select the course for which you want to earn a verified certificate. If'
                    ' the course does not appear in the list, make sure that you have enrolled'
                    ' in the audit track for the course.'
                )
            },
            {
                'name': 'income',
                'type': 'text',
                'label': FA_INCOME_LABEL,
                'placeholder': _('income in US Dollars ($)'),
                'defaultValue': '',
                'required': True,
                'restrictions': {},
                'instructions': _('Specify your annual household income in US Dollars.')
            },
            {
                'name': 'reason_for_applying',
                'type': 'textarea',
                'label': FA_REASON_FOR_APPLYING_LABEL,
                'placeholder': '',
                'defaultValue': '',
                'required': True,
                'restrictions': {
                    'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
                    'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
                },
                'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
            },
            {
                'name': 'goals',
                'type': 'textarea',
                'label': FA_GOALS_LABEL,
                'placeholder': '',
                'defaultValue': '',
                'required': True,
                'restrictions': {
                    'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
                    'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
                },
                'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
            },
            {
                'name': 'effort',
                'type': 'textarea',
                'label': FA_EFFORT_LABEL,
                'placeholder': '',
                'defaultValue': '',
                'required': True,
                'restrictions': {
                    'min_length': settings.FINANCIAL_ASSISTANCE_MIN_LENGTH,
                    'max_length': settings.FINANCIAL_ASSISTANCE_MAX_LENGTH
                },
                'instructions': FA_SHORT_ANSWER_INSTRUCTIONS
            },
            {
                'placeholder': '',
                'name': 'mktg-permission',
                'label': _(
                    'I allow edX to use the information provided in this application '
                    '(except for financial information) for edX marketing purposes.'
                ),
                'defaultValue': '',
                'type': 'checkbox',
                'required': False,
                'instructions': '',
                'restrictions': {}
            }
        ],
    })
|
shurihell/testasia
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 65,149
|
[
"VisIt"
] |
046fe7be0472254e782e5aea3e4bda0a33b5b8a7f2a640884fcdbbfceb2df3d8
|
import os
import sys
import yaml
import unittest
import luigi
import logging
import yaml
import ratatosk
from ratatosk.config import get_config, get_custom_config, RatatoskConfigParser
from ratatosk import interface, backend
import ratatosk.lib.align.bwa
import ratatosk.lib.files.fastq
import ratatosk.lib.tools.gatk
import ratatosk.lib.tools.samtools
import ratatosk.lib.tools.picard
logging.basicConfig(level=logging.DEBUG)
configfile = os.path.join(os.path.dirname(__file__), "pipeconf.yaml")
ratatosk_file = os.path.join(os.pardir, "config", "ratatosk.yaml")
def setUpModule():
    """Create the module-wide custom-config parser and start from a clean state."""
    global cnf
    cnf = get_custom_config()
    cnf.clear()
def tearDownModule():
    """Reset the shared custom-config parser after all tests in this module."""
    cnf.clear()
# FIX ME:
class TestConfigParser(unittest.TestCase):
    """Tests for basic RatatoskConfigParser behavior: adding/removing
    config paths, list-valued options, and environment-variable expansion."""
    # Parsed contents of pipeconf.yaml, loaded once for the class.
    yaml_config = None

    @classmethod
    def setUpClass(cls):
        with open(configfile) as fh:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated (and unsafe by default in older PyYAML); consider
            # yaml.safe_load here.
            cls.yaml_config = yaml.load(fh)
        # Mock *_HOME environment variables referenced by mock.yaml below,
        # used to exercise variable expansion in config values.
        os.environ["GATK_HOME_MOCK"] = os.path.abspath(os.curdir)
        os.environ["PICARD_HOME_MOCK"] = os.path.abspath(os.curdir)
        with open("mock.yaml", "w") as fp:
            fp.write(yaml.safe_dump({'ratatosk.lib.tools.gatk':{'java':'java', 'path': '$GATK_HOME_MOCK'},
                                     'ratatosk.lib.tools.picard':{'java':'java', 'path': '$PICARD_HOME_MOCK/test'}}, default_flow_style=False))

    @classmethod
    def tearDownClass(cls):
        if os.path.exists("mock.yaml"):
            os.unlink("mock.yaml")
        del os.environ["GATK_HOME_MOCK"]
        del os.environ["PICARD_HOME_MOCK"]

    def setUp(self):
        # Each test starts with no custom config paths registered.
        cnf.clear()
        self.assertEqual([], cnf._instance._custom_config_paths)

    def tearDown(self):
        cnf.clear()

    def test_get_config(self):
        """Test getting config instance"""
        cnf.add_config_path(configfile)
        self.assertIsInstance(cnf, ratatosk.config.RatatoskConfigParser)
        cnf.del_config_path(configfile)

    def test_get_list(self):
        """Make sure list parsing ok"""
        cnf.add_config_path(configfile)
        self.assertIsInstance(cnf.get(section="ratatosk.lib.tools.gatk", option="knownSites"), list)
        self.assertListEqual(sorted(os.path.basename(x) for x in cnf.get(section="ratatosk.lib.tools.gatk", option="knownSites")),
                             ['knownSites1.vcf', 'knownSites2.vcf'])
        cnf.del_config_path(configfile)

    def test_add_config_path(self):
        """Test adding same config again"""
        cnf.add_config_path(configfile)
        self.assertEqual(1, len(cnf._instance._custom_config_paths))
        cnf.del_config_path(configfile)

    def test_del_config_path(self):
        """Test deleting config path"""
        cnf.add_config_path(configfile)
        cnf.del_config_path(configfile)
        self.assertEqual([], cnf._instance._custom_config_paths)

    def test_expand_vars(self):
        """$VAR references in config values expand from the environment."""
        cnf = get_config()
        cnf.add_config_path("mock.yaml")
        self.assertEqual(os.getenv("GATK_HOME_MOCK"), cnf._sections['ratatosk.lib.tools.gatk']['path'])
        self.assertEqual(os.path.join(os.getenv("PICARD_HOME_MOCK"), "test"), cnf._sections['ratatosk.lib.tools.picard']['path'])
        cnf.del_config_path("mock.yaml")
class TestConfigUpdate(unittest.TestCase):
    """Tests for how a task's configuration is updated from config files,
    custom config files, and command-line parameters, including the
    ``disable_parent_task_update`` switch.

    ``mock.yaml`` and ``custommock.yaml`` differ only in their
    UnifiedGenotyper ``options`` values so that precedence (custom config
    overrides config) can be asserted.
    """

    @classmethod
    def setUpClass(cls):
        # Note: first parameter is the class (was misnamed `self`).
        with open("mock.yaml", "w") as fp:
            fp.write(yaml.safe_dump({'ratatosk.lib.tools.gatk':{'parent_task':'another.class', 'UnifiedGenotyper':{'parent_task': 'no.such.class', 'options':['-stand_call_conf 10.0', '-stand_emit_conf 3.0']}}}, default_flow_style=False))
        with open("custommock.yaml", "w") as fp:
            fp.write(yaml.safe_dump({'ratatosk.lib.tools.gatk':{'parent_task':'another.class', 'UnifiedGenotyper':{'parent_task': 'no.such.class', 'options':['-stand_call_conf 20.0', '-stand_emit_conf 30.0']}}}, default_flow_style=False))

    @classmethod
    def tearDownClass(cls):
        # Remove *both* files written in setUpClass (the previous version
        # leaked custommock.yaml) and reset the shared parser.
        for mockfile in ("mock.yaml", "custommock.yaml"):
            if os.path.exists(mockfile):
                os.unlink(mockfile)
        cnf.clear()

    def test_config_update(self):
        """Test updating config with and without disable_parent_task_update"""
        # Main gatk task
        luigi.run(['--config-file', ratatosk_file, '--target', 'mock.fastq.gz', '--dry-run'], main_task_cls=ratatosk.lib.files.fastq.FastqFileLink)
        gatkjt = ratatosk.lib.tools.gatk.GATKJobTask()
        self.assertEqual(gatkjt.parent_task, ("ratatosk.lib.tools.gatk.InputBamFile", ))
        cnf.add_config_path("mock.yaml")
        kwargs = gatkjt._update_config(cnf, {})
        self.assertEqual(kwargs['parent_task'], 'another.class')
        kwargs = gatkjt._update_config(cnf, {}, disable_parent_task_update=True)
        self.assertIsNone(kwargs.get('parent_task'))
        cnf.del_config_path("mock.yaml")
        cnf.clear()

    def test_config_update_main(self):
        """Test updating main subsection"""
        # UnifiedGenotyper
        #
        # Incidentally, this verifies that subsection key value 'no.such.class'
        # overrides section key 'another.class'
        luigi.run(['--config-file', ratatosk_file, '--target', 'mock.bam', '--dry-run'], main_task_cls=ratatosk.lib.files.fastq.FastqFileLink)
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper()
        self.assertEqual(ug.parent_task, "ratatosk.lib.tools.gatk.ClipReads")
        cnf.del_config_path(ratatosk_file)
        cnf.add_config_path("mock.yaml")
        kwargs = ug._update_config(cnf, {})
        self.assertEqual(kwargs.get('parent_task'), 'no.such.class')
        kwargs = ug._update_config(cnf, {}, disable_parent_task_update=True)
        self.assertIsNone(kwargs.get('parent_task'))
        cnf.del_config_path("mock.yaml")

    def test_config_update_only_default(self):
        """Test that default parameters are correct"""
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper()
        for key, value in ug.get_param_values(ug.get_params(), [], {}):
            self.assertEqual(value, ug.get_param_default(key))

    def test_config_update_with_config(self):
        """Test that configuration file overrides default values"""
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper()
        param_values_dict = {x[0]:x[1] for x in ug.get_param_values(ug.get_params(), [], {})}
        cnf = get_config()
        cnf.clear()
        cnf.add_config_path("mock.yaml")
        kwargs = ug._update_config(cnf, param_values_dict)
        self.assertEqual(kwargs['options'], ['-stand_call_conf 10.0', '-stand_emit_conf 3.0'])

    def test_config_update_with_custom_config(self):
        """Test that custom configuration overrides configuration setting"""
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper()
        param_values_dict = {x[0]:x[1] for x in ug.get_param_values(ug.get_params(), [], {})}
        cnf = get_config()
        cnf.clear()
        cnf.add_config_path("mock.yaml")
        customcnf = get_custom_config()
        customcnf.clear()
        customcnf.add_config_path("custommock.yaml")
        kwargs = ug._update_config(cnf, param_values_dict)
        self.assertEqual(kwargs['options'], ['-stand_call_conf 10.0', '-stand_emit_conf 3.0'])
        kwargs = ug._update_config(customcnf, param_values_dict, disable_parent_task_update=True)
        self.assertEqual(kwargs['options'], ['-stand_call_conf 20.0', '-stand_emit_conf 30.0'])

    def test_config_update_with_command_line_parameter(self):
        """Test that command line parameter overrides configuration setting"""
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper(options='test')
        param_values_dict = {x[0]:x[1] for x in ug.get_param_values(ug.get_params(), [], {'options':'test'})}
        cnf = get_config()
        cnf.clear()
        cnf.add_config_path("mock.yaml")
        customcnf = get_custom_config()
        customcnf.clear()
        customcnf.add_config_path("custommock.yaml")
        kwargs = ug._update_config(cnf, param_values_dict)
        self.assertEqual(kwargs['options'], ['-stand_call_conf 10.0', '-stand_emit_conf 3.0'])
        kwargs = ug._update_config(customcnf, param_values_dict, disable_parent_task_update=True)
        self.assertEqual(kwargs['options'], ['-stand_call_conf 20.0', '-stand_emit_conf 30.0'])
        for key, value in ug.get_params():
            new_value = None
            # Got a command line option => override config file. Currently overriding parent_task *is* possible here (FIX ME?)
            if value.default != param_values_dict.get(key, None):
                new_value = param_values_dict.get(key, None)
                kwargs[key] = new_value
        self.assertEqual(kwargs['options'], 'test')
class TestGlobalConfig(unittest.TestCase):
    """Tests for propagation of configuration into backend.__global_config__."""

    def setUp(self):
        # safe_load: plain config data needs no object construction, and
        # yaml.load without an explicit Loader is deprecated/unsafe.
        with open(ratatosk_file) as fp:
            self.ratatosk = yaml.safe_load(fp)

    def test_global_config(self):
        """Test that backend.__global_config__ is updated correctly when instantiating a task.
        FIXME: currently not working, see :func:`ratatosk.job.BaseJobTask.__init__`"""
        backend.__global_config__ = {}
        cnf.clear()
        self.assertEqual(backend.__global_config__, {})
        cnf.add_config_path(ratatosk_file)
        ug = ratatosk.lib.tools.gatk.UnifiedGenotyper()
        ug._update_config(cnf, {})
        # self.assertEqual(backend.__global_config__['ratatosk.lib.tools.picard'], self.ratatosk['ratatosk.lib.tools.picard'])
        # self.assertEqual(backend.__global_config__['ratatosk.lib.tools.gatk'].get('UnifiedGenotyper').get('options'),
        #                  ('-stand_call_conf 30.0 -stand_emit_conf 10.0 --downsample_to_coverage 30 --output_mode EMIT_VARIANTS_ONLY -glm BOTH',))
        cnf.clear()
|
percyfal/ratatosk
|
test/test_config.py
|
Python
|
apache-2.0
| 9,636
|
[
"BWA"
] |
a6cb330108f21a5f8fad0c1e5632646d83563152ca1324897beabd47730e577e
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import os
import sys
import subprocess
qcdb_module = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + '../../../../../driver')
sys.path.append(qcdb_module)
import qcdb
from qcdb.libmintsbasissetparser import Gaussian94BasisSetParser
# Collect the canonical Dunning cc-* basis-set files, filtering out derived or
# auxiliary variants (autogenerated, tight/diffuse/polarization sets, etc.).
output = subprocess.check_output("ls -1 *cc-*.gbs | grep -v 'autogen' | grep -v 'tight' | grep -v 'polarization' | grep -v 'molpro' | grep -v 'diffuse' | grep -v 'basis' | grep -v 'corevalence' | grep -v 'hold'", shell=True)
real_dunnings = output.decode().split('\n')
parser = Gaussian94BasisSetParser()
os.system("echo '#differing basis sets' > basisdunningfiles.txt")
for bfl in real_dunnings:
    if not bfl:
        continue
    with open(bfl, 'r') as basfile:
        bascontents = basfile.readlines()
    # Prepend a "spherical" marker line when it is missing, rewriting the file
    if bascontents[0] != "spherical\n":
        print('{:30} {:4}'.format(bfl, 'sph '))
        with open(bfl, 'w') as basfile:
            basfile.write("spherical\n\n")
            for ln in bascontents:
                basfile.write(ln)
    else:
        print('{:30} {:4}'.format(bfl, ''))
# Diff each file against its counterpart one directory up
for bfl in sorted(real_dunnings, key=lambda v: v[::-1], reverse=True):
    if not bfl:
        continue
    os.system('./diff_gbs.py {} ../{}'.format(bfl, bfl))
|
psi4/psi4
|
psi4/share/psi4/basis/primitives/dunning_prepend_and_checknew.py
|
Python
|
lgpl-3.0
| 2,192
|
[
"Molpro",
"Psi4"
] |
4c2419fbe7e4d9bdd566836f2425018002d2258c89e42408e222e3da10d6ad9c
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# Create the dataset: noisy sum of two sinusoids on [0, 6]
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model: single tree vs AdaBoost.R2 ensemble of 300 trees
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
clf_1 = DecisionTreeRegressor(max_depth=4)
clf_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                          n_estimators=300, random_state=rng)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict on the training grid for plotting
y_1 = clf_1.predict(X)
y_2 = clf_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
|
treycausey/scikit-learn
|
examples/ensemble/plot_adaboost_regression.py
|
Python
|
bsd-3-clause
| 1,425
|
[
"Gaussian"
] |
e5f6ff56d534af285526dde8892721af14bd97d368bd54a0ff1d101722c04447
|
#
# Copyright (C) 2010-2019 The ESPResSo project
# Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010
# Max-Planck-Institute for Polymer Research, Theory Group
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import assert_features, electrostatics, electrostatic_extensions
from espressomd.shapes import Wall
from espressomd.minimize_energy import steepest_descent
from espressomd import visualization_opengl
import numpy
from threading import Thread
# Fail fast if the required compile-time features are missing
assert_features(["ELECTROSTATICS", "MASS", "LENNARD_JONES"])
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
numpy.random.seed(seed=42)
print("\n--->Setup system")
# System parameters (reduced units; density in the simulation's mass/volume units)
n_part = 1000
n_ionpairs = n_part / 2
density = 1.1138
time_step = 0.001823
temp = 1198.3
gamma = 50
#l_bjerrum = 0.885^2 * e^2/(4*pi*epsilon_0*k_B*T)
l_bjerrum = 130878.0 / temp
Vz = 0  # potential difference between the electrodes
Vz_to_Ez = 364.5  # conversion from potential to electrical field
# Particle parameters
types = {"Cl": 0, "Na": 1, "Electrode": 2}
numbers = {"Cl": n_ionpairs, "Na": n_ionpairs}
charges = {"Cl": -1.0, "Na": 1.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Electrode": 3.37}
lj_epsilons = {"Cl": 192.45, "Na": 17.44, "Electrode": 24.72}
lj_cuts = {"Cl": 3.0 * lj_sigmas["Cl"],
           "Na": 3.0 * lj_sigmas["Na"],
           "Electrode": 3.0 * lj_sigmas["Electrode"]}
masses = {"Cl": 35.453, "Na": 22.99, "Electrode": 12.01}
# Setup System: cubic liquid slab plus electrode walls and an ELC gap in z
box_l = (n_ionpairs * sum(masses.values()) / density)**(1. / 3.)
box_z = box_l + 2.0 * (lj_sigmas["Electrode"])
box_volume = box_l * box_l * box_z
elc_gap = box_z * 0.15
system.box_l = [box_l, box_l, box_z + elc_gap]
system.periodicity = [True, True, True]
system.time_step = time_step
system.cell_system.skin = 0.3
system.thermostat.set_langevin(kT=temp, gamma=gamma, seed=42)
# Visualizer
visualizer = visualization_opengl.openGLLive(
    system,
    camera_position=[-3 * box_l, box_l * 0.5, box_l * 0.5],
    camera_right=[0, 0, 1],
    drag_force=5 * 298,
    background_color=[1, 1, 1],
    light_pos=[30, 30, 30],
    ext_force_arrows_type_scale=[0.0001],
    ext_force_arrows=False)
# Walls acting as electrodes at the bottom and top of the slab
system.constraints.add(shape=Wall(
    dist=0, normal=[0, 0, 1]), particle_type=types["Electrode"])
system.constraints.add(shape=Wall(
    dist=-box_z, normal=[0, 0, -1]), particle_type=types["Electrode"])
# Place particles at random positions, offset from the bottom wall
for i in range(int(n_ionpairs)):
    p = numpy.random.random(3) * box_l
    p[2] += lj_sigmas["Electrode"]
    system.part.add(id=len(system.part), type=types["Cl"],
                    pos=p, q=charges["Cl"], mass=masses["Cl"])
for i in range(int(n_ionpairs)):
    p = numpy.random.random(3) * box_l
    p[2] += lj_sigmas["Electrode"]
    system.part.add(id=len(system.part), type=types["Na"],
                    pos=p, q=charges["Na"], mass=masses["Na"])
# Lennard-Jones interactions parameters
def combination_rule_epsilon(rule, eps1, eps2):
    """Combine two Lennard-Jones epsilon values according to *rule*.

    Parameters:
        rule: name of the combining rule; only "Lorentz" (geometric mean,
              as used by this script) is implemented.
        eps1, eps2: the per-species epsilon values to combine.

    Returns the combined epsilon.

    Raises:
        ValueError: if *rule* is not a known combining rule.
    """
    if rule == "Lorentz":
        return (eps1 * eps2)**0.5
    else:
        # Bug fix: the original code *returned* a ValueError instance instead
        # of raising it, silently handing callers an exception object.
        raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
    """Combine two Lennard-Jones sigma values according to *rule*.

    Parameters:
        rule: name of the combining rule; only "Berthelot" (arithmetic mean,
              as used by this script) is implemented.
        sig1, sig2: the per-species sigma values to combine.

    Returns the combined sigma.

    Raises:
        ValueError: if *rule* is not a known combining rule.
    """
    if rule == "Berthelot":
        return (sig1 + sig2) * 0.5
    else:
        # Bug fix: the original code *returned* a ValueError instance instead
        # of raising it, silently handing callers an exception object.
        raise ValueError("No combination rule defined")
# Assign pairwise Lennard-Jones parameters from the combining rules above
for s in [["Cl", "Na"], ["Cl", "Cl"], ["Na", "Na"],
          ["Na", "Electrode"], ["Cl", "Electrode"]]:
    lj_sig = combination_rule_sigma(
        "Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]])
    lj_cut = combination_rule_sigma("Berthelot", lj_cuts[s[0]], lj_cuts[s[1]])
    lj_eps = combination_rule_epsilon(
        "Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]])
    system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
        epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
# Relax initial overlaps before switching on electrostatics
steepest_descent(system, f_max=10, gamma=10, max_steps=2000,
                 max_displacement=0.1)
print("\n--->Tuning Electrostatics")
p3m = electrostatics.P3M(prefactor=l_bjerrum, accuracy=1e-2)
system.actors.add(p3m)
# ELC corrects P3M for the slab geometry using the empty gap region above box_z
elc = electrostatic_extensions.ELC(gap_size=elc_gap, maxPWerror=1e-3)
system.actors.add(elc)
def increaseElectricField():
    """Raise the electrode potential difference by 3 and reapply the
    corresponding external force to every charged particle."""
    global Vz
    Vz = Vz + 3
    field = Vz * Vz_to_Ez
    for particle in system.part:
        particle.ext_force = [0, 0, particle.q * field]
    print('Potential difference: {:.0f}'.format(Vz))
def decreaseElectricField():
    """Lower the electrode potential difference by 3 and reapply the
    corresponding external force to every charged particle."""
    global Vz
    Vz = Vz - 3
    field = Vz * Vz_to_Ez
    for particle in system.part:
        particle.ext_force = [0, 0, particle.q * field]
    print('Potential difference: {:.0f}'.format(Vz))
# Register buttons: 'u'/'j' raise/lower the electrode potential while held
visualizer.keyboard_manager.register_button(visualization_opengl.KeyboardButtonEvent(
    'u', visualization_opengl.KeyboardFireEvent.Hold, increaseElectricField))
visualizer.keyboard_manager.register_button(visualization_opengl.KeyboardButtonEvent(
    'j', visualization_opengl.KeyboardFireEvent.Hold, decreaseElectricField))
def main():
    """Integration loop: one MD step per visualizer frame, forever."""
    print("\n--->Integration")
    system.time = 0.0
    while True:
        system.integrator.run(1)
        visualizer.update()
# Start simulation in separate thread (daemon so it dies with the process)
t = Thread(target=main)
t.daemon = True
t.start()
# Start blocking visualizer in the main thread (OpenGL requirement)
visualizer.start()
|
KaiSzuttor/espresso
|
doc/tutorials/02-charged_system/scripts/nacl_units_confined_vis.py
|
Python
|
gpl-3.0
| 5,696
|
[
"ESPResSo"
] |
a482de1c640253e18a32355235c52afefa29c31b3fda275d2ce892a4df688ad1
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from unittest.case import skip
from commoncode.testcase import FileBasedTesting
from licensedcode import detect
class TestMatchingPerf(FileBasedTesting):
    """Profiling harness for license detection (not run in CI)."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # Comment the skip decorator to run this test
    @skip('Use only for local profiling')
    def test_detect_license_performance_profiling(self):
        # pre-index : we are profiling only the detection, not the indexing
        import licensedcode.detect
        licensedcode.detect.get_index()
        import cProfile as profile
        import pstats
        stats = 'detect_license_performance_profile_log.txt'
        from itertools import repeat
        def detect_lic():
            # run detection over all fixture files; list() drains the generator
            for location in locations:
                list(detect.detect_license(location, perfect=True))
        tf = ['perf/test1.txt', 'perf/whatever.py', 'perf/udll.cxx']
        locations = [self.get_test_loc(f) for f in tf]
        test_py = 'detect_lic()'
        profile.runctx(test_py, globals(), locals(), stats)
        p = pstats.Stats(stats)
        p.sort_stats('cumulative').print_stats()
        # p.print_stats()
class TestIndexingPerformance(FileBasedTesting):
    """Profiling harness for license index construction (not run in CI)."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # Comment the skip decorator to run this test
    @skip('Use only for local profiling')
    def test_build_index_performance_profiling(self):
        # pre-load the JSON : we are profiling only the indexing here
        # from licensedcode.json_rules import load_license_rules
        # rules = load_license_rules()
        import cProfile as profile
        import pstats
        from licensedcode import detect
        stats = 'build_index_performance_profile_log.txt'
        test_py = 'detect.get_license_index()'
        profile.runctx(test_py, globals(), locals(), stats)
        p = pstats.Stats(stats)
        p.sort_stats('time').print_stats(40)
        print()
        print()
        print()
        # p.print_stats()
class TestTokenzingPerformance(FileBasedTesting):
    """Timing harness for rule tokenization (not run in CI)."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # Comment the skip decorator to run this test
    @skip('Use only for local profiling')
    def test_get_tokens_timing(self):
        from timeit import timeit
        # setup/stmt strings are exec'd by timeit, hence the module-level code
        setup = '''
from licensedcode.models import get_tokens
from commoncode.fileutils import file_iter
from licensedcode import rules_data_dir
import os
rule_files = [os.path.join(rules_data_dir, f) for f in os.listdir(rules_data_dir) if f.endswith('.RULE')]
'''
        test = '''
for f in rule_files:
    get_tokens(f, template=False)
    try:
        get_tokens(f, template=True)
    except:
        pass
'''
        print()
        print('WITH OBJECT')
        print(timeit(stmt=test, setup=setup, number=5))
    #
    # Comment the skip decorator to run this test
    @skip('Use only for local profiling')
    def test_get_all_rules_performance_timing(self):
        from timeit import timeit
        print()
        print('With Object or namedtuple')
        print(timeit(stmt='from licensedcode.models import get_all_rules;get_all_rules()', number=10))
|
retrography/scancode-toolkit
|
tests/licensedcode/test_performance.py
|
Python
|
apache-2.0
| 4,566
|
[
"VisIt"
] |
ef54e5dc32863f2e8a4d57af6e21610592746228c9749783b1012fe2c3d78646
|
import cgi
import os
import os.path
import sys
import pgdb
import cStringIO
class http_response(object):
    """Buffered WSGI response helper (Python 2): collects all output in
    memory so status, headers and Content-Length can be finalized before
    anything is sent to the client."""
    def __init__(self, environ, start_response):
        import cStringIO
        self.buffer = cStringIO.StringIO()
        self.environ = environ
        self.start_response = start_response
        self.status = '200 OK'
        self.headers = [('Content-type', 'text/html; charset=utf-8'), ('P3P', '''policyref="/w3c/p3p.xml", CP="NOI NOR CURa OUR"''')]
    def write(self, data):
        # file-like interface so an instance can replace sys.stdout
        self.buffer.write(data)
    def finalise(self):
        """
        Closes the output buffer, writes the correct header/s and returns
        something suitable for returning from the top-level application() call
        """
        self.html_footers()
        self.value = self.buffer.getvalue()
        self.buffer.close()
        self.headers.append(('Content-Length', str(len(self.value))))
        self.start_response(self.status, self.headers)
        return [self.value]
    def boom(self, msg):
        # Fatal error: switch to HTTP 500 and finish the page immediately
        self.status = '500 Server side error'
        print >>self.buffer, "Critical error, HTTP status 500<br />"
        print >>self.buffer, str(msg)
        return self.finalise()
    def oops(self, msg):
        # Non-fatal error box with a link back home; still finalizes the page
        print >>self.buffer, """<div style="border:1px black dotted;">"""
        print >>self.buffer, "Oops! " + str(msg) + "<br />"
        print >>self.buffer, """<a href="index.py">Go home</a><br />\n"""
        print >>self.buffer, """</div>"""
        return self.finalise()
    def html_headers(self, *head_items):
        # Emit doctype and <head>; callers may pass extra <head> lines
        print >>self.buffer, """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"\n "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">"""
        print >>self.buffer, """<html>\n<head>
<title>QQC!</title>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<link rel="stylesheet" href="qqc.css" type="text/css" title="QQC" />
<link rel="P3Pv1" href="/w3c/p3p.xml" />"""
        for item in head_items:
            print >>self.buffer, "\t%s" % item
        print >>self.buffer, """</head>\n<body onLoad="toggle_howto();">"""
    def html_footers(self):
        print >>self.buffer, """\n<hr />\n<div>"""
        print >>self.buffer, """<a href="http://validator.w3.org/check?uri=referer"><img src="valid-xhtml11.png" alt="Valid XHTML 1.1" height="31" width="88" /></a>"""
        print >>self.buffer, """</div>\n</body>\n</html>"""
def application(environ, start_response):
    """WSGI entry point (Python 2): renders the QQC error-report listing.

    Reads filter criteria from the query string, optionally dismisses a
    report (privileged users only), then prints the filtered report table.
    """
    # cwd gets set to /, which is annoying :(
    cwd = os.path.split(environ['SCRIPT_FILENAME'])[0]
    sys.path.insert(0, cwd)
    import qqc_include as inc
    # Setup our output; stdout is redirected so bare `print` hits the buffer
    output = http_response(environ, start_response)
    sys.stdout = output
    wsgi_errors = environ['wsgi.errors']
    # Get all our form input
    form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
    TABLENAME = str(form.getfirst("script_name", ''))
    PROOFREADER = str(form.getfirst("proofreader", ''))
    ORDER = str(form.getfirst("newest", ''))
    ERROR_ID = form.getfirst("error_id", '')
    LINENUM = str(form.getfirst("linenum", ''))
    GET_ALL = form.getfirst("all", None)
    REMOTE_USER = environ.get("REMOTE_USER", 'USERNAME_ERROR')
    # Extract the CN= component from a certificate-style DN, if present
    try:
        REMOTE_USER = [x for x in REMOTE_USER.split('/') if x.startswith('CN=')][0][3:]
    except:
        pass
    # Connect to DB
    try:
        connection = pgdb.connect(host=inc.db_host, database=inc.db_name, user=inc.db_user, password=inc.db_pass)
        cursor = connection.cursor()
    except:
        return output.boom("Could not connect to database")
    # Get useful things from the DB
    try:
        cursor.execute('''SELECT "name","privileged" FROM "proofreaders"''')
        name_tuples = cursor.fetchall()
        names = [x[0] for x in name_tuples]
        cursor.execute('''SELECT DISTINCT "script_name" FROM "lines"''')
        scripts = [x[0] for x in cursor.fetchall()]
    except:
        return output.boom("Could not get stuff from database")
    try:
        PRIVILEGED = [x for x in name_tuples if x[0] == REMOTE_USER][0][1]
    except:
        PRIVILEGED = False
    # Handle deletion if one was requested
    try:
        if ERROR_ID and PRIVILEGED:
            # Only perform the deletion if the user has the privileged flag set
            # NOTE(review): ERROR_ID comes straight from the form and is
            # concatenated into SQL — injection risk; should be parameterized.
            cursor.execute('''SELECT clear_error(''' + ERROR_ID + ''')''')
            connection.commit()
    except Exception, data:
        return output.oops("Error during deletion - " + str(data))
    # Start output
    output.html_headers()
    # Header bar
    print """
<div class="header">
<h5 style="position: absolute;">
<a href="qqc_report.py">Report an error</a><br />
<a href="qqc_view.py">View reported errors</a>
</h5>
<h1 style="font-family:fantasy; padding:0px;">QQC!</h1>
<small>Written by Barney Desmond</small>
</div>
"""
    # Sort out critera and inform the user
    link = environ['SCRIPT_NAME'] + '?'
    if PROOFREADER:
        link += '''&proofreader='''+PROOFREADER
    if TABLENAME:
        link += '''&script_name='''+TABLENAME
    if ORDER:
        order = 'Sort by script name, Z-A'
        swap = '''<small>(<a href="%s">sort A-Z</a>)</small>''' % link
    else:
        order = 'Sort by script name, A-Z'
        swap = '''<small>(<a href="%s&newest=true">sort Z-A</a>)</small>''' % link
    print """
<fieldset>
<legend>Criteria</legend>
<ul>
\t<li>%s %s</li>""" % (order, swap)
    if TABLENAME:
        print """\t<li>Show reports for <strong>%s</strong></li>""" % TABLENAME
    if PROOFREADER:
        print """\t<li>Show reports from <strong>%s</strong></li>""" % PROOFREADER
    print """\t</ul>
<a href="%s">Show all error reports</a>
</fieldset>
""" % environ['SCRIPT_NAME']
    # Retrieve all errors (filters use named placeholders — safe, unlike above)
    resultset = []
    query = """SELECT "error_id","script_name","linenum"::integer,"proofreader","description","img_filename","ttext","fixed_flag" FROM "errors" NATURAL JOIN "lines" WHERE TRUE """
    if TABLENAME:
        query += """ AND "script_name"=%(script)s"""
    if PROOFREADER:
        query += """ AND "proofreader"=%(reader)s"""
    if LINENUM:
        query += """ AND "linenum"=%(linenum)s"""
    if ORDER:
        query += """ ORDER BY "fixed_flag", "script_name" DESC, "linenum" ASC"""
    else:
        query += """ ORDER BY "fixed_flag", "script_name" ASC, "linenum" ASC"""
    query_dict = { "script": TABLENAME, "reader": PROOFREADER, "linenum": LINENUM }
    try:
        cursor.execute(query, query_dict)
        resultset = cursor.fetchall()
    except Exception, data:
        return output.oops("Error trying to retrieve reports - " + str(data))
    # Start showing the errors
    print """
<fieldset>
<legend>%s Reports (plus %s dismissed reports)</legend>
<table class="results">
<tr>
<th>Script file</th>
<th>Line number</th>
<th>Proofreader</th>
<th>Problem</th>
<th>Screenshot</th>
<th>Translated line</th>
<th></th>
</tr>
""" % ( len([x for x in resultset if not x[7]]) , len([x for x in resultset if x[7]]) )
    # Hidden fields that preserve the current filters across form submissions
    extra_params = ''
    if TABLENAME:
        extra_params += """<input type="hidden" value="%s" name="script_name" />""" % TABLENAME
    if PROOFREADER:
        extra_params += """<input type="hidden" value="%s" name="proofreader" />""" % PROOFREADER
    if ORDER:
        extra_params += """<input type="hidden" value="%s" name="newest" />""" % ORDER
    for row in resultset:
        # Dismissed reports are hidden unless ?all= was given
        if row[7] and not GET_ALL:
            continue
        error_params = {}
        row[2] = str(row[2])
        # Script-name cell links to a view filtered on that script
        link = environ['SCRIPT_NAME'] + '?'
        if row[1] != TABLENAME:
            link += """&script_name="""+row[1]
            if PROOFREADER:
                link += """&proofreader="""+PROOFREADER
            if ORDER:
                link += """&newest="""+ORDER
            error_params['script_file'] = """<a href="%s">%s</a>""" % (link, cgi.escape(row[1]))
        else:
            error_params['script_file'] = cgi.escape(row[1])
        error_params['line_num'] = cgi.escape(row[2])
        # Proofreader cell links to a view filtered on that proofreader
        link = environ['SCRIPT_NAME'] + '?'
        if row[3] != PROOFREADER:
            link += """&proofreader="""+row[3]
            if TABLENAME:
                link += """&script_name="""+TABLENAME
            if ORDER:
                link += """&newest="""+ORDER
            error_params['proofreader'] = """<a href="%s">%s</a>""" % (link, cgi.escape(row[3]))
        else:
            error_params['proofreader'] = cgi.escape(row[3])
        error_params['problem'] = cgi.escape(row[4])
        if row[5]:
            error_params['screenshot'] = """<a href="uploads/%s">File</a>""" % row[5]
        else:
            error_params['screenshot'] = 'None'
        error_params['error_id'] = int(row[0])
        error_params['ttext'] = cgi.escape(row[6])
        error_params['fixed_flag'] = row[7]
        error_params['extra'] = extra_params
        if row[7]:
            error_params['style'] = ''' class="fixed"'''
            error_params['disabled'] = '''disabled="disabled"'''
        else:
            error_params['style'] = ''' class="unfixed"'''
            error_params['disabled'] = ''
        print """<form method="get" action="%s">""" % environ['SCRIPT_NAME']
        # Privileged users get a Dismiss button on each row
        if PRIVILEGED:
            print """<tr%(style)s>
<td>%(script_file)s</td>
<td>%(line_num)s</td>
<td>%(proofreader)s</td>
<td>%(problem)s</td>
<td>%(screenshot)s</td>
<td>%(ttext)s</td>
<td><input type="hidden" name="error_id" value="%(error_id)s" />%(extra)s<input type="submit" value="Dismiss" %(disabled)s /></td>
</tr>
</form>
""" % error_params
        else:
            print """<tr%(style)s>
<td>%(script_file)s</td>
<td>%(line_num)s</td>
<td>%(proofreader)s</td>
<td>%(problem)s</td>
<td>%(screenshot)s</td>
<td>%(ttext)s</td>
<td><input type="hidden" name="error_id" value="%(error_id)s" />%(extra)s</td>
</tr>
</form>
""" % error_params
    print """
</table>
</fieldset>
"""
    # All regular output should be done by now
    connection.close()
    return output.finalise()
# Wrap the app so unhandled exceptions render a debug page.
# NOTE(review): debug=True exposes tracebacks to clients — disable in production.
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(application, debug=True)
|
barneydesmond/qqc
|
qqc_view.py
|
Python
|
mit
| 9,170
|
[
"Desmond"
] |
b59ef2c6f2e888def18b209515899d970260ff31c9387aafbab6bc371fb7a28e
|
#!/usr/bin/env python
""""
Simple implementation of http://arxiv.org/pdf/1502.04623v2.pdf in TensorFlow
Example Usage:
python draw.py --data_dir=/tmp/draw --read_attn=True --write_attn=True
Author: Eric Jang
"""
import tensorflow as tf
from tensorflow.examples.tutorials import mnist
import numpy as np
import os
tf.flags.DEFINE_string("data_dir", "/tmp/draw", "")
tf.flags.DEFINE_boolean("read_attn", True, "enable attention for reader")
tf.flags.DEFINE_boolean("write_attn",True, "enable attention for writer")
FLAGS = tf.flags.FLAGS
## MODEL PARAMETERS ##
A,B = 28,28 # image width,height
img_size = B*A # the canvas size
enc_size = 256 # number of hidden units / output size in LSTM
dec_size = 256
read_n = 5 # read glimpse grid width/height
write_n = 5 # write glimpse grid width/height
read_size = 2*read_n*read_n if FLAGS.read_attn else 2*img_size
write_size = write_n*write_n if FLAGS.write_attn else img_size
z_size=10 # QSampler output size
T=20 # MNIST generation sequence length
batch_size=100 # training minibatch size
train_iters=10000
learning_rate=1e-3 # learning rate for optimizer
eps=1e-8 # epsilon for numerical stability
## BUILD MODEL ##
DO_SHARE=None # workaround for variable_scope(reuse=True)
x = tf.placeholder(tf.float32,shape=(batch_size,img_size)) # input (batch_size * img_size)
e=tf.random_normal((batch_size,z_size), mean=0, stddev=1) # Qsampler noise
lstm_enc = tf.contrib.rnn.LSTMCell(enc_size, state_is_tuple=True) # encoder Op
lstm_dec = tf.contrib.rnn.LSTMCell(dec_size, state_is_tuple=True) # decoder Op
def linear(x, output_dim):
    """Affine transformation W x + b.

    Assumes x.shape = (batch_size, num_features); variables "w" and "b" are
    created (or reused) in the current variable scope.
    """
    in_dim = x.get_shape()[1]
    weight = tf.get_variable("w", [in_dim, output_dim])
    bias = tf.get_variable("b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weight) + bias
def filterbank(gx, gy, sigma2, delta, N):
    """Build the N-point Gaussian x/y filterbanks (Fx, Fy) for a glimpse
    centred at (gx, gy) with grid spacing delta and variance sigma2
    (eqs. 19-26 of the DRAW paper)."""
    grid_i = tf.reshape(tf.cast(tf.range(N), tf.float32), [1, -1])
    mu_x = gx + (grid_i - N / 2 - 0.5) * delta # eq 19
    mu_y = gy + (grid_i - N / 2 - 0.5) * delta # eq 20
    a = tf.reshape(tf.cast(tf.range(A), tf.float32), [1, 1, -1])
    b = tf.reshape(tf.cast(tf.range(B), tf.float32), [1, 1, -1])
    mu_x = tf.reshape(mu_x, [-1, N, 1])
    mu_y = tf.reshape(mu_y, [-1, N, 1])
    sigma2 = tf.reshape(sigma2, [-1, 1, 1])
    # NOTE(review): a textbook Gaussian is exp(-(a-mu)^2 / (2*sigma2)); here
    # the divisor sits inside the square (hence the author's "2*sigma2?").
    # Kept as-is to preserve the reference implementation's behaviour.
    Fx = tf.exp(-tf.square((a - mu_x) / (2*sigma2))) # 2*sigma2?
    Fy = tf.exp(-tf.square((b - mu_y) / (2*sigma2))) # batch x N x B
    # normalize, sum over A and B dims
    Fx=Fx/tf.maximum(tf.reduce_sum(Fx,2,keep_dims=True),eps)
    Fy=Fy/tf.maximum(tf.reduce_sum(Fy,2,keep_dims=True),eps)
    return Fx,Fy
def attn_window(scope, h_dec, N):
    """Derive attention parameters from h_dec and return (Fx, Fy, gamma)."""
    with tf.variable_scope(scope,reuse=DO_SHARE):
        params=linear(h_dec,5)
    # gx_,gy_,log_sigma2,log_delta,log_gamma=tf.split(1,5,params)
    gx_,gy_,log_sigma2,log_delta,log_gamma=tf.split(params,5,1)
    # map raw network outputs to grid centre, variance and stride (eqs. 21-23)
    gx=(A+1)/2*(gx_+1)
    gy=(B+1)/2*(gy_+1)
    sigma2=tf.exp(log_sigma2)
    delta=(max(A,B)-1)/(N-1)*tf.exp(log_delta) # batch x N
    return filterbank(gx,gy,sigma2,delta,N)+(tf.exp(log_gamma),)
## READ ##
def read_no_attn(x, x_hat, h_dec_prev):
    """Attention-free read: the full image and error image, concatenated
    along the feature axis (h_dec_prev is ignored)."""
    return tf.concat([x, x_hat], axis=1)
def read_attn(x, x_hat, h_dec_prev):
    """Attentive read: extract read_n x read_n glimpses from the image and
    the error image using filterbanks driven by the previous decoder state."""
    Fx,Fy,gamma=attn_window("read",h_dec_prev,read_n)
    def filter_img(img,Fx,Fy,gamma,N):
        # glimpse = gamma * Fy . img . Fx^T, flattened to batch x N*N
        Fxt=tf.transpose(Fx,perm=[0,2,1])
        img=tf.reshape(img,[-1,B,A])
        glimpse=tf.matmul(Fy,tf.matmul(img,Fxt))
        glimpse=tf.reshape(glimpse,[-1,N*N])
        return glimpse*tf.reshape(gamma,[-1,1])
    x=filter_img(x,Fx,Fy,gamma,read_n) # batch x (read_n*read_n)
    x_hat=filter_img(x_hat,Fx,Fy,gamma,read_n)
    return tf.concat([x,x_hat], 1) # concat along feature axis
# Select the reader implementation based on the --read_attn flag
read = read_attn if FLAGS.read_attn else read_no_attn
## ENCODE ##
def encode(state, input):
    """
    run LSTM
    state = previous encoder state
    input = cat(read,h_dec_prev)
    returns: (output, new_state)
    """
    with tf.variable_scope("encoder",reuse=DO_SHARE):
        return lstm_enc(input,state)
## Q-SAMPLER (VARIATIONAL AUTOENCODER) ##
def sampleQ(h_enc):
    """
    Samples Zt ~ normrnd(mu,sigma) via reparameterization trick for normal dist
    mu is (batch,z_size)
    """
    with tf.variable_scope("mu",reuse=DO_SHARE):
        mu=linear(h_enc,z_size)
    with tf.variable_scope("sigma",reuse=DO_SHARE):
        logsigma=linear(h_enc,z_size)
        sigma=tf.exp(logsigma)
    # reparameterization: z = mu + sigma * e, with e the fixed noise tensor
    return (mu + sigma*e, mu, logsigma, sigma)
## DECODER ##
def decode(state, input):
    """Run the decoder LSTM one step; weights shared across time via DO_SHARE."""
    with tf.variable_scope("decoder",reuse=DO_SHARE):
        return lstm_dec(input, state)
## WRITER ##
def write_no_attn(h_dec):
    """Full-canvas write: one linear layer emits all B*A pixels."""
    with tf.variable_scope("write",reuse=DO_SHARE):
        return linear(h_dec,img_size)
def write_attn(h_dec):
    """Attentive write: place an N x N patch onto the canvas via the
    write filterbanks (transpose of the read operation)."""
    with tf.variable_scope("writeW",reuse=DO_SHARE):
        w=linear(h_dec,write_size) # batch x (write_n*write_n)
    N=write_n
    w=tf.reshape(w,[batch_size,N,N])
    Fx,Fy,gamma=attn_window("write",h_dec,write_n)
    Fyt=tf.transpose(Fy,perm=[0,2,1])
    wr=tf.matmul(Fyt,tf.matmul(w,Fx))
    wr=tf.reshape(wr,[batch_size,B*A])
    #gamma=tf.tile(gamma,[1,B*A])
    return wr*tf.reshape(1.0/gamma,[-1,1])
# Select the writer implementation based on the --write_attn flag
write=write_attn if FLAGS.write_attn else write_no_attn
## STATE VARIABLES ##
cs=[0]*T # sequence of canvases
mus,logsigmas,sigmas=[0]*T,[0]*T,[0]*T # gaussian params generated by SampleQ. We will need these for computing loss.
# initial states
h_dec_prev=tf.zeros((batch_size,dec_size))
enc_state=lstm_enc.zero_state(batch_size, tf.float32)
dec_state=lstm_dec.zero_state(batch_size, tf.float32)
## DRAW MODEL ##
# construct the unrolled computational graph
for t in range(T):
c_prev = tf.zeros((batch_size,img_size)) if t==0 else cs[t-1]
x_hat=x-tf.sigmoid(c_prev) # error image
r=read(x,x_hat,h_dec_prev)
h_enc,enc_state=encode(enc_state,tf.concat([r,h_dec_prev], 1))
z,mus[t],logsigmas[t],sigmas[t]=sampleQ(h_enc)
h_dec,dec_state=decode(dec_state,z)
cs[t]=c_prev+write(h_dec) # store results
h_dec_prev=h_dec
DO_SHARE=True # from now on, share variables
## LOSS FUNCTION ##
def binary_crossentropy(t, o):
    """Elementwise binary cross-entropy of output o against target t."""
    return -(t*tf.log(o+eps) + (1.0-t)*tf.log(1.0-o+eps))
# reconstruction term appears to have been collapsed down to a single scalar value (rather than one per item in minibatch)
x_recons=tf.nn.sigmoid(cs[-1])
# after computing binary cross entropy, sum across features then take the mean of those sums across minibatches
Lx=tf.reduce_sum(binary_crossentropy(x,x_recons),1) # reconstruction term
Lx=tf.reduce_mean(Lx)
# KL divergence of each step's latent distribution from the standard normal
kl_terms=[0]*T
for t in range(T):
    mu2=tf.square(mus[t])
    sigma2=tf.square(sigmas[t])
    logsigma=logsigmas[t]
    kl_terms[t]=0.5*tf.reduce_sum(mu2+sigma2-2*logsigma-1,1)#-T*.5 # each kl term is (1xminibatch)
KL=tf.add_n(kl_terms) # this is 1xminibatch, corresponding to summing kl_terms from 1:T
Lz=tf.reduce_mean(KL) # average over minibatches
cost=Lx+Lz
## OPTIMIZER ##
optimizer=tf.train.AdamOptimizer(learning_rate, beta1=0.5)
grads=optimizer.compute_gradients(cost)
for i,(g,v) in enumerate(grads):
    if g is not None:
        grads[i]=(tf.clip_by_norm(g,5),v) # clip gradients
train_op=optimizer.apply_gradients(grads)
## RUN TRAINING ##
data_directory = os.path.join(FLAGS.data_dir, "mnist")
if not os.path.exists(data_directory):
    os.makedirs(data_directory)
train_data = mnist.input_data.read_data_sets(data_directory, one_hot=True).train # binarized (0-1) mnist data
fetches=[]
fetches.extend([Lx,Lz,train_op])
Lxs=[0]*train_iters
Lzs=[0]*train_iters
sess=tf.InteractiveSession()
saver = tf.train.Saver() # saves variables learned during training
tf.global_variables_initializer().run()
#saver.restore(sess, "/tmp/draw/drawmodel.ckpt") # to restore from model, uncomment this line
for i in range(train_iters):
    xtrain,_=train_data.next_batch(batch_size) # xtrain is (batch_size x img_size)
    feed_dict={x:xtrain}
    results=sess.run(fetches,feed_dict)
    Lxs[i],Lzs[i],_=results
    if i%100==0:
        print("iter=%d : Lx: %f Lz: %f" % (i,Lxs[i],Lzs[i]))
## TRAINING FINISHED ##
canvases=sess.run(cs,feed_dict) # generate some examples
canvases=np.array(canvases) # T x batch x img_size
out_file=os.path.join(FLAGS.data_dir,"draw_data.npy")
np.save(out_file,[canvases,Lxs,Lzs])
print("Outputs saved in file: %s" % out_file)
ckpt_file=os.path.join(FLAGS.data_dir,"drawmodel.ckpt")
print("Model saved in file: %s" % saver.save(sess,ckpt_file))
sess.close()
|
HaydenFaulkner/phd
|
tensorflow_code/external_libraries/draw-ericjang/draw.py
|
Python
|
mit
| 8,395
|
[
"Gaussian"
] |
49ae937241b70a3bb06bfd2c0a8f02b1191045c540f33fa5f018971ac42f4892
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
# Public API of this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
# Input/target dtypes expected by the Cython tree implementation.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Map the user-facing ``criterion`` string to the Cython criterion class,
# for classification and regression respectively.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
# Map the user-facing ``splitter`` string to the Cython splitter class;
# the sparse variants are selected when X is a scipy sparse matrix.
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
                   "presort-best": _tree.PresortBestSplitter,
                   "random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
                    "random": _tree.RandomSparseSplitter}
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        # Fitted state below; populated by fit() and reset to None here so
        # an unfitted estimator can be detected (see _validate_X_predict).
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
        if issparse(X):
            # The Cython splitters require sorted, 32-bit sparse indices.
            X.sort_indices()
            if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            # y is re-encoded in place below to contiguous class indices;
            # copy first so the caller's array is left untouched.
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                y_original = np.copy(y)
            for k in range(self.n_outputs_):
                classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        # None is encoded as the sentinel values the Cython builders expect:
        # "unlimited" depth and -1 (no best-first limit) respectively.
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # "auto" means sqrt for classification, all features for
                # regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            # 0 or 1 leaves cannot represent a single split.
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))
        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        # Fold the class_weight expansion into sample_weight so the
        # builders only ever see one weight vector.
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                min_weight_leaf,
                                                random_state)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            self.min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           self.min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)
        builder.build(self.tree_, X, y, sample_weight)
        # For the common single-output case, unwrap the per-output lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))
        return X
    def predict(self, X, check_input=True):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]
        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # argmax over class scores, mapped back to original labels.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]
    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.

        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        # Delegates to the Cython Tree object.
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=sqrt(n_features)`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional
                   (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # All parameters are stored unchanged on self by the base class.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Guard against division by zero for all-zero rows.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # Multi-output: normalize each output's class scores separately.
            all_proba = []
            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Take the log of each output's probability array in place.
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
          - If int, then consider `max_features` features at each split.
          - If float, then `max_features` is a percentage and
            `int(max_features * n_features)` features are considered at each
            split.
          - If "auto", then `max_features=n_features`.
          - If "sqrt", then `max_features=sqrt(n_features)`.
          - If "log2", then `max_features=log2(n_features)`.
          - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure pass-through: behaviour differences from the classifier come
        # only from the regression defaults and the RegressorMixin base.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are built differently from classic decision trees: at each
    node, one random split is drawn for each of the `max_features` randomly
    selected features, and the best of those random splits is kept. With
    `max_features` set to 1 the tree is therefore built entirely at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Everything is forwarded unchanged to DecisionTreeClassifier; the
        # extra-tree behaviour comes purely from the "random" splitter and
        # "auto" max_features defaults above.
        forwarded = dict(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
        super(ExtraTreeClassifier, self).__init__(**forwarded)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are built differently from classic decision trees: at each
    node, one random split is drawn for each of the `max_features` randomly
    selected features, and the best of those random splits is kept. With
    `max_features` set to 1 the tree is therefore built entirely at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Everything is forwarded unchanged to DecisionTreeRegressor; the
        # extra-tree behaviour comes purely from the "random" splitter
        # default above.
        forwarded = dict(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
        super(ExtraTreeRegressor, self).__init__(**forwarded)
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/tree/tree.py
|
Python
|
apache-2.0
| 34,561
|
[
"Brian"
] |
cc325638b76d77d8cb50b21dacf332e72694bb90cf47df88627f8d2f0e423e27
|
import re
from openelex.models import Contest, Candidate, Office, Result
import logging
import time
import os
# if not os.path.isdir("logs"):
# os.makedirs("logs")
# logging.basicConfig(filename=time.strftime("logs/%Y%m%d-%H%M%S-validate.log"),level=logging.DEBUG)
# Generic validation helpers
def _validate_candidate_votes(election_id, reporting_level, contest_slug,
        candidate_slug, expected_votes):
    """Sum one candidate's stored results and assert the known total.

    Sums ``votes`` across every Result matching the given election,
    contest, candidate and reporting level, then asserts the sum equals
    ``expected_votes``.
    """
    total = Result.objects.filter(
        election_id=election_id,
        contest_slug=contest_slug,
        candidate_slug=candidate_slug,
        reporting_level=reporting_level,
    ).sum('votes')
    if total != expected_votes:
        # Log a ready-to-paste mongo shell query reproducing the lookup.
        logging.debug("db.getCollection('result').find({election_id:\"%s\", \
            contest_slug:\"%s\", candidate_slug:\"%s\", \
            reporting_level:\"%s\"})", election_id, contest_slug, candidate_slug, reporting_level)
    assert total == expected_votes, \
        "Expected {} votes for contest {} and candidate {}, found {}".format(
            expected_votes, contest_slug, candidate_slug, total)
def _validate_many_candidate_votes(election_id, reporting_level,
        candidates):
    """
    Check several known candidate totals against aggregated results.

    Arguments:
    election_id - Election ID of the election of interest.
    reporting_level - Reporting level to use to aggregate results.
    candidates - Tuple of contest slug, candidate slug and expected votes.
    """
    for contest_slug, candidate_slug, expected_votes in candidates:
        _validate_candidate_votes(election_id, reporting_level,
            contest_slug, candidate_slug, expected_votes)
def validate_results_2012_president_general():
    """Spot-check aggregated 2012 general presidential results against known totals"""
    election_id = 'vt-2012-11-06-general'
    expected = [
        ('president', 'barack-obama', 199053),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2014_house_general():
    """Spot-check aggregated 2014 general U.S. House results against known totals"""
    election_id = 'vt-2014-11-04-general'
    expected = [
        ('us-house-of-representatives', 'peter-welch', 123349),
        ('us-house-of-representatives', 'mark-donka', 59432),
        ('us-house-of-representatives', 'cris-ericson', 2750),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2014_house_primary():
    """Spot-check aggregated 2014 U.S. House primary results against known totals"""
    election_id = 'vt-2014-08-26-primary'
    expected = [
        ('us-house-of-representatives-d', 'peter-welch', 19248),
        ('us-house-of-representatives-d', 'writeins', 224),
        ('us-house-of-representatives-r', 'mark-donka', 4340),
        ('us-house-of-representatives-r', 'donald-russell', 4026),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2002_lt_gov_general():
    """Spot-check aggregated 2002 general lt. governor results against known totals"""
    election_id = 'vt-2002-11-05-general'
    expected = [
        ('lieutenant-governor', 'peter-shumlin', 73501),
        ('lieutenant-governor', 'brian-e-dubie', 94044),
        ('lieutenant-governor', 'anthony-pollina', 56564),
        ('lieutenant-governor', 'sally-ann-jones', 4310),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2002_lt_gov_primary():
    """Spot-check aggregated 2002 lt. governor primary results against known totals"""
    election_id = 'vt-2002-09-10-primary'
    expected = [
        ('lieutenant-governor-d', 'peter-shumlin', 22633),
        ('lieutenant-governor-r', 'brian-e-dubie', 22584),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2004_misc_results_general():
    """Spot-check aggregated 2004 general statewide-office results against known totals"""
    election_id = 'vt-2004-11-02-general'
    expected = [
        ('treasurer', 'jeb-spaulding', 273705),
        ('secretary-of-state', 'deb-markowitz', 270744),
        ('auditor', 'randy-brock', 152848),
        ('auditor', 'elizabeth-m-ready', 122498),
        ('auditor', 'jerry-levy', 17685),
        ('attorney-general', 'william-h-sorrell', 169726),
        # The Vermont website total is wrong; per the VT Sec. of State the
        # real result should be 81,285, so this check stays disabled.
        # ('attorney-general', 'dennis-carver', 90285),
        ('attorney-general', 'susan-a-davis', 14351),
        ('attorney-general', 'james-mark-leas', 8769),
        ('attorney-general', 'karen-kerin', 6357),
        ('attorney-general', 'boots-wardinski', 2944),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2008_state_senate_primary():
    """Spot-check aggregated 2008 state senate primary results against known totals"""
    election_id = 'vt-2008-09-08-primary'
    expected = [
        ('state-senate-orange-d', 'mark-a-macdonald', 557),
        ('state-senate-franklin-r', 'randy-brock', 879),
        ('state-senate-franklin-r', 'willard-rowell', 782),
        ('state-senate-essexorleans-d', 'robert-a-starr', 748),
        ('state-senate-essexorleans-d', 'writeins', 112),
    ]
    for level in ('parish', 'precinct'):
        _validate_many_candidate_votes(election_id, level, expected)
def validate_results_2010_state_senate_general():
    """Sum some county-level results for 2010 state senate general and compare with known totals"""
    expected = [
        ('state-senate-orange', 'mark-a-macdonald', 4524),
        ('state-senate-orange', 'stephen-w-webster', 3517),
        ('state-senate-franklin', 'randy-brock', 9014),
        ('state-senate-franklin', 'peter-d-moss', 793),
        ('state-senate-essexorleans', 'robert-a-starr', 9902),
        ('state-senate-essexorleans', 'vincent-illuzzi', 9231),
    ]
    # The same known totals must hold at both reporting levels.
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2010-11-02-general', reporting_level, expected)
def validate_results_2012_state_house_primary():
    """Sum some county-level results for 2012 state house primary and compare with known totals"""
    expected = [
        ('house-of-representatives-addison-5-d', 'edward-v-mcguire', 220),
        ('house-of-representatives-addison-5-r', 'harvey-smith', 75),
        ('house-of-representatives-addison-1-d', 'betty-a-nuovo', 486),
        ('house-of-representatives-addison-1-d', 'paul-ralston', 446),
        ('house-of-representatives-bennington-1-d', 'bill-botzow', 152),
        ('house-of-representatives-caledonia-1-r', 'leigh-b-larocque', 72),
        ('house-of-representatives-chittenden-61-d', 'joanna-cole', 658),
        ('house-of-representatives-chittenden-61-d', 'bill-aswad', 619),
        ('house-of-representatives-chittenden-61-d', 'robert-hooper', 536),
        ('house-of-representatives-chittenden-61-r', 'kurt-wright', 116),
    ]
    # The same known totals must hold at both reporting levels.
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2012-03-06-primary', reporting_level, expected)
def validate_results_2012_state_house_general():
    """Sum some county-level results for 2012 state house general and compare with known totals"""
    expected = [
        ('house-of-representatives-addison-5', 'edward-v-mcguire', 982),
        ('house-of-representatives-addison-5', 'harvey-smith', 1151),
        ('house-of-representatives-addison-1', 'betty-a-nuovo', 2601),
        ('house-of-representatives-addison-1', 'paul-ralston', 2378),
        ('house-of-representatives-bennington-1', 'bill-botzow', 1613),
        ('house-of-representatives-caledonia-1', 'leigh-b-larocque', 1143),
        ('house-of-representatives-chittenden-61', 'joanna-cole', 2008),
        ('house-of-representatives-chittenden-61', 'bill-aswad', 1987),
        ('house-of-representatives-chittenden-61', 'kurt-wright', 2332),
    ]
    # The same known totals must hold at both reporting levels.
    for reporting_level in ('parish', 'precinct'):
        _validate_many_candidate_votes('vt-2012-11-06-general', reporting_level, expected)
|
openelections/openelections-core
|
openelex/us/vt/validate/validates.py
|
Python
|
mit
| 8,951
|
[
"Brian"
] |
b9da8cd685829336345a01002fbb16925983b7f6b61801066ba2ff33a02ed7b6
|
# Copyright (c) 2017 Brian Ginsburg, Julie Rutherford-Fields
# This work is available under the "3-clause ('new') BSD License”.
# Please see the file COPYING in this distribution
# for license terms.
import nltk
from nltk import CFG
from nltk.corpus import stopwords
from nltk.parse.generate import generate
from nltk.tokenize import WhitespaceTokenizer
import random
# # Import email body - for debugging
# with open("bodies/16.txt", "r") as file:
# body_text = file.read()
# A simple grammar for statements to express disbelief
def make_disbelief_grammar():
    """Return a CFG string producing short statements of disbelief.

    The string is suitable for nltk's CFG.fromstring().
    """
    return """
    S -> N | M | L
    N -> 'Are' P
    M -> 'Am' Q
    L -> 'How is' R | 'Is' R
    P -> 'you' V
    Q -> 'I' U
    R -> 'this' V
    V -> 'serious?' | 'real?' | 'for sure?' | 'for certain?'
    U -> 'just lucky?' | 'dreaming?' | 'confused?' | 'special?'
    """
# A simple grammar for statements to express general excitement
def make_exclamation_grammar():
    """Return a CFG string producing exclamations of general excitement.

    One 'Holy <object>!' terminal is chosen at random each call.
    """
    holy_objects = ["'cow!'", "'Moses!'", "'Mary mother of God!'", "'guacamole!'", "'macaroni!'", "'crud!'", "'hand grenade!'"]
    base = """
    S -> N
    N -> 'Wow!' | 'Gracious me!' | 'Holy' M | 'Wowzers!' | 'Ay caramba!' | 'Oh my!' | 'Whoa!'
    M ->"""
    return base + random.choice(holy_objects)
# A simple grammar for statements related to making incoming ot outgoing transactions
def make_transaction_grammar(dollar_words):
    """Return a CFG string producing questions about money transfers.

    *dollar_words* (quoted dollar-amount terminals parsed from the email)
    are merged into the pool of incoming-fund phrases; one incoming and one
    outgoing phrase are chosen at random each call.
    """
    generic_funds = ["'funds'", "'percentage'", "'portion'", "'piece of the pie'", "'entitlement'", "'property'", "'finances'", "'money'", "'wealth'", "'fortune'", "'proceeds'"]
    outgoing_funds = ["'transaction'", "'deposit'", "'investment'", "'ante'", "'advance'"]
    incoming_pick = random.choice(generic_funds + dollar_words)
    outgoing_pick = random.choice(outgoing_funds)
    return """
    S -> N
    N -> 'How' M | 'When' M | 'Where' M
    M -> 'do' L | 'can' L | 'should' L
    L -> 'I' P
    P -> 'collect my' Q | 'get my' Q | 'make the' R | 'perform the' R | 'do this?' | 'proceed?' | 'meet' V
    Q ->""" + incoming_pick + """ '?'
    R ->""" + outgoing_pick + """ '?'
    V -> 'you?'
    """
# A simple grammar for statements related to the amount of money being "offered"
def make_amount_grammar(dollar_words):
    """Return a CFG string commenting on a dollar amount mentioned in the email.

    One terminal from *dollar_words* (quoted dollar-amount tokens) is chosen
    at random each call.

    Returns an empty string when *dollar_words* is empty; callers must skip
    empty grammars before handing them to CFG.fromstring().
    """
    # Guard clause replaces the original `if(len(dollar_words) > 0)` nesting.
    if not dollar_words:
        return ""
    return """
    S -> N
    N -> """ + random.choice(dollar_words) + """'is' M
    M -> Q | 'not' Q
    Q -> 'a little' R | 'a lot of' R | 'enough' R | 'significant' R | 'major' R | 'minor' R
    R -> 'money' T | 'cash' T | 'capital' T | 'riches' T
    T -> 'for' U | '.'
    U -> 'someone like me.' | 'anyone!' | 'one person.'
    """
# Parses the body of an email and returns a list of filtered words (minus stop words)
def parse_body(body_text):
    """Tokenize an email body on whitespace and drop English stop words.

    Punctuation-only tokens are treated as stop words too. Returns the
    surviving tokens in their original order and casing.
    """
    ignored = set(stopwords.words("english"))
    # Single-character punctuation tokens also count as stop words.
    ignored.update('.,"\'?!:;()[]{}')
    tokens = WhitespaceTokenizer().tokenize(body_text)
    return [token for token in tokens if token.lower() not in ignored]
# Parses out dollar amount and percentages from email bodies returns a list of dollar words
def parse_dollar_keywords(filtered_words):
    """Return the dollar-amount tokens from *filtered_words*.

    Each token containing '$' is wrapped in single quotes so it can be used
    directly as a CFG terminal.
    """
    # NOTE(review): the original also collected '%' tokens into percent_words
    # but never used or returned them; that dead code has been removed.
    return ["'" + word + "'" for word in filtered_words if '$' in word]
# Returns a randomized statement from a grammar
def get_statement(grammar):
    """Generate up to 1000 sentences from *grammar* and return one at random,
    joined into a single space-separated string."""
    statements = [sentence for sentence in generate(grammar, n=1000)]
    pick = random.randint(0, len(statements) - 1)
    return ' '.join(statements[pick])
# Combines statements into an email response
def make_response(body_text):
    """Build an email reply from 1..k randomly chosen, non-repeating grammar
    statements, where k is the number of usable grammars.

    The body text is scanned for dollar amounts, which customize the
    transaction and amount grammars.
    """
    # Parse body text and extract dollar-amount keywords.
    filtered_words = parse_body(body_text)
    dollar_words = parse_dollar_keywords(filtered_words)
    grammar_list = [
        make_disbelief_grammar(),
        make_exclamation_grammar(),
        make_transaction_grammar(dollar_words),
        make_amount_grammar(dollar_words),
    ]
    # BUG FIX: make_amount_grammar() returns "" when the email mentions no
    # dollar amounts, and CFG.fromstring("") raises ValueError if that empty
    # grammar is randomly selected below. Drop empty grammars first.
    grammar_list = [g for g in grammar_list if g]
    # Random number of non-repeating statements to emit.
    num_stats = random.randint(1, len(grammar_list))
    response = ""
    last = len(grammar_list) - 1  # renamed from `max`, which shadowed the builtin
    for _ in range(num_stats):
        # Pick a random grammar that has not been used yet.
        j = random.randint(0, last)
        response += get_statement(CFG.fromstring(grammar_list[j])) + " "
        # Swap the used grammar out of the selectable range so it cannot
        # be chosen again.
        grammar_list[j], grammar_list[last] = grammar_list[last], grammar_list[j]
        last -= 1
    return response
|
thuselem/teergrube
|
app/email/grammars.py
|
Python
|
bsd-3-clause
| 5,113
|
[
"Brian"
] |
1e2eb5f01a236ba773b5b61219fcd8c9d6f8d222f73fdcf215bd13862bfb6d30
|
"""
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
from pycsp_import import *
import sys
@process
def source(chan_out):
    """Write ten numbered greeting lines to *chan_out*, then poison the
    channel so readers terminate."""
    count = 0
    while count < 10:
        chan_out("Hello world (%d)\n" % count)
        count += 1
    poison(chan_out)
@process
def sink(chan_in):
    """Echo everything read from *chan_in* to stdout until the channel is
    poisoned (which terminates the loop)."""
    while True:
        message = chan_in()
        sys.stdout.write(message)
# One shared any-to-any channel connects five writer and five reader processes;
# the first writer to finish poisons it, racing the others (hence the example name).
chan = Channel()
Parallel(source(chan.writer()) * 5,
         sink(chan.reader()) * 5)
# NOTE(review): shutdown() presumably tears down the PyCSP runtime after the
# process network terminates — confirm against the pycsp documentation.
shutdown()
|
runefriborg/pycsp
|
examples/TerminationRacePoison.py
|
Python
|
mit
| 548
|
[
"Brian"
] |
37ac823089583b9908e9ac64ecea01c2138a517431f347fb92d34d630baa14a1
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys, os, glob, shutil

# Directory containing this script (and the bundled pymol/ data).
root = os.path.dirname(os.path.realpath(__file__))

# => Driver Code <= #

if __name__ == '__main__':

    # > Working Dirname < #

    # Destination directory: optional single CLI argument, default ".".
    if len(sys.argv) == 1:
        dirname = '.'
    elif len(sys.argv) == 2:
        dirname = sys.argv[1]
    else:
        raise Exception('Usage: fsapt.py [dirname]')

    # > Copy Files < #

    # Copy the bundled *pymol scripts without going through a shell:
    # the original os.system('cp %s/pymol/*pymol %s') broke on paths
    # containing spaces and allowed shell injection via the user-supplied
    # dirname argument.
    for path in glob.glob(os.path.join(root, 'pymol', '*pymol')):
        shutil.copy(path, dirname)
|
kannon92/psi4
|
psi4/share/psi4/fsapt/copy_pymol.py
|
Python
|
gpl-2.0
| 1,373
|
[
"Psi4",
"PyMOL"
] |
83ac4c099cb6633f0272ae11a30633aad7d2b46a1cabf22aaf20ce9ec2e36b30
|
#!/usr/bin/env python
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
'''
Use a converged AGF2 calculation to build the full photoemission (quasiparticle) spectrum
Default AGF2 corresponds to the AGF2(1,0) method outlined in the papers:
- O. J. Backhouse, M. Nusspickel and G. H. Booth, J. Chem. Theory Comput., 16, 1090 (2020).
- O. J. Backhouse and G. H. Booth, J. Chem. Theory Comput., 16, 6294 (2020).
'''
import numpy
from pyscf import gto, scf, agf2

# Water molecule in a cc-pVDZ basis.
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz')

# Tightly converged RHF reference wavefunction for the AGF2 calculation.
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.run()

# Run an AGF2 calculation
gf2 = agf2.AGF2(mf)
gf2.conv_tol = 1e-7
gf2.run()

# Access the GreensFunction object and compute the spectrum
gf = gf2.gf
grid = numpy.linspace(-10.0, 10.0, 1000)  # frequency grid
eta = 0.02  # Lorentzian broadening parameter
spectrum = gf.real_freq_spectrum(grid, eta=eta)
# The array `spectrum` is now a length nfreq array of the
# spectral function -1/pi * Tr[Im[G(\omega + i\eta)]].
# Note that for UHF AGF2 calculations, the individual gf
# elements can be passed to real_freq_spectrum in order
# to obtain the spin-resolved spectra.
# We can also build the self-energy on the real-frequency axis
# by accessing the renormalized auxiliary states:
# Auxiliary (pole) energies shifted relative to the chemical potential,
# and their couplings to the physical orbital space.
e = gf2.se.energy - gf2.se.chempot
v = gf2.se.coupling
# Broadened resolvent denominators; sign(e) places the i*eta shift on the
# causal side for occupied vs. virtual auxiliaries.
denom = grid[:,None] - (e + numpy.sign(e)*eta*1.0j)[None]
# Self-energy on the grid: se[w,x,y] = sum_k v[x,k] * conj(v[y,k]) / denom[w,k]
se = numpy.einsum('xk,yk,wk->wxy', v, v.conj(), 1./denom)
|
sunqm/pyscf
|
examples/agf2/03-photoemission_spectra.py
|
Python
|
apache-2.0
| 1,438
|
[
"PySCF"
] |
ada80af6b50d26f8537e57f0be28edc635542293d9334f33fdc53bbab8b14238
|
from collections import OrderedDict
from marshmallow_jsonapi import Schema, fields
from tornado import gen
from tornado_rest_jsonapi import exceptions
from tornado_rest_jsonapi.data_layers.base import BaseDataLayer
from tornado_rest_jsonapi.resource import ResourceDetails, ResourceList
class WorkingDataLayer(BaseDataLayer):
    """In-memory data layer used as a test double.

    Derived classes must still supply the resource_class. Note that both
    ``collection`` and ``id`` live on the class, so state is shared across
    instances for the duration of the test run.
    """
    collection = OrderedDict()
    id = 0

    @gen.coroutine
    def create_object(self, data, view_kwargs):
        # Assign the next string id; type(self) is used deliberately so a
        # subclass maintains its own counter after the first create.
        new_id = str(type(self).id)
        data["id"] = new_id
        self.collection[new_id] = data
        type(self).id += 1
        return data

    @gen.coroutine
    def get_object(self, kwargs):
        try:
            return self.collection[kwargs.get("id")]
        except KeyError:
            raise exceptions.ObjectNotFound()

    @gen.coroutine
    def update_object(self, obj, data, view_kwargs):
        # In-place merge of the new fields into the stored record.
        obj.update(data)
        return True

    @gen.coroutine
    def delete_object(self, obj, view_kwargs):
        try:
            del self.collection[view_kwargs.get("id")]
        except KeyError:
            raise exceptions.ObjectNotFound()

    @gen.coroutine
    def get_collection(self, qs, view_kwargs):
        # Page the stored values; defaults mirror the original (page 0, 10 items).
        size = qs.pagination.get("size", 10)
        page = qs.pagination.get("number", 0)
        window = list(self.collection.values())[page * size:(page + 1) * size]
        return len(self.collection), window
class StudentSchema(Schema):
    """marshmallow-jsonapi schema for the 'student' resource."""
    class Meta:
        # JSON API resource type and self-link URL templates.
        type_ = "student"
        self_url = '/api/v1/students/{id}/'
        self_url_kwargs = {'id': '<id>'}
        self_url_many = '/api/v1/students/'
    id = fields.Int()
    name = fields.String(required=True)
    age = fields.Int(required=True)
class StudentDetails(ResourceDetails):
    """Single-student resource backed by the in-memory WorkingDataLayer."""
    schema = StudentSchema
    data_layer = {
        "class": WorkingDataLayer
    }
class StudentList(ResourceList):
    """Student collection resource backed by the in-memory WorkingDataLayer."""
    schema = StudentSchema
    data_layer = {
        "class": WorkingDataLayer,
    }
# class Teacher(Schema):
# name = fields.String()
# age = fields.Int(required=False)
# discipline = fields.List(fields.String())
#
#
# class TeacherModelConn(ModelConnector):
# pass
#
#
# class TeacherDetails(ResourceDetails):
# schema = Teacher
# model_connector = TeacherModelConn
#
#
# class TeacherList(ResourceDetails):
# schema = Teacher
# model_connector = TeacherModelConn
#
#
# class Person(Schema):
# name = fields.String()
# age = fields.Int()
#
#
# class City(Schema):
# name = fields.String()
# mayor = fields.Nested(Person())
#
#
# class CityModelConn(WorkingModelConn):
# pass
#
#
# class CityDetails(ResourceDetails):
# schema = City
# model_connector = CityModelConn
#
#
# class ServerInfo(Schema):
# uptime = fields.Int()
# status = fields.String()
#
#
# class ServerInfoModelConn(SingletonModelConn):
# resource_class = ServerInfo
#
#
# class ServerInfoDetails(ResourceSingletonDetails):
# schema = ServerInfo
# model_connector = ServerInfoModelConn
#
#
# class UnsupportAll(Schema):
# pass
#
#
# class UnsupportAllModelConn(ModelConnector):
# pass
#
#
# class UnsupportAllDetails(ResourceDetails):
# schema = UnsupportAll
# model_connector = UnsupportAllModelConn
#
#
# class UnsupportAllList(ResourceList):
# schema = UnsupportAll
# model_connector = UnsupportAllModelConn
#
#
# class Unprocessable(Schema):
# pass
#
#
# class UnprocessableModelConn(ModelConnector):
# @gen.coroutine
# def create_object(self, instance, **kwargs):
# raise exceptions.BadRepresentation("unprocessable", foo="bar")
#
# @gen.coroutine
# def replace_object(self, instance, **kwargs):
# raise exceptions.BadRepresentation("unprocessable", foo="bar")
#
# @gen.coroutine
# def retrieve_object(self, instance, **kwargs):
# raise exceptions.BadRepresentation("unprocessable", foo="bar")
#
# @gen.coroutine
# def retrieve_collection(
# self, items_response, offset=None, limit=None, **kwargs):
# raise exceptions.BadRepresentation("unprocessable", foo="bar")
#
#
# class UnprocessableDetails(ResourceDetails):
# schema = Unprocessable
# model_connector = UnprocessableModelConn
#
#
# class UnprocessableList(ResourceList):
# schema = Unprocessable
# model_connector = UnprocessableModelConn
#
#
# class UnsupportsCollection(Schema):
# pass
#
#
# class UnsupportsCollectionModelConn(ModelConnector):
#
# @gen.coroutine
# def items(self, items_response, offset=None, limit=None, **kwargs):
# raise NotImplementedError()
#
#
# class UnsupportsCollectionList(ResourceList):
# schema = UnsupportsCollection
# model_connector = UnsupportsCollectionModelConn
#
#
# class Broken(Schema):
# pass
#
#
# class BrokenModelConn(ModelConnector):
# @gen.coroutine
# def boom(self, *args):
# raise Exception("Boom!")
#
# create_object = boom
# retrieve_object = boom
# replace_object = boom
# delete_object = boom
# retrieve_collection = boom
#
#
# class BrokenDetails(ResourceDetails):
# schema = Broken
# model_connector = BrokenModelConn
#
#
# class BrokenList(ResourceList):
# schema = Broken
# model_connector = BrokenModelConn
#
#
# class AlreadyPresent(Schema):
# pass
#
#
# class AlreadyPresentModelConn(ModelConnector):
#
# @gen.coroutine
# def create_object(self, *args, **kwargs):
# raise exceptions.Exists()
#
#
# class AlreadyPresentDetails(ResourceDetails):
# schema = AlreadyPresent
# model_connector = AlreadyPresentModelConn
#
#
# class AlreadyPresentList(ResourceList):
# schema = AlreadyPresent
# model_connector = AlreadyPresentModelConn
#
#
# class Sheep(Schema):
# @classmethod
# def collection_name(cls):
# return "sheep"
#
#
# class SheepModelConn(ModelConnector):
# """Sheep plural is the same as singular."""
#
#
# class SheepDetails(ResourceDetails):
# schema = Sheep
# model_connector = SheepModelConn
#
#
# class Octopus(Schema):
# @classmethod
# def collection_name(cls):
# return "octopi"
#
#
# class OctopusModelConn(ModelConnector):
# """Octopus plural is a matter of debate."""
# resource_class = Octopus
#
#
# class OctopusDetails(ResourceDetails):
# schema = Octopus
# model_connector = OctopusModelConn
#
#
# class Frobnicator(Schema):
# pass
#
#
# class FrobnicatorModelConn(ModelConnector):
# """A weird name to test if it's kept"""
#
#
# class FrobnicatorDetails(ResourceDetails):
# schema = Frobnicator
# model_connector = FrobnicatorModelConn
|
force-h2020/tornado-rest-jsonapi
|
tornado_rest_jsonapi/tests/resource_handlers.py
|
Python
|
bsd-3-clause
| 7,040
|
[
"Octopus"
] |
288dcafeee575c788c0374427184ea172560d51c5f7b3ada8e8a40de35e63dd2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.